Standard library imports style check #50116

Merged
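
This PR moves standard-library imports that previously sat inside individual test functions up to module level, matching the import convention used across the pandas test suite. A minimal before/after sketch of the pattern, based on the test_map_decimal change in pandas/tests/apply/test_series_apply.py below (string_series is a pandas test-suite fixture):

# Before: the stdlib import is re-stated inside the test body.
def test_map_decimal(string_series):
    from decimal import Decimal

    result = string_series.map(lambda x: Decimal(str(x)))
    assert isinstance(result[0], Decimal)


# After: the import lives at module level with the other stdlib imports,
# and the test body simply uses it.
from decimal import Decimal


def test_map_decimal(string_series):
    result = string_series.map(lambda x: Decimal(str(x)))
    assert isinstance(result[0], Decimal)
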
6 changes: 2 additions & 4 deletions pandas/tests/apply/test_series_apply.py
@@ -2,6 +2,8 @@
Counter,
defaultdict,
)
from decimal import Decimal
import math

import numpy as np
import pytest
@@ -37,8 +39,6 @@ def test_apply(datetime_series):
tm.assert_series_equal(datetime_series.apply(np.sqrt), np.sqrt(datetime_series))

# element-wise apply
import math

tm.assert_series_equal(datetime_series.apply(math.exp), np.exp(datetime_series))

# empty series
@@ -525,8 +525,6 @@ def test_map_type_inference():


def test_map_decimal(string_series):
from decimal import Decimal

result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)

4 changes: 1 addition & 3 deletions pandas/tests/arrays/test_datetimelike.py
@@ -1,5 +1,6 @@
from __future__ import annotations

import array
import re

import numpy as np
@@ -1346,9 +1347,6 @@ def array_likes(request):
if name == "memoryview":
data = memoryview(arr)
elif name == "array":
# stdlib array
import array

data = array.array("i", arr)
elif name == "dask":
import dask.array

3 changes: 1 addition & 2 deletions pandas/tests/frame/indexing/test_indexing.py
@@ -3,6 +3,7 @@
datetime,
timedelta,
)
from decimal import Decimal
import re

import numpy as np
@@ -467,8 +468,6 @@ def test_setitem_corner2(self):

def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal

# Created as float type
dm = DataFrame(index=range(3), columns=range(3))


5 changes: 2 additions & 3 deletions pandas/tests/frame/methods/test_to_records.py
@@ -1,4 +1,6 @@
from collections import abc
import email
from email.parser import Parser

import numpy as np
import pytest
@@ -58,9 +60,6 @@ def test_to_records_with_multindex(self):
assert "one" not in r

def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser

abc.Mapping.register(email.message.Message)

headers = Parser().parsestr(

19 changes: 5 additions & 14 deletions pandas/tests/frame/test_constructors.py
@@ -1,13 +1,18 @@
import array
from collections import (
OrderedDict,
abc,
defaultdict,
namedtuple,
)
from dataclasses import make_dataclass
from datetime import (
date,
datetime,
timedelta,
)
import functools
import random
import re
from typing import Iterator
import warnings
@@ -466,8 +471,6 @@ def test_constructor_numpy_uints(self, values):
assert result[0][0] == value

def test_constructor_ordereddict(self):
import random

nitems = 100
nums = list(range(nitems))
random.shuffle(nums)
@@ -718,8 +721,6 @@ def test_constructor_subclass_dict(self, dict_subclass):

def test_constructor_defaultdict(self, float_frame):
# try with defaultdict
from collections import defaultdict

data = {}
float_frame.loc[: float_frame.index[10], "B"] = np.nan

@@ -1343,8 +1344,6 @@ def __len__(self) -> int:
def test_constructor_stdlib_array(self):
# GH 4297
# support Array
import array

result = DataFrame({"A": array.array("i", range(10))})
expected = DataFrame({"A": list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
@@ -1544,8 +1543,6 @@ def test_constructor_list_of_tuples(self):

def test_constructor_list_of_namedtuples(self):
# GH11181
from collections import namedtuple

named_tuple = namedtuple("Pandas", list("ab"))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({"a": [1, 2], "b": [3, 4]})
Expand All @@ -1559,8 +1556,6 @@ def test_constructor_list_of_namedtuples(self):

def test_constructor_list_of_dataclasses(self):
# GH21910
from dataclasses import make_dataclass

Point = make_dataclass("Point", [("x", int), ("y", int)])

data = [Point(0, 3), Point(1, 3)]
Expand All @@ -1570,8 +1565,6 @@ def test_constructor_list_of_dataclasses(self):

def test_constructor_list_of_dataclasses_with_varying_types(self):
# GH21910
from dataclasses import make_dataclass

# varying types
Point = make_dataclass("Point", [("x", int), ("y", int)])
HLine = make_dataclass("HLine", [("x0", int), ("x1", int), ("y", int)])
Expand All @@ -1586,8 +1579,6 @@ def test_constructor_list_of_dataclasses_with_varying_types(self):

def test_constructor_list_of_dataclasses_error_thrown(self):
# GH21910
from dataclasses import make_dataclass

Point = make_dataclass("Point", [("x", int), ("y", int)])

# expect TypeError

4 changes: 2 additions & 2 deletions pandas/tests/groupby/test_filters.py
@@ -1,3 +1,5 @@
from string import ascii_lowercase

import numpy as np
import pytest

@@ -192,8 +194,6 @@ def test_filter_against_workaround():
tm.assert_series_equal(new_way.sort_values(), old_way.sort_values())

# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase

letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))

14 changes: 7 additions & 7 deletions pandas/tests/groupby/test_grouping.py
@@ -1,4 +1,10 @@
""" test where we are determining what we are grouping, or getting groups """
"""
test where we are determining what we are grouping, or getting groups
"""
from datetime import (
date,
timedelta,
)

import numpy as np
import pytest
@@ -167,14 +173,8 @@ def test_grouper_index_types(self, index):
df.groupby(list("abcde"), group_keys=False).apply(lambda x: x)

def test_grouper_multilevel_freq(self):

# GH 7885
# with level and freq specified in a Grouper
from datetime import (
date,
timedelta,
)

d0 = date.today() - timedelta(days=14)
dates = date_range(d0, date.today())
date_index = MultiIndex.from_product([dates, dates], names=["foo", "bar"])

20 changes: 14 additions & 6 deletions pandas/tests/groupby/test_timegrouper.py
@@ -1,6 +1,10 @@
""" test with the TimeGrouper / grouping with datetimes """

from datetime import datetime
"""
test with the TimeGrouper / grouping with datetimes
"""
from datetime import (
datetime,
timedelta,
)
from io import StringIO

import numpy as np
@@ -763,8 +767,6 @@ def test_first_last_max_min_on_time_data(self):
# GH 10295
# Verify that NaT is not in the result of max, min, first and last on
# Dataframe with datetime or timedelta values.
from datetime import timedelta as td

df_test = DataFrame(
{
"dt": [
Expand All @@ -774,7 +776,13 @@ def test_first_last_max_min_on_time_data(self):
"2015-07-23 12:12",
np.nan,
],
"td": [np.nan, td(days=1), td(days=2), td(days=3), np.nan],
"td": [
np.nan,
timedelta(days=1),
timedelta(days=2),
timedelta(days=3),
np.nan,
],
}
)
df_test.dt = pd.to_datetime(df_test.dt)

9 changes: 4 additions & 5 deletions pandas/tests/indexes/test_common.py
@@ -3,6 +3,10 @@
any index subclass except for MultiIndex. Makes use of the `index_flat`
fixture defined in pandas/conftest.py.
"""
from copy import (
copy,
deepcopy,
)
import re

import numpy as np
@@ -132,11 +136,6 @@ def test_set_name_methods(self, index_flat):
assert index.names == [name]

def test_copy_and_deepcopy(self, index_flat):
from copy import (
copy,
deepcopy,
)

index = index_flat

for func in (copy, deepcopy):

12 changes: 6 additions & 6 deletions pandas/tests/indexing/multiindex/test_slice.py
@@ -1,3 +1,8 @@
from datetime import (
datetime,
timedelta,
)

import numpy as np
import pytest

@@ -248,12 +253,7 @@ def test_multiindex_slicers_datetimelike(self):

# GH 7429
# buggy/inconsistent behavior when slicing with datetime-like
import datetime

dates = [
datetime.datetime(2012, 1, 1, 12, 12, 12) + datetime.timedelta(days=i)
for i in range(6)
]
dates = [datetime(2012, 1, 1, 12, 12, 12) + timedelta(days=i) for i in range(6)]
freq = [1, 2]
index = MultiIndex.from_product([dates, freq], names=["date", "frequency"])


3 changes: 1 addition & 2 deletions pandas/tests/io/excel/test_readers.py
@@ -5,6 +5,7 @@
from functools import partial
import os
from pathlib import Path
import platform
from urllib.error import URLError
from zipfile import BadZipFile

@@ -897,8 +898,6 @@ def test_read_from_file_url(self, read_ext, datapath):
url_table = pd.read_excel("file://localhost/" + localtable)
except URLError:
# fails on some systems
import platform

platform_info = " ".join(platform.uname()).strip()
pytest.skip(f"failing on {platform_info}")


4 changes: 2 additions & 2 deletions pandas/tests/io/formats/test_printing.py
@@ -1,3 +1,5 @@
import string

import numpy as np
import pytest

Expand All @@ -19,8 +21,6 @@ def test_adjoin():


def test_repr_binary_type():
import string

letters = string.ascii_letters
try:
raw = bytes(letters, encoding=cf.get_option("display.encoding"))

3 changes: 1 addition & 2 deletions pandas/tests/io/formats/test_to_csv.py
@@ -3,6 +3,7 @@
import sys
from zipfile import ZipFile

from _csv import Error
import numpy as np
import pytest

@@ -94,8 +95,6 @@ def test_to_csv_doublequote(self):
with open(path) as f:
assert f.read() == expected

from _csv import Error

with tm.ensure_clean("test.csv") as path:
with pytest.raises(Error, match="escapechar"):
df.to_csv(path, doublequote=False) # no escapechar set

3 changes: 1 addition & 2 deletions pandas/tests/io/json/test_pandas.py
@@ -5,6 +5,7 @@
import json
import os
import sys
import time

import numpy as np
import pytest
@@ -1730,8 +1731,6 @@ def test_json_multiindex(self, dataframe, expected):

@pytest.mark.single_cpu
def test_to_s3(self, s3_resource, s3so):
import time

# GH 28375
mock_bucket_name, target_file = "pandas-test", "test.json"
df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})

4 changes: 1 addition & 3 deletions pandas/tests/io/parser/test_c_parser_only.py
@@ -4,7 +4,7 @@
these tests out of this module as soon as the Python parser can accept
further arguments when parsing.
"""

from decimal import Decimal
from io import (
BytesIO,
StringIO,
@@ -169,8 +166,6 @@ def test_unsupported_dtype(c_parser_only, match, kwargs):
@td.skip_if_32bit
@pytest.mark.slow
def test_precise_conversion(c_parser_only):
from decimal import Decimal

parser = c_parser_only

normal_errors = []

7 changes: 4 additions & 3 deletions pandas/tests/io/parser/test_encoding.py
@@ -2,7 +2,10 @@
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import BytesIO
from io import (
BytesIO,
TextIOWrapper,
)
import os
import tempfile
import uuid
@@ -59,8 +62,6 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding):
utf8 = "utf-8"

with tm.ensure_clean(path) as path:
from io import TextIOWrapper

bytes_data = data.encode(encoding)

with open(path, "wb") as f: