CLN: Unused Variables #21974

Closed · wants to merge 1 commit into from
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/groupby.py
@@ -142,7 +142,7 @@ def time_frame_nth(self, dtype):
def time_series_nth_any(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0, dropna='any')

def time_groupby_nth_all(self, dtype):
def time_series_nth_all(self, dtype):
self.df['values'].groupby(self.df['key']).nth(0, dropna='all')

def time_series_nth(self, dtype):
2 changes: 1 addition & 1 deletion pandas/_libs/internals.pyx
@@ -390,7 +390,7 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True):
start = 0
cur_blkno = blknos[start]

if group == False:
if not group:
Review comment (Member): `group` is false.

for i in range(1, n):
if blknos[i] != cur_blkno:
yield cur_blkno, slice(start, i)
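Since `group` is declared `bint` in the signature above, the two spellings agree; a minimal plain-Python sketch (not the Cython module itself) of why the truthiness form is preferred:

```python
# For a boolean flag, "not group" and "group == False" give the same answer;
# the truthiness form is the idiomatic, lint-clean spelling.
for group in (True, False):
    assert (not group) == (group == False)  # noqa: E712 -- explicit comparison kept for illustration
```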
4 changes: 2 additions & 2 deletions pandas/_libs/ops.pyx
@@ -260,8 +260,8 @@ def maybe_convert_bool(ndarray[object] arr,
result = np.empty(n, dtype=np.uint8)

# the defaults
true_vals = set(('True', 'TRUE', 'true'))
false_vals = set(('False', 'FALSE', 'false'))
true_vals = {'True', 'TRUE', 'true'}
false_vals = {'False', 'FALSE', 'false'}

if true_values is not None:
true_vals = true_vals | set(true_values)
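This is the pattern repeated throughout the PR: `set((...))` first builds a tuple and then a set from it, whereas a set literal states the same constant directly. A small sketch of the equivalence:

```python
# The set literal and the set(tuple) spelling produce equal sets; the literal
# just skips the intermediate tuple.
true_vals = {'True', 'TRUE', 'true'}
false_vals = {'False', 'FALSE', 'false'}
assert true_vals == set(('True', 'TRUE', 'true'))
assert false_vals == set(('False', 'FALSE', 'false'))
```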
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/frequencies.pyx
@@ -126,7 +126,7 @@ _lite_rule_alias = {
'us': 'U',
'ns': 'N'}

_dont_uppercase = set(('MS', 'ms'))
_dont_uppercase = {'MS', 'ms'}

# ----------------------------------------------------------------------

2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/nattype.pyx
@@ -23,7 +23,7 @@ from util cimport (get_nat,

# ----------------------------------------------------------------------
# Constants
nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'])
nat_strings = {'NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'}

cdef int64_t NPY_NAT = get_nat()
iNaT = NPY_NAT # python-visible constant
4 changes: 2 additions & 2 deletions pandas/_libs/tslibs/offsets.pyx
@@ -252,12 +252,12 @@ def _validate_business_time(t_input):
# ---------------------------------------------------------------------
# Constructor Helpers

relativedelta_kwds = set([
relativedelta_kwds = {
'years', 'months', 'weeks', 'days',
'year', 'month', 'week', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond',
'nanosecond', 'nanoseconds',
'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'])
'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'}


def _determine_offset(kwds):
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/period.pyx
@@ -1965,6 +1965,6 @@ def _validate_end_alias(how):
'START': 'S', 'FINISH': 'E',
'BEGIN': 'S', 'END': 'E'}
how = how_dict.get(str(how).upper())
if how not in set(['S', 'E']):
if how not in {'S', 'E'}:
raise ValueError('How must be one of S or E')
return how
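A standalone sketch of the validation above, showing how the dict lookup plus the set-membership test treats valid and invalid aliases:

```python
# Plain-Python sketch of _validate_end_alias: unknown aliases come back as
# None from dict.get and are then rejected by the membership test.
how_dict = {'S': 'S', 'E': 'E',
            'START': 'S', 'FINISH': 'E',
            'BEGIN': 'S', 'END': 'E'}

def validate_end_alias(how):
    how = how_dict.get(str(how).upper())
    if how not in {'S', 'E'}:
        raise ValueError('How must be one of S or E')
    return how

assert validate_end_alias('begin') == 'S'
# validate_end_alias('middle') raises ValueError
```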
3 changes: 0 additions & 3 deletions pandas/compat/pickle_compat.py
@@ -15,9 +15,6 @@ def load_reduce(self):
args = stack.pop()
func = stack[-1]

if len(args) and type(args[0]) is type:
n = args[0].__name__ # noqa
Review comment (Member): Do we know what this is for? “noqa” suggests the author knew flake8 would complain.


try:
stack[-1] = func(*args)
return
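On the reviewer's question: a bare `# noqa` only silences flake8 on that line, and the report it was hiding here is F841 (local variable assigned but never used), which is exactly the kind of line this PR deletes. A small illustration with a hypothetical function, not pandas code:

```python
# F841 is flake8's "local variable is assigned to but never used" report.
# "# noqa" hides it; removing the dead assignment fixes it.
def demo():
    unused = 42            # flake8 would flag this line as F841
    silenced = 42  # noqa  # same dead store, report suppressed instead of removed
    return None
```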
1 change: 0 additions & 1 deletion pandas/core/arrays/categorical.py
@@ -347,7 +347,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None,
" or `ordered`.")

categories = dtype.categories
ordered = dtype.ordered

elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
4 changes: 1 addition & 3 deletions pandas/core/arrays/interval.py
@@ -25,7 +25,7 @@

from . import ExtensionArray, Categorical

_VALID_CLOSED = set(['left', 'right', 'both', 'neither'])
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_interval_shared_docs = {}
_shared_docs_kwargs = dict(
klass='IntervalArray',
@@ -401,7 +401,6 @@ def from_tuples(cls, data, closed='right', copy=False, dtype=None):
msg = ('{name}.from_tuples received an invalid '
'item, {tpl}').format(name=name, tpl=d)
raise TypeError(msg)
lhs, rhs = d
left.append(lhs)
right.append(rhs)

@@ -815,7 +814,6 @@ def _format_data(self):
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
head = []
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))

6 changes: 3 additions & 3 deletions pandas/core/computation/expressions.py
@@ -24,8 +24,8 @@

# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
'where': set(['int64', 'float64', 'bool'])
'evaluate': {'int64', 'int32', 'float64', 'float32', 'bool'},
'where': {'int64', 'float64', 'bool'}
}

# the minimum prod shape that we will use numexpr
@@ -81,7 +81,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check):
return False
dtypes |= set(s.index)
elif isinstance(o, np.ndarray):
dtypes |= set([o.dtype.name])
dtypes |= {o.dtype.name}

# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
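For context, the gate a few lines below relies on the set superset operator; a short sketch of how the `>=` test decides whether every collected dtype is whitelisted (set semantics only, not the full dtype-collection logic):

```python
# Evaluation via numexpr is allowed only when every observed dtype name is in
# the whitelist for the operation, i.e. the whitelist is a superset.
allowed = {'int64', 'int32', 'float64', 'float32', 'bool'}   # the 'evaluate' whitelist above

assert allowed >= {'int64', 'bool'}            # all dtypes known -> numexpr eligible
assert not (allowed >= {'float64', 'object'})  # unknown dtype -> fall back to numpy
```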
6 changes: 3 additions & 3 deletions pandas/core/dtypes/concat.py
@@ -188,8 +188,8 @@ def is_nonempty(x):
typs = get_dtype_kinds(to_concat)
if len(typs) != 1:

if (not len(typs - set(['i', 'u', 'f'])) or
not len(typs - set(['bool', 'i', 'u']))):
if (not len(typs - {'i', 'u', 'f'}) or
not len(typs - {'bool', 'i', 'u'})):
# let numpy coerce
pass
else:
@@ -599,7 +599,7 @@ def convert_sparse(x, axis):
to_concat = [convert_sparse(x, axis) for x in to_concat]
result = np.concatenate(to_concat, axis=axis)

if not len(typs - set(['sparse', 'f', 'i'])):
if not len(typs - {'sparse', 'f', 'i'}):
# sparsify if inputs are sparse and dense numerics
# first sparse input's fill_value and SparseIndex is used
result = SparseArray(result.ravel(), fill_value=fill_values[0],
1 change: 0 additions & 1 deletion pandas/core/dtypes/dtypes.py
@@ -305,7 +305,6 @@ def _hash_categories(categories, ordered=True):
# everything to a str first, which means we treat
# {'1', '2'} the same as {'1', 2}
# find a better solution
cat_array = np.array([hash(x) for x in categories])
hashed = hash((tuple(categories), ordered))
return hashed
cat_array = hash_array(np.asarray(categories), categorize=False)
5 changes: 0 additions & 5 deletions pandas/core/generic.py
@@ -1022,9 +1022,6 @@ def rename(self, *args, **kwargs):
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
axis = kwargs.pop('axis', None)
Review comment (Member): I think part of the point of this is to empty out the kwargs dict, no?

if axis is not None:
axis = self._get_axis_number(axis)

if kwargs:
raise TypeError('rename() got an unexpected keyword '
@@ -5206,8 +5203,6 @@ def __copy__(self, deep=True):
return self.copy(deep=deep)

def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)

def _convert(self, datetime=False, numeric=False, timedelta=False,
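The reviewer's point is visible in the surrounding lines: `rename` pops every keyword it understands out of `kwargs` and then treats anything left over as an error, so even an otherwise-unused `kwargs.pop('axis', None)` has a side effect. A minimal sketch of that pattern (a plain function, not the pandas method):

```python
# Dropping the kwargs.pop('axis', ...) line would make rename(..., axis=0)
# fall through to the "unexpected keyword" error below.
def rename(mapper=None, **kwargs):
    copy = kwargs.pop('copy', True)
    inplace = kwargs.pop('inplace', False)
    level = kwargs.pop('level', None)
    axis = kwargs.pop('axis', None)   # drains 'axis' even if the value goes unused
    if kwargs:
        raise TypeError('rename() got an unexpected keyword '
                        'argument "{0}"'.format(list(kwargs)[0]))
    return mapper, copy, inplace, level, axis

rename(str.upper, axis=0)   # works only because 'axis' was popped out of kwargs
```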
1 change: 0 additions & 1 deletion pandas/core/groupby/generic.py
@@ -134,7 +134,6 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
result = s.aggregate(lambda x: alt(x, axis=self.axis))
newb = result._data.blocks[0]

finally:

2 changes: 1 addition & 1 deletion pandas/core/groupby/grouper.py
@@ -481,7 +481,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
return grouper, {key.key}, obj

# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
2 changes: 1 addition & 1 deletion pandas/core/groupby/ops.py
@@ -387,6 +387,7 @@ def get_func(fname):

# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
# TODO: Should dtype_str below be replaced with dt?
f = getattr(libgroupby, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
@@ -582,7 +583,6 @@ def _transform(self, result, values, comp_ids, transform_func,
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):

chunk = chunk.squeeze()
transform_func(result[:, :, i], values,
comp_ids, is_datetimelike, **kwargs)
else:
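The new TODO flags a suspected slip: the loop variable `dt` is never used in the lookup, so the `'object'` fallback can never kick in. A hedged sketch of the intended pattern, with illustrative names rather than the libgroupby API:

```python
# With dt in the format string, a missing dtype-specific function falls back
# to the 'object' variant; with dtype_str, both iterations look up the same
# (missing) name.
def get_func(fname, dtype_str, namespace):
    for dt in (dtype_str, 'object'):
        f = namespace.get('{0}_{1}'.format(fname, dt))   # dt, not dtype_str
        if f is not None:
            return f
    return None

funcs = {'group_add_object': lambda: 'object fallback'}
assert get_func('group_add', 'float32', funcs)() == 'object fallback'
```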
9 changes: 1 addition & 8 deletions pandas/core/indexes/base.py
@@ -237,7 +237,7 @@ class Index(IndexOpsMixin, PandasObject):

_engine_type = libindex.ObjectEngine

_accessors = set(['str'])
_accessors = {'str'}

str = CachedAccessor("str", StringMethods)

@@ -979,8 +979,6 @@ def __copy__(self, **kwargs):
return self.copy(**kwargs)

def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)

def _validate_names(self, name=None, names=None, deep=False):
@@ -1622,11 +1620,6 @@ def is_int(v):
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
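On the removed `memo` default in `__deepcopy__`: `copy.deepcopy` always supplies a memo dict, and this implementation only delegates to `copy(deep=True)`, so the `if memo is None` guard was dead code. A small sketch of that protocol with an illustrative class:

```python
import copy

# When __deepcopy__ just delegates to the object's own copy(), the memo dict
# that copy.deepcopy() passes in is never consulted, so defaulting it is dead code.
class Box:
    def __init__(self, values):
        self.values = list(values)

    def copy(self, deep=True):
        return Box(self.values)

    def __deepcopy__(self, memo=None):
        return self.copy(deep=True)   # memo intentionally ignored

b = copy.deepcopy(Box([1, 2, 3]))
assert b.values == [1, 2, 3]
```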
1 change: 1 addition & 0 deletions pandas/core/indexes/category.py
@@ -132,6 +132,7 @@ def _create_from_codes(self, codes, categories=None, ordered=None,
ordered = self.ordered
if name is None:
name = self.name
# TODO: ordered above is unused, should ordered be passed here?
cat = Categorical.from_codes(codes, categories=categories,
ordered=self.ordered)
return CategoricalIndex(cat, name=name)
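The added TODO points at a dead store: the local `ordered` resolved above is never used, because the call passes `self.ordered` directly; if the hidden context resolves a caller-supplied `ordered`, that value is silently dropped. A sketch of that pattern (illustrative class, not the pandas signature):

```python
# A local is resolved from the argument, but the call below uses the attribute
# directly, so the caller's value never takes effect.
class C:
    ordered = False

    def _create(self, ordered=None):
        if ordered is None:
            ordered = self.ordered          # resolved here ...
        return {'ordered': self.ordered}    # ... but the attribute is passed anyway

assert C()._create(ordered=True)['ordered'] is False   # caller's True is lost
```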
3 changes: 1 addition & 2 deletions pandas/core/indexes/interval.py
@@ -44,7 +44,7 @@
from pandas.core.arrays.interval import (IntervalArray,
_interval_shared_docs)

_VALID_CLOSED = set(['left', 'right', 'both', 'neither'])
_VALID_CLOSED = {'left', 'right', 'both', 'neither'}
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='IntervalIndex',
@@ -939,7 +939,6 @@ def _format_data(self, name=None):
summary = '[{head} ... {tail}]'.format(
head=', '.join(head), tail=', '.join(tail))
else:
head = []
tail = [formatter(x) for x in self]
summary = '[{tail}]'.format(tail=', '.join(tail))

1 change: 0 additions & 1 deletion pandas/core/indexes/period.py
@@ -315,7 +315,6 @@ def __contains__(self, key):
return True
except Exception:
return False
return False

contains = __contains__

6 changes: 0 additions & 6 deletions pandas/core/indexing.py
@@ -789,9 +789,6 @@ def _align_frame(self, indexer, df):
if isinstance(indexer, tuple):

aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
# TODO: single_aligner is not used
single_aligner = sum_aligners == 1 # noqa

idx, cols = None, None
sindexers = []
@@ -865,9 +862,6 @@ def _align_frame(self, indexer, df):
raise ValueError('Incompatible indexer with DataFrame')

def _align_panel(self, indexer, df):
# TODO: is_frame, is_panel are unused
is_frame = self.obj.ndim == 2 # noqa
is_panel = self.obj.ndim >= 3 # noqa
raise NotImplementedError("cannot set using an indexer with a Panel "
"yet!")

4 changes: 0 additions & 4 deletions pandas/core/internals.py
@@ -1255,7 +1255,6 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
values = self.get_values()

if fill_tuple is None:
fill_value = self.fill_value
new_values = algos.take_nd(values, indexer, axis=axis,
allow_fill=False)
else:
@@ -2708,7 +2707,6 @@ def _try_coerce_args(self, values, other):

values_mask = isna(values)
values = values.view('i8')
other_mask = False

if isinstance(other, bool):
raise TypeError
@@ -2881,11 +2879,9 @@ def _try_coerce_args(self, values, other):
values_mask = _block_shape(isna(values), ndim=self.ndim)
# asi8 is a view, needs copy
values = _block_shape(values.asi8, ndim=self.ndim)
other_mask = False

if isinstance(other, ABCSeries):
other = self._holder(other)
other_mask = isna(other)

if isinstance(other, bool):
raise TypeError
2 changes: 0 additions & 2 deletions pandas/core/nanops.py
@@ -479,7 +479,6 @@ def nanvar(values, axis=None, skipna=True, ddof=1):

@disallow('M8', 'm8')
def nansem(values, axis=None, skipna=True, ddof=1):
var = nanvar(values, axis, skipna, ddof=ddof)

mask = isna(values)
if not is_float_dtype(values.dtype):
@@ -635,7 +634,6 @@ def nankurt(values, axis=None, skipna=True):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2**2
result = numer / denom - adj

# floating point error
#
3 changes: 0 additions & 3 deletions pandas/core/ops.py
@@ -1743,9 +1743,6 @@ def na_op(x, y):

@Appender('Wrapper for comparison method {name}'.format(name=op_name))
def f(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
axis = self._get_axis_number(axis)

if isinstance(other, self._constructor):
return self._compare_constructor(other, na_op, try_cast=False)
2 changes: 1 addition & 1 deletion pandas/core/panel.py
@@ -716,7 +716,7 @@ def dropna(self, axis=0, how='any', inplace=False):
values = self.values
mask = notna(values)

for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})):
mask = mask.sum(ax)

per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
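The cleaned-up line keeps the same reduction order: every axis except the one being dropped, walked from highest to lowest so each `mask.sum(ax)` leaves the remaining, lower axis numbers valid. A quick sketch of just the axis bookkeeping for a 3-D container:

```python
# Drop along axis 0: reduce the mask over the remaining axes, highest first.
axis = 0
remaining = reversed(sorted(set(range(3)) - {axis}))
assert list(remaining) == [2, 1]
```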
2 changes: 1 addition & 1 deletion pandas/core/resample.py
@@ -1199,7 +1199,7 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean',

freq = to_offset(freq)

end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'}
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
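The membership check that follows strips an anchor suffix such as `-DEC` before testing the base code; a small sketch of how an anchored rule is classified:

```python
# An anchored rule code like 'Q-DEC' is end-anchored because its base code
# 'Q' is in the end_types set; '5min' is not.
end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'}

def is_end_anchored(rule):
    return (rule in end_types or
            ('-' in rule and rule[:rule.find('-')] in end_types))

assert is_end_anchored('Q-DEC')
assert not is_end_anchored('5min')
```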