Skip to content

ENH: replace uses of np.isscalar with pd.lib.isscalar #12459

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion pandas/computation/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import pandas as pd
from pandas.compat import PY3, string_types, text_type
import pandas.core.common as com
import pandas.lib as lib
from pandas.core.base import StringMixin
from pandas.computation.common import _ensure_decoded, _result_type_many
from pandas.computation.scope import _DEFAULT_GLOBALS
Expand Down Expand Up @@ -98,7 +99,7 @@ def update(self, value):

@property
def isscalar(self):
return np.isscalar(self._value)
return lib.isscalar(self._value)

@property
def type(self):
Expand Down
17 changes: 9 additions & 8 deletions pandas/computation/tests/test_eval.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@

import pandas.computation.expr as expr
import pandas.util.testing as tm
import pandas.lib as lib
from pandas.util.testing import (assert_frame_equal, randbool,
assertRaisesRegexp, assert_numpy_array_equal,
assert_produces_warning, assert_series_equal)
Expand Down Expand Up @@ -196,7 +197,7 @@ def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
ex = '(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)'.format(cmp1=cmp1,
binop=binop,
cmp2=cmp2)
scalar_with_in_notin = (np.isscalar(rhs) and (cmp1 in skip_these or
scalar_with_in_notin = (lib.isscalar(rhs) and (cmp1 in skip_these or
cmp2 in skip_these))
if scalar_with_in_notin:
with tm.assertRaises(TypeError):
Expand Down Expand Up @@ -327,7 +328,7 @@ def check_pow(self, lhs, arith1, rhs):
expected = self.get_expected_pow_result(lhs, rhs)
result = pd.eval(ex, engine=self.engine, parser=self.parser)

if (np.isscalar(lhs) and np.isscalar(rhs) and
if (lib.isscalar(lhs) and lib.isscalar(rhs) and
_is_py3_complex_incompat(result, expected)):
self.assertRaises(AssertionError, tm.assert_numpy_array_equal,
result, expected)
Expand Down Expand Up @@ -360,16 +361,16 @@ def check_compound_invert_op(self, lhs, cmp1, rhs):
skip_these = 'in', 'not in'
ex = '~(lhs {0} rhs)'.format(cmp1)

if np.isscalar(rhs) and cmp1 in skip_these:
if lib.isscalar(rhs) and cmp1 in skip_these:
self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
parser=self.parser, local_dict={'lhs': lhs,
'rhs': rhs})
else:
# compound
if np.isscalar(lhs) and np.isscalar(rhs):
if lib.isscalar(lhs) and lib.isscalar(rhs):
lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs))
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
if np.isscalar(expected):
if lib.isscalar(expected):
expected = not expected
else:
expected = ~expected
Expand Down Expand Up @@ -639,17 +640,17 @@ def test_identical(self):
x = 1
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, 1)
self.assertTrue(np.isscalar(result))
self.assertTrue(lib.isscalar(result))

x = 1.5
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, 1.5)
self.assertTrue(np.isscalar(result))
self.assertTrue(lib.isscalar(result))

x = False
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, False)
self.assertTrue(np.isscalar(result))
self.assertTrue(lib.isscalar(result))

x = np.array([1])
result = pd.eval('x', engine=self.engine, parser=self.parser)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/algorithms.py
Original file line number Diff line number Diff line change
Expand Up @@ -468,7 +468,7 @@ def _get_score(at):

return score

if np.isscalar(q):
if lib.isscalar(q):
return _get_score(q)
else:
q = np.asarray(q, np.float64)
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/categorical.py
Original file line number Diff line number Diff line change
Expand Up @@ -1901,7 +1901,7 @@ def _convert_to_list_like(list_like):
if (is_sequence(list_like) or isinstance(list_like, tuple) or
isinstance(list_like, types.GeneratorType)):
return list(list_like)
elif np.isscalar(list_like):
elif lib.isscalar(list_like):
return [list_like]
else:
# is this reached?
Expand Down
10 changes: 5 additions & 5 deletions pandas/core/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -333,7 +333,7 @@ def notnull(obj):
pandas.isnull : boolean inverse of pandas.notnull
"""
res = isnull(obj)
if np.isscalar(res):
if lib.isscalar(res):
return not res
return ~res

Expand All @@ -343,7 +343,7 @@ def is_null_datelike_scalar(other):
but guard against passing a non-scalar """
if other is pd.NaT or other is None:
return True
elif np.isscalar(other):
elif lib.isscalar(other):

# a timedelta
if hasattr(other, 'dtype'):
Expand Down Expand Up @@ -489,7 +489,7 @@ def mask_missing(arr, values_to_mask):

# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if np.isscalar(mask):
if lib.isscalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:

Expand Down Expand Up @@ -1276,7 +1276,7 @@ def changeit():

# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (np.isscalar(other) or
if (lib.isscalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isnull(other):
return changeit()
Expand Down Expand Up @@ -1336,7 +1336,7 @@ def _possibly_downcast_to_dtype(result, dtype):
or could be an astype of float64->float32
"""

if np.isscalar(result):
if lib.isscalar(result):
return result

def trans(x):
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/generic.py
Original file line number Diff line number Diff line change
Expand Up @@ -898,7 +898,7 @@ def bool(self):
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif np.isscalar(v):
elif lib.isscalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))

Expand Down Expand Up @@ -1750,10 +1750,10 @@ def xs(self, key, axis=0, level=None, copy=None, drop_level=True):
else:
return self.take(loc, axis=axis, convert=True)

if not np.isscalar(loc):
if not lib.isscalar(loc):
new_index = self.index[loc]

if np.isscalar(loc):
if lib.isscalar(loc):
from pandas import Series
new_values = self._data.fast_xs(loc)

Expand Down
5 changes: 3 additions & 2 deletions pandas/core/groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -712,7 +712,7 @@ def _try_cast(self, result, obj):
else:
dtype = obj.dtype

if not np.isscalar(result):
if not lib.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)

return result
Expand Down Expand Up @@ -2384,7 +2384,8 @@ def is_in_obj(gpr):


def _is_label_like(val):
    """Return True if *val* can serve as an axis label.

    A label is either a string or any non-None scalar (lib.isscalar,
    unlike np.isscalar, treats None as a scalar, hence the explicit
    None guard).
    """
    # PEP 8: use ``val is not None`` (not ``not val is None``) and
    # parentheses instead of a backslash line continuation.
    return (isinstance(val, compat.string_types) or
            (val is not None and lib.isscalar(val)))


def _convert_grouper(axis, grouper):
Expand Down
9 changes: 5 additions & 4 deletions pandas/core/indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from pandas.compat import range, zip
import pandas.compat as compat
import pandas.core.common as com
import pandas.lib as lib
from pandas.core.common import (is_bool_indexer, is_integer_dtype,
_asarray_tuplesafe, is_list_like, isnull,
is_null_slice, is_full_slice, ABCSeries,
Expand Down Expand Up @@ -67,7 +68,7 @@ def __getitem__(self, key):
if type(key) is tuple:
try:
values = self.obj.get_value(*key)
if np.isscalar(values):
if lib.isscalar(values):
return values
except Exception:
pass
Expand Down Expand Up @@ -677,7 +678,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False):

return ser

elif np.isscalar(indexer):
elif lib.isscalar(indexer):
ax = self.obj._get_axis(1)

if ser.index.equals(ax):
Expand Down Expand Up @@ -753,7 +754,7 @@ def _align_frame(self, indexer, df):
val = df.reindex(index=ax)._values
return val

elif np.isscalar(indexer) and is_panel:
elif lib.isscalar(indexer) and is_panel:
idx = self.obj.axes[1]
cols = self.obj.axes[2]

Expand Down Expand Up @@ -960,7 +961,7 @@ def _getitem_nested_tuple(self, tup):
axis += 1

# if we have a scalar, we are done
if np.isscalar(obj) or not hasattr(obj, 'ndim'):
if lib.isscalar(obj) or not hasattr(obj, 'ndim'):
break

# has the dim of the obj changed?
Expand Down
6 changes: 3 additions & 3 deletions pandas/core/internals.py
Original file line number Diff line number Diff line change
Expand Up @@ -665,7 +665,7 @@ def _is_scalar_indexer(indexer):
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([np.isscalar(idx) for idx in indexer])
return all([lib.isscalar(idx) for idx in indexer])
return False

def _is_empty_indexer(indexer):
Expand Down Expand Up @@ -702,7 +702,7 @@ def _is_empty_indexer(indexer):
values[indexer] = value

# coerce and try to infer the dtypes of the result
if np.isscalar(value):
if lib.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
Expand Down Expand Up @@ -3209,7 +3209,7 @@ def get(self, item, fastpath=True):
indexer = np.arange(len(self.items))[isnull(self.items)]

# allow a single nan location indexer
if not np.isscalar(indexer):
if not lib.isscalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/nanops.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,7 @@ def _get_counts_nanvar(mask, axis, ddof, dtype=float):
d = count - dtype.type(ddof)

# always return NaN, never inf
if np.isscalar(count):
if lib.isscalar(count):
if count <= ddof:
count = np.nan
d = np.nan
Expand Down Expand Up @@ -623,7 +623,7 @@ def _get_counts(mask, axis, dtype=float):
return dtype.type(mask.size - mask.sum())

count = mask.shape[axis] - mask.sum(axis)
if np.isscalar(count):
if lib.isscalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
Expand Down
4 changes: 2 additions & 2 deletions pandas/core/panel.py
Original file line number Diff line number Diff line change
Expand Up @@ -576,7 +576,7 @@ def __setitem__(self, key, value):
'object was {1}'.format(
shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif np.isscalar(value):
elif lib.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
Expand Down Expand Up @@ -703,7 +703,7 @@ def _combine(self, other, func, axis=0):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif np.isscalar(other):
elif lib.isscalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/series.py
Original file line number Diff line number Diff line change
Expand Up @@ -559,7 +559,7 @@ def __getitem__(self, key):
try:
result = self.index.get_value(self, key)

if not np.isscalar(result):
if not lib.isscalar(result):
if is_list_like(result) and not isinstance(result, Series):

# we need to box if we have a non-unique index here
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/strings.py
Original file line number Diff line number Diff line change
Expand Up @@ -331,7 +331,7 @@ def str_repeat(arr, repeats):
-------
repeated : Series/Index of objects
"""
if np.isscalar(repeats):
if lib.isscalar(repeats):

def rep(x):
try:
Expand Down
10 changes: 5 additions & 5 deletions pandas/indexes/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None,
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or np.isscalar(data):
elif data is None or lib.isscalar(data):
cls._scalar_data_error(data)
else:
if (tupleize_cols and isinstance(data, list) and data and
Expand Down Expand Up @@ -486,7 +486,7 @@ def _coerce_to_ndarray(cls, data):
"""

if not isinstance(data, (np.ndarray, Index)):
if data is None or np.isscalar(data):
if data is None or lib.isscalar(data):
cls._scalar_data_error(data)

# other iterable of some kind
Expand Down Expand Up @@ -1269,7 +1269,7 @@ def __getitem__(self, key):
getitem = self._data.__getitem__
promote = self._shallow_copy

if np.isscalar(key):
if lib.isscalar(key):
return getitem(key)

if isinstance(key, slice):
Expand All @@ -1282,7 +1282,7 @@ def __getitem__(self, key):

key = _values_from_object(key)
result = getitem(key)
if not np.isscalar(result):
if not lib.isscalar(result):
return promote(result)
else:
return result
Expand Down Expand Up @@ -1941,7 +1941,7 @@ def get_value(self, series, key):
raise e1
except TypeError:
# python 3
if np.isscalar(key): # pragma: no cover
if lib.isscalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)

Expand Down
2 changes: 1 addition & 1 deletion pandas/indexes/multi.py
Original file line number Diff line number Diff line change
Expand Up @@ -978,7 +978,7 @@ def __setstate__(self, state):
self._reset_identity()

def __getitem__(self, key):
if np.isscalar(key):
if lib.isscalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
Expand Down
4 changes: 2 additions & 2 deletions pandas/indexes/numeric.py
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ def _format_native_types(self, na_rep='', float_format=None, decimal='.',

def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not np.isscalar(key):
if not lib.isscalar(key):
raise InvalidIndexError

from pandas.core.indexing import maybe_droplevels
Expand All @@ -305,7 +305,7 @@ def get_value(self, series, key):
loc = self.get_loc(k)
new_values = com._values_from_object(series)[loc]

if np.isscalar(new_values) or new_values is None:
if lib.isscalar(new_values) or new_values is None:
return new_values

new_index = self[loc]
Expand Down
Loading