PERF: use ndarray.take instead of algos.take #40852

Merged
merged 1 commit on Apr 13, 2021
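
The change itself is mechanical: call sites that passed allow_fill=False to the internal algos.take_nd helper are rewritten to call ndarray.take directly, avoiding the extra dtype/fill-value dispatch that take_nd performs. Below is a minimal micro-benchmark sketch of that trade-off, not taken from the PR; it assumes take_nd is importable from pandas.core.algorithms, a private module whose layout differs between pandas versions.

```python
# Illustrative micro-benchmark, not part of the PR. It assumes take_nd is
# importable from pandas.core.algorithms (a private module; its location
# differs between pandas versions).
import timeit

import numpy as np
from pandas.core import algorithms  # private pandas internals

arr = np.arange(1_000_000, dtype=np.int64)
rng = np.random.default_rng(0)
indexer = rng.integers(0, len(arr), size=len(arr)).astype(np.intp)

# With allow_fill=False the two calls return identical values, so the
# rewrite is behavior-preserving for plain ndarrays.
np.testing.assert_array_equal(
    arr.take(indexer),
    algorithms.take_nd(arr, indexer, allow_fill=False),
)

print("ndarray.take ", timeit.timeit(lambda: arr.take(indexer), number=100))
print("algos.take_nd", timeit.timeit(
    lambda: algorithms.take_nd(arr, indexer, allow_fill=False), number=100,
))
```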
5 changes: 2 additions & 3 deletions pandas/core/groupby/ops.py
@@ -67,7 +67,6 @@
     maybe_fill,
 )

-from pandas.core import algorithms
 from pandas.core.arrays import ExtensionArray
 from pandas.core.base import SelectionMixin
 import pandas.core.common as com
@@ -766,7 +765,7 @@ def _aggregate_series_fast(self, obj: Series, func: F):
         # avoids object / Series creation overhead
         indexer = get_group_index_sorter(group_index, ngroups)
         obj = obj.take(indexer)
-        group_index = algorithms.take_nd(group_index, indexer, allow_fill=False)
+        group_index = group_index.take(indexer)
         grouper = libreduction.SeriesGrouper(obj, func, group_index, ngroups)
         result, counts = grouper.get_result()
         return result, counts
@@ -997,7 +996,7 @@ def __init__(self, data: FrameOrSeries, labels, ngroups: int, axis: int = 0):
     @cache_readonly
     def slabels(self) -> np.ndarray:  # np.ndarray[np.intp]
         # Sorted labels
-        return algorithms.take_nd(self.labels, self._sort_idx, allow_fill=False)
+        return self.labels.take(self._sort_idx)

     @cache_readonly
     def _sort_idx(self) -> np.ndarray:  # np.ndarray[np.intp]
6 changes: 2 additions & 4 deletions pandas/core/indexes/base.py
@@ -2998,7 +2998,7 @@ def _union(self, other: Index, sort):
         missing = algos.unique1d(self.get_indexer_non_unique(other)[1])

         if len(missing) > 0:
-            other_diff = algos.take_nd(rvals, missing, allow_fill=False)
+            other_diff = rvals.take(missing)
             result = concat_compat((lvals, other_diff))
         else:
             # error: Incompatible types in assignment (expression has type
@@ -4237,9 +4237,7 @@ def _get_leaf_sorter(labels: list[np.ndarray]) -> np.ndarray:
             )

         if right_lev_indexer is not None:
-            right_indexer = algos.take_nd(
-                right_lev_indexer, join_index.codes[level], allow_fill=False
-            )
+            right_indexer = right_lev_indexer.take(join_index.codes[level])
         else:
             right_indexer = join_index.codes[level]

8 changes: 2 additions & 6 deletions pandas/core/indexes/multi.py
@@ -3533,14 +3533,10 @@ def equals(self, other: object) -> bool:
             if not np.array_equal(self_mask, other_mask):
                 return False
             self_codes = self_codes[~self_mask]
-            self_values = algos.take_nd(
-                np.asarray(self.levels[i]._values), self_codes, allow_fill=False
-            )
+            self_values = self.levels[i]._values.take(self_codes)

             other_codes = other_codes[~other_mask]
-            other_values = other_values = algos.take_nd(
-                np.asarray(other.levels[i]._values), other_codes, allow_fill=False
-            )
+            other_values = other.levels[i]._values.take(other_codes)

             # since we use NaT both datetime64 and timedelta64 we can have a
             # situation where a level is typed say timedelta64 in self (IOW it
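
A side note on the MultiIndex hunk above: levels[i]._values may be a plain ndarray or an ExtensionArray, and both expose a positional .take, which is what makes dropping the take_nd wrapper safe there. A small illustrative sketch (the two example levels below are made up, not from the PR):

```python
# Both ndarray and ExtensionArray implement .take for positional indexing,
# so MultiIndex level values of either kind accept the same call.
import numpy as np
import pandas as pd

codes = np.array([0, 2, 1], dtype=np.intp)

np_level = np.array(["a", "b", "c"], dtype=object)       # ndarray-backed level
cat_level = pd.array(["a", "b", "c"], dtype="category")  # ExtensionArray-backed level

print(np_level.take(codes))   # ['a' 'c' 'b']
print(cat_level.take(codes))  # ['a', 'c', 'b'] (prints a Categorical)
```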
2 changes: 1 addition & 1 deletion pandas/core/internals/managers.py
@@ -263,7 +263,7 @@ def items(self) -> Index:

     def get_dtypes(self):
         dtypes = np.array([blk.dtype for blk in self.blocks])
-        return algos.take_nd(dtypes, self.blknos, allow_fill=False)
+        return dtypes.take(self.blknos)

     @property
     def arrays(self) -> list[ArrayLike]:
5 changes: 2 additions & 3 deletions pandas/core/sorting.py
@@ -31,7 +31,6 @@
 )
 from pandas.core.dtypes.missing import isna

-from pandas.core import algorithms
 from pandas.core.construction import extract_array

 if TYPE_CHECKING:
@@ -668,10 +667,10 @@ def _reorder_by_uniques(uniques, labels):
     mask = labels < 0

     # move labels to right locations (ie, unsort ascending labels)
-    labels = algorithms.take_nd(reverse_indexer, labels, allow_fill=False)
+    labels = reverse_indexer.take(labels)
     np.putmask(labels, mask, -1)

     # sort observed ids
-    uniques = algorithms.take_nd(uniques, sorter, allow_fill=False)
+    uniques = uniques.take(sorter)

     return uniques, labels
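
One more note on the sorting.py hunk: labels can contain -1 sentinels for missing values. The pre-computed mask and the np.putmask call, which this PR leaves untouched, restore the sentinel after the take, so the wrap-around behaviour of ndarray.take on negative entries never leaks into the result. A hedged sketch of that pattern with made-up data:

```python
# Sketch of the sentinel handling around the take in _reorder_by_uniques:
# -1 entries temporarily index the last element, then np.putmask puts the
# missing-value sentinel back.
import numpy as np

reverse_indexer = np.array([2, 0, 1], dtype=np.intp)
labels = np.array([0, 1, -1, 2], dtype=np.intp)

mask = labels < 0
relabeled = reverse_indexer.take(labels)  # -1 wraps to the last entry for now
np.putmask(relabeled, mask, -1)           # restore the missing-value sentinel
print(relabeled)                          # [ 2  0 -1  1]
```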