
Reduce overhead of function call and deprecate rarely used utilities #1024

Merged: 5 commits, Oct 29, 2024
249 changes: 122 additions & 127 deletions pytensor/compile/function/types.py

Large diffs are not rendered by default.

3 changes: 0 additions & 3 deletions pytensor/gradient.py
@@ -128,9 +128,6 @@ def fiter_variable(self, other):
" a symbolic placeholder."
)

def may_share_memory(a, b):
Member Author:
Removing this avoids having to check for aliasing in the first place during the function call.

return False

def value_eq(a, b, force_same_dtype=True):
raise AssertionError(
"If you're assigning to a DisconnectedType you're"
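The overrides removed in this file and in the ones below all hard-code `may_share_memory` to return `False`. Per the author's comment above, the gain is not the method body itself but that the compiled function no longer has to run an aliasing check over values of these types on every call. As a rough, purely hypothetical sketch (the names and structure below are not pytensor's actual wrapper code), such a check amounts to a per-pair scan of this shape:

```python
# Hypothetical sketch (not pytensor's real implementation) of the kind of
# per-call aliasing scan that a `may_share_memory` override feeds into.
def any_possible_aliasing(output_pairs, input_values):
    """Return True if any computed output may share memory with an input.

    output_pairs: iterable of (type, value) pairs, one per output.
    input_values: the raw values passed to the compiled function.
    """
    for out_type, out_value in output_pairs:
        for in_value in input_values:
            # One call per (output, input) pair on every function call, even
            # for types such as DisconnectedType, NullType, or NoneTypeT whose
            # values can never alias anything.
            if out_type.may_share_memory(out_value, in_value):
                return True
    return False
```

For types whose values can never share memory, every one of those calls is wasted work, which is presumably why dropping the overrides lets the call path skip the check for them entirely.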
3 changes: 0 additions & 3 deletions pytensor/graph/null_type.py
@@ -26,9 +26,6 @@ def filter(self, data, strict=False, allow_downcast=None):
def filter_variable(self, other, allow_convert=True):
raise ValueError("No values may be assigned to a NullType")

def may_share_memory(a, b):
return False

def values_eq(self, a, b, force_same_dtype=True):
raise ValueError("NullType has no values to compare")

13 changes: 10 additions & 3 deletions pytensor/graph/op.py
@@ -513,17 +513,24 @@ def make_py_thunk(
"""
node_input_storage = [storage_map[r] for r in node.inputs]
node_output_storage = [storage_map[r] for r in node.outputs]
node_compute_map = [compute_map[r] for r in node.outputs]

if debug and hasattr(self, "debug_perform"):
p = node.op.debug_perform
else:
p = node.op.perform

@is_thunk_type
def rval(p=p, i=node_input_storage, o=node_output_storage, n=node):
def rval(
p=p,
i=node_input_storage,
o=node_output_storage,
n=node,
cm=node_compute_map,
):
r = p(n, [x[0] for x in i], o)
for o in node.outputs:
compute_map[o][0] = True
for entry in cm:
entry[0] = True
return r

rval.inputs = node_input_storage
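The thunk change above moves the `compute_map[r]` dictionary lookups out of the per-call path: the one-element cells are fetched once when the thunk is built and bound as the default argument `cm`, so each call only touches locals. A minimal standalone sketch of the same pattern, with illustrative names rather than pytensor's real API:

```python
# Simplified sketch of the pattern used in make_py_thunk above (names are
# illustrative): hoist dict lookups to construction time and bind the results
# as default arguments so the hot path only reads local variables.
def make_thunk(perform, node, storage_map, compute_map):
    input_storage = [storage_map[v] for v in node.inputs]    # one-time lookups
    output_storage = [storage_map[v] for v in node.outputs]
    computed_cells = [compute_map[v] for v in node.outputs]  # prefetched cells

    def thunk(
        p=perform,
        i=input_storage,
        o=output_storage,
        n=node,
        cm=computed_cells,
    ):
        r = p(n, [cell[0] for cell in i], o)
        for cell in cm:       # flip the prefetched one-element cells;
            cell[0] = True    # no compute_map lookup per output, per call
        return r

    return thunk
```

Binding these objects as default arguments (rather than closing over the enclosing scope) is a common CPython micro-optimization: defaults are evaluated once, stored on the function object, and read as fast locals at call time.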
5 changes: 1 addition & 4 deletions pytensor/graph/type.py
@@ -48,10 +48,7 @@ def in_same_class(self, otype: "Type") -> bool | None:
unique element (i.e. it uses `self.__eq__`).

"""
if self == otype:
return True

return False
return self == otype

def is_super(self, otype: "Type") -> bool | None:
"""Determine if `self` is a supertype of `otype`.
7 changes: 0 additions & 7 deletions pytensor/scalar/basic.py
@@ -303,13 +303,6 @@ def clone(self, dtype=None, **kwargs):
dtype = self.dtype
return type(self)(dtype)

@staticmethod
def may_share_memory(a, b):
# This class represent basic c type, represented in python
# with numpy.scalar. They are read only. So from python, they
# can never share memory.
return False

def filter(self, data, strict=False, allow_downcast=None):
py_type = self.dtype_specs()[0]
if strict and not isinstance(data, py_type):
17 changes: 5 additions & 12 deletions pytensor/tensor/random/op.py
@@ -387,24 +387,17 @@ def dist_params(self, node) -> Sequence[Variable]:
return node.inputs[2:]

def perform(self, node, inputs, outputs):
rng_var_out, smpl_out = outputs

rng, size, *args = inputs

# Draw from `rng` if `self.inplace` is `True`, and from a copy of `rng` otherwise.
if not self.inplace:
rng = copy(rng)

rng_var_out[0] = rng

if size is not None:
size = tuple(size)
smpl_val = self.rng_fn(rng, *([*args, size]))

if not isinstance(smpl_val, np.ndarray) or str(smpl_val.dtype) != self.dtype:
smpl_val = np.asarray(smpl_val, dtype=self.dtype)

smpl_out[0] = smpl_val
outputs[0][0] = rng
outputs[1][0] = np.asarray(
self.rng_fn(rng, *args, None if size is None else tuple(size)),
dtype=self.dtype,
)

def grad(self, inputs, outputs):
return [
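The rewritten `perform` keeps the copy-on-not-inplace behaviour: when `self.inplace` is `False`, samples are drawn from a copy of the incoming generator, so the caller's `rng` is never advanced. A small illustration of that property with plain NumPy (variable names here are only for the example; with recent NumPy, `copy.copy` of a `Generator` yields an independent generator with the same state):

```python
from copy import copy

import numpy as np

rng = np.random.default_rng(2024)
rng_copy = copy(rng)                 # what perform does when inplace is False
draw = rng_copy.standard_normal(3)   # advances only the copy

# The original generator is untouched, so it reproduces the same draw.
np.testing.assert_allclose(rng.standard_normal(3), draw)
```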
6 changes: 0 additions & 6 deletions pytensor/tensor/type_other.py
@@ -126,12 +126,6 @@ def filter(self, x, strict=False, allow_downcast=None):
else:
raise TypeError("Expected None!")

@staticmethod
def may_share_memory(a, b):
# None never share memory between object, in the sense of DebugMode.
# Python None are singleton
return False


none_type_t = NoneTypeT()
