Deprecate redefinition of np.testing.assert_allclose #784

Open · wants to merge 3 commits into base: main
Changes from all commits
5 changes: 2 additions & 3 deletions doc/extending/creating_an_op.rst
@@ -551,7 +551,6 @@ exception. You can use the ``assert`` keyword to automatically raise an

import numpy as np
import pytensor
from tests import unittest_tools as utt


class TestDouble(utt.InferShapeTester):
@@ -569,9 +568,9 @@ exception. You can use the ``assert`` keyword to automatically raise an
inp = np.asarray(rng.random((5, 4)), dtype=pytensor.config.floatX)
out = f(inp)
# Compare the result computed to the expected value.
utt.assert_allclose(inp * 2, out)
np.testing.assert_allclose(inp * 2, out)

We call ``utt.assert_allclose(expected_value, value)`` to compare
We call ``np.testing.assert_allclose(expected_value, value)`` to compare
NumPy ndarrays. This raises an error message with more information. Also,
the default tolerance can be changed with the PyTensor flag
``config.tensor__cmp_sloppy``, which takes values in 0, 1 and 2. The
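For readers unfamiliar with the NumPy helper the docs now point to, here is a minimal standalone sketch of how ``np.testing.assert_allclose`` behaves (the array values and tolerances are illustrative):

```python
import numpy as np

expected = np.array([2.0, 4.0, 6.0])
actual = np.array([2.0, 4.0, 6.0 + 1e-9])

# Passes: the mismatch is within the default relative tolerance (rtol=1e-7).
np.testing.assert_allclose(actual, expected)

# Tolerances can also be given explicitly per call; a failure raises an
# AssertionError with a detailed report of the mismatching elements.
np.testing.assert_allclose(actual, expected, rtol=1e-6, atol=1e-12)
```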
12 changes: 0 additions & 12 deletions pytensor/tensor/math.py
@@ -125,18 +125,6 @@ def _get_atol_rtol(a, b):
return atol, rtol


def _allclose(a, b, rtol=None, atol=None):
a = np.asarray(a)
b = np.asarray(b)
atol_, rtol_ = _get_atol_rtol(a, b)
if rtol is not None:
rtol_ = rtol
if atol is not None:
atol_ = atol

return np.allclose(a, b, atol=atol_, rtol=rtol_)


class Argmax(COp):
"""
Calculate the argmax over a given axis or over all axes.
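Callers that still need the removed helper can reproduce it inline with ``_get_atol_rtol`` plus ``np.allclose``, which is the pattern the rest of this PR adopts. A minimal sketch (the wrapper name is hypothetical, and it assumes ``_get_atol_rtol`` remains importable from ``pytensor.tensor.math``):

```python
import numpy as np

from pytensor.tensor.math import _get_atol_rtol


def _allclose_inline(a, b, rtol=None, atol=None):
    # Hypothetical stand-in for the removed ``_allclose``: derive the
    # dtype-dependent default tolerances, let explicit arguments override
    # them, then defer to np.allclose.
    a = np.asarray(a)
    b = np.asarray(b)
    atol_, rtol_ = _get_atol_rtol(a, b)
    if rtol is not None:
        rtol_ = rtol
    if atol is not None:
        atol_ = atol
    return np.allclose(a, b, rtol=rtol_, atol=atol_)
```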
7 changes: 6 additions & 1 deletion pytensor/tensor/type.py
@@ -663,7 +663,12 @@ def values_eq_approx(
if str(a.dtype) not in continuous_dtypes:
return np.all(a == b)
else:
cmp = pytensor.tensor.math._allclose(a, b, rtol=rtol, atol=atol)
atol_, rtol_ = pytensor.tensor.math._get_atol_rtol(a, b)
if rtol is not None:
rtol_ = rtol
if atol is not None:
atol_ = atol
Review comment on lines +666 to +670 (Member): Coverage says this is not being used, can we just remove?

cmp = np.allclose(np.asarray(a), np.asarray(b), rtol=rtol_, atol=atol_)
if cmp:
# Numpy claims they are close, this is good enough for us.
return True
37 changes: 30 additions & 7 deletions tests/graph/test_compute_test_value.py
@@ -10,7 +10,7 @@
from pytensor.graph.op import Op
from pytensor.graph.type import Type
from pytensor.link.c.op import COp
from pytensor.tensor.math import _allclose, dot
from pytensor.tensor.math import _get_atol_rtol, dot
from pytensor.tensor.type import fmatrix, iscalar, matrix, vector


@@ -85,7 +85,15 @@ def test_variable_only(self):
z = dot(x, y)
assert hasattr(z.tag, "test_value")
f = pytensor.function([x, y], z)
assert _allclose(f(x.tag.test_value, y.tag.test_value), z.tag.test_value)
atol_, rtol_ = _get_atol_rtol(
f(x.tag.test_value, y.tag.test_value), z.tag.test_value
)

Review comment (Member): If this ends up only being used in the tests here, we can remove from math and include only in the test file?
assert np.allclose(
f(x.tag.test_value, y.tag.test_value),
z.tag.test_value,
atol=atol_,
rtol=rtol_,
)

# this test should fail
y.tag.test_value = np.random.random((6, 5)).astype(config.floatX)
@@ -122,7 +130,16 @@ def test_string_var(self):
out = dot(dot(x, y), z)
assert hasattr(out.tag, "test_value")
tf = pytensor.function([x, y], out)
assert _allclose(tf(x.tag.test_value, y.tag.test_value), out.tag.test_value)

atol_, rtol_ = _get_atol_rtol(
tf(x.tag.test_value, y.tag.test_value), out.tag.test_value
)
assert np.allclose(
tf(x.tag.test_value, y.tag.test_value),
out.tag.test_value,
atol=atol_,
rtol=rtol_,
)

def f(x, y, z):
return dot(dot(x, y), z)
@@ -141,7 +158,10 @@ def test_shared(self):
z = dot(x, y)
assert hasattr(z.tag, "test_value")
f = pytensor.function([x], z)
assert _allclose(f(x.tag.test_value), z.tag.test_value)
atol_, rtol_ = _get_atol_rtol(f(x.tag.test_value), z.tag.test_value)
assert np.allclose(
f(x.tag.test_value), z.tag.test_value, atol=atol_, rtol=rtol_
)

# this test should fail
y.set_value(np.random.random((5, 6)).astype(config.floatX))
@@ -156,7 +176,8 @@ def test_ndarray(self):
z = dot(x, y)
assert hasattr(z.tag, "test_value")
f = pytensor.function([], z)
assert _allclose(f(), z.tag.test_value)
atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)

# this test should fail
x = np.random.random((2, 4)).astype(config.floatX)
@@ -170,7 +191,8 @@ def test_empty_elemwise(self):
z = (x + 2) * 3
assert hasattr(z.tag, "test_value")
f = pytensor.function([], z)
assert _allclose(f(), z.tag.test_value)
atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)

def test_constant(self):
x = pt.constant(np.random.random((2, 3)), dtype=config.floatX)
@@ -180,7 +202,8 @@ def test_constant(self):
z = dot(x, y)
assert hasattr(z.tag, "test_value")
f = pytensor.function([], z)
assert _allclose(f(), z.tag.test_value)
atol_, rtol_ = _get_atol_rtol(f(), z.tag.test_value)
assert np.allclose(f(), z.tag.test_value, atol=atol_, rtol=rtol_)

# this test should fail
x = pt.constant(np.random.random((2, 4)), dtype=config.floatX)
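If the reviewer's suggestion above is taken and ``_get_atol_rtol`` moves out of ``pytensor.tensor.math``, the repeated three-line pattern in these tests could be folded into a small test-local helper. A sketch, with a hypothetical helper name:

```python
import numpy as np

from pytensor.tensor.math import _get_atol_rtol  # or a test-local copy, per the comment


def assert_allclose_default_tols(actual, expected):
    # Hypothetical test-only helper wrapping the repeated pattern above:
    # fetch dtype-dependent default tolerances, then compare.
    actual, expected = np.asarray(actual), np.asarray(expected)
    atol_, rtol_ = _get_atol_rtol(actual, expected)
    assert np.allclose(actual, expected, atol=atol_, rtol=rtol_)
```

Each call site above would then shrink to a single line, e.g. ``assert_allclose_default_tols(f(x.tag.test_value), z.tag.test_value)``.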
5 changes: 2 additions & 3 deletions tests/graph/test_replace.py
@@ -12,7 +12,6 @@
vectorize_node,
)
from pytensor.tensor import dvector, fvector, vector
from tests import unittest_tools as utt
from tests.graph.utils import MyOp, MyVariable, op_multiple_outputs


@@ -133,10 +132,10 @@ def test(x, y, mention_y):
return function([], out)()

x = shared(np.asarray(0.0, dtype=config.floatX))
utt.assert_allclose(
np.testing.assert_allclose(
test(x, pt.sum((x + 1) ** 2), mention_y=False), 1.21000003815
)
utt.assert_allclose(
np.testing.assert_allclose(
test(x, pt.sum((x + 1) ** 2), mention_y=True), 1.21000003815
)

5 changes: 2 additions & 3 deletions tests/link/c/test_params_type.py
@@ -9,7 +9,6 @@
from pytensor.link.c.type import EnumList, Generic
from pytensor.scalar import ScalarType
from pytensor.tensor.type import TensorType, matrix
from tests import unittest_tools as utt


tensor_type_0d = TensorType("float64", shape=tuple())
@@ -348,5 +347,5 @@ def test_op_params(self):
vy1 = f1(vx)
vy2 = f2(vx)
ref = a * (vx**2) + b * vx + c
utt.assert_allclose(vy1, vy2)
utt.assert_allclose(ref, vy1)
np.testing.assert_allclose(vy1, vy2)
np.testing.assert_allclose(ref, vy1)
3 changes: 1 addition & 2 deletions tests/link/test_vm.py
@@ -20,7 +20,6 @@
from pytensor.tensor.math import cosh, tanh
from pytensor.tensor.type import lscalar, scalar, scalars, vector, vectors
from pytensor.tensor.variable import TensorConstant
from tests import unittest_tools as utt


class SomeOp(Op):
@@ -221,7 +220,7 @@ def test_partial_function(linker):
assert f(3, output_subset=[0, 1, 2]) == f(3)
assert f(4, output_subset=[0, 2]) == [f(4)[0], f(4)[2]]

utt.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))
np.testing.assert_allclose(f(5), np.array([32.0, 16.0, 1.7857142857142858]))


@pytest.mark.parametrize(
11 changes: 5 additions & 6 deletions tests/scalar/test_basic.py
@@ -3,7 +3,6 @@

import pytensor
import pytensor.tensor as pt
import tests.unittest_tools as utt
from pytensor.compile.mode import Mode
from pytensor.graph.fg import FunctionGraph
from pytensor.link.c.basic import DualLinker
@@ -477,11 +476,11 @@ def test_grad_inrange():
# x is equal to the lower or higher bound but in that case
# PyTensor defines the gradient to be zero for stability.
f = pytensor.function([x, low, high], [gx, glow, ghigh])
utt.assert_allclose(f(0, 1, 5), [0, 0, 0])
utt.assert_allclose(f(1, 1, 5), [0, 0, 0])
utt.assert_allclose(f(2, 1, 5), [0, 0, 0])
utt.assert_allclose(f(5, 1, 5), [0, 0, 0])
utt.assert_allclose(f(7, 1, 5), [0, 0, 0])
np.testing.assert_allclose(f(0, 1, 5), [0, 0, 0])
np.testing.assert_allclose(f(1, 1, 5), [0, 0, 0])
np.testing.assert_allclose(f(2, 1, 5), [0, 0, 0])
np.testing.assert_allclose(f(5, 1, 5), [0, 0, 0])
np.testing.assert_allclose(f(7, 1, 5), [0, 0, 0])


def test_grad_abs():