|
4 | 4 |
|
5 | 5 | import pytensor.sparse as ps
|
6 | 6 | import pytensor.tensor as pt
|
7 |
| -from pytensor import function |
8 |
| -from pytensor.graph import FunctionGraph |
| 7 | +from pytensor.graph import Constant, FunctionGraph |
| 8 | +from pytensor.tensor.type import DenseTensorType |
9 | 9 | from tests.link.jax.test_basic import compare_jax_and_py
|
10 | 10 |
|
11 | 11 |
|
|
19 | 19 | # structured_dot only allows matrix @ matrix
|
20 | 20 | (ps.structured_dot, pt.matrix, ps.matrix),
|
21 | 21 | (ps.structured_dot, ps.matrix, pt.matrix),
|
| 22 | + (ps.structured_dot, ps.matrix, ps.matrix), |
22 | 23 | ],
|
23 | 24 | )
|
24 |
@pytest.mark.parametrize("x_constant", (False, True))
@pytest.mark.parametrize("y_constant", (False, True))
def test_sparse_dot(x_type, y_type, op, x_constant, y_constant):
    """Check JAX transpilation of ``ps.dot`` / ``ps.structured_dot`` graphs.

    Each operand is built either as a sparse matrix (``ps.matrix``) or a
    dense tensor (``pt.vector`` / ``pt.matrix``), and either as a graph
    constant or as a symbolic input (per ``x_constant`` / ``y_constant``).
    The JAX result is then compared against the reference backend via
    ``compare_jax_and_py``.
    """
    # Only non-constant operands become FunctionGraph inputs, and only those
    # need a runtime test value; constants carry their data in the graph.
    inputs = []
    test_values = []

    # Build the left operand ``x`` with shape (5, 40) (or length-40 vector),
    # float32, so that x @ y is well defined against the (40, 3) right operand.
    if x_type is ps.matrix:
        x_test = scipy.sparse.random(5, 40, density=0.25, format="csr", dtype="float32")
        x_pt = ps.as_sparse_variable(x_test, name="x")
    else:
        if x_type is pt.vector:
            x_test = np.arange(40, dtype="float32")
        else:
            x_test = np.arange(5 * 40, dtype="float32").reshape(5, 40)
        x_pt = pt.as_tensor_variable(x_test, name="x")
    # Both construction paths wrap concrete data, so the result is a Constant.
    assert isinstance(x_pt, Constant)

    if not x_constant:
        # Replace the constant with a fresh variable of the same type, so the
        # value is fed in at call time rather than baked into the graph.
        x_pt = x_pt.type(name="x")
        inputs.append(x_pt)
        test_values.append(x_test)

    # Build the right operand ``y`` with shape (40, 3) (or length-40 vector),
    # mirroring the construction of ``x``.
    if y_type is ps.matrix:
        y_test = scipy.sparse.random(40, 3, density=0.25, format="csc", dtype="float32")
        y_pt = ps.as_sparse_variable(y_test, name="y")
    else:
        if y_type is pt.vector:
            y_test = np.arange(40, dtype="float32")
        else:
            y_test = np.arange(40 * 3, dtype="float32").reshape(40, 3)
        y_pt = pt.as_tensor_variable(y_test, name="y")
    assert isinstance(y_pt, Constant)

    if not y_constant:
        y_pt = y_pt.type(name="y")
        inputs.append(y_pt)
        test_values.append(y_test)

    dot_pt = op(x_pt, y_pt)
    fgraph = FunctionGraph(inputs, [dot_pt])

    def assert_fn(x, y):
        # Each backend's outputs arrive as a single-element list; unpack, and
        # densify any sparse result (has ``todense``) before the numeric
        # comparison, since one side may return a sparse container.
        [x] = x
        [y] = y
        if hasattr(x, "todense"):
            x = x.todense()
        if hasattr(y, "todense"):
            y = y.todense()
        np.testing.assert_allclose(x, y)

    compare_jax_and_py(
        fgraph,
        test_values,
        # Only dense outputs are required to be JAX device arrays —
        # presumably sparse results come back as JAX sparse containers
        # instead (NOTE(review): confirm against compare_jax_and_py).
        must_be_device_array=isinstance(dot_pt.type, DenseTensorType),
        assert_fn=assert_fn,
    )
0 commit comments