
Commit e427954

tvwenger authored and ricardoV94 committed
prevent log10 L_op from upcasting
1 parent 61c15af · commit e427954

File tree

2 files changed (+12 -1 lines changed)

pytensor/scalar/basic.py

Lines changed: 1 addition & 1 deletion
@@ -3030,7 +3030,7 @@ def L_op(self, inputs, outputs, gout):
             else:
                 return [x.zeros_like()]
 
-        return (gz / (x * np.log(10.0)),)
+        return (gz / (x * np.asarray(math.log(10.0)).astype(x.dtype)),)
 
     def c_code(self, node, name, inputs, outputs, sub):
         (x,) = inputs
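The change targets the gradient (L_op) of Log10: since d/dx log10(x) = 1/(x · ln 10), the gradient is gz / (x · ln 10). np.log(10.0) evaluates to a NumPy float64 scalar, and multiplying a float32 input by a float64 constant promotes the whole gradient expression to float64; casting the constant to x.dtype first keeps the gradient at the input's precision. A minimal sketch of the promotion behavior, written against the tensor-level API for illustration (the commit itself patches pytensor.scalar, not pytensor.tensor):

    import numpy as np
    import pytensor.tensor as pt

    x = pt.scalar("x", dtype="float32")

    # np.log(10.0) is a NumPy float64 scalar, so the product (and any
    # gradient expression built on it) is promoted to float64.
    upcast = x * np.log(10.0)
    print(upcast.dtype)  # float64

    # Casting the constant to the input's dtype first keeps float32,
    # mirroring the fix above.
    kept = x * np.asarray(np.log(10.0)).astype(x.dtype)
    print(kept.dtype)  # float32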

tests/scalar/test_basic.py

Lines changed: 11 additions & 0 deletions
@@ -544,3 +544,14 @@ def test_shape():
     assert b.shape.type.ndim == 1
     assert b.shape.type.shape == (0,)
     assert b.shape.type.dtype == "int64"
+
+
+def test_grad_log10():
+    # Ensure that log10 does not upcast gradient
+    # This is a regression test for
+    # https://github.com/pymc-devs/pytensor/issues/667
+    a = float32("log10_a")
+    b = log10(a)
+    b_grad = pytensor.gradient.grad(b, a)
+    assert b.dtype == "float32"
+    assert b_grad.dtype == "float32"
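The regression test exercises the scalar API directly (float32 and log10 come from pytensor.scalar). The same property can be checked at the tensor level; a usage sketch, not part of the commit:

    import pytensor
    import pytensor.tensor as pt

    a = pt.scalar("a", dtype="float32")
    b = pt.log10(a)
    b_grad = pytensor.grad(b, a)

    assert b.dtype == "float32"
    assert b_grad.dtype == "float32"  # was float64 before this fix

The committed test itself can be run with: pytest tests/scalar/test_basic.py::test_grad_log10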
