@@ -270,7 +270,6 @@ class Eig(Op):
 
     """
 
-    _numop = staticmethod(np.linalg.eig)
     __props__: Tuple[str, ...] = ()
 
     def make_node(self, x):
@@ -283,7 +282,7 @@ def make_node(self, x):
     def perform(self, node, inputs, outputs):
         (x,) = inputs
         (w, v) = outputs
-        w[0], v[0] = (z.astype(x.dtype) for z in self._numop(x))
+        w[0], v[0] = (z.astype(x.dtype) for z in np.linalg.eig(x))
 
     def infer_shape(self, fgraph, node, shapes):
         n = shapes[0][0]
@@ -299,7 +298,6 @@ class Eigh(Eig):
 
     """
 
-    _numop = staticmethod(np.linalg.eigh)
     __props__ = ("UPLO",)
 
     def __init__(self, UPLO="L"):
@@ -314,15 +312,15 @@ def make_node(self, x):
         # LAPACK. Rather than trying to reproduce the (rather
         # involved) logic, we just probe linalg.eigh with a trivial
         # input.
-        w_dtype = self._numop([[np.dtype(x.dtype).type()]])[0].dtype.name
+        w_dtype = np.linalg.eigh([[np.dtype(x.dtype).type()]])[0].dtype.name
         w = vector(dtype=w_dtype)
         v = matrix(dtype=w_dtype)
         return Apply(self, [x], [w, v])
 
     def perform(self, node, inputs, outputs):
         (x,) = inputs
         (w, v) = outputs
-        w[0], v[0] = self._numop(x, self.UPLO)
+        w[0], v[0] = np.linalg.eigh(x, self.UPLO)
 
     def grad(self, inputs, g_outputs):
         r"""The gradient function should return
@@ -445,7 +443,6 @@ class QRFull(Op):
 
     """
 
-    _numop = staticmethod(np.linalg.qr)
     __props__ = ("mode",)
 
     def __init__(self, mode):
@@ -477,7 +474,7 @@ def make_node(self, x):
     def perform(self, node, inputs, outputs):
         (x,) = inputs
         assert x.ndim == 2, "The input of qr function should be a matrix."
-        res = self._numop(x, self.mode)
+        res = np.linalg.qr(x, self.mode)
         if self.mode != "r":
             outputs[0][0], outputs[1][0] = res
         else:
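The branch on self.mode above mirrors NumPy's return convention: np.linalg.qr returns a (Q, R) pair in the default modes but only the single array R when mode="r". A small reminder, outside the Op (illustrative values only):

import numpy as np

a = np.arange(6.0).reshape(3, 2)

# Reduced (default) mode returns the pair (Q, R).
q, r = np.linalg.qr(a)
print(q.shape, r.shape)   # (3, 2) (2, 2)

# mode="r" returns only the upper-triangular factor as a single array,
# which is why the Op unpacks two outputs only when mode != "r".
r_only = np.linalg.qr(a, mode="r")
print(r_only.shape)       # (2, 2)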
@@ -546,7 +543,6 @@ class SVD(Op):
     """
 
     # See doc in the docstring of the function just after this class.
-    _numop = staticmethod(np.linalg.svd)
     __props__ = ("full_matrices", "compute_uv")
 
     def __init__(self, full_matrices=True, compute_uv=True):
@@ -574,10 +570,10 @@ def perform(self, node, inputs, outputs):
         assert x.ndim == 2, "The input of svd function should be a matrix."
         if self.compute_uv:
             u, s, vt = outputs
-            u[0], s[0], vt[0] = self._numop(x, self.full_matrices, self.compute_uv)
+            u[0], s[0], vt[0] = np.linalg.svd(x, self.full_matrices, self.compute_uv)
         else:
             (s,) = outputs
-            s[0] = self._numop(x, self.full_matrices, self.compute_uv)
+            s[0] = np.linalg.svd(x, self.full_matrices, self.compute_uv)
 
     def infer_shape(self, fgraph, node, shapes):
         (x_shape,) = shapes
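The same convention drives the compute_uv branch in SVD.perform: np.linalg.svd returns the triple (u, s, vt) when compute_uv=True and only the singular values otherwise. A short standalone check (again just NumPy, not part of the patch):

import numpy as np

a = np.arange(6.0).reshape(3, 2)

# compute_uv=True (the default) returns the triple (u, s, vt).
u, s, vt = np.linalg.svd(a, full_matrices=True, compute_uv=True)
print(u.shape, s.shape, vt.shape)   # (3, 3) (2,) (2, 2)

# compute_uv=False returns only the singular values.
s_only = np.linalg.svd(a, full_matrices=True, compute_uv=False)
print(s_only.shape)                 # (2,)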
@@ -729,7 +725,6 @@ class TensorInv(Op):
     PyTensor utilization of numpy.linalg.tensorinv;
     """
 
-    _numop = staticmethod(np.linalg.tensorinv)
     __props__ = ("ind",)
 
     def __init__(self, ind=2):
@@ -743,7 +738,7 @@ def make_node(self, a):
     def perform(self, node, inputs, outputs):
         (a,) = inputs
         (x,) = outputs
-        x[0] = self._numop(a, self.ind)
+        x[0] = np.linalg.tensorinv(a, self.ind)
 
     def infer_shape(self, fgraph, node, shapes):
         sp = shapes[0][self.ind :] + shapes[0][: self.ind]
@@ -789,7 +784,6 @@ class TensorSolve(Op):
 
     """
 
-    _numop = staticmethod(np.linalg.tensorsolve)
     __props__ = ("axes",)
 
     def __init__(self, axes=None):
@@ -808,7 +802,7 @@ def perform(self, node, inputs, outputs):
             b,
         ) = inputs
         (x,) = outputs
-        x[0] = self._numop(a, b, self.axes)
+        x[0] = np.linalg.tensorsolve(a, b, self.axes)
 
 
 def tensorsolve(a, b, axes=None):
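Taken together, the diff removes the _numop = staticmethod(...) indirection from each Op and has perform call the corresponding np.linalg routine directly. A minimal sketch of the before/after shape of such a class (the names EigLike and EigLikeDirect are illustrative stand-ins, not PyTensor Ops, and the simplified perform signature is assumed for brevity):

import numpy as np


# Before: the NumPy routine is stored as a class attribute and looked up
# through self._numop at call time.
class EigLike:
    _numop = staticmethod(np.linalg.eig)

    def perform(self, x):
        return tuple(z.astype(x.dtype) for z in self._numop(x))


# After: perform calls np.linalg.eig directly; the staticmethod wrapper and
# the extra class attribute disappear.
class EigLikeDirect:
    def perform(self, x):
        return tuple(z.astype(x.dtype) for z in np.linalg.eig(x))


# Both variants produce identical results for the same input.
x = np.array([[2.0, 0.0], [0.0, 3.0]])
w_old, v_old = EigLike().perform(x)
w_new, v_new = EigLikeDirect().perform(x)
assert np.allclose(w_old, w_new) and np.allclose(v_old, v_new)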