@@ -2842,10 +2842,10 @@ def matmul(x1: "ArrayLike", x2: "ArrayLike", dtype: Optional["DTypeLike"] = None


 def vecdot(
-    x1: "TensorLike",
-    x2: "TensorLike",
+    x1: TensorLike,
+    x2: TensorLike,
     dtype: Optional["DTypeLike"] = None,
-) -> "TensorVariable":
+) -> TensorVariable:
     """Compute the vector dot product of two arrays.

     Parameters
@@ -2872,21 +2872,20 @@ def vecdot(
     --------
     >>> import pytensor.tensor as pt
     >>> # Vector dot product with shape (5,) inputs
-    >>> x = pt.vector("x")  # shape (5,)
-    >>> y = pt.vector("y")  # shape (5,)
+    >>> x = pt.vector("x", shape=(5,))  # shape (5,)
+    >>> y = pt.vector("y", shape=(5,))  # shape (5,)
     >>> z = pt.vecdot(x, y)  # scalar output
-    >>> # Equivalent to numpy.vecdot(x, y) or numpy.sum(x * y)
+    >>> # Equivalent to numpy.vecdot(x, y)
     >>>
     >>> # With batched inputs of shape (3, 5)
-    >>> x_batch = pt.matrix("x")  # shape (3, 5)
-    >>> y_batch = pt.matrix("y")  # shape (3, 5)
+    >>> x_batch = pt.matrix("x", shape=(3, 5))  # shape (3, 5)
+    >>> y_batch = pt.matrix("y", shape=(3, 5))  # shape (3, 5)
     >>> z_batch = pt.vecdot(x_batch, y_batch)  # shape (3,)
-    >>> # Equivalent to numpy.sum(x_batch * y_batch, axis=-1)
+    >>> # Equivalent to numpy.vecdot(x_batch, y_batch)
     """
     x1 = as_tensor_variable(x1)
     x2 = as_tensor_variable(x2)

-    # Use the inner product operation along the last axis
     out = _inner_prod(x1, x2)

     if dtype is not None:
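The updated `vecdot` doctest can be exercised end to end by compiling the graph. A minimal sketch, not part of the diff, assuming this branch's `pt.vecdot` reduces over the last axis like `numpy.vecdot` (the cross-check below uses a plain sum of products, which is equivalent for real inputs):

```python
import numpy as np
import pytensor
import pytensor.tensor as pt

# Batched inputs with static shapes, as in the updated doctest.
x = pt.matrix("x", shape=(3, 5))
y = pt.matrix("y", shape=(3, 5))
z = pt.vecdot(x, y)  # expected output shape: (3,)

f = pytensor.function([x, y], z)

rng = np.random.default_rng(0)
xv = rng.normal(size=(3, 5)).astype(x.dtype)
yv = rng.normal(size=(3, 5)).astype(y.dtype)

# Cross-check against a last-axis sum of products.
np.testing.assert_allclose(f(xv, yv), np.sum(xv * yv, axis=-1), rtol=1e-6)
```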
@@ -2896,8 +2895,8 @@ def vecdot(


 def matvec(
-    x1: "TensorLike", x2: "TensorLike", dtype: Optional["DTypeLike"] = None
-) -> "TensorVariable":
+    x1: TensorLike, x2: TensorLike, dtype: Optional["DTypeLike"] = None
+) -> TensorVariable:
     """Compute the matrix-vector product.

     Parameters
@@ -2918,23 +2917,23 @@ def matvec(

     Notes
     -----
-    This is equivalent to `numpy.matmul` where the second argument is a vector,
-    but with more intuitive broadcasting rules. Broadcasting happens over all but
-    the last two dimensions of x1 and all dimensions of x2 except the last.
+    This is equivalent to `numpy.matvec` and computes the matrix-vector product
+    with broadcasting over batch dimensions.

     Examples
     --------
     >>> import pytensor.tensor as pt
     >>> # Matrix-vector product
-    >>> A = pt.matrix("A")  # shape (3, 4)
-    >>> v = pt.vector("v")  # shape (4,)
+    >>> A = pt.matrix("A", shape=(3, 4))  # shape (3, 4)
+    >>> v = pt.vector("v", shape=(4,))  # shape (4,)
     >>> result = pt.matvec(A, v)  # shape (3,)
-    >>> # Equivalent to numpy.matmul(A, v)
+    >>> # Equivalent to numpy.matvec(A, v)
     >>>
     >>> # Batched matrix-vector product
-    >>> batched_A = pt.tensor3("A")  # shape (2, 3, 4)
-    >>> batched_v = pt.matrix("v")  # shape (2, 4)
+    >>> batched_A = pt.tensor3("A", shape=(2, 3, 4))  # shape (2, 3, 4)
+    >>> batched_v = pt.matrix("v", shape=(2, 4))  # shape (2, 4)
     >>> result = pt.matvec(batched_A, batched_v)  # shape (2, 3)
+    >>> # Equivalent to numpy.matvec(batched_A, batched_v)
     """
     x1 = as_tensor_variable(x1)
     x2 = as_tensor_variable(x2)
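The batch broadcasting described in the rewritten `matvec` Notes can be sanity-checked against plain `np.matmul` with the vector promoted to a column. A sketch under the assumption that `pt.matvec` contracts the last axis of both arguments, as `numpy.matvec` does:

```python
import numpy as np
import pytensor
import pytensor.tensor as pt

batched_A = pt.tensor3("A", shape=(2, 3, 4))
batched_v = pt.matrix("v", shape=(2, 4))
result = pt.matvec(batched_A, batched_v)  # expected shape: (2, 3)

f = pytensor.function([batched_A, batched_v], result)

rng = np.random.default_rng(1)
Av = rng.normal(size=(2, 3, 4)).astype(batched_A.dtype)
vv = rng.normal(size=(2, 4)).astype(batched_v.dtype)

# matvec(A, v) should match matmul with v promoted to a column and squeezed back.
expected = np.matmul(Av, vv[..., None])[..., 0]
np.testing.assert_allclose(f(Av, vv), expected, rtol=1e-6)
```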
@@ -2948,8 +2947,8 @@ def matvec(


 def vecmat(
-    x1: "TensorLike", x2: "TensorLike", dtype: Optional["DTypeLike"] = None
-) -> "TensorVariable":
+    x1: TensorLike, x2: TensorLike, dtype: Optional["DTypeLike"] = None
+) -> TensorVariable:
     """Compute the vector-matrix product.

     Parameters
@@ -2970,23 +2969,23 @@ def vecmat(

     Notes
     -----
-    This is equivalent to `numpy.matmul` where the first argument is a vector,
-    but with more intuitive broadcasting rules. Broadcasting happens over all but
-    the last dimension of x1 and all but the last two dimensions of x2.
+    This is equivalent to `numpy.vecmat` and computes the vector-matrix product
+    with broadcasting over batch dimensions.

     Examples
     --------
     >>> import pytensor.tensor as pt
     >>> # Vector-matrix product
-    >>> v = pt.vector("v")  # shape (3,)
-    >>> A = pt.matrix("A")  # shape (3, 4)
+    >>> v = pt.vector("v", shape=(3,))  # shape (3,)
+    >>> A = pt.matrix("A", shape=(3, 4))  # shape (3, 4)
     >>> result = pt.vecmat(v, A)  # shape (4,)
-    >>> # Equivalent to numpy.matmul(v, A)
+    >>> # Equivalent to numpy.vecmat(v, A)
     >>>
     >>> # Batched vector-matrix product
-    >>> batched_v = pt.matrix("v")  # shape (2, 3)
-    >>> batched_A = pt.tensor3("A")  # shape (2, 3, 4)
+    >>> batched_v = pt.matrix("v", shape=(2, 3))  # shape (2, 3)
+    >>> batched_A = pt.tensor3("A", shape=(2, 3, 4))  # shape (2, 3, 4)
     >>> result = pt.vecmat(batched_v, batched_A)  # shape (2, 4)
+    >>> # Equivalent to numpy.vecmat(batched_v, batched_A)
     """
     x1 = as_tensor_variable(x1)
     x2 = as_tensor_variable(x2)
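Similarly, the batched `vecmat` example can be cross-checked against `np.matmul` with the vector promoted to a row. A sketch with real-valued inputs (so the conjugation `numpy.vecmat` applies to its first argument is a no-op), not part of the diff:

```python
import numpy as np
import pytensor
import pytensor.tensor as pt

batched_v = pt.matrix("v", shape=(2, 3))
batched_A = pt.tensor3("A", shape=(2, 3, 4))
result = pt.vecmat(batched_v, batched_A)  # expected shape: (2, 4)

f = pytensor.function([batched_v, batched_A], result)

rng = np.random.default_rng(2)
vv = rng.normal(size=(2, 3)).astype(batched_v.dtype)
Av = rng.normal(size=(2, 3, 4)).astype(batched_A.dtype)

# vecmat(v, A) should match matmul with v promoted to a row and squeezed back.
expected = np.matmul(vv[..., None, :], Av)[..., 0, :]
np.testing.assert_allclose(f(vv, Av), expected, rtol=1e-6)
```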