@@ -4122,6 +4122,154 @@ def matmul(x1: "ArrayLike", x2: "ArrayLike", dtype: Optional["DTypeLike"] = None
4122
4122
return out
4123
4123
4124
4124
4125
def vecdot(
    x1: TensorLike,
    x2: TensorLike,
    dtype: Optional["DTypeLike"] = None,
) -> TensorVariable:
    """Compute the vector dot product of two arrays.

    Parameters
    ----------
    x1, x2
        Input arrays with the same shape.
    dtype
        The desired data-type for the result. If not given, then the type will
        be determined as the minimum type required to hold the objects in the
        sequence.

    Returns
    -------
    TensorVariable
        The vector dot product of the inputs.

    Notes
    -----
    This is equivalent to `numpy.vecdot` and computes the dot product of
    vectors along the last axis of both inputs. Broadcasting is supported
    across all other dimensions.

    Examples
    --------
    >>> import pytensor.tensor as pt
    >>> # Vector dot product with shape (5,) inputs
    >>> x = pt.vector("x", shape=(5,))  # shape (5,)
    >>> y = pt.vector("y", shape=(5,))  # shape (5,)
    >>> z = pt.vecdot(x, y)  # scalar output
    >>> # Equivalent to numpy.vecdot(x, y)
    >>>
    >>> # With batched inputs of shape (3, 5)
    >>> x_batch = pt.matrix("x", shape=(3, 5))  # shape (3, 5)
    >>> y_batch = pt.matrix("y", shape=(3, 5))  # shape (3, 5)
    >>> z_batch = pt.vecdot(x_batch, y_batch)  # shape (3,)
    >>> # Equivalent to numpy.vecdot(x_batch, y_batch)
    """
    # Delegate the actual contraction to the Blockwise-d inner-product helper.
    product = _inner_prod(x1, x2)

    # Cast only when the caller explicitly requested an output dtype.
    if dtype is None:
        return product
    return product.astype(dtype)
4173
+
4174
+
4175
def matvec(
    x1: TensorLike, x2: TensorLike, dtype: Optional["DTypeLike"] = None
) -> TensorVariable:
    """Compute the matrix-vector product.

    Parameters
    ----------
    x1
        Input array for the matrix with shape (..., M, K).
    x2
        Input array for the vector with shape (..., K).
    dtype
        The desired data-type for the result. If not given, then the type will
        be determined as the minimum type required to hold the objects in the
        sequence.

    Returns
    -------
    TensorVariable
        The matrix-vector product with shape (..., M).

    Notes
    -----
    This is equivalent to `numpy.matvec` and computes the matrix-vector product
    with broadcasting over batch dimensions.

    Examples
    --------
    >>> import pytensor.tensor as pt
    >>> # Matrix-vector product
    >>> A = pt.matrix("A", shape=(3, 4))  # shape (3, 4)
    >>> v = pt.vector("v", shape=(4,))  # shape (4,)
    >>> result = pt.matvec(A, v)  # shape (3,)
    >>> # Equivalent to numpy.matvec(A, v)
    >>>
    >>> # Batched matrix-vector product
    >>> batched_A = pt.tensor3("A", shape=(2, 3, 4))  # shape (2, 3, 4)
    >>> batched_v = pt.matrix("v", shape=(2, 4))  # shape (2, 4)
    >>> result = pt.matvec(batched_A, batched_v)  # shape (2, 3)
    >>> # Equivalent to numpy.matvec(batched_A, batched_v)
    """
    # The batched matrix @ vector contraction lives in a file-level helper.
    result = _matrix_vec_prod(x1, x2)

    # Optional cast, applied only on explicit request.
    return result if dtype is None else result.astype(dtype)
4222
+
4223
+
4224
def vecmat(
    x1: TensorLike, x2: TensorLike, dtype: Optional["DTypeLike"] = None
) -> TensorVariable:
    """Compute the vector-matrix product.

    Parameters
    ----------
    x1
        Input array for the vector with shape (..., K).
    x2
        Input array for the matrix with shape (..., K, N).
    dtype
        The desired data-type for the result. If not given, then the type will
        be determined as the minimum type required to hold the objects in the
        sequence.

    Returns
    -------
    TensorVariable
        The vector-matrix product with shape (..., N).

    Notes
    -----
    This is equivalent to `numpy.vecmat` and computes the vector-matrix product
    with broadcasting over batch dimensions.

    Examples
    --------
    >>> import pytensor.tensor as pt
    >>> # Vector-matrix product
    >>> v = pt.vector("v", shape=(3,))  # shape (3,)
    >>> A = pt.matrix("A", shape=(3, 4))  # shape (3, 4)
    >>> result = pt.vecmat(v, A)  # shape (4,)
    >>> # Equivalent to numpy.vecmat(v, A)
    >>>
    >>> # Batched vector-matrix product
    >>> batched_v = pt.matrix("v", shape=(2, 3))  # shape (2, 3)
    >>> batched_A = pt.tensor3("A", shape=(2, 3, 4))  # shape (2, 3, 4)
    >>> result = pt.vecmat(batched_v, batched_A)  # shape (2, 4)
    >>> # Equivalent to numpy.vecmat(batched_v, batched_A)
    """
    # The batched vector @ matrix contraction lives in a file-level helper.
    result = _vec_matrix_prod(x1, x2)

    # Optional cast, applied only on explicit request.
    return result if dtype is None else result.astype(dtype)
4271
+
4272
+
4125
4273
@_vectorize_node .register (Dot )
4126
4274
def vectorize_node_dot (op , node , batched_x , batched_y ):
4127
4275
old_x , old_y = node .inputs
@@ -4218,6 +4366,9 @@ def nan_to_num(x, nan=0.0, posinf=None, neginf=None):
4218
4366
"max_and_argmax" ,
4219
4367
"max" ,
4220
4368
"matmul" ,
4369
+ "vecdot" ,
4370
+ "matvec" ,
4371
+ "vecmat" ,
4221
4372
"argmax" ,
4222
4373
"min" ,
4223
4374
"argmin" ,
0 commit comments