
Implement Dot and BatchedDot in PyTensor #878

Merged
merged 13 commits on Jul 18, 2024
Changes from 9 commits
1 change: 1 addition & 0 deletions pytensor/link/__init__.py
@@ -0,0 +1 @@
from pytensor.link.pytorch.linker import PytorchLinker
3 changes: 3 additions & 0 deletions pytensor/link/pytorch/dispatch/__init__.py
@@ -2,8 +2,11 @@
from pytensor.link.pytorch.dispatch.basic import pytorch_funcify, pytorch_typify

# # Load dispatch specializations
import pytensor.link.pytorch.dispatch.blas
import pytensor.link.pytorch.dispatch.scalar
import pytensor.link.pytorch.dispatch.elemwise
import pytensor.link.pytorch.dispatch.math
import pytensor.link.pytorch.dispatch.extra_ops
import pytensor.link.pytorch.dispatch.sort

# isort: on
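
For background, these are side-effect imports: pytorch_funcify follows the singledispatch pattern (as in pytensor's JAX backend), so importing a dispatch module runs its @pytorch_funcify.register(...) decorators and attaches a converter for each Op. A minimal sketch of the pattern, illustrative rather than the actual implementation:

from functools import singledispatch


@singledispatch
def pytorch_funcify(op, **kwargs):
    # Fallback when no conversion has been registered for this Op type
    raise NotImplementedError(f"No PyTorch conversion for: {op}")


# Importing pytensor.link.pytorch.dispatch.blas then executes its
# @pytorch_funcify.register(BatchedDot) decorator, which is what makes
# BatchedDot convertible to a torch callable.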
14 changes: 14 additions & 0 deletions pytensor/link/pytorch/dispatch/blas.py
@@ -0,0 +1,14 @@
import torch

from pytensor.link.pytorch.dispatch import pytorch_funcify
from pytensor.tensor.blas import BatchedDot


@pytorch_funcify.register(BatchedDot)
def pytorch_funcify_BatchedDot(op, **kwargs):
def batched_dot(a, b):
if a.shape[0] != b.shape[0]:
raise TypeError("Shapes must match in the 0-th dimension")
return torch.bmm(a, b)

return batched_dot
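
For context, torch.bmm is a batched matrix multiply: given tensors of shape (B, n, m) and (B, m, p), it computes B pairwise matrix products and returns shape (B, n, p), which matches BatchedDot's 3D case. A standalone sketch of that behavior (assuming only that torch is installed):

import torch

a = torch.randn(10, 5, 3)
b = torch.randn(10, 3, 2)

# One matrix product per batch element: (10, 5, 3) @ (10, 3, 2) -> (10, 5, 2)
out = torch.bmm(a, b)
assert out.shape == (10, 5, 2)

# Reference computation, multiplying each batch element separately
ref = torch.stack([a[i] @ b[i] for i in range(a.shape[0])])
assert torch.allclose(out, ref)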
12 changes: 12 additions & 0 deletions pytensor/link/pytorch/dispatch/math.py
@@ -0,0 +1,12 @@
import torch

from pytensor.link.pytorch.dispatch import pytorch_funcify
from pytensor.tensor.math import Dot


@pytorch_funcify.register(Dot)
def pytorch_funcify_Dot(op, **kwargs):
def dot(x, y):
return torch.matmul(x, y)

return dot
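
For context, torch.matmul follows NumPy's matmul dimension rules, which is what lets a single converter cover the vector and matrix cases of Dot exercised by the tests below. A small sketch (assuming only that torch is installed):

import torch

x = torch.tensor([3.0, 4.0])
A = torch.tensor([[6.0, 3.0], [3.0, 0.0]])

torch.matmul(x, x)  # 1D @ 1D -> 0-d tensor (dot product): 25.0
torch.matmul(A, x)  # 2D @ 1D -> 1D tensor (matrix-vector product)
torch.matmul(A, A)  # 2D @ 2D -> 2D tensor (matrix-matrix product)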
36 changes: 36 additions & 0 deletions tests/link/pytorch/test_blas.py
@@ -0,0 +1,36 @@
import numpy as np
import pytest

from pytensor.compile.function import function
from pytensor.compile.mode import Mode
from pytensor.configdefaults import config
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.op import get_test_value
from pytensor.graph.rewriting.db import RewriteDatabaseQuery
from pytensor.link.pytorch import PytorchLinker
from pytensor.tensor import blas as pt_blas
from pytensor.tensor.type import tensor3
from tests.link.pytorch.test_basic import compare_pytorch_and_py


def test_pytorch_BatchedDot():
# tensor3 . tensor3
a = tensor3("a")
a.tag.test_value = (
np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
)
Review comment (Member):

We are getting rid of the test_value machinery. Just pass these directly to the test function; there's no point in putting them in the tag only to retrieve them again.
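
A sketch of what that suggestion amounts to, reusing the imports already in this diff (illustrative only, not part of the change):

a = tensor3("a")
b = tensor3("b")
a_val = np.linspace(-1, 1, 10 * 5 * 3).astype(config.floatX).reshape((10, 5, 3))
b_val = np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
out = pt_blas.BatchedDot()(a, b)
fgraph = FunctionGraph([a, b], [out])
# Pass the arrays directly instead of stashing them in tag.test_value
compare_pytorch_and_py(fgraph, [a_val, b_val])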

b = tensor3("b")
b.tag.test_value = (
np.linspace(1, -1, 10 * 3 * 2).astype(config.floatX).reshape((10, 3, 2))
)
out = pt_blas.BatchedDot()(a, b)
fgraph = FunctionGraph([a, b], [out])
compare_pytorch_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

# A dimension mismatch should raise a TypeError for compatibility
inputs = [get_test_value(a)[:-1], get_test_value(b)]
opts = RewriteDatabaseQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
pytorch_mode = Mode(PytorchLinker(), opts)
pytensor_pytorch_fn = function(fgraph.inputs, fgraph.outputs, mode=pytorch_mode)
Review comment (Member):

This does the same?

Suggested change
- opts = RewriteDatabaseQuery(include=[None], exclude=["cxx_only", "BlasOpt"])
- pytorch_mode = Mode(PytorchLinker(), opts)
- pytensor_pytorch_fn = function(fgraph.inputs, fgraph.outputs, mode=pytorch_mode)
+ pytorch_mode_no_rewrites = Mode(PytorchLinker(), None)
+ pytensor_pytorch_fn = function(fgraph.inputs, fgraph.outputs, mode=pytorch_mode_no_rewrites)

Review comment (Member):

But if I am not mistaken, compare_pytorch_and_py returns the torch function, so you could just reuse it?
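
If compare_pytorch_and_py does return the compiled torch function (an assumption here, worth checking against tests/link/pytorch/test_basic.py), the mismatch check could reuse it rather than rebuilding a Mode and recompiling. Roughly:

# Assumes compare_pytorch_and_py returns the compiled function, as the
# reviewer suggests; the exact return value is unverified here.
pytensor_pytorch_fn = compare_pytorch_and_py(fgraph, [a_val, b_val])
with pytest.raises(TypeError):
    pytensor_pytorch_fn(a_val[:-1], b_val)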

with pytest.raises(TypeError):
pytensor_pytorch_fn(*inputs)
30 changes: 30 additions & 0 deletions tests/link/pytorch/test_math.py
@@ -0,0 +1,30 @@
import numpy as np

from pytensor.configdefaults import config
from pytensor.graph.fg import FunctionGraph
from pytensor.graph.op import get_test_value
from pytensor.tensor.type import matrix, scalar, vector
from tests.link.pytorch.test_basic import compare_pytorch_and_py


def test_pytorch_dot():
y = vector("y")
y.tag.test_value = np.r_[1.0, 2.0].astype(config.floatX)
x = vector("x")
x.tag.test_value = np.r_[3.0, 4.0].astype(config.floatX)
A = matrix("A")
A.tag.test_value = np.array([[6, 3], [3, 0]], dtype=config.floatX)
alpha = scalar("alpha")
alpha.tag.test_value = np.array(3.0, dtype=config.floatX)
beta = scalar("beta")
beta.tag.test_value = np.array(5.0, dtype=config.floatX)

# 2D * 2D
out = A.dot(A * alpha) + beta * A
fgraph = FunctionGraph([A, alpha, beta], [out])
compare_pytorch_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])

# 1D * 2D and 1D * 1D
out = y.dot(alpha * A).dot(x) + beta * y
fgraph = FunctionGraph([y, x, A, alpha, beta], [out])
compare_pytorch_and_py(fgraph, [get_test_value(i) for i in fgraph.inputs])