Skip to content

Commit

Permalink
Improve Functional convolution tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Alejandro Gaston Alvarez Franceschi committed Jan 9, 2024
1 parent 9be8f1b commit f917759
Showing 1 changed file with 112 additions and 20 deletions.
132 changes: 112 additions & 20 deletions coremltools/converters/mil/frontend/torch/test/test_torch_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -1694,21 +1694,25 @@ def test_convolution1d(
self,
compute_unit,
backend,
padding,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
):
class DynamicConv(nn.Module):
if padding == "same" and stride != 1:
# configuration not supported
return

class FunctionalConv1D(nn.Module):
def forward(self, input_data, weights):
return nn.functional.conv1d(
input_data, weights, stride=stride, padding=padding
input_data, weights, stride=stride, padding=padding, groups=groups
)

model = DynamicConv()
model = FunctionalConv1D()
input_shape = [
(1, in_channels, width),
(out_channels, int(in_channels / groups), kernel_size),
Expand All @@ -1725,29 +1729,30 @@ def forward(self, input_data, weights):
[
"compute_unit",
"backend",
"padding",
"height",
"width",
"in_channels",
"out_channels",
"kernel_size",
"stride",
"padding",
]
),
[
(compute_unit, backend, *param)
for compute_unit, backend, param in itertools.product(
(compute_unit, backend, padding, *param)
for compute_unit, backend, padding, param in itertools.product(
compute_units,
backends,
["same", "valid", 1, 0],
[
(5, 3, 1, 1, 1, 2, 0),
(3, 3, 1, 1, 1, 2, 1),
(4, 3, 3, 3, 1, 2, 0),
(7, 3, 3, 3, 1, 3, 0),
(5, 5, 3, 3, 2, 1, 0),
(3, 5, 3, 3, 1, 3, 0),
(3, 5, 3, 3, 1, 3, 1),
(7, 5, 3, 3, 2, 3, 1),
(5, 3, 1, 1, 1, 2),
(3, 3, 1, 1, 1, 2),
(4, 3, 3, 3, 1, 2),
(7, 3, 3, 3, 1, 3),
(5, 5, 3, 3, 2, 1),
(3, 5, 3, 3, 1, 3),
(3, 5, 3, 3, 1, 3),
(7, 5, 3, 3, 2, 3),
],
)
],
Expand All @@ -1756,31 +1761,118 @@ def test_convolution2d(
self,
compute_unit,
backend,
padding,
height,
width,
in_channels,
out_channels,
kernel_size,
stride,
padding,
groups=1,
):
class DynamicConv(nn.Module):
if padding == "same" and stride != 1:
# configuration not supported
return

class FunctionalConv2D(nn.Module):
def forward(self, input_data, weights):
return nn.functional.conv2d(
input_data, weights, stride=stride, padding=padding
input_data, weights, stride=stride, padding=padding, groups=groups
)

model = DynamicConv()
model = FunctionalConv2D()

input_shape = [
(1, in_channels, height, width),
(out_channels, int(in_channels / groups), kernel_size, kernel_size),
]
self.run_compare_torch(
input_shape, model, backend=backend, compute_unit=compute_unit
input_shape,
model,
backend=backend,
compute_unit=compute_unit,
)

@pytest.mark.parametrize(
    "compute_unit,backend,padding,depth,height,width,"
    "in_channels,out_channels,kernel_size,stride",
    [
        (cu, be, pad, *dims)
        for cu, be, pad, dims in itertools.product(
            compute_units,
            backends,
            ["same", "valid", 1, 0],
            [
                # (depth, height, width, in_channels, out_channels, kernel_size, stride)
                (5, 3, 2, 1, 1, 1, 2),
                (3, 3, 1, 1, 1, 1, 2),
                (4, 3, 3, 3, 3, 1, 2),
                (7, 3, 4, 3, 3, 1, 3),
                (5, 5, 3, 3, 3, 2, 1),
                (3, 5, 1, 3, 3, 1, 3),
                (3, 5, 4, 3, 3, 1, 3),
                (7, 5, 6, 3, 3, 2, 3),
            ],
        )
    ],
)
def test_convolution3d(
    self,
    compute_unit,
    backend,
    padding,
    depth,
    height,
    width,
    in_channels,
    out_channels,
    kernel_size,
    stride,
    groups=1,
):
    """Exercise functional conv3d with weights supplied at runtime.

    The weights are passed as a second model input (not a module
    parameter), so the converter sees a dynamic-weight convolution.
    The neuralnetwork backend rejects that configuration, and the test
    asserts the rejection is a ValueError with the expected message.
    """
    if stride != 1 and padding == "same":
        # configuration not supported
        return

    class DynamicWeightConv3d(nn.Module):
        def forward(self, x, w):
            return nn.functional.conv3d(x, w, stride=stride, padding=padding, groups=groups)

    model = DynamicWeightConv3d()
    shapes = [
        # activation: NCDHW
        (1, in_channels, depth, height, width),
        # weights: (out_channels, in_channels / groups, kD, kH, kW)
        (out_channels, in_channels // groups, kernel_size, kernel_size, kernel_size),
    ]

    if "neuralnetwork" in backend:
        # Dynamic weights are unsupported for 3D convolution on this
        # backend; conversion must fail with an explicit error.
        with pytest.raises(
            ValueError, match="3D Convolution doesn't support dynamic weights."
        ):
            self.run_compare_torch(
                shapes,
                model,
                backend=backend,
                compute_unit=compute_unit,
            )
    else:
        self.run_compare_torch(
            shapes,
            model,
            backend=backend,
            compute_unit=compute_unit,
        )


class TestConvTranspose(TorchBaseTest):
@pytest.mark.parametrize(
Expand Down

0 comments on commit f917759

Please sign in to comment.