-
-
Notifications
You must be signed in to change notification settings - Fork 59
/
Copy pathtest_module.py
134 lines (117 loc) · 3.86 KB
/
test_module.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
from typing import Iterable, Union
import pytest
import torch
import torch.nn.functional as f
from fft_conv_pytorch.fft_conv import _FFTConv
from fft_conv_pytorch.utils import _assert_almost_equal, _gcd
@pytest.mark.parametrize("in_channels", [2, 3])
@pytest.mark.parametrize("out_channels", [2, 3])
@pytest.mark.parametrize("groups", [1, 2, 3])
@pytest.mark.parametrize("kernel_size", [2, 3])
@pytest.mark.parametrize("padding", [0, 1])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("dilation", [1, 2])
@pytest.mark.parametrize("bias", [True])
@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("input_size", [7, 8])
def test_fft_conv_module(
    in_channels: int,
    out_channels: int,
    kernel_size: Union[int, Iterable[int]],
    padding: Union[int, Iterable[int]],
    stride: Union[int, Iterable[int]],
    dilation: Union[int, Iterable[int]],
    groups: int,
    bias: bool,
    ndim: int,
    input_size: int,
):
    """Forward pass of ``_FFTConv`` matches ``torch.nn.functional.conv{ndim}d``
    on a random signal, across a grid of layer hyperparameters."""
    if padding == "same" and (stride != 1 or dilation != 1):
        # padding='same' is not compatible with strided convolutions.
        # Skip (not silently return) so the report reflects the untested case.
        pytest.skip("padding='same' is incompatible with stride/dilation > 1")

    torch_conv = getattr(f, f"conv{ndim}d")
    # Clamp 'groups' so it evenly divides both channel counts, as required
    # by grouped convolution.
    groups = _gcd(in_channels, _gcd(out_channels, groups))
    fft_conv_layer = _FFTConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        stride=stride,
        dilation=dilation,
        groups=groups,
        bias=bias,
        ndim=ndim,
    )

    batch_size = 2  # TODO: Make this non-constant?
    dims = ndim * [input_size]
    signal = torch.randn(batch_size, in_channels, *dims)
    kwargs = dict(
        padding=padding,
        stride=stride,
        dilation=dilation,
        groups=groups,
    )

    y0 = fft_conv_layer(signal)
    # Reuse the layer's own parameters so both paths share identical
    # weight/bias tensors. (Local names avoid shadowing the 'bias' flag.)
    y1 = torch_conv(
        signal, fft_conv_layer.weight, bias=fft_conv_layer.bias, **kwargs
    )
    _assert_almost_equal(y0, y1)
@pytest.mark.parametrize("in_channels", [2, 3])
@pytest.mark.parametrize("out_channels", [2, 3])
@pytest.mark.parametrize("groups", [1, 2, 3])
@pytest.mark.parametrize("kernel_size", [2, 3])
@pytest.mark.parametrize("padding", [0, 1])
@pytest.mark.parametrize("stride", [1, 2])
@pytest.mark.parametrize("dilation", [1, 2])
@pytest.mark.parametrize("bias", [True])
@pytest.mark.parametrize("ndim", [1, 2, 3])
@pytest.mark.parametrize("input_size", [7, 8])
def test_fft_conv_backward_module(
    in_channels: int,
    out_channels: int,
    kernel_size: Union[int, Iterable[int]],
    padding: Union[int, Iterable[int]],
    stride: Union[int, Iterable[int]],
    dilation: Union[int, Iterable[int]],
    groups: int,
    bias: bool,
    ndim: int,
    input_size: int,
):
    """Gradients of ``_FFTConv`` w.r.t. weight and bias match those of
    ``torch.nn.functional.conv{ndim}d`` under a sum() pseudo-loss."""
    if padding == "same" and (stride != 1 or dilation != 1):
        # padding='same' is not compatible with strided convolutions.
        # Skip (not silently return) so the report reflects the untested case.
        pytest.skip("padding='same' is incompatible with stride/dilation > 1")

    torch_conv = getattr(f, f"conv{ndim}d")
    # Clamp 'groups' so it evenly divides both channel counts, as required
    # by grouped convolution.
    groups = _gcd(in_channels, _gcd(out_channels, groups))
    fft_conv_layer = _FFTConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        stride=stride,
        dilation=dilation,
        groups=groups,
        bias=bias,
        ndim=ndim,
    )

    batch_size = 2  # TODO: Make this non-constant?
    dims = ndim * [input_size]
    signal = torch.randn(batch_size, in_channels, *dims)

    # Independent leaf copies of the parameters for the reference conv, so
    # each path accumulates its own .grad.
    w0 = fft_conv_layer.weight
    w1 = w0.detach().clone().requires_grad_()
    b0 = fft_conv_layer.bias
    b1 = b0.detach().clone().requires_grad_() if bias else None
    kwargs = dict(
        padding=padding,
        stride=stride,
        dilation=dilation,
        groups=groups,
    )

    y0 = fft_conv_layer(signal)
    y1 = torch_conv(signal, w1, bias=b1, **kwargs)

    # Compute pseudo-loss and gradient for both implementations.
    y0.sum().backward()
    y1.sum().backward()

    _assert_almost_equal(w0.grad, w1.grad)
    if bias:
        _assert_almost_equal(b0.grad, b1.grad)