
Commit ceb300e

[0-size Tensor Job2 No.27] Add 0-size Tensor support for paddle.incubate.nn.functional.fused_linear (#74152)
* Fix
* Fix
1 parent af08923 commit ceb300e

1 file changed: +19 −0

test/legacy_test/test_fused_matmul_bias.py

Lines changed: 19 additions & 0 deletions
@@ -163,5 +163,24 @@ def test_static_graph(self):
         paddle.disable_static()
 
 
+@unittest.skipIf(
+    not is_fused_matmul_bias_supported(),
+    "fused_gemm_epilogue is only supported when CUDA version >= 11.6",
+)
+class TestFusedLinear_ZeroSize(unittest.TestCase):
+    def check_fused_linear(self, transpose):
+        x = paddle.randn([0, 40])
+        x.stop_gradient = False
+        linear = FusedLinear(40, 50, transpose_weight=transpose)
+        y1 = linear(x)
+        y2 = fused_linear(x, linear.weight, linear.bias, transpose)
+        np.testing.assert_array_equal(y1.numpy(), y2.numpy())
+        y2.sum().backward()
+        np.testing.assert_allclose(x.grad.shape, x.shape)
+
+    def test_non_transpose(self):
+        self.check_fused_linear(False)
+
+
 if __name__ == "__main__":
     unittest.main()
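
For reference, a minimal standalone sketch of the behavior this commit covers: calling the fused linear path on a zero-size input. This is not part of the commit itself; it assumes a CUDA build where fused_gemm_epilogue is available (CUDA >= 11.6), matching the skip condition in the test above.

# Minimal sketch of the zero-size case exercised by the new test.
# Assumes a CUDA build with fused_gemm_epilogue support (CUDA >= 11.6).
import numpy as np
import paddle
from paddle.incubate.nn import FusedLinear
from paddle.incubate.nn.functional import fused_linear

x = paddle.randn([0, 40])      # zero-size batch: 0 rows, 40 features
x.stop_gradient = False

linear = FusedLinear(40, 50)
y1 = linear(x)                                    # layer forward -> shape [0, 50]
y2 = fused_linear(x, linear.weight, linear.bias)  # functional form, same result

np.testing.assert_array_equal(y1.numpy(), y2.numpy())
y2.sum().backward()            # backward should also handle the empty batch
print(x.grad.shape)            # expected: [0, 40]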
