@@ -31,13 +31,20 @@ class TestPooling:
3131 ],
3232 )
3333 @pytest .mark .parametrize ("dtype" , [tp .float32 , tp .float16 , tp .int8 ])
    @pytest.mark.parametrize("pool_type", ["max", "avg"])
    def test_pool_2d(self, kernel_dims, stride, padding, dtype, pool_type):
        """Check tripy 2D max/avg pooling against the equivalent torch.nn layer.

        Builds a 1x1x8x8 ramp tensor, pools it with tripy and with torch,
        and asserts the results (and shapes) match.
        """
        inp_tp = tp.reshape(tp.arange(64, dtype=dtype), (1, 1, 8, 8))
        # tripy padding is ((before, after), ...) per spatial dim; torch takes a
        # single value per dim, so only the leading pad of each pair is used here
        # (assumes symmetric padding in the parametrized cases — confirm above).
        torch_padding = (padding[0][0], padding[1][0])

        if pool_type == "max":
            out = tp.maxpool(inp_tp, kernel_dims=kernel_dims, stride=stride, padding=padding)
            pool_torch = torch.nn.MaxPool2d(kernel_size=kernel_dims, stride=stride, padding=torch_padding)
        elif pool_type == "avg":
            # Skip stops execution here; the lines below are intentionally kept
            # dead so the test activates once the linked issue is fixed.
            pytest.skip("https://github.com/NVIDIA/TensorRT-Incubator/issues/237: Average pooling is not functional.")
            out = tp.avgpool(inp_tp, kernel_dims=kernel_dims, stride=stride, padding=padding)
            pool_torch = torch.nn.AvgPool2d(kernel_size=kernel_dims, stride=stride, padding=torch_padding)

        # Round-trip via DLPack to compare tripy output with a CPU torch reference.
        out_torch = torch.from_dlpack(out).to("cpu")
        expected = pool_torch(torch.from_dlpack(inp_tp).to("cpu"))
        assert torch.allclose(expected, out_torch)
        assert expected.shape == out_torch.shape
@@ -49,13 +56,21 @@ def test_maxpool_2d(self, kernel_dims, stride, padding, dtype):
4956 ],
5057 )
5158 @pytest .mark .parametrize ("dtype" , [tp .float32 , tp .float16 ])
    @pytest.mark.parametrize("pool_type", ["max", "avg"])
    def test_pool_3d(self, kernel_dims, stride, padding, dtype, pool_type):
        """Check tripy 3D max/avg pooling against the equivalent torch.nn layer.

        Builds a 1x1x8x8x8 ramp tensor, pools it with tripy and with torch,
        and asserts the results (and shapes) match.
        """
        inp_tp = tp.reshape(tp.arange(512, dtype=dtype), (1, 1, 8, 8, 8))
        # tripy padding is ((before, after), ...) per spatial dim; torch takes a
        # single value per dim, so only the leading pad of each pair is used here
        # (assumes symmetric padding in the parametrized cases — confirm above).
        torch_padding = (padding[0][0], padding[1][0], padding[2][0])

        if pool_type == "max":
            out = tp.maxpool(inp_tp, kernel_dims=kernel_dims, stride=stride, padding=padding)
            pool_torch = torch.nn.MaxPool3d(kernel_size=kernel_dims, stride=stride, padding=torch_padding)
        elif pool_type == "avg":
            # Skip stops execution here; the lines below are intentionally kept
            # dead so the test activates once the linked issue is fixed.
            pytest.skip("https://github.com/NVIDIA/TensorRT-Incubator/issues/237: Average pooling is not functional.")
            out = tp.avgpool(inp_tp, kernel_dims=kernel_dims, stride=stride, padding=padding)
            pool_torch = torch.nn.AvgPool3d(kernel_size=kernel_dims, stride=stride, padding=torch_padding)

        # Round-trip via DLPack to compare tripy output with a CPU torch reference.
        out_torch = torch.from_dlpack(out).to("cpu")

        expected = pool_torch(torch.from_dlpack(inp_tp).to("cpu"))
        assert torch.allclose(expected, out_torch)
        assert expected.shape == out_torch.shape
0 commit comments