#
# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest

import cupy as cp
import numpy as np
import torch

import tripy as tp
from tripy.utils import are_strides_equivalent


class TestStride:
    def assert_error_message(self, excinfo, tensor_type, expected_suggestion):
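        """Check that the error mentions non-canonical strides and suggests the framework-specific fix."""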
        error_message = str(excinfo.value)
        assert "Non-canonical strides are not supported for Tripy tensors." in error_message
        assert f"For {tensor_type}, use {expected_suggestion}" in error_message

    def tripy_byte_order_strides(self, data):
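        """Convert Tripy's element-wise strides into byte strides for comparison with NumPy/CuPy `strides`."""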
        return tuple(s * data.dtype.itemsize for s in tp.Tensor(data).stride())

    def test_non_canonical_stride(self):
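        """Transposed (non-contiguous) inputs must be rejected with a helpful message; contiguous copies round-trip."""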
        # PyTorch test
        t_torch = torch.arange(12, dtype=torch.float32).reshape(3, 4)
        a_torch = t_torch.transpose(0, 1)
        with pytest.raises(tp.TripyException) as excinfo:
            tp.Tensor(a_torch)
        self.assert_error_message(excinfo, "PyTorch Tensor", "tensor.contiguous() or tensor.clone()")

        assert tp.Tensor(a_torch.contiguous()).stride() == a_torch.contiguous().stride()
        assert (
            tp.Tensor(a_torch.clone(memory_format=torch.contiguous_format)).stride()
            == a_torch.clone(memory_format=torch.contiguous_format).stride()
        )

        # CuPy test
        t_cupy = cp.arange(12, dtype=cp.float32).reshape(3, 4)
        a_cupy = t_cupy.transpose(1, 0)
        with pytest.raises(tp.TripyException) as excinfo:
            tp.Tensor(a_cupy)
        self.assert_error_message(excinfo, "CuPy Array", "cp.ascontiguousarray(array) or array.copy(order='C')")

        # The `strides` attribute of CuPy and NumPy arrays is expressed in bytes.
        # However, when these arrays are converted to DLPack tensors (which Tripy uses internally),
        # the strides are expressed in elements, so Tripy's `get_canonical_stride` method produces
        # element-wise strides that match the expected strides of a memref.
        # Multiply Tripy's strides by the item size before comparing them to the original CuPy or NumPy strides.
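        # For example, a C-contiguous float32 array of shape (3, 4) has element strides (4, 1)
        # and byte strides (16, 4), since each float32 element occupies 4 bytes.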
        assert self.tripy_byte_order_strides(cp.ascontiguousarray(a_cupy)) == cp.ascontiguousarray(a_cupy).strides
        assert self.tripy_byte_order_strides(a_cupy.copy(order="C")) == a_cupy.copy(order="C").strides

        # NumPy test
        t_numpy = np.arange(12, dtype=np.float32).reshape(3, 4)
        a_numpy = t_numpy.transpose(1, 0)
        with pytest.raises(tp.TripyException) as excinfo:
            tp.Tensor(a_numpy)
        self.assert_error_message(excinfo, "NumPy Array", "np.ascontiguousarray(array) or array.copy(order='C')")

        assert self.tripy_byte_order_strides(np.ascontiguousarray(a_numpy)) == np.ascontiguousarray(a_numpy).strides
        assert self.tripy_byte_order_strides(a_numpy.copy(order="C")) == a_numpy.copy(order="C").strides

        # Test for canonical strides (should not raise an exception)
        assert tp.Tensor(t_torch).stride() == t_torch.stride()
        assert self.tripy_byte_order_strides(t_cupy) == t_cupy.strides
        assert self.tripy_byte_order_strides(t_numpy) == t_numpy.strides

    @pytest.mark.parametrize(
        "shape",
        [
            (0,),
            (0, 3),
            (2, 0, 4),
            (3, 0, 0, 5),
            (1,),
            (1, 3),
            (2, 1, 4),
            (1, 3, 1, 5),
            (3, 1, 1, 1),
            (0, 1, 3),
            (2, 0, 1, 4),
            (1, 0, 3, 1, 5),
            (3, 1, 0, 2, 1),
        ],
    )
    def test_tensor_stride(self, shape):
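        """Strides must be equivalent to PyTorch's for shapes with zero-sized or singleton dimensions."""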
        torch_tensor = torch.empty(shape)
        torch_stride = torch_tensor.stride()
        tripy_stride = tp.Tensor(torch_tensor).stride()

        assert are_strides_equivalent(
            shape, tripy_stride, torch_stride
        ), f"Mismatch for shape {shape}. Calculated: {tripy_stride}, Torch: {torch_stride}"