diff --git a/test/infinicore/ops/abs.py b/test/infinicore/ops/abs.py new file mode 100644 index 000000000..abe965463 --- /dev/null +++ b/test/infinicore/ops/abs.py @@ -0,0 +1,99 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None) +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0.0, "rtol": 0.0}, + infinicore.float32: {"atol": 0.0, "rtol": 0.0}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + # Out-of-place + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Abs - OUT_OF_PLACE", + ) + ) + + # Explicit out + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Abs - INPLACE(out)", + ) + ) + + # In-place on input (out=0) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="Abs - INPLACE(a)", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Abs operator test with simplified implementation""" + + def __init__(self): + super().__init__("Abs") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.abs(*args, **kwargs) + + # def 
infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.abs(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/acos.py b/test/infinicore/ops/acos.py new file mode 100644 index 000000000..90640a328 --- /dev/null +++ b/test/infinicore/ops/acos.py @@ -0,0 +1,108 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="acos - OUT_OF_PLACE", + ) + ) + + 
test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="acos - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="acos - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Acos operator test with simplified implementation""" + + def __init__(self): + super().__init__("Acos") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.acos(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.acos(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/acosh.py b/test/infinicore/ops/acosh.py new file mode 100644 index 000000000..1527e5273 --- /dev/null +++ b/test/infinicore/ops/acosh.py @@ -0,0 +1,109 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# Note: acosh domain is [1, inf); tests should use valid ranges when generating tensors. 
+# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="acosh - OUT_OF_PLACE", + ) + ) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="acosh - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="acosh - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Acosh operator test with simplified implementation""" + + def __init__(self): + super().__init__("Acosh") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.acosh(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.acosh(*args, **kwargs) + + +def main(): + """Main entry 
point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/adaptive_avg_pool1d.py b/test/infinicore/ops/adaptive_avg_pool1d.py new file mode 100644 index 000000000..bb72f9712 --- /dev/null +++ b/test/infinicore/ops/adaptive_avg_pool1d.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, output_size_or_None) + +_TEST_CASES_DATA = [ + ((2, 3, 16), None, 1), + ((2, 3, 15), (45, 15, 1), 5), + ((1, 4, 64), None, 8), + ((4, 2, 7), (14, 7, 1), 3), + ((3, 3, 32), None, 16), + ((2, 8, 9), (72, 9, 1), 4), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, out_size = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"output_size": out_size} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="AdaptiveAvgPool1d - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """AdaptiveAvgPool1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("AdaptiveAvgPool1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.adaptive_avg_pool1d(*args, **kwargs) + + # def 
infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.adaptive_avg_pool1d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/adaptive_avg_pool2d.py b/test/infinicore/ops/adaptive_avg_pool2d.py new file mode 100644 index 000000000..b91812471 --- /dev/null +++ b/test/infinicore/ops/adaptive_avg_pool2d.py @@ -0,0 +1,79 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, output_size_or_None) +# adaptive_avg_pool2d maps input HxW to target output size (h, w) + +_TEST_CASES_DATA = [ + ((2, 3, 16, 16), None, (1, 1)), + ((2, 4, 15, 17), (204, 51, 17, 1), (5, 6)), + ((1, 8, 32, 32), None, (8, 8)), + ((4, 2, 7, 9), (126, 63, 9, 1), (3, 4)), + ((3, 3, 31, 29), None, (16, 15)), + ((2, 8, 9, 11), (792, 99, 11, 1), (4, 5)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, out_size = data + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"output_size": out_size} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="AdaptiveAvgPool2d - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + 
"""AdaptiveAvgPool2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("AdaptiveAvgPool2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.adaptive_avg_pool2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.adaptive_avg_pool2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/adaptive_avg_pool3d.py b/test/infinicore/ops/adaptive_avg_pool3d.py new file mode 100644 index 000000000..17af51455 --- /dev/null +++ b/test/infinicore/ops/adaptive_avg_pool3d.py @@ -0,0 +1,79 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, output_size_or_None) +# adaptive_avg_pool3d maps input D x H x W to target output size (d, h, w) + +_TEST_CASES_DATA = [ + ((2, 3, 8, 8, 8), None, (1, 1, 1)), + ((2, 4, 7, 9, 6), (2016, 504, 72, 8, 1), (3, 3, 2)), + ((1, 8, 16, 16, 16), None, (4, 4, 4)), + ((2, 2, 5, 7, 9), (1260, 630, 126, 18, 2), (2, 3, 4)), + ((3, 3, 10, 9, 8), None, (5, 5, 4)), + ((2, 6, 9, 11, 13), (5148, 858, 13, 1, 1), (3, 4, 5)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, out_size = data + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 
1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"output_size": out_size} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="AdaptiveAvgPool3d - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """AdaptiveAvgPool3d operator test with simplified implementation""" + + def __init__(self): + super().__init__("AdaptiveAvgPool3d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.adaptive_avg_pool3d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.adaptive_avg_pool3d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/adaptive_max_pool1d.py b/test/infinicore/ops/adaptive_max_pool1d.py new file mode 100644 index 000000000..9a6442cb0 --- /dev/null +++ b/test/infinicore/ops/adaptive_max_pool1d.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, output_size_or_None) + +_TEST_CASES_DATA = [ + ((2, 3, 16), None, 1), + ((2, 3, 15), (45, 15, 1), 5), + ((1, 4, 64), None, 8), + ((4, 2, 7), (14, 7, 1), 3), + ((3, 3, 32), None, 16), + ((2, 8, 9), (72, 9, 1), 4), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def 
parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, out_size = data + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"output_size": out_size} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="AdaptiveMaxPool1d - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """AdaptiveMaxPool1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("AdaptiveMaxPool1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.adaptive_max_pool1d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.adaptive_max_pool1d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/adaptive_max_pool2d.py b/test/infinicore/ops/adaptive_max_pool2d.py new file mode 100644 index 000000000..3632255ea --- /dev/null +++ b/test/infinicore/ops/adaptive_max_pool2d.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, output_size_or_None) + +_TEST_CASES_DATA = [ + ((2, 3, 16, 16), None, (1, 1)), + ((2, 4, 15, 17), (204, 51, 17, 1), (5, 6)), + ((1, 8, 32, 32), None, (8, 8)), + ((4, 2, 7, 9), (126, 63, 9, 1), (3, 4)), + ((3, 3, 31, 29), None, (16, 15)), + 
((2, 8, 9, 11), (792, 99, 11, 1), (4, 5)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, out_size = data + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"output_size": out_size} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="AdaptiveMaxPool2d - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """AdaptiveMaxPool2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("AdaptiveMaxPool2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.adaptive_max_pool2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.adaptive_max_pool2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/add.py b/test/infinicore/ops/add.py index a3d17f905..6d0be7bc0 100644 --- a/test/infinicore/ops/add.py +++ b/test/infinicore/ops/add.py @@ -86,7 +86,7 @@ def parse_test_cases(): test_cases.append( TestCase( inputs=[a_spec, b_spec], - kwargs=None, + kwargs={}, output_spec=c_spec, # Specify the output tensor spec comparison_target="out", tolerance=tolerance, diff --git a/test/infinicore/ops/addbmm.py b/test/infinicore/ops/addbmm.py new file mode 100644 index 000000000..31dd71506 --- /dev/null +++ b/test/infinicore/ops/addbmm.py 
@@ -0,0 +1,117 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (input_shape, batch1_shape, batch2_shape, input_strides_or_None, batch1_strides_or_None, batch2_strides_or_None, beta_or_None, alpha_or_None) +# addbmm(input, batch1, batch2, beta=1, alpha=1, out=None) + +_TEST_CASES_DATA = [ + # small basic (shapes must satisfy: input (M,N), batch1 (B,M,K), batch2 (B,K,N)) + ((3, 5), (2, 3, 4), (2, 4, 5), None, None, None, None, None), + # larger + ((8, 8), (4, 8, 8), (4, 8, 8), None, None, None, 0.5, 2.0), + # strided input + ((5, 7), (2, 5, 6), (2, 6, 7), (30, 1), (0, 5, 1), None, None, None), + # batched different strides + ((2, 2), (4, 2, 3), (4, 3, 2), None, (24, 6, 1), (0, 3, 1), 1.0, None), + # square + ((16, 16), (2, 16, 16), (2, 16, 16), None, None, (512, 1, 1), None, 0.1), + # edge small + ((1, 1), (1, 1, 1), (1, 1, 1), None, None, None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + + for data in _TEST_CASES_DATA: + in_shape, b1_shape, b2_shape = data[0], data[1], data[2] + in_strides = data[3] if len(data) > 3 else None + b1_strides = data[4] if len(data) > 4 else None + b2_strides = data[5] if len(data) > 5 else None + beta = data[6] if len(data) > 6 else None + alpha = data[7] if len(data) > 7 else None + + out_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + b1_spec = 
TensorSpec.from_tensor(b1_shape, b1_strides, dtype) + b2_spec = TensorSpec.from_tensor(b2_shape, b2_strides, dtype) + + kwargs = {} + if beta is not None: + kwargs["beta"] = beta + if alpha is not None: + kwargs["alpha"] = alpha + + # Out-of-place + test_cases.append( + TestCase( + inputs=[in_spec, b1_spec, b2_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="addbmm - OUT_OF_PLACE", + ) + ) + + # In-place out= provided + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec, b1_spec, b2_spec], + kwargs=kwargs, + output_spec=in_spec, + comparison_target="out", + tolerance=tol, + description="addbmm - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """addbmm operator test with simplified implementation""" + + def __init__(self): + super().__init__("addbmm") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.addbmm(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.addbmm(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/addcdiv.py b/test/infinicore/ops/addcdiv.py new file mode 100644 index 000000000..3c54526e5 --- /dev/null +++ b/test/infinicore/ops/addcdiv.py @@ -0,0 +1,135 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, t1_shape_or_None, t2_shape_or_None, value) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None, None, 1.0), + ((1, 4, 8), (32, 8, 1), None, 
None, 0.5), + ((3, 2, 5, 7), None, None, None, 2.0), + ((2, 1, 16), None, None, None, 1.0), + ((1, 8, 9, 11), (792, 99, 11, 1), None, None, 1.5), + ((2, 6, 10), None, None, None, 0.25), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, t1_shape, t2_shape, value in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + input_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + t1_spec = TensorSpec.from_tensor( + in_shape if t1_shape is None else t1_shape, None, dtype + ) + t2_spec = TensorSpec.from_tensor( + in_shape if t2_shape is None else t2_shape, None, dtype + ) + + # Out-of-place + kwargs = {"value": value} + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="addcdiv - OUT_OF_PLACE", + ) + ) + + # Explicit out + out_spec = TensorSpec.from_tensor(in_shape, None, dtype) + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="addcdiv - INPLACE(out)", + ) + ) + + # In-place on input (out=0) + if not is_broadcast(input_spec.strides): + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs={"value": value, "out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="addcdiv - INPLACE(a)", + ) + ) + + # In-place on tensor1 (out=1) + if not is_broadcast(t1_spec.strides): + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs={"value": value, "out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="addcdiv - INPLACE(b)", + ) + ) + + # In-place on tensor2 (out=2) + if not is_broadcast(t2_spec.strides): + 
cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs={"value": value, "out": 2}, + output_spec=None, + comparison_target=2, + tolerance=tol, + description="addcdiv - INPLACE(c)", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """AddCdiv operator test with simplified implementation""" + + def __init__(self): + super().__init__("AddCdiv") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.addcdiv(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.addcdiv(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/addcmul.py b/test/infinicore/ops/addcmul.py new file mode 100644 index 000000000..1091d244f --- /dev/null +++ b/test/infinicore/ops/addcmul.py @@ -0,0 +1,130 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, t1_shape_or_None, t2_shape_or_None, value) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None, None, 1.0), + ((1, 4, 8), (32, 8, 1), None, None, 0.5), + ((3, 2, 5, 7), None, None, None, 2.0), + ((2, 1, 16), None, None, None, 1.0), + ((1, 8, 9, 11), (792, 99, 11, 1), None, None, 1.5), + ((2, 6, 10), None, None, None, 0.25), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, t1_shape, t2_shape, value in _TEST_CASES_DATA: 
+ for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + input_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + t1_spec = TensorSpec.from_tensor( + in_shape if t1_shape is None else t1_shape, None, dtype + ) + t2_spec = TensorSpec.from_tensor( + in_shape if t2_shape is None else t2_shape, None, dtype + ) + + kwargs = {"value": value} + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="addcmul - OUT_OF_PLACE", + ) + ) + + out_spec = TensorSpec.from_tensor(in_shape, None, dtype) + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="addcmul - INPLACE(out)", + ) + ) + + if not is_broadcast(input_spec.strides): + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs={"value": value, "out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="addcmul - INPLACE(a)", + ) + ) + + if not is_broadcast(t1_spec.strides): + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs={"value": value, "out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="addcmul - INPLACE(b)", + ) + ) + + if not is_broadcast(t2_spec.strides): + cases.append( + TestCase( + inputs=[input_spec, t1_spec, t2_spec], + kwargs={"value": value, "out": 2}, + output_spec=None, + comparison_target=2, + tolerance=tol, + description="addcmul - INPLACE(c)", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """AddCmul operator test with simplified implementation""" + + def __init__(self): + super().__init__("AddCmul") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.addcmul(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet 
available).""" + # return infinicore.addcmul(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/addmv.py b/test/infinicore/ops/addmv.py new file mode 100644 index 000000000..43daa3f52 --- /dev/null +++ b/test/infinicore/ops/addmv.py @@ -0,0 +1,104 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (input_shape, mat_shape, vec_shape, input_strides_or_None, mat_strides_or_None, vec_strides_or_None, beta_or_None, alpha_or_None) + +_TEST_CASES_DATA = [ + ((4,), (4, 6), (6,), None, None, None, None, None), + ((8,), (8, 8), (8,), None, None, None, 0.0, 1.0), + ((3,), (3, 5), (5,), None, (15, 1), None, None, 0.5), + ((16,), (16, 32), (32,), None, (512, 1), None, 1.0, None), + ((1,), (1, 1), (1,), None, None, None, None, None), + ((12,), (12, 12), (12,), None, None, None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + + for d in _TEST_CASES_DATA: + in_shape, mat_shape, vec_shape = d[0], d[1], d[2] + in_strides, mat_strides, vec_strides = d[3], d[4], d[5] + beta, alpha = d[6], d[7] + + out_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + mat_spec = TensorSpec.from_tensor(mat_shape, mat_strides, dtype) + vec_spec = TensorSpec.from_tensor(vec_shape, 
vec_strides, dtype) + + kwargs = {} + if beta is not None: + kwargs["beta"] = beta + if alpha is not None: + kwargs["alpha"] = alpha + + test_cases.append( + TestCase( + inputs=[in_spec, mat_spec, vec_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="addmv - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec, mat_spec, vec_spec], + kwargs=kwargs, + output_spec=in_spec, + comparison_target="out", + tolerance=tol, + description="addmv - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """addmv operator test with simplified implementation""" + + def __init__(self): + super().__init__("addmv") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.addmv(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.addmv(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/addr.py b/test/infinicore/ops/addr.py new file mode 100644 index 000000000..47df6671c --- /dev/null +++ b/test/infinicore/ops/addr.py @@ -0,0 +1,104 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (input_shape, vec1_shape, vec2_shape, input_strides_or_None, vec1_strides_or_None, vec2_strides_or_None, beta_or_None, alpha_or_None) + +_TEST_CASES_DATA = [ + ((3, 4), (3,), (4,), None, None, None, None, None), + ((8, 8), (8,), (8,), None, None, None, 0.5, 2.0), + ((5, 6), (5,), (6,), (30, 1), None, None, None, 
None), + ((1, 1), (1,), (1,), None, None, None, None, None), + ((16, 4), (16,), (4,), None, None, None, 1.0, None), + ((2, 7), (2,), (7,), None, None, None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + + for d in _TEST_CASES_DATA: + in_shape, v1_shape, v2_shape = d[0], d[1], d[2] + in_strides, v1_strides, v2_strides = d[3], d[4], d[5] + beta, alpha = d[6], d[7] + + out_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + v1_spec = TensorSpec.from_tensor(v1_shape, v1_strides, dtype) + v2_spec = TensorSpec.from_tensor(v2_shape, v2_strides, dtype) + + kwargs = {} + if beta is not None: + kwargs["beta"] = beta + if alpha is not None: + kwargs["alpha"] = alpha + + test_cases.append( + TestCase( + inputs=[in_spec, v1_spec, v2_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="addr - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec, v1_spec, v2_spec], + kwargs=kwargs, + output_spec=in_spec, + comparison_target="out", + tolerance=tol, + description="addr - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """addr operator test with simplified implementation""" + + def __init__(self): + super().__init__("addr") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.addr(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.addr(*args, **kwargs) + + 
+def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/affine_grid.py b/test/infinicore/ops/affine_grid.py new file mode 100644 index 000000000..aeba748f6 --- /dev/null +++ b/test/infinicore/ops/affine_grid.py @@ -0,0 +1,86 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (theta_shape, out_shape, theta_strides_or_None) + +_TEST_CASES_DATA = [ + ((1, 2, 3), (1, 3, 4, 4), None), + ((2, 2, 3), (2, 3, 8, 8), None), + ((1, 2, 3), (1, 4, 6, 6), (6, 2, 1)), + ((4, 2, 3), (4, 3, 5, 5), None), + ((2, 2, 3), (2, 1, 7, 7), None), + ((3, 2, 3), (3, 3, 2, 2), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + theta_shape, out_shape = data[0], data[1] + theta_strides = data[2] if len(data) > 2 else None + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + theta_spec = TensorSpec.from_tensor(theta_shape, theta_strides, dtype) + + # Out-of-place with align_corners variations + for align in (True, False): + kwargs = {"size": out_shape} + if align is not None: + kwargs["align_corners"] = align + + test_cases.append( + TestCase( + inputs=[theta_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="affine_grid - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """AffineGrid operator test 
with simplified implementation""" + + def __init__(self): + super().__init__("AffineGrid") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.affine_grid(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.affine_grid(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/all.py b/test/infinicore/ops/all.py new file mode 100644 index 000000000..694a5477d --- /dev/null +++ b/test/infinicore/ops/all.py @@ -0,0 +1,121 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, (0, 1), False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = {infinicore.bool: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.bool, infinicore.uint8] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + 
else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + input_supports_inplace = not is_broadcast(strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + # Out-of-place + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="All - OUT_OF_PLACE", + ) + ) + + # explicit out when supported (create out tensor with computed shape) + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, infinicore.bool) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="All - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """All operator test with simplified implementation""" + + def __init__(self): + super().__init__("All") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.all(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.all(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/alpha_dropout.py b/test/infinicore/ops/alpha_dropout.py new file mode 100644 index 000000000..25aaf5449 --- /dev/null +++ b/test/infinicore/ops/alpha_dropout.py @@ -0,0 +1,85 
@@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, p, training, in_strides_or_None) + +_TEST_CASES_DATA = [ + ((8, 16), 0.1, True, None), + ((8, 16), 0.2, False, (128, 1)), + ((2, 3, 4), 0.5, True, None), + ((16, 64), 0.3, True, None), + ((4, 5, 6), 0.5, False, None), + ((3, 4, 5), 0.4, True, (60, 20, 4)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, p, training = data[0], data[1], data[2] + in_strides = data[3] if len(data) > 3 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"p": p, "training": training} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="alpha_dropout - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """AlphaDropout operator test with simplified implementation""" + + def __init__(self): + super().__init__("AlphaDropout") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.alpha_dropout(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return 
infinicore.nn.functional.alpha_dropout(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/amax.py b/test/infinicore/ops/amax.py new file mode 100644 index 000000000..85cd627d8 --- /dev/null +++ b/test/infinicore/ops/amax.py @@ -0,0 +1,121 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, (0,), False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = 
_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Amax - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Amax - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Amax operator test with simplified implementation""" + + def __init__(self): + super().__init__("Amax") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.amax(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.amax(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/amin.py b/test/infinicore/ops/amin.py new file mode 100644 index 000000000..30f1170f4 --- /dev/null +++ b/test/infinicore/ops/amin.py @@ -0,0 +1,121 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + 
((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, (0,), False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Amin - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Amin - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Amin operator test with simplified implementation""" + 
+ def __init__(self): + super().__init__("Amin") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.amin(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.amin(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/any.py b/test/infinicore/ops/any.py new file mode 100644 index 000000000..f168be609 --- /dev/null +++ b/test/infinicore/ops/any.py @@ -0,0 +1,118 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, (0, 1), False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = {infinicore.bool: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.bool, infinicore.uint8] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def 
parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Any - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, infinicore.bool) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Any - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Any operator test with simplified implementation""" + + def __init__(self): + super().__init__("Any") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.any(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.any(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/argmax.py b/test/infinicore/ops/argmax.py new file mode 100644 index 000000000..c6d688ebb --- /dev/null +++ b/test/infinicore/ops/argmax.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import 
GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), 1, False), + ((2, 3, 4), None, 0, True), + ((1, 8), None, 0, False), + ((16, 64), (128, 1), None, None), + ((4, 5, 6), (60, 12, 2), 2, True), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.float32, infinicore.int32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(infinicore.int64, {"atol": 0, "rtol": 0}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ArgMax - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """ArgMax operator test with simplified implementation""" + + def __init__(self): + super().__init__("ArgMax") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.argmax(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.argmax(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/argmin.py b/test/infinicore/ops/argmin.py new file mode 100644 index 000000000..5a107766b --- /dev/null +++ b/test/infinicore/ops/argmin.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import 
infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), 1, False), + ((2, 3, 4), None, 0, True), + ((1, 8), None, 0, False), + ((16, 64), (128, 1), None, None), + ((4, 5, 6), (60, 12, 2), 2, True), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.float32, infinicore.int32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(infinicore.int64, {"atol": 0, "rtol": 0}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ArgMin - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """ArgMin operator test with simplified implementation""" + + def __init__(self): + super().__init__("ArgMin") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.argmin(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.argmin(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/argsort.py b/test/infinicore/ops/argsort.py new file mode 100644 index 000000000..b19018c13 --- /dev/null +++ b/test/infinicore/ops/argsort.py @@ -0,0 +1,96 @@ +import sys 
+import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides, dim, descending) +_TEST_CASES_DATA = [ + ((6, 8), None, 1, False), + ((8, 4), (16, 1), 0, True), + ((5, 5), None, -1, False), + ((3, 7), (14, 1), 1, True), + ((10, 3), None, 1, False), + ((2, 16), (32, 1), 0, True), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-3}, + infinicore.float32: {"atol": 0, "rtol": 1e-5}, + infinicore.bfloat16: {"atol": 0, "rtol": 1e-2}, +} + +# For argsort the output is an index tensor (int64). We keep input dtypes as floats. +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides, dim, desc = data + out_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-5}) + + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, infinicore.int64) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"stable": False, "dim": dim, "descending": desc}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="argsort - OUT_OF_PLACE", + ) + ) + + # Explicit out + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"stable": False, "dim": dim, "descending": desc}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="argsort - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Argsort operator test with simplified implementation""" + + def __init__(self): + super().__init__("Argsort") + + def 
get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.argsort(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.argsort(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/argwhere.py b/test/infinicore/ops/argwhere.py new file mode 100644 index 000000000..abc3f53d1 --- /dev/null +++ b/test/infinicore/ops/argwhere.py @@ -0,0 +1,68 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None) +_TEST_CASES_DATA = [ + ((3, 4), None), + ((5,), None), + ((2, 2, 3), (12, 6, 2)), + ((1, 6), None), + ((4, 4), None), + ((2, 3, 2), None), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.int64], + description=f"argwhere - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """ArgWhere operator test with simplified implementation""" + + def __init__(self): + super().__init__("ArgWhere") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.argwhere(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet 
available).""" + # return infinicore.argwhere(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/asin.py b/test/infinicore/ops/asin.py new file mode 100644 index 000000000..fd7772bc4 --- /dev/null +++ b/test/infinicore/ops/asin.py @@ -0,0 +1,108 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="asin - OUT_OF_PLACE", + ) + ) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=out_spec, + 
comparison_target="out", + tolerance=tol, + description="asin - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="asin - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Asin operator test with simplified implementation""" + + def __init__(self): + super().__init__("Asin") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.asin(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.asin(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/asinh.py b/test/infinicore/ops/asinh.py new file mode 100644 index 000000000..f1e0255b5 --- /dev/null +++ b/test/infinicore/ops/asinh.py @@ -0,0 +1,108 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, 
infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="asinh - OUT_OF_PLACE", + ) + ) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="asinh - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="asinh - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Asinh operator test with simplified implementation""" + + def __init__(self): + super().__init__("Asinh") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.asinh(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.asinh(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/atan.py b/test/infinicore/ops/atan.py new file mode 100644 index 000000000..c1f9d6c0d --- /dev/null +++ b/test/infinicore/ops/atan.py @@ -0,0 +1,108 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, 
TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="atan - OUT_OF_PLACE", + ) + ) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="atan - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="atan - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Atan operator test with simplified implementation""" + + def __init__(self): + super().__init__("Atan") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + 
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# =======================================================================
# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None)
# =======================================================================

_TEST_CASES_DATA = [
    ((13, 4), None, None, None),
    ((13, 4), (10, 1), None, None),
    ((13, 4), None, (10, 1), None),
    ((8, 16), (40, 1), (40, 1), None),
    ((2, 3, 4), None, None, None),
    ((16, 5632), None, None, None),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0, "rtol": 1e-2},
    infinicore.float32: {"atol": 0, "rtol": 1e-3},
    infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Build TestCase objects for atan2.

    For every row and dtype, emit an out-of-place case, an explicit-``out``
    case, and in-place cases writing into either operand.  Any operand that
    is a broadcast (stride-0) view is skipped as an in-place destination.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        a_strides = data[1] if len(data) > 1 else None
        b_strides = data[2] if len(data) > 2 else None
        out_strides = data[3] if len(data) > 3 else None

        a_supports_inplace = not is_broadcast(a_strides)
        b_supports_inplace = not is_broadcast(b_strides)
        out_supports_inplace = not is_broadcast(out_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            a_spec = TensorSpec.from_tensor(shape, a_strides, dtype)
            b_spec = TensorSpec.from_tensor(shape, b_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, out_strides, dtype)

            test_cases.append(
                TestCase(
                    inputs=[a_spec, b_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="atan2 - OUT_OF_PLACE",
                )
            )

            if out_supports_inplace:
                # FIX: use an empty dict rather than None for kwargs,
                # matching the OUT_OF_PLACE case and sibling op tests.
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="atan2 - INPLACE(out)",
                    )
                )

            if a_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="atan2 - INPLACE(a)",
                    )
                )

            if b_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs={"out": 1},
                        output_spec=None,
                        comparison_target=1,
                        tolerance=tol,
                        description="atan2 - INPLACE(b)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """Atan2 operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Atan2")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.atan2(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.atan2(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
+from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="atanh - OUT_OF_PLACE", + ) + ) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="atanh - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="atanh - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Atanh operator test with simplified implementation""" + + def __init__(self): + super().__init__("Atanh") + + def get_test_cases(self): + return parse_test_cases() 
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (in_shape, in_strides_or_None, kernel_size, stride_or_None, padding)

_TEST_CASES_DATA = [
    ((2, 3, 16), None, 3, None, 0),
    ((1, 4, 15), (60, 15, 1), 5, 1, 2),
    ((2, 1, 32), None, 2, 2, 0),
    ((3, 2, 7), (14, 7, 1), 3, None, 1),
    ((4, 6, 31), None, 4, 2, 1),
    ((2, 8, 9), (72, 9, 1), 3, 1, 0),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place TestCase objects for avg_pool1d.

    Pooling has no meaningful in-place form, so only OUT_OF_PLACE cases
    are generated.  ``stride``/``padding`` are forwarded only when the
    test data supplies them.
    """
    cases = []
    for shape, strides, kernel_size, stride, padding in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3})
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)

            call_kwargs = {"kernel_size": kernel_size}
            if stride is not None:
                call_kwargs["stride"] = stride
            if padding is not None:
                call_kwargs["padding"] = padding

            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs=call_kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="AvgPool1d - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """AvgPool1d operator test with simplified implementation"""

    def __init__(self):
        super().__init__("AvgPool1d")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.avg_pool1d(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.avg_pool1d(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (in_shape, in_strides_or_None, kernel_size, stride_or_None, padding)

_TEST_CASES_DATA = [
    ((1, 2, 8, 8, 8), None, (2, 2, 2), None, 0),
    ((2, 3, 7, 9, 5), (756, 252, 36, 4, 1), (3, 3, 3), (2, 2, 1), (1, 1, 0)),
    ((1, 4, 16, 16, 6), None, (4, 4, 2), (2, 2, 1), (0, 1, 0)),
    ((2, 1, 9, 11, 7), (693, 77, 77, 7, 1), (3, 2, 3), None, (1, 0, 1)),
    ((3, 2, 5, 6, 4), None, (2, 2, 2), (1, 1, 1), 0),
    ((2, 6, 10, 9, 8), (4320, 720, 72, 8, 1), (3, 3, 2), (2, 1, 2), (1, 0, 1)),
]

_TOLERANCE_MAP = {
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}

_TENSOR_DTYPES = [infinicore.float32]


def parse_test_cases():
    """Build out-of-place TestCase objects for avg_pool3d.

    Only OUT_OF_PLACE cases exist — pooling has no in-place variant.
    ``stride``/``padding`` are forwarded only when the test row supplies
    them.
    """
    cases = []
    for shape, strides, kernel_size, stride, padding in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3})
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)

            call_kwargs = {"kernel_size": kernel_size}
            if stride is not None:
                call_kwargs["stride"] = stride
            if padding is not None:
                call_kwargs["padding"] = padding

            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs=call_kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="AvgPool3d - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """AvgPool3d operator test with simplified implementation"""

    def __init__(self):
        super().__init__("AvgPool3d")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.avg_pool3d(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.avg_pool3d(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (input_shape, batch1_shape, batch2_shape, input_strides_or_None, batch1_strides_or_None, batch2_strides_or_None, beta_or_None, alpha_or_None)

_TEST_CASES_DATA = [
    ((3, 5), (2, 3, 4), (2, 4, 5), None, None, None, None, None),
    ((8, 8), (4, 8, 8), (4, 8, 8), None, None, None, 0.5, 2.0),
    ((5, 7), (2, 5, 6), (2, 6, 7), (30, 1), (0, 5, 1), None, None, None),
    ((16, 16), (2, 16, 16), (2, 16, 16), None, None, (512, 1, 1), 1.0, None),
    ((1, 1), (1, 1, 1), (1, 1, 1), None, None, None, None, None),
    ((6, 8), (3, 6, 7), (3, 7, 8), None, None, None, None, 0.2),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0, "rtol": 1e-2},
    infinicore.float32: {"atol": 0, "rtol": 1e-3},
    infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Build TestCase objects for baddbmm.

    Each test row yields an out-of-place case and, when the ``input``
    operand is not a broadcast (stride-0) view, an explicit-``out`` case.
    ``beta``/``alpha`` are forwarded only when the row supplies them.
    """
    test_cases = []

    for (
        in_shape,
        b1_shape,
        b2_shape,
        in_strides,
        b1_strides,
        b2_strides,
        beta,
        alpha,
    ) in _TEST_CASES_DATA:
        # A broadcast input cannot be used as an in-place destination.
        out_supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            # baddbmm output shape is (batch, n, p): batch from batch1,
            # n from the (broadcastable) input rows, p from batch2 columns.
            out_spec = TensorSpec.from_tensor(
                (b1_shape[0], in_shape[0], b2_shape[2]), None, dtype
            )
            b1_spec = TensorSpec.from_tensor(b1_shape, b1_strides, dtype)
            b2_spec = TensorSpec.from_tensor(b2_shape, b2_strides, dtype)

            # Only pass beta/alpha when explicitly requested by the row.
            kwargs = {
                key: value
                for key, value in (("beta", beta), ("alpha", alpha))
                if value is not None
            }

            test_cases.append(
                TestCase(
                    inputs=[in_spec, b1_spec, b2_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="baddbmm - OUT_OF_PLACE",
                )
            )

            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[in_spec, b1_spec, b2_spec],
                        kwargs=kwargs,
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="baddbmm - INPLACE(out)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """baddbmm operator test with simplified implementation"""

    def __init__(self):
        super().__init__("baddbmm")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.baddbmm(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.baddbmm(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
True, False, None, None), + ((8, 5, 2, 2), None, True, True, True, False, 0.1, 1e-3), + ((6, 4, 7, 7), None, False, False, True, True, None, 1e-4), + ((3, 2, 9, 9), None, True, True, False, False, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-1}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for ( + shape, + strides, + mean_p, + var_p, + wb_p, + training, + momentum, + eps, + ) in _TEST_CASES_DATA: + C = shape[1] + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + + running_mean = TensorSpec.from_tensor((C,), None, dtype) if mean_p else None + running_var = TensorSpec.from_tensor((C,), None, dtype) if var_p else None + inputs = [inp] + kwargs = {} + if running_mean is not None: + inputs.append(running_mean) + else: + inputs.append(None) + if running_var is not None: + inputs.append(running_var) + else: + inputs.append(None) + if wb_p: + weight = TensorSpec.from_tensor((C,), None, dtype) + bias = TensorSpec.from_tensor((C,), None, dtype) + inputs.append(weight) + inputs.append(bias) + else: + inputs.append(None) + inputs.append(None) + + if training is not None: + kwargs["training"] = training + if momentum is not None: + kwargs["momentum"] = momentum + if eps is not None: + kwargs["eps"] = eps + + test_cases.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="batch_norm - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """batch_norm operator test with simplified implementation""" + + def __init__(self): + super().__init__("batch_norm") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, 
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner

# Test cases format: (in1_shape, in2_shape, weight_shape, in1_strides_or_None, in2_strides_or_None, weight_strides_or_None, bias_present_bool)

_TEST_CASES_DATA = [
    ((4, 3), (4, 5), (2, 3, 5), None, None, None, True),
    ((1, 6), (1, 7), (3, 6, 7), None, None, None, True),
    ((8, 2), (8, 4), (5, 2, 4), (16, 2), None, None, False),
    ((2, 3), (2, 3), (4, 3, 3), None, (0, 3), None, True),
    ((6, 10), (6, 12), (7, 10, 12), None, None, (840, 70, 1), False),
    ((3, 1), (3, 1), (2, 1, 1), None, None, None, True),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place TestCase objects for bilinear.

    FIX: ``bias_present`` was previously parsed from the test data but
    never used, so the bias path was never exercised.  When the flag is
    set, a bias tensor of shape ``(out_features,)`` — i.e.
    ``weight_shape[0]`` — is now appended as the fourth positional input
    to ``F.bilinear(input1, input2, weight, bias)``.
    """
    test_cases = []
    for (
        in1_shape,
        in2_shape,
        weight_shape,
        in1_strides,
        in2_strides,
        weight_strides,
        bias_present,
    ) in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            in1 = TensorSpec.from_tensor(in1_shape, in1_strides, dtype)
            in2 = TensorSpec.from_tensor(in2_shape, in2_strides, dtype)
            weight = TensorSpec.from_tensor(weight_shape, weight_strides, dtype)

            inputs = [in1, in2, weight]
            if bias_present:
                # bias shape is (out_features,) = weight.shape[0]
                inputs.append(
                    TensorSpec.from_tensor((weight_shape[0],), None, dtype)
                )

            kwargs = {}

            test_cases.append(
                TestCase(
                    inputs=inputs,
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="bilinear - OUT_OF_PLACE",
                )
            )

    return test_cases


class OpTest(BaseOperatorTest):
    """bilinear operator test with simplified implementation"""

    def __init__(self):
        super().__init__("bilinear")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.bilinear(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.bilinear(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner

# Test cases format: (input_shape, input_strides_or_None, weight_present_bool, pos_weight_present_bool, reduction_or_None)

_TEST_CASES_DATA = [
    ((4, 5), None, False, False, None),
    ((8, 8), (512, 64), True, False, "sum"),
    ((1, 10), None, False, True, "mean"),
    ((16, 100), None, False, True, "mean"),
    ((3, 7), (21, 7), True, False, None),
    ((2, 2), None, False, False, "none"),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place TestCase objects for binary_cross_entropy_with_logits.

    FIX: ``pos_weight`` was previously appended to the positional input
    list right after ``target`` (or ``weight``), which maps it onto
    ``F.binary_cross_entropy_with_logits``'s ``weight`` (or deprecated
    ``size_average``) parameter instead of ``pos_weight``.  It is now
    routed through ``kwargs["pos_weight"]`` so it reaches the correct
    keyword slot.
    NOTE(review): this assumes the framework materializes TensorSpec
    values found in ``kwargs`` the same way it does for ``inputs`` —
    confirm against framework.base/runner.
    """
    test_cases = []
    for (
        shape,
        strides,
        weight_present,
        pos_weight_present,
        reduction,
    ) in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            inp = TensorSpec.from_tensor(shape, strides, dtype)
            tgt = TensorSpec.from_tensor(shape, None, dtype)

            inputs = [inp, tgt]
            kwargs = {}
            if weight_present:
                # weight is positional arg 3 of the functional, so it may
                # stay in the inputs list.
                inputs.append(TensorSpec.from_tensor(shape, None, dtype))
            if pos_weight_present:
                # pos_weight broadcasts along the class (last) dimension.
                pos_weight_shape = (shape[1],) if len(shape) > 1 else (shape[0],)
                kwargs["pos_weight"] = TensorSpec.from_tensor(
                    pos_weight_shape, None, dtype
                )
            if reduction is not None:
                kwargs["reduction"] = reduction

            test_cases.append(
                TestCase(
                    inputs=inputs,
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="binary_cross_entropy_with_logits - OUT_OF_PLACE",
                )
            )

    return test_cases


class OpTest(BaseOperatorTest):
    """binary_cross_entropy_with_logits operator test with simplified implementation"""

    def __init__(self):
        super().__init__("binary_cross_entropy_with_logits")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.binary_cross_entropy_with_logits(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.binary_cross_entropy_with_logits(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
not weights_present else [input_spec, weights_spec] + + test_cases.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="bincount - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Bincount operator test with simplified implementation""" + + def __init__(self): + super().__init__("Bincount") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.bincount(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.bincount(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/bitwise_left_shift.py b/test/infinicore/ops/bitwise_left_shift.py new file mode 100644 index 000000000..c929f0910 --- /dev/null +++ b/test/infinicore/ops/bitwise_left_shift.py @@ -0,0 +1,142 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None) +# ============================================================================== +# Operator-specific configuration +# ============================================================================== + +_TEST_CASES_DATA = [ + # small shapes + ((8, 8), None, None, None), + ((8, 8), (16, 1), None, None), + # different shapes (broadcasting second operand) + ((8, 8), None, (0, 1), None), + ((4, 1), None, None, (8, 1)), + # 3D tensor + ((2, 3, 4), None, None, None), + # large but strided + ((16, 512), (1024, 1), (0, 1), None), +] 
+
+# Integers require exact comparison
+_TOLERANCE_MAP = {
+    infinicore.int32: {"atol": 0, "rtol": 0},
+    infinicore.int64: {"atol": 0, "rtol": 0},
+    infinicore.uint8: {"atol": 0, "rtol": 0},
+}
+
+# Data types to test (integer types)
+_TENSOR_DTYPES = [infinicore.int32, infinicore.int64, infinicore.uint8]
+
+
+def parse_test_cases():
+    """
+    Parse test case data and return list of TestCase objects for bitwise_left_shift.
+    """
+    test_cases = []
+
+    for data in _TEST_CASES_DATA:
+        shape = data[0]
+        a_strides = data[1] if len(data) > 1 else None
+        b_strides = data[2] if len(data) > 2 else None
+        out_strides = data[3] if len(data) > 3 else None
+
+        a_supports_inplace = not is_broadcast(a_strides)
+        b_supports_inplace = not is_broadcast(b_strides)
+        out_supports_inplace = not is_broadcast(out_strides)
+
+        for dtype in _TENSOR_DTYPES:
+            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0})
+
+            a_spec = TensorSpec.from_tensor(shape, a_strides, dtype)
+            b_spec = TensorSpec.from_tensor(shape, b_strides, dtype)
+            out_spec = TensorSpec.from_tensor(shape, out_strides, dtype)
+
+            # Out-of-place
+            test_cases.append(
+                TestCase(
+                    inputs=[a_spec, b_spec],
+                    kwargs={},
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=tolerance,
+                    description="Bitwise left shift - OUT_OF_PLACE",
+                )
+            )
+
+            # explicit out
+            if out_supports_inplace:
+                test_cases.append(
+                    TestCase(
+                        inputs=[a_spec, b_spec],
+                        kwargs={},
+                        output_spec=out_spec,
+                        comparison_target="out",
+                        tolerance=tolerance,
+                        description="Bitwise left shift - INPLACE(out)",
+                    )
+                )
+
+            # in-place into first input
+            if a_supports_inplace:
+                test_cases.append(
+                    TestCase(
+                        inputs=[a_spec, b_spec],
+                        kwargs={"out": 0},
+                        output_spec=None,
+                        comparison_target=0,
+                        tolerance=tolerance,
+                        description="Bitwise left shift - INPLACE(a)",
+                    )
+                )
+
+            # in-place into second input
+            if b_supports_inplace:
+                test_cases.append(
+                    TestCase(
+                        inputs=[a_spec, b_spec],
+                        kwargs={"out": 1},
+                        output_spec=None,
+
comparison_target=1, + tolerance=tolerance, + description="Bitwise left shift - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """BitwiseLeftShift operator test with simplified implementation""" + + def __init__(self): + super().__init__("BitwiseLeftShift") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.bitwise_left_shift(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.bitwise_left_shift(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/bitwise_right_shift.py b/test/infinicore/ops/bitwise_right_shift.py new file mode 100644 index 000000000..c9b5c8cb5 --- /dev/null +++ b/test/infinicore/ops/bitwise_right_shift.py @@ -0,0 +1,121 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), None, None), + ((8, 8), None, (0, 1), None), + ((4, 1), None, None, (8, 1)), + ((2, 3, 4), None, None, None), + ((16, 512), (1024, 1), (0, 1), None), +] + +_TOLERANCE_MAP = { + infinicore.int32: {"atol": 0, "rtol": 0}, + infinicore.int64: {"atol": 0, "rtol": 0}, + infinicore.uint8: {"atol": 0, "rtol": 0}, +} + +_TENSOR_DTYPES = [infinicore.int32, infinicore.int64, infinicore.uint8] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides, out_strides = data[0], data[1], data[2], data[3] + + 
        a_supports_inplace = not is_broadcast(a_strides)
+        b_supports_inplace = not is_broadcast(b_strides)
+        out_supports_inplace = not is_broadcast(out_strides)
+
+        for dtype in _TENSOR_DTYPES:
+            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0})
+            a_spec = TensorSpec.from_tensor(shape, a_strides, dtype)
+            b_spec = TensorSpec.from_tensor(shape, b_strides, dtype)
+            out_spec = TensorSpec.from_tensor(shape, out_strides, dtype)
+
+            test_cases.append(
+                TestCase(
+                    inputs=[a_spec, b_spec],
+                    kwargs={},
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=tol,
+                    description="Bitwise right shift - OUT_OF_PLACE",
+                )
+            )
+
+            if out_supports_inplace:
+                test_cases.append(
+                    TestCase(
+                        inputs=[a_spec, b_spec],
+                        kwargs={},
+                        output_spec=out_spec,
+                        comparison_target="out",
+                        tolerance=tol,
+                        description="Bitwise right shift - INPLACE(out)",
+                    )
+                )
+
+            if a_supports_inplace:
+                test_cases.append(
+                    TestCase(
+                        inputs=[a_spec, b_spec],
+                        kwargs={"out": 0},
+                        output_spec=None,
+                        comparison_target=0,
+                        tolerance=tol,
+                        description="Bitwise right shift - INPLACE(a)",
+                    )
+                )
+
+            if b_supports_inplace:
+                test_cases.append(
+                    TestCase(
+                        inputs=[a_spec, b_spec],
+                        kwargs={"out": 1},
+                        output_spec=None,
+                        comparison_target=1,
+                        tolerance=tol,
+                        description="Bitwise right shift - INPLACE(b)",
+                    )
+                )
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """BitwiseRightShift operator test with simplified implementation"""
+
+    def __init__(self):
+        super().__init__("BitwiseRightShift")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, **kwargs):
+        return torch.bitwise_right_shift(*args, **kwargs)
+
+    # def infinicore_operator(self, *args, **kwargs):
+    #     """InfiniCore implementation (operator not yet available)."""
+    #     return infinicore.bitwise_right_shift(*args, **kwargs)
+
+
+def main():
+    """Main entry point"""
+    runner = GenericTestRunner(OpTest)
+    runner.run_and_exit()
+
+
+if __name__ == "__main__":
+    main()
diff --git 
a/test/infinicore/ops/bitwise_xor.py b/test/infinicore/ops/bitwise_xor.py index 438765e3c..ad3e30b63 100644 --- a/test/infinicore/ops/bitwise_xor.py +++ b/test/infinicore/ops/bitwise_xor.py @@ -31,7 +31,7 @@ ((16, 5632), (13312, 1), (13312, 1), None), ] -# Tolerance configuration - exact match required for bitwise operations +# Tolerance configuration _TOLERANCE_MAP = { infinicore.int8: {"atol": 0, "rtol": 0}, infinicore.int16: {"atol": 0, "rtol": 0}, @@ -41,14 +41,14 @@ infinicore.bool: {"atol": 0, "rtol": 0}, } -# Data types to test - integer types for bitwise operations +# Data types to test _TENSOR_DTYPES = [ infinicore.int8, infinicore.int16, infinicore.int32, infinicore.int64, infinicore.uint8, - infinicore.bool, # XOR also supports boolean tensors + infinicore.bool, ] diff --git a/test/infinicore/ops/block_diag.py b/test/infinicore/ops/block_diag.py new file mode 100644 index 000000000..ba9693775 --- /dev/null +++ b/test/infinicore/ops/block_diag.py @@ -0,0 +1,80 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (list_of_matrix_shapes, list_of_strides_or_None, dtype) + +_TEST_CASES_DATA = [ + ([(3, 4), (2, 2)], [None, None], None), + ([(1, 1), (1, 1), (1, 1)], [None, None, None], None), + ([(4, 4), (2, 3), (3, 2)], [None, (6, 1), (0, 3)], None), + ([(8, 8)], [None], None), + ([(5, 2), (2, 5)], [(10, 1), None], None), + ([(6, 6), (6, 6), (6, 6)], [None, None, None], None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + + for shapes, strides_list, _ in _TEST_CASES_DATA: + # prepare 
TensorSpec list for inputs + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_specs = [] + for s, st in zip(shapes, strides_list): + input_specs.append(TensorSpec.from_tensor(s, st, dtype)) + + test_cases.append( + TestCase( + inputs=input_specs, + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="block_diag - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """block_diag operator test with simplified implementation""" + + def __init__(self): + super().__init__("block_diag") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.block_diag(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.block_diag(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/broadcast_to.py b/test/infinicore/ops/broadcast_to.py new file mode 100644 index 000000000..6ad75ee9a --- /dev/null +++ b/test/infinicore/ops/broadcast_to.py @@ -0,0 +1,75 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (input_shape, input_strides_or_None, target_shape) +_TEST_CASES_DATA = [ + ((3, 1), None, (3, 4)), + ((1,), None, (5,)), + ((2, 1, 4), (8, 1, 1), (2, 3, 4)), + ((4, 1), None, (4, 6)), + ((1, 1), None, (2, 3)), + ((2,), None, (2, 2)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in 
_TEST_CASES_DATA: + shape, strides, target = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype) + input_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"size": target} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="broadcast_to - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """BroadcastTo operator test with simplified implementation""" + + def __init__(self): + super().__init__("BroadcastTo") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.broadcast_to(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.broadcast_to(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/bucketize.py b/test/infinicore/ops/bucketize.py new file mode 100644 index 000000000..74b15c443 --- /dev/null +++ b/test/infinicore/ops/bucketize.py @@ -0,0 +1,69 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, boundaries_len) +_TEST_CASES_DATA = [ + ((5,), None, 3), + ((4, 3), None, 4), + ((2,), None, 5), + ((6,), None, 2), + ((3, 3), None, 6), + ((1,), None, 1), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, b_len in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + boundaries_spec = 
TensorSpec.from_tensor((b_len,), None, infinicore.float32) + + test_cases.append( + TestCase( + inputs=[input_spec, boundaries_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.int64], + description="bucketize - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Bucketize operator test with simplified implementation""" + + def __init__(self): + super().__init__("Bucketize") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.bucketize(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.bucketize(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cat.py b/test/infinicore/ops/cat.py index dc3e9b134..5c5aed5e1 100644 --- a/test/infinicore/ops/cat.py +++ b/test/infinicore/ops/cat.py @@ -89,7 +89,7 @@ def parse_test_cases(): output_spec=None, comparison_target=None, tolerance=tolerance, - description=f"Cat - OUT_OF_PLACE", + description="Cat - OUT_OF_PLACE", ) ) @@ -102,7 +102,7 @@ def parse_test_cases(): output_spec=output_spec, comparison_target="out", tolerance=tolerance, - description=f"Cat - INPLACE(out)", + description="Cat - INPLACE(out)", ) ) diff --git a/test/infinicore/ops/cdist.py b/test/infinicore/ops/cdist.py new file mode 100644 index 000000000..cf6123482 --- /dev/null +++ b/test/infinicore/ops/cdist.py @@ -0,0 +1,79 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (x1_shape, x2_shape, x1_strides_or_None, x2_strides_or_None, p_or_None) + 
+_TEST_CASES_DATA = [ + ((5, 3), (6, 3), None, None, None), + ((1, 4), (2, 4), None, None, 1.0), + ((8, 16), (8, 16), (128, 16), (128, 16), 2.0), + ((3, 2), (4, 2), None, (0, 2), None), + ((10, 5), (7, 5), None, None, float("inf")), + ((2, 1), (3, 1), None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for x1_shape, x2_shape, x1_strides, x2_strides, p in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + x1_spec = TensorSpec.from_tensor(x1_shape, x1_strides, dtype) + x2_spec = TensorSpec.from_tensor(x2_shape, x2_strides, dtype) + + kwargs = {} + if p is not None: + kwargs["p"] = p + + test_cases.append( + TestCase( + inputs=[x1_spec, x2_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="cdist - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """cdist operator test with simplified implementation""" + + def __init__(self): + super().__init__("cdist") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cdist(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cdist(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/celu.py b/test/infinicore/ops/celu.py new file mode 100644 index 000000000..5b932f37e --- /dev/null +++ b/test/infinicore/ops/celu.py @@ -0,0 +1,113 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from 
framework.runner import GenericTestRunner
+from framework.utils import is_broadcast
+
+# Test cases format: (in_shape, in_strides_or_None, alpha_or_None)
+
+# ==============================================================================
+# Operator-specific configuration
+# ==============================================================================
+
+_TEST_CASES_DATA = [
+    ((13, 4), None, None),
+    ((13, 4), (10, 1), None),
+    ((8, 8), None, 0.5),
+    ((16, 16), (256, 1), 1.5),
+    ((32, 8), None, 1.0),
+]
+
+# Tolerance configuration
+_TOLERANCE_MAP = {
+    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
+    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
+    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
+}
+
+# Data types to test
+_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
+
+
+def parse_test_cases():
+
+    test_cases = []
+
+    for data in _TEST_CASES_DATA:
+        shape = data[0]
+        in_strides = data[1] if len(data) > 1 else None
+        alpha = data[2] if len(data) > 2 else None
+
+        input_supports_inplace = not is_broadcast(in_strides)
+
+        for dtype in _TENSOR_DTYPES:
+            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
+
+            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
+
+            # Out-of-place
+            kwargs = {}
+            if alpha is not None:
+                kwargs["alpha"] = alpha
+
+            test_cases.append(
+                TestCase(
+                    inputs=[input_spec],
+                    kwargs=kwargs,
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=tolerance,
+                    description="CELU - OUT_OF_PLACE",
+                )
+            )
+
+            # In-place
+            if input_supports_inplace:
+                inplace_kwargs = {"inplace": True}
+                if alpha is not None:
+                    inplace_kwargs["alpha"] = alpha
+
+                test_cases.append(
+                    TestCase(
+                        inputs=[input_spec],
+                        kwargs=inplace_kwargs,
+                        output_spec=None,
+                        comparison_target=0,
+                        tolerance=tolerance,
+                        description="CELU - INPLACE",
+                    )
+                )
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """CELU operator test with simplified implementation"""
+
+    def __init__(self):
+
super().__init__("CELU")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, **kwargs):
+        return torch.nn.functional.celu(*args, **kwargs)
+
+    # def infinicore_operator(self, *args, **kwargs):
+    #     """InfiniCore implementation (operator not yet available)."""
+    #     return infinicore.nn.functional.celu(*args, **kwargs)
+
+
+def main():
+    """Main entry point"""
+    runner = GenericTestRunner(OpTest)
+    runner.run_and_exit()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/infinicore/ops/clone.py b/test/infinicore/ops/clone.py
new file mode 100644
index 000000000..93d23eb0d
--- /dev/null
+++ b/test/infinicore/ops/clone.py
@@ -0,0 +1,73 @@
+import sys
+import os
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+import torch
+import infinicore
+from framework.base import BaseOperatorTest, TensorSpec, TestCase
+from framework.runner import GenericTestRunner
+
+# Test cases format: (shape, input_strides_or_None)
+_TEST_CASES_DATA = [
+    ((3, 4), None),
+    ((6, 2), (12, 1)),
+    ((5, 5), None),
+    ((1, 7), None),
+    ((8, 3), (24, 1)),
+    ((2, 2, 2), None),
+]
+
+_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}}
+_TENSOR_DTYPES = [infinicore.float32]
+
+
+def parse_test_cases():
+    test_cases = []
+    for data in _TEST_CASES_DATA:
+        shape, strides = data
+
+        for dtype in _TENSOR_DTYPES:
+            tol = _TOLERANCE_MAP.get(dtype)
+            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
+
+            # clone is out-of-place; also test clone with memory_format not None if supported
+            test_cases.append(
+                TestCase(
+                    inputs=[input_spec],
+                    kwargs={},
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=tol,
+                    description="clone - OUT_OF_PLACE",
+                )
+            )
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """Clone operator test with simplified implementation"""
+
+    def __init__(self):
+        super().__init__("Clone")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, 
**kwargs):
+        return torch.clone(*args, **kwargs)
+
+    # def infinicore_operator(self, *args, **kwargs):
+    #     """InfiniCore implementation (operator not yet available)."""
+    #     return infinicore.clone(*args, **kwargs)
+
+
+def main():
+    """Main entry point"""
+    runner = GenericTestRunner(OpTest)
+    runner.run_and_exit()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/infinicore/ops/combinations.py b/test/infinicore/ops/combinations.py
new file mode 100644
index 000000000..9146d6661
--- /dev/null
+++ b/test/infinicore/ops/combinations.py
@@ -0,0 +1,76 @@
+import sys
+import os
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+import torch
+import infinicore
+from framework.base import BaseOperatorTest, TensorSpec, TestCase
+from framework.runner import GenericTestRunner
+
+# Test cases format: (input_shape, input_strides_or_None, r)
+# combinations operates on 1-D inputs (combinations of elements). We keep inputs small.
+_TEST_CASES_DATA = [
+    ((5,), None, 2),
+    ((6,), None, 3),
+    ((4,), None, 1),
+    ((7,), None, 2),
+    ((8,), None, 3),
+    ((3,), None, 2),
+]
+
+_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}}
+
+_TENSOR_DTYPES = [infinicore.float32]
+
+
+def parse_test_cases():
+    test_cases = []
+    for data in _TEST_CASES_DATA:
+        shape, strides, r = data
+
+        for dtype in _TENSOR_DTYPES:
+            tol = _TOLERANCE_MAP.get(dtype)
+            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
+
+            kwargs = {"r": r}
+
+            test_cases.append(
+                TestCase(
+                    inputs=[input_spec],
+                    kwargs=kwargs,
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=tol,
+                    description="combinations - OUT_OF_PLACE",
+                )
+            )
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """Combinations operator test with simplified implementation"""
+
+    def __init__(self):
+        super().__init__("Combinations")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, **kwargs):
+        return torch.combinations(*args, **kwargs)
+
+    # 
def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.combinations(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/conv1d.py b/test/infinicore/ops/conv1d.py new file mode 100644 index 000000000..498766261 --- /dev/null +++ b/test/infinicore/ops/conv1d.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases: (in_shape, in_strides_or_None, weight_shape, bias_shape_or_None, stride, padding, dilation, groups) + +_TEST_CASES_DATA = [ + ((2, 4, 16), None, (8, 4, 3), None, 1, 0, 1, 1), + ((1, 6, 15), (90, 15, 1), (4, 6, 5), (4,), 2, 2, 1, 1), + ((2, 16, 32), None, (8, 8, 1), None, 1, 0, 1, 2), + ((3, 3, 7), (21, 7, 1), (6, 3, 3), None, 1, 0, 1, 1), + ((2, 2, 31), None, (4, 2, 4), (4,), 2, 1, 1, 1), + ((1, 8, 9), (72, 9, 1), (8, 8, 3), None, 1, 1, 2, 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for ( + in_shape, + in_strides, + w_shape, + b_shape, + stride, + padding, + dilation, + groups, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + weight_spec = TensorSpec.from_tensor(w_shape, None, dtype) + if b_shape is not None: + bias_spec = TensorSpec.from_tensor(b_shape, None, dtype) + else: + bias_spec = None + + kwargs = { + "stride": stride, + "padding": padding, + 
"dilation": dilation, + "groups": groups, + } + inputs = [in_spec, weight_spec] + if bias_spec is not None: + inputs.append(bias_spec) + + tests.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Conv1d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """Conv1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("Conv1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.conv1d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.conv1d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/conv2d.py b/test/infinicore/ops/conv2d.py new file mode 100644 index 000000000..9fa7b37a0 --- /dev/null +++ b/test/infinicore/ops/conv2d.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases: (in_shape, in_strides_or_None, weight_shape, bias_shape_or_None, stride, padding, dilation, groups) + +_TEST_CASES_DATA = [ + ((2, 4, 16, 16), None, (8, 4, 3, 3), None, (1, 1), (0, 0), (1, 1), 1), + ((1, 6, 15, 17), (1530, 255, 17, 1), (4, 6, 5, 3), (4,), (2, 2), (2, 1), (1, 1), 1), + ((2, 8, 32, 32), None, (8, 8, 1, 1), None, (1, 1), (0, 0), (1, 2), 1), + ((3, 3, 7, 9), (189, 63, 9, 1), (6, 3, 3, 3), None, 1, (1, 1), (1, 1), 1), + ((2, 2, 31, 29), None, (4, 2, 4, 3), (4,), (2, 1), (1, 0), (1, 1), 1), + ((1, 8, 9, 11), (792, 99, 11, 1), (8, 8, 
3, 3), None, (1, 1), (1, 1), (1, 1), 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for ( + in_shape, + in_strides, + w_shape, + b_shape, + stride, + padding, + dilation, + groups, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + weight_spec = TensorSpec.from_tensor(w_shape, None, dtype) + if b_shape is not None: + bias_spec = TensorSpec.from_tensor(b_shape, None, dtype) + else: + bias_spec = None + + kwargs = { + "stride": stride, + "padding": padding, + "dilation": dilation, + "groups": groups, + } + inputs = [in_spec, weight_spec] + if bias_spec is not None: + inputs.append(bias_spec) + + tests.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Conv2d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """Conv2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("Conv2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.conv2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.conv2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/conv3d.py b/test/infinicore/ops/conv3d.py new file mode 100644 index 000000000..648424551 --- /dev/null +++ b/test/infinicore/ops/conv3d.py @@ -0,0 +1,136 @@ +import sys +import os + +sys.path.insert(0, 
os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases: (in_shape, in_strides_or_None, weight_shape, bias_shape_or_None, stride, padding, dilation, groups) + +_TEST_CASES_DATA = [ + ((1, 2, 8, 8, 8), None, (4, 2, 3, 3, 3), None, (1, 1, 1), (0, 0, 0), (1, 1, 1), 1), + ( + (2, 3, 7, 9, 5), + (756, 252, 36, 4, 1), + (6, 3, 3, 3, 1), + (6,), + (2, 2, 1), + (1, 1, 0), + (1, 1, 1), + 1, + ), + ( + (1, 4, 16, 16, 6), + None, + (8, 4, 1, 1, 2), + None, + (1, 1, 2), + (0, 1, 0), + (1, 1, 1), + 1, + ), + ( + (2, 1, 9, 11, 7), + (693, 77, 77, 7, 1), + (6, 1, 3, 3, 3), + None, + 1, + (1, 0, 1), + (1, 1, 1), + 1, + ), + ((3, 2, 5, 6, 4), None, (4, 2, 2, 2, 2), (4,), (1, 1, 1), (0, 1, 0), (1, 1, 1), 1), + ( + (2, 6, 10, 9, 8), + (4320, 720, 72, 8, 1), + (8, 6, 3, 3, 2), + None, + (2, 1, 2), + (1, 0, 1), + (1, 1, 1), + 1, + ), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for ( + in_shape, + in_strides, + w_shape, + b_shape, + stride, + padding, + dilation, + groups, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + weight_spec = TensorSpec.from_tensor(w_shape, None, dtype) + if b_shape is not None: + bias_spec = TensorSpec.from_tensor(b_shape, None, dtype) + else: + bias_spec = None + + kwargs = { + "stride": stride, + "padding": padding, + "dilation": dilation, + "groups": groups, + } + inputs = [in_spec, weight_spec] + if bias_spec is not None: + inputs.append(bias_spec) + + tests.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + 
comparison_target=None, + tolerance=tol, + description="Conv3d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """Conv3d operator test with simplified implementation""" + + def __init__(self): + super().__init__("Conv3d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.conv3d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.conv3d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/conv_transpose1d.py b/test/infinicore/ops/conv_transpose1d.py new file mode 100644 index 000000000..5e91ec82c --- /dev/null +++ b/test/infinicore/ops/conv_transpose1d.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases: (in_shape, in_strides_or_None, weight_shape, bias_shape_or_None, stride, padding, output_padding, groups) + +_TEST_CASES_DATA = [ + ((2, 4, 16), None, (4, 4, 3), None, 1, 0, 0, 1), + ((1, 6, 15), (90, 15, 1), (6, 6, 5), (6,), 2, 1, 1, 1), + ((2, 8, 32), None, (8, 4, 1), None, 1, 0, 0, 2), + ((3, 3, 7), (21, 7, 1), (3, 3, 3), None, 1, 0, 0, 1), + ((2, 2, 31), None, (2, 2, 4), (2,), 2, 1, 1, 1), + ((1, 8, 9), (72, 9, 1), (8, 4, 3), None, 1, 1, 0, 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for ( + in_shape, + in_strides, + w_shape, + 
b_shape, + stride, + padding, + out_pad, + groups, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + weight_spec = TensorSpec.from_tensor(w_shape, None, dtype) + if b_shape is not None: + bias_spec = TensorSpec.from_tensor(b_shape, None, dtype) + else: + bias_spec = None + + kwargs = { + "stride": stride, + "padding": padding, + "output_padding": out_pad, + "groups": groups, + } + inputs = [in_spec, weight_spec] + if bias_spec is not None: + inputs.append(bias_spec) + + tests.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ConvTranspose1d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """ConvTranspose1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("ConvTranspose1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.conv_transpose1d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.conv_transpose1d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/conv_transpose2d.py b/test/infinicore/ops/conv_transpose2d.py new file mode 100644 index 000000000..8e06c36fa --- /dev/null +++ b/test/infinicore/ops/conv_transpose2d.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases: 
(in_shape, in_strides_or_None, weight_shape, bias_shape_or_None, stride, padding, output_padding, groups) + +_TEST_CASES_DATA = [ + ((2, 4, 16, 16), None, (4, 4, 3, 3), None, (1, 1), (0, 0), (0, 0), 1), + ((1, 6, 15, 17), (1530, 255, 17, 1), (6, 6, 5, 3), (6,), (2, 2), (2, 1), (1, 1), 1), + ((2, 8, 32, 32), None, (8, 4, 1, 1), None, (1, 1), (0, 0), (0, 0), 1), + ((3, 3, 7, 9), (189, 63, 9, 1), (3, 3, 3, 3), None, 1, (1, 1), (0, 0), 1), + ((2, 2, 31, 29), None, (2, 2, 4, 3), (2,), (2, 1), (1, 0), (1, 0), 1), + ((1, 8, 9, 11), (792, 99, 11, 1), (8, 4, 3, 3), None, (1, 1), (1, 1), (0, 0), 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for ( + in_shape, + in_strides, + w_shape, + b_shape, + stride, + padding, + out_pad, + groups, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + weight_spec = TensorSpec.from_tensor(w_shape, None, dtype) + if b_shape is not None: + bias_spec = TensorSpec.from_tensor(b_shape, None, dtype) + else: + bias_spec = None + + kwargs = { + "stride": stride, + "padding": padding, + "output_padding": out_pad, + "groups": groups, + } + inputs = [in_spec, weight_spec] + if bias_spec is not None: + inputs.append(bias_spec) + + tests.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ConvTranspose2d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """ConvTranspose2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("ConvTranspose2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return 
torch.nn.functional.conv_transpose2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.conv_transpose2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/conv_transpose3d.py b/test/infinicore/ops/conv_transpose3d.py new file mode 100644 index 000000000..8399c25b3 --- /dev/null +++ b/test/infinicore/ops/conv_transpose3d.py @@ -0,0 +1,136 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases: (in_shape, in_strides_or_None, weight_shape, bias_shape_or_None, stride, padding, output_padding, groups) + +_TEST_CASES_DATA = [ + ((1, 2, 8, 8, 8), None, (2, 2, 3, 3, 3), None, (1, 1, 1), (0, 0, 0), (0, 0, 0), 1), + ( + (2, 3, 7, 9, 5), + (756, 252, 36, 4, 1), + (3, 3, 3, 3, 1), + (3,), + (2, 2, 1), + (1, 1, 0), + (0, 0, 0), + 1, + ), + ( + (1, 4, 16, 16, 6), + None, + (4, 2, 1, 1, 2), + None, + (1, 1, 2), + (0, 1, 0), + (0, 0, 0), + 1, + ), + ( + (2, 1, 9, 11, 7), + (693, 77, 77, 7, 1), + (1, 6, 3, 3, 3), + None, + 1, + (1, 0, 1), + (0, 0, 0), + 1, + ), + ((3, 2, 5, 6, 4), None, (2, 2, 2, 2, 2), (2,), (1, 1, 1), (0, 1, 0), (0, 0, 0), 1), + ( + (2, 6, 10, 9, 8), + (4320, 720, 72, 8, 1), + (6, 8, 3, 3, 2), + None, + (2, 1, 2), + (1, 0, 1), + (1, 0, 1), + 1, + ), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for ( + in_shape, + in_strides, + w_shape, + b_shape, + stride, + padding, + 
out_pad, + groups, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + weight_spec = TensorSpec.from_tensor(w_shape, None, dtype) + if b_shape is not None: + bias_spec = TensorSpec.from_tensor(b_shape, None, dtype) + else: + bias_spec = None + + kwargs = { + "stride": stride, + "padding": padding, + "output_padding": out_pad, + "groups": groups, + } + inputs = [in_spec, weight_spec] + if bias_spec is not None: + inputs.append(bias_spec) + + tests.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ConvTranspose3d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """ConvTranspose3d operator test with simplified implementation""" + + def __init__(self): + super().__init__("ConvTranspose3d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.conv_transpose3d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.conv_transpose3d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/corrcoef.py b/test/infinicore/ops/corrcoef.py new file mode 100644 index 000000000..1983f9732 --- /dev/null +++ b/test/infinicore/ops/corrcoef.py @@ -0,0 +1,75 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None) +# corrcoef accepts 1-D or 2-D inputs; we include both 
shapes. +_TEST_CASES_DATA = [ + ((5,), None), + ((3, 5), None), + ((4, 4), (16, 1)), + ((2, 8), None), + ((6, 6), None), + ((1, 7), None), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype) + input_spec = TensorSpec.from_tensor(shape, strides, dtype) + kwargs = {} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"corrcoef - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Corrcoef operator test with simplified implementation""" + + def __init__(self): + super().__init__("Corrcoef") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.corrcoef(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.corrcoef(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cosh.py b/test/infinicore/ops/cosh.py new file mode 100644 index 000000000..dcf6be30c --- /dev/null +++ b/test/infinicore/ops/cosh.py @@ -0,0 +1,108 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + 
+_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="cosh - OUT_OF_PLACE", + ) + ) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="cosh - INPLACE(out)", + ) + ) + + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="cosh - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Cosh operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cosh") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cosh(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cosh(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ 
== "__main__": + main() diff --git a/test/infinicore/ops/cosine_embedding_loss.py b/test/infinicore/ops/cosine_embedding_loss.py new file mode 100644 index 000000000..8ed9fd72c --- /dev/null +++ b/test/infinicore/ops/cosine_embedding_loss.py @@ -0,0 +1,83 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input1_shape, input2_shape, target_shape, input1_strides_or_None, input2_strides_or_None, target_strides_or_None, margin_or_None) +# infinicore.nn.functional.cosine_embedding_loss(x1, x2, y, margin=0.0, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 3), (4, 3), (4,), None, None, None, None), + ((8, 5), (8, 5), (8,), (40, 5), None, None, 0.5), + ((1, 10), (1, 10), (1,), None, None, None, 0.2), + ((16, 20), (16, 20), (16,), None, None, None, 0.0), + ((3, 7), (3, 7), (3,), None, (21, 7), None, None), + ((2, 2), (2, 2), (2,), None, None, None, 1.0), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for s1, s2, st, st1, st2, stt, margin in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(s1, st1, dtype) + b = TensorSpec.from_tensor(s2, st2, dtype) + y = TensorSpec.from_tensor(st, stt, dtype) + + kwargs = {} + if margin is not None: + kwargs["margin"] = margin + + test_cases.append( + TestCase( + inputs=[a, b, y], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="cosine_embedding_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class 
OpTest(BaseOperatorTest): + """cosine_embedding_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("cosine_embedding_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.cosine_embedding_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.cosine_embedding_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cosine_similarity.py b/test/infinicore/ops/cosine_similarity.py new file mode 100644 index 000000000..6b94b7df6 --- /dev/null +++ b/test/infinicore/ops/cosine_similarity.py @@ -0,0 +1,91 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, eps, a_strides_or_None, b_strides_or_None) +# infinicore.nn.functional.cosine_similarity(x1, x2, dim=1, eps=1e-8) + +_TEST_CASES_DATA = [ + ((8, 16), 1, 1e-8, None, None), + ((8, 16), 1, 1e-6, (128, 1), (128, 1)), + ((2, 3, 4), 2, 1e-8, None, None), + ((16, 64), 1, 1e-8, None, None), + ((4, 5, 6), 0, 1e-8, None, None), + ((3, 4, 5), 1, 1e-7, (60, 20, 4), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim, eps = data[0], data[1], data[2] + a_strides = 
data[3] if len(data) > 3 else None + b_strides = data[4] if len(data) > 4 else None + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + kwargs = {"dim": dim, "eps": eps} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cosine_similarity - OUT_OF_PLACE", + ) + ) + + # PyTorch cosine_similarity does not support explicit out param; skip out tests + + return test_cases + + +class OpTest(BaseOperatorTest): + """CosineSimilarity operator test with simplified implementation""" + + def __init__(self): + super().__init__("CosineSimilarity") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.cosine_similarity(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.cosine_similarity(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/count_nonzero.py b/test/infinicore/ops/count_nonzero.py new file mode 100644 index 000000000..0cca739e8 --- /dev/null +++ b/test/infinicore/ops/count_nonzero.py @@ -0,0 +1,80 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, 
dim_or_None) +# count_nonzero counts number of non-zero elements along dims or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None), + ((8, 8), (16, 1), 1), + ((2, 3, 4), None, 0), + ((1, 8), None, (0,)), + ((16, 64), (128, 1), None), + ((4, 5, 6), (60, 12, 2), 2), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.int32, infinicore.float32, infinicore.uint8] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(infinicore.int64, {"atol": 0, "rtol": 0}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="CountNonZero - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """CountNonZero operator test with simplified implementation""" + + def __init__(self): + super().__init__("CountNonZero") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.count_nonzero(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.count_nonzero(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cov.py b/test/infinicore/ops/cov.py new file mode 100644 index 000000000..55d36c7fb --- /dev/null +++ b/test/infinicore/ops/cov.py @@ -0,0 +1,77 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import 
GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, correction, fweights, aweights) +_TEST_CASES_DATA = [ + ((5,), None, 0, None, None), + ((3, 5), None, 1, None, None), + ((4, 4), (16, 1), 0, None, None), + ((2, 8), None, 1, None, None), + ((6, 6), None, 0, None, None), + ((1, 7), None, 0, None, None), +] + + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, correction, fweights, aweights in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + kwargs = {"correction": correction} + if fweights is not None: + kwargs["fweights"] = fweights + if aweights is not None: + kwargs["aweights"] = aweights + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cov - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Cov operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cov") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cov(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cov(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cross.py b/test/infinicore/ops/cross.py new file mode 100644 index 000000000..ae1abdd5e --- /dev/null +++ b/test/infinicore/ops/cross.py @@ -0,0 +1,91 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base 
import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, a_strides_or_None, b_strides_or_None) +# infinicore.cross(a, b, dim=-1) + +_TEST_CASES_DATA = [ + ((8, 3), -1, None, None), + ((2, 3, 3), 2, None, (9, 3, 1)), + ((4, 3, 5), 1, None, None), + ((3, 3), -1, (9, 3), None), + ((16, 3), -1, None, None), + ((2, 3, 4, 3), 3, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim = data[0], data[1] + a_strides = data[2] if len(data) > 2 else None + b_strides = data[3] if len(data) > 3 else None + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-4}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + kwargs = {"dim": dim} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cross - OUT_OF_PLACE", + ) + ) + + # explicit out not supported by infinicore.cross + + return test_cases + + +class OpTest(BaseOperatorTest): + """Cross operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cross") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cross(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cross(*args, **kwargs) 
+ + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cross_entropy.py b/test/infinicore/ops/cross_entropy.py new file mode 100644 index 000000000..1efba7833 --- /dev/null +++ b/test/infinicore/ops/cross_entropy.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.tensor import TensorInitializer + +# Test cases format: (input_shape_logits_N_C, target_shape_N, input_strides_or_None, weight_present_bool, ignore_index_or_None) +# infinicore.nn.functional.cross_entropy(input, target, weight=None, ignore_index=-100, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), (4,), None, False, None), + ((8, 10), (8,), None, True, -1), + ((1, 3), (1,), None, False, None), + ((16, 100), (16,), (1600, 100), True, None), + ((3, 7), (3,), None, False, None), + ((2, 2), (2,), None, True, -100), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for ( + logits_shape, + target_shape, + logits_strides, + weight_present, + ignore_index, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + logits = TensorSpec.from_tensor(logits_shape, logits_strides, dtype) + target = TensorSpec.from_tensor( + target_shape, + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=logits_shape[1], + ) + + inputs = [logits, target] + kwargs = {} + if weight_present: + weight_spec = 
TensorSpec.from_tensor((logits_shape[1],), None, dtype) + inputs.append(weight_spec) + if ignore_index is not None: + kwargs["ignore_index"] = ignore_index + + test_cases.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="cross_entropy - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """cross_entropy operator test with simplified implementation""" + + def __init__(self): + super().__init__("cross_entropy") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.cross_entropy(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.cross_entropy(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cummax.py b/test/infinicore/ops/cummax.py new file mode 100644 index 000000000..424d1a60e --- /dev/null +++ b/test/infinicore/ops/cummax.py @@ -0,0 +1,99 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, input_strides_or_None, out_strides_or_None) +# cummax returns (values, indices). We will validate the values tensor using the +# comparison_target; indices are returned by PyTorch but the framework compares +# on the primary output (values). Indices tests are not explicitly compared here. 
+ +_TEST_CASES_DATA = [ + ((13, 4), 1, None, None), + ((13, 4), 0, (10, 1), None), + ((8, 16), 1, None, None), + ((2, 3, 4), 2, None, None), + ((16, 64), 1, (128, 1), (128, 1)), + ((4, 5, 6), 0, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """ + Generate test cases for cummax. + """ + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + out_strides = data[3] if len(data) > 3 else None + + input_supports_inplace = not is_broadcast(in_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, dtype) + + # Out-of-place (returns values, indices) - compare values + kwargs = {"dim": dim} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cummax - OUT_OF_PLACE", + ) + ) + + # Explicit out for values (if supported) - PyTorch doesn't accept out for cummax, so skip + # Note: PyTorch does not support explicit 'out' for cummax, so we don't add out= cases. + + # In-place on input (overwrite) - not supported by PyTorch for cummax + # Note: cummax does not support inplace modification via inplace=True, so no INPLACE cases added. 
+ + return test_cases + + +class OpTest(BaseOperatorTest): + """Cummax operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cummax") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cummax(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cummax(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cummin.py b/test/infinicore/ops/cummin.py new file mode 100644 index 000000000..19ac68b86 --- /dev/null +++ b/test/infinicore/ops/cummin.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, input_strides_or_None, out_strides_or_None) +# cummin returns (values, indices). We validate values similar to cummax. 
+ +_TEST_CASES_DATA = [ + ((13, 4), 1, None, None), + ((13, 4), 0, (10, 1), None), + ((8, 16), 1, None, None), + ((2, 3, 4), 2, None, None), + ((16, 64), 1, (128, 1), (128, 1)), + ((4, 5, 6), 0, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + out_strides = data[3] if len(data) > 3 else None + + # PyTorch doesn't support inplace/out for cummin op; only out-of-place + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"dim": dim} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cummin- OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Cummin operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cummin") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cummin(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cummin(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cumprod.py b/test/infinicore/ops/cumprod.py new file mode 100644 index 000000000..6f39de5f8 --- /dev/null +++ b/test/infinicore/ops/cumprod.py @@ -0,0 +1,83 @@ +import sys +import os + +sys.path.insert(0, 
os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, input_strides_or_None, out_strides_or_None) +# cumprod computes cumulative product along dim. Only the out-of-place form is +# exercised here; explicit out= and in-place variants are not covered. + +_TEST_CASES_DATA = [ + ((13, 4), 1, None, None), + ((13, 4), 0, (10, 1), None), + ((8, 16), 1, None, None), + ((2, 3, 4), 2, None, None), + ((16, 64), 1, (128, 1), None), + ((4, 5, 6), 0, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"dim": dim} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cumprod - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Cumprod operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cumprod") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cumprod(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cumprod(*args, **kwargs) + + +def main(): + """Main entry point""" + 
runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/cumsum.py b/test/infinicore/ops/cumsum.py new file mode 100644 index 000000000..4b0ece287 --- /dev/null +++ b/test/infinicore/ops/cumsum.py @@ -0,0 +1,114 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, input_strides_or_None, out_strides_or_None) +# cumsum supports out= in PyTorch? PyTorch provides infinicore.cumsum(input, dim, *, out=None) +# so we include explicit out cases. + +_TEST_CASES_DATA = [ + ((13, 4), 1, None, None), + ((13, 4), 0, (10, 1), None), + ((8, 16), 1, None, None), + ((2, 3, 4), 2, None, None), + ((16, 64), 1, (128, 1), (128, 1)), + ((4, 5, 6), 0, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + out_strides = data[3] if len(data) > 3 else None + + out_supports_inplace = not is_broadcast(out_strides) + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, dtype) + + # Out-of-place: pass dim as positional argument to match infinicore.cumsum(input, dim, *, dtype=None, out=None) + test_cases.append( + TestCase( + inputs=[input_spec, dim], + kwargs={}, + 
output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"cumsum - OUT_OF_PLACE", + ) + ) + + # Explicit out + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec, dim], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description=f"cumsum - INPLACE(out)", + ) + ) + + # In-place on input (overwrite) - if input supports inplace and op accepts out param + if input_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec, dim], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description=f"cumsum - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Cumsum operator test with simplified implementation""" + + def __init__(self): + super().__init__("Cumsum") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.cumsum(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.cumsum(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/deg2rad.py b/test/infinicore/ops/deg2rad.py new file mode 100644 index 000000000..6c5f08a4a --- /dev/null +++ b/test/infinicore/ops/deg2rad.py @@ -0,0 +1,118 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# Test cases format: (in_shape, in_strides_or_None) +# 
======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((13, 4), (0, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((16, 5632), None), + ((2, 3, 4), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """ + Parse deg2rad test cases. + """ + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="deg2rad - OUT_OF_PLACE", + ) + ) + + # Explicit out if output supports inplace (same shape and not broadcast) + # We assume out supports inplace for same-shaped dense tensors. 
+ test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="deg2rad - INPLACE(out)", + ) + ) + + # In-place overwrite input + if input_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="deg2rad - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Deg2rad operator test with simplified implementation""" + + def __init__(self): + super().__init__("Deg2rad") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.deg2rad(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.deg2rad(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/det.py b/test/infinicore/ops/det.py new file mode 100644 index 000000000..511ecdf3a --- /dev/null +++ b/test/infinicore/ops/det.py @@ -0,0 +1,75 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (matrix_shape, strides_or_None) +# det(input) — only out-of-place (no inplace/out parameter for det) + +_TEST_CASES_DATA = [ + ((1, 1), None), + ((2, 2), None), + ((3, 3), (3, 1)), + ((4, 4), None), + ((8, 8), (512, 1)), + ((16, 16), None), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol 
= _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + + test_cases.append( + TestCase( + inputs=[spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="det - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """det operator test with simplified implementation""" + + def __init__(self): + super().__init__("det") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.det(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.det(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/diag.py b/test/infinicore/ops/diag.py new file mode 100644 index 000000000..8ce8d2c34 --- /dev/null +++ b/test/infinicore/ops/diag.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, diagonal_k_or_None) +# infinicore.diag: behavior depends on input dim: 1-D -> returns 2-D diag matrix; 2-D -> returns 1-D diagonal + +_TEST_CASES_DATA = [ + ((4,), None, None), + ((3, 3), None, None), + ((5,), (0,), None), + ((6, 6), (360, 60), 1), + ((2, 4), None, -1), + ((1,), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for 
shape, strides, k in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if k is not None: + kwargs["diagonal"] = k + + test_cases.append( + TestCase( + inputs=[spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="diag - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """diag operator test with simplified implementation""" + + def __init__(self): + super().__init__("diag") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.diag(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.diag(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/diag_embed.py b/test/infinicore/ops/diag_embed.py new file mode 100644 index 000000000..e2d90dcc4 --- /dev/null +++ b/test/infinicore/ops/diag_embed.py @@ -0,0 +1,80 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, offset_or_None) +# diag_embed(input, offset=0, dim1=-2, dim2=-1) + +_TEST_CASES_DATA = [ + ((3,), None, None), + ((4,), None, 1), + ((2, 5), None, 0), + ((6,), (0,), None), + ((1,), None, -1), + ((8,), None, 2), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, 
infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, offset in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + kwargs = {} + if offset is not None: + kwargs["offset"] = offset + + test_cases.append( + TestCase( + inputs=[spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="diag_embed - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """diag_embed operator test with simplified implementation""" + + def __init__(self): + super().__init__("diag_embed") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.diag_embed(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.diag_embed(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/diagflat.py b/test/infinicore/ops/diagflat.py new file mode 100644 index 000000000..938d382ee --- /dev/null +++ b/test/infinicore/ops/diagflat.py @@ -0,0 +1,80 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, offset_or_None) +# diagflat(input, offset=0, dim=0) + +_TEST_CASES_DATA = [ + ((4,), None, None), + ((3, 1), None, 1), + ((2, 2), (8, 1), 0), + ((1,), None, -1), + ((6,), None, 2), + ((8, 2), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, 
"rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, offset in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + kwargs = {} + if offset is not None: + kwargs["offset"] = offset + + test_cases.append( + TestCase( + inputs=[spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="diagflat - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """diagflat operator test with simplified implementation""" + + def __init__(self): + super().__init__("diagflat") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.diagflat(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.diagflat(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/diagonal.py b/test/infinicore/ops/diagonal.py new file mode 100644 index 000000000..34d1a763a --- /dev/null +++ b/test/infinicore/ops/diagonal.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, offset_or_None, dim1_or_None, dim2_or_None) +# infinicore.diagonal(input, offset=0, dim1=0, dim2=1) + +_TEST_CASES_DATA = [ + ((3, 4), None, None, None, None), + ((5, 5), (300, 60), 1, None, None), + ((2, 3, 3), None, 
0, -2, -1), + ((4, 4), None, -1, None, None), + ((1, 6), None, None, None, None), + ((8, 8), (512, 1), 2, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, offset, d1, d2 in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + kwargs = {} + if offset is not None: + kwargs["offset"] = offset + if d1 is not None: + kwargs["dim1"] = d1 + if d2 is not None: + kwargs["dim2"] = d2 + + test_cases.append( + TestCase( + inputs=[spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="diagonal - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """diagonal operator test with simplified implementation""" + + def __init__(self): + super().__init__("diagonal") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.diagonal(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.diagonal(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/diagonal_scatter.py b/test/infinicore/ops/diagonal_scatter.py new file mode 100644 index 000000000..ad2659187 --- /dev/null +++ b/test/infinicore/ops/diagonal_scatter.py @@ -0,0 +1,124 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import 
BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# =============================================================================== +# Operator-specific configuration +# =============================================================================== + +# Test cases format: (shape, input_strides, src_strides_or_None, offset) +# diagonal_scatter writes values from src into input along diagonals specified by offset +_TEST_CASES_DATA = [ + ((6, 6), None, None, 0), + ((8, 8), (16, 1), None, 1), + ((7, 5), None, (10, 1), -1), + ((4, 9), None, None, 2), + ((10, 10), (20, 1), (20, 1), 0), + ((3, 5), None, None, -2), +] +# Test cases format: (shape, input_strides_or_None, src_strides_or_None, offset, optional_dim1, optional_dim2) +_TEST_CASES_DATA = [ + ((6, 6), None, None, 0, 0, 1), + ((8, 8), (16, 1), None, 1, 0, 1), + ((7, 5), None, (4,), -1, 0, 1), + ((4, 9), None, None, 2, 0, 1), + ((10, 10), (20, 1), (2,), 0, 0, 1), + ((3, 5), None, None, -2, 0, 1), +] + +# Tolerance configuration +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +# Data types to test for payload tensors +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """ + Parse diagonal_scatter test cases. 
+ Format: (shape, input_strides, index_strides, src_strides, offset) + """ + test_cases = [] + + for data in _TEST_CASES_DATA: + shape, in_strides, src_strides, offset, dim1, dim2 = data + + # Determine in-place support by checking if input/src are broadcast + in_supports_inplace = not is_broadcast(in_strides) + src_supports_inplace = not is_broadcast(src_strides) + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + dummy = torch.zeros(*shape) + diag = torch.diagonal(dummy, offset=offset, dim1=dim1, dim2=dim2) + diag_len = diag.numel() + src_shape = (diag_len,) + src_spec = TensorSpec.from_tensor(src_shape, src_strides, dtype) + + # Out-of-place (return value) + test_cases.append( + TestCase( + inputs=[input_spec, src_spec], + kwargs={"offset": offset, "dim1": dim1, "dim2": dim2}, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"diagonal_scatter - OUT_OF_PLACE", + ) + ) + + # In-place on input (modify input) + if in_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec, src_spec], + kwargs={"offset": offset, "dim1": dim1, "dim2": dim2}, + output_spec=None, + comparison_target=0, + tolerance=tolerance, + description=f"diagonal_scatter - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """DiagonalScatter operator test with simplified implementation""" + + def __init__(self): + super().__init__("DiagonalScatter") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.diagonal_scatter(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.diagonal_scatter(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == 
"__main__": + main() diff --git a/test/infinicore/ops/diff.py b/test/infinicore/ops/diff.py new file mode 100644 index 000000000..02bf0abd2 --- /dev/null +++ b/test/infinicore/ops/diff.py @@ -0,0 +1,94 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, n, dim, input_strides_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), 1, 1, None), + ((13, 6), 2, 1, (12, 1)), + ((8, 16), 1, 0, None), + ((2, 3, 5), 1, 2, None), + ((16, 64), 3, 1, None), + ((4, 5, 6), 2, 0, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, n, dim = data[0], data[1], data[2] + in_strides = data[3] if len(data) > 3 else None + + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_shape = list(shape) + # diff reduces size along dim by n (if valid) — shapes may differ + try: + out_shape[dim] = out_shape[dim] - n + except Exception: + pass + out_spec = TensorSpec.from_tensor(tuple(out_shape), None, dtype) + + kwargs = {"n": n, "dim": dim} + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"diff - OUT_OF_PLACE", + ) + ) + + # PyTorch does not support explicit out for diff — skip explicit out tests + # Note: PyTorch diff does not accept out parameter; hence no 
INPLACE(out) cases. + + return test_cases + + +class OpTest(BaseOperatorTest): + """Diff operator test with simplified implementation""" + + def __init__(self): + super().__init__("Diff") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.diff(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.diff(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/digamma.py b/test/infinicore/ops/digamma.py new file mode 100644 index 000000000..eb523d18c --- /dev/null +++ b/test/infinicore/ops/digamma.py @@ -0,0 +1,111 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, input_strides_or_None) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 16), None), + ((8, 16), (40, 1)), + ((2, 3, 4), None), + ((16, 5632), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = 
_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="digamma - OUT_OF_PLACE", + ) + ) + + # Explicit out + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="digamma - INPLACE(out)", + ) + ) + + # In-place on input + if supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="digamma - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Digamma operator test with simplified implementation""" + + def __init__(self): + super().__init__("Digamma") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.digamma(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.digamma(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/dist.py b/test/infinicore/ops/dist.py new file mode 100644 index 000000000..f2fd2c2b7 --- /dev/null +++ b/test/infinicore/ops/dist.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, p_or_None) +# dist computes p-norm distance between two 
tensors + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), (16, 1), 1.0), + ((2, 3, 4), None, None, 2.0), + ((1, 8), None, (0, 1), None), + ((16, 64), (128, 1), (128, 1), 3.0), + ((4, 5, 6), (60, 12, 2), (60, 12, 2), 0.5), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides, p = data + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + kwargs = {} + if p is not None: + kwargs["p"] = p + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Dist - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Dist operator test with simplified implementation""" + + def __init__(self): + super().__init__("Dist") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.dist(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.dist(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/dot.py b/test/infinicore/ops/dot.py new file mode 100644 index 000000000..fcf0f1147 --- /dev/null +++ b/test/infinicore/ops/dot.py @@ -0,0 +1,80 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, 
TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (vec1_shape, vec2_shape, vec1_strides_or_None, vec2_strides_or_None) +# infinicore.dot(a, b) — 1-D vectors; returns scalar + +_TEST_CASES_DATA = [ + ((3,), (3,), None, None), + ((8,), (8,), None, None), + ((1,), (1,), None, None), + ((16,), (16,), None, None), + ((5,), (5,), None, None), + ((32,), (32,), None, None), + ((8,), (8,), (2,), (2,)), +] + + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for s1, s2, st1, st2 in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(s1, st1, dtype) + b = TensorSpec.from_tensor(s2, st2, dtype) + + test_cases.append( + TestCase( + inputs=[a, b], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="dot - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """dot operator test with simplified implementation""" + + def __init__(self): + super().__init__("dot") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.dot(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.dot(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/dropout1d.py b/test/infinicore/ops/dropout1d.py new file mode 100644 index 000000000..6b094bed2 --- /dev/null +++ b/test/infinicore/ops/dropout1d.py @@ -0,0 +1,86 @@ +import sys +import os + 
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, p, training, in_strides_or_None) +# infinicore.nn.functional.dropout1d(input, p=0.5, training=True) + +_TEST_CASES_DATA = [ + ((8, 16), 0.1, True, None), + ((8, 16), 0.2, False, (128, 1)), + ((2, 3, 4), 0.5, True, None), + ((16, 64), 0.3, True, None), + ((4, 5, 6), 0.5, False, None), + ((3, 4, 5), 0.4, True, (60, 20, 4)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, p, training = data[0], data[1], data[2] + in_strides = data[3] if len(data) > 3 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"p": p, "training": training} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"dropout1d - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Dropout1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("Dropout1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.dropout1d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return 
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (shape, p, training, in_strides_or_None)
# infinicore.nn.functional.dropout2d(input, p=0.5, training=True)

_TEST_CASES_DATA = [
    ((8, 16, 8, 8), 0.1, True, None),
    ((8, 16, 8, 8), 0.2, False, (1024, 64, 8, 1)),
    ((2, 3, 4, 8), 0.5, True, None),
    ((16, 64, 4, 4), 0.3, True, None),
    ((4, 5, 6, 8), 0.5, False, None),
    ((3, 4, 5, 5), 0.4, True, (60, 20, 4, 1)),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Build dropout2d test cases: one out-of-place case per data row × dtype.

    Dropout output is stochastic when training=True, so only out-of-place
    cases are generated; no explicit-out / in-place variants.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape, p, training = data[0], data[1], data[2]
        in_strides = data[3] if len(data) > 3 else None

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            # Plain string description (the previous f-prefix had no placeholders).
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"p": p, "training": training},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="dropout2d - OUT_OF_PLACE",
                )
            )

    return test_cases
class OpTest(BaseOperatorTest):
    """Dropout2d operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Dropout2d")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        # Delegate straight to the PyTorch reference implementation.
        return torch.nn.functional.dropout2d(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.dropout2d(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
class OpTest(BaseOperatorTest):
    """Dropout3d operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Dropout3d")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        # Reference path: PyTorch's functional dropout3d.
        return torch.nn.functional.dropout3d(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.dropout3d(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.tensor import TensorInitializer
from framework.runner import GenericTestRunner

# Test cases format: (num_embeddings, embedding_dim, indices_shape, padding_idx_or_None)
# Embedding typically uses contiguous weight tensors; we include different sizes and padding_idx.
_TEST_CASES_DATA = [
    (10, 4, (3, 5), None),
    (20, 8, (6,), None),
    (5, 3, (2, 2), 0),
    (15, 6, (4, 3), None),
    (7, 7, (1, 10), None),
    (12, 5, (3, 3), 1),
]

_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}}
_TENSOR_DTYPES = [infinicore.float32]


def parse_test_cases():
    """Build embedding lookup cases.

    Inputs are a (num_embeddings, embedding_dim) float32 weight table and an
    int64 index tensor drawn from [0, num_embeddings); `padding_idx` is passed
    through only when the data row specifies one.
    """
    test_cases = []
    for num_embeddings, emb_dim, idx_shape, padding_idx in _TEST_CASES_DATA:
        # weight is (num_embeddings, emb_dim)
        weight_spec = TensorSpec.from_tensor(
            (num_embeddings, emb_dim), None, infinicore.float32
        )
        indices_spec = TensorSpec.from_tensor(
            idx_shape,
            None,
            infinicore.int64,
            init_mode=TensorInitializer.RANDINT,
            low=0,
            high=num_embeddings,  # randint high is exclusive → valid rows 0..num_embeddings-1
        )

        kwargs = {}
        if padding_idx is not None:
            kwargs["padding_idx"] = padding_idx

        # Plain string description (no placeholders, so no f-prefix needed).
        test_cases.append(
            TestCase(
                inputs=[weight_spec, indices_spec],
                kwargs=kwargs,
                output_spec=None,
                comparison_target=None,
                tolerance=_TOLERANCE_MAP[infinicore.float32],
                description="embedding - OUT_OF_PLACE",
            )
        )

    return test_cases


class OpTest(BaseOperatorTest):
    """Embedding operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Embedding")

    def get_test_cases(self):
        # NOTE(review): torch.embedding takes (weight, indices) in that order,
        # unlike torch.nn.functional.embedding which takes (input, weight).
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        return torch.embedding(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.embedding(*args, **kwargs)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.datatypes import to_torch_dtype
from framework.runner import GenericTestRunner

# Test cases format: (shape, dtype)
# Note: infinicore.empty returns uninitialized memory. Tests will compare shape and dtype via output_spec
_TEST_CASES_DATA = [
    ((3, 4), infinicore.float32),
    ((6, 2), infinicore.float16),
    ((5, 5), infinicore.float32),
    ((1, 7), infinicore.bfloat16),
    ((8, 3), infinicore.float32),
    ((2, 2, 2), infinicore.float16),
]

_TOLERANCE_MAP = {infinicore.float32: {"atol": 0, "rtol": 0}}


def parse_test_cases():
    """Build `empty` cases: size/dtype passed as kwargs, compared via 'out'.

    NOTE(review): `empty` yields uninitialized values, so value comparison is
    meaningless — this presumably relies on the framework writing into the
    same `out` tensor on both sides (or checking only shape/dtype); confirm
    against the runner before trusting failures here.
    """
    test_cases = []
    for shape, dtype in _TEST_CASES_DATA:
        out_spec = TensorSpec.from_tensor(shape, None, dtype)

        # Plain string description (the previous f-prefix had no placeholders).
        test_cases.append(
            TestCase(
                inputs=[],
                kwargs={"size": shape, "dtype": dtype},
                output_spec=out_spec,
                comparison_target="out",
                tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}),
                description="empty - OUT_OF_PLACE",
            )
        )

    return test_cases
+ + if "dtype" not in kwargs: + raise TypeError("full test did not provide 'dtype' parameter") + dtype_torch = to_torch_dtype(kwargs.pop("dtype")) + if dtype_torch is None: + raise TypeError("full test provided unsupported 'dtype' parameter") + + # 支持测试框架通过 kwargs 注入 out 参数 + out = kwargs.pop("out", None) + + if out is not None: + return torch.empty(tuple(size), dtype=dtype_torch, out=out) + else: + return torch.empty(tuple(size), dtype=dtype_torch) + # return infinicore.empty(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.empty(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/empty_like.py b/test/infinicore/ops/empty_like.py new file mode 100644 index 000000000..abf6db2e5 --- /dev/null +++ b/test/infinicore/ops/empty_like.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.datatypes import to_torch_dtype +from framework.runner import GenericTestRunner + +# Test cases format: (base_shape, base_strides_or_None, dtype_or_None) +# Note: empty_like returns uninitialized memory; we validate shape/dtype via output_spec +_TEST_CASES_DATA = [ + ((3, 4), None, None), + ((6, 2), (12, 1), infinicore.float16), + ((5, 5), None, infinicore.float32), + ((1, 7), None, infinicore.bfloat16), + ((8, 3), (24, 1), None), + ((2, 2, 2), None, infinicore.float32), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-6, "rtol": 1e-5}, + infinicore.float16: {"atol": 1e-3, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 1e-2}, +} + +_TENSOR_DTYPES = [infinicore.float32, infinicore.float16, infinicore.bfloat16] + + +def parse_test_cases(): + 
def parse_test_cases():
    """Build empty_like cases: each (shape, strides, target-dtype) row × input dtype.

    NOTE(review): empty_like returns uninitialized values; comparison at
    comparison_target=None presumably checks only shape/dtype — confirm
    against the runner.
    """
    test_cases = []
    for base_shape, base_strides, dtype in _TEST_CASES_DATA:
        for input_dtype in _TENSOR_DTYPES:
            base_spec = TensorSpec.from_tensor(base_shape, base_strides, input_dtype)

            # Only forward `dtype` when the row overrides it.
            kwargs = {"dtype": dtype} if dtype is not None else {}

            test_cases.append(
                TestCase(
                    inputs=[base_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(
                        input_dtype, {"atol": 1e-5, "rtol": 1e-4}
                    ),
                    description="empty_like - OUT_OF_PLACE",
                )
            )

    return test_cases


class OpTest(BaseOperatorTest):
    """EmptyLike operator test with simplified implementation"""

    def __init__(self):
        super().__init__("EmptyLike")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        # Translate the infinicore dtype (if any) to a torch dtype; forward the
        # remaining kwargs instead of silently discarding them, so an unexpected
        # keyword fails loudly in torch rather than being ignored.
        dtype_torch = (
            to_torch_dtype(kwargs.pop("dtype")) if "dtype" in kwargs else None
        )
        return torch.empty_like(*args, dtype=dtype_torch, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.empty_like(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
_TEST_CASES_DATA = [
    ((3, 4), (16, 1), infinicore.float32),
    ((4, 3), (12, 4), infinicore.float16),
    ((2, 5), (20, 1), infinicore.float32),
    ((1, 6), (48, 8), infinicore.bfloat16),
    ((2, 2, 2), (8, 4, 2), infinicore.float32),
    ((5,), (1,), infinicore.float32),
]

_TOLERANCE_MAP = {
    infinicore.float32: {"atol": 0, "rtol": 0},
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-3},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 1e-2},
}


def parse_test_cases():
    """Build empty_strided cases: size/stride/dtype forwarded as kwargs.

    NOTE(review): empty_strided returns uninitialized values; with
    comparison_target=None the runner presumably checks only shape/stride/
    dtype — confirm before trusting value mismatches here.
    """
    test_cases = []
    for shape, stride, dtype in _TEST_CASES_DATA:
        kwargs = {"size": shape, "stride": stride, "dtype": dtype}

        # Plain string description (the previous f-prefix had no placeholders).
        test_cases.append(
            TestCase(
                inputs=[],
                kwargs=kwargs,
                output_spec=None,
                comparison_target=None,
                tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}),
                description="empty_strided - OUT_OF_PLACE",
            )
        )

    return test_cases


class OpTest(BaseOperatorTest):
    """EmptyStrided operator test with simplified implementation"""

    def __init__(self):
        super().__init__("EmptyStrided")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """Reference call: validate and translate kwargs, then torch.empty_strided.

        Raises TypeError when a required parameter is missing or the dtype
        cannot be mapped to a torch dtype.
        """
        if "size" not in kwargs:
            raise TypeError("empty_strided test did not provide 'size' parameter")
        size = kwargs.pop("size")

        if "stride" not in kwargs:
            raise TypeError("empty_strided test did not provide 'stride' parameter")
        stride = kwargs.pop("stride")

        if "dtype" not in kwargs:
            raise TypeError("empty_strided test did not provide 'dtype' parameter")
        dtype_torch = to_torch_dtype(kwargs.pop("dtype"))
        if dtype_torch is None:
            raise TypeError("empty_strided test provided unsupported 'dtype' parameter")

        return torch.empty_strided(tuple(size), stride=stride, dtype=dtype_torch)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.empty_strided(*args, **kwargs)
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None)
# equal compares element-wise and returns boolean tensor

_TEST_CASES_DATA = [
    ((8, 8), None, None, None),
    ((8, 8), (16, 1), (16, 1), None),
    ((8, 8), None, (0, 1), None),
    ((1, 8), None, None, (8, 1)),
    ((2, 3, 4), None, None, None),
    ((16, 128), (256, 1), (256, 1), None),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0, "rtol": 1e-2},
    infinicore.float32: {"atol": 0, "rtol": 1e-3},
    infinicore.int32: {"atol": 0, "rtol": 0},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.int32]


def parse_test_cases():
    """Build element-wise equality cases.

    torch.eq requires any explicit `out` tensor to have dtype bool, so only
    the out-of-place form and an explicit bool `out` are exercised. Writing
    the boolean result back into a float/int *input* tensor (the former
    INPLACE(a)/INPLACE(b) cases) is not a valid torch.eq call and is
    deliberately omitted.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape, a_strides, b_strides, out_strides = data[0], data[1], data[2], data[3]

        out_supports_inplace = not is_broadcast(out_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            a_spec = TensorSpec.from_tensor(shape, a_strides, dtype)
            b_spec = TensorSpec.from_tensor(shape, b_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool)

            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[a_spec, b_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="Equal - OUT_OF_PLACE",
                )
            )

            # Explicit bool out tensor (kwargs must be a dict, not None,
            # for consistency with the other operator tests).
            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="Equal - INPLACE(out)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """Equal operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Equal")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        return torch.eq(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.eq(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
def parse_test_cases():
    """Build erf cases: out-of-place, explicit out=, and in-place-on-input
    (the last only when the input strides are not broadcast strides)."""
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None

        supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, None, dtype)

            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="erf - OUT_OF_PLACE",
                )
            )

            # kwargs is an (empty) dict, not None, for consistency with the
            # other operator tests and any framework code doing **case.kwargs.
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="erf - INPLACE(out)",
                )
            )

            if supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="erf - INPLACE(input)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """Erf operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Erf")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        return torch.erf(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.erf(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
def parse_test_cases():
    """Build erfc cases: out-of-place, explicit out=, and in-place-on-input
    (the last only when the input strides are not broadcast strides)."""
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None

        supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, None, dtype)

            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="erfc - OUT_OF_PLACE",
                )
            )

            # kwargs is an (empty) dict, not None, for consistency with the
            # other operator tests and any framework code doing **case.kwargs.
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="erfc - INPLACE(out)",
                )
            )

            if supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="erfc - INPLACE(input)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """Erfc operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Erfc")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        return torch.erfc(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.erfc(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
def parse_test_cases():
    """Build erfinv cases: out-of-place, explicit out=, and in-place-on-input.

    erfinv's domain is (-1, 1); input generation is expected to stay within
    that range (see the module header note).
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None

        supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, None, dtype)

            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="erfinv - OUT_OF_PLACE",
                )
            )

            # kwargs is an (empty) dict, not None, for consistency with the
            # other operator tests and any framework code doing **case.kwargs.
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="erfinv - INPLACE(out)",
                )
            )

            if supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="erfinv - INPLACE(input)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """Erfinv operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Erfinv")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        return torch.erfinv(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.erfinv(*args, **kwargs)
def parse_test_cases():
    """Enumerate exp2 cases: out-of-place, explicit out=, and in-place-on-input."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            source = TensorSpec.from_tensor(shape, strides, dtype)
            dest = TensorSpec.from_tensor(shape, None, dtype)

            # 1) plain out-of-place call
            cases.append(
                TestCase(
                    inputs=[source],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="exp2 - OUT_OF_PLACE",
                )
            )
            # 2) result written into a caller-provided tensor
            cases.append(
                TestCase(
                    inputs=[source],
                    kwargs={},
                    output_spec=dest,
                    comparison_target="out",
                    tolerance=tolerance,
                    description="exp2 - INPLACE(out)",
                )
            )
            # 3) overwrite the input itself (skipped for broadcast strides)
            if not is_broadcast(source.strides):
                cases.append(
                    TestCase(
                        inputs=[source],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="exp2 - INPLACE(a)",
                    )
                )

    return cases
def parse_test_cases():
    """Enumerate expm1 cases: out-of-place, explicit out=, and in-place-on-input."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            source = TensorSpec.from_tensor(shape, strides, dtype)
            dest = TensorSpec.from_tensor(shape, None, dtype)

            # 1) plain out-of-place call
            cases.append(
                TestCase(
                    inputs=[source],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="expm1 - OUT_OF_PLACE",
                )
            )
            # 2) result written into a caller-provided tensor
            cases.append(
                TestCase(
                    inputs=[source],
                    kwargs={},
                    output_spec=dest,
                    comparison_target="out",
                    tolerance=tolerance,
                    description="expm1 - INPLACE(out)",
                )
            )
            # 3) overwrite the input itself (skipped for broadcast strides)
            if not is_broadcast(source.strides):
                cases.append(
                    TestCase(
                        inputs=[source],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="expm1 - INPLACE(a)",
                    )
                )

    return cases


class OpTest(BaseOperatorTest):
    """Expm1 operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Expm1")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        # Reference path: torch.expm1 == exp(x) - 1, numerically stable near 0.
        return torch.expm1(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.expm1(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, p, training = data[0], data[1], data[2] + in_strides = data[3] if len(data) > 3 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"p": p, "training": training} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"feature_alpha_dropout - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """FeatureAlphaDropout operator test with simplified implementation""" + + def __init__(self): + super().__init__("FeatureAlphaDropout") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.feature_alpha_dropout(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.feature_alpha_dropout(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/flip.py b/test/infinicore/ops/flip.py new file mode 100644 index 000000000..77841a9b8 --- /dev/null +++ b/test/infinicore/ops/flip.py @@ -0,0 +1,92 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dims_tuple, input_strides_or_None) +# infinicore.flip(input, dims) + +_TEST_CASES_DATA = [ + ((13, 4), (0,), None), + ((8, 16), (1,), (128, 1)), 
+ ((2, 3, 4), (2,), None), + ((4, 5, 6), (0, 2), None), + ((16, 64), (0, 1), None), + ((2, 2, 3, 4), (1, 3), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dims = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"dims": dims} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"flip - OUT_OF_PLACE", + ) + ) + + # infinicore.flip has no explicit out or inplace flag; skip in-place/out variants. 
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """Flip operator test with simplified implementation"""
+
+    def __init__(self):
+        super().__init__("Flip")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, **kwargs):
+        # dims = kwargs.pop("dims", None)
+        # if dims is not None:
+        #     return infinicore.flip(*args, dims)
+        return torch.flip(*args, **kwargs)
+
+    # def infinicore_operator(self, *args, **kwargs):
+    #     """InfiniCore implementation (operator not yet available)."""
+    #     dims = kwargs.pop("dims", None)
+    #     if dims is not None:
+    #         return infinicore.flip(*args, dims)
+    #     return infinicore.flip(*args, **kwargs)
+
+
+def main():
+    """Main entry point"""
+    runner = GenericTestRunner(OpTest)
+    runner.run_and_exit()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/infinicore/ops/fliplr.py b/test/infinicore/ops/fliplr.py
new file mode 100644
index 000000000..c9c361238
--- /dev/null
+++ b/test/infinicore/ops/fliplr.py
@@ -0,0 +1,83 @@
+import sys
+import os
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+import torch
+import infinicore
+from framework.base import BaseOperatorTest, TensorSpec, TestCase
+from framework.runner import GenericTestRunner
+from framework.utils import is_broadcast
+
+# Test cases format: (shape, input_strides_or_None)
+# infinicore.fliplr(input) flips left/right along dim 1 (input must be at least 2-D)
+
+_TEST_CASES_DATA = [
+    ((13, 4), None),
+    ((8, 16), (128, 1)),
+    ((2, 3, 4), None),
+    ((4, 5), None),
+    ((16, 64), None),
+    ((3, 4, 5), (60, 20, 4)),
+]
+
+_TOLERANCE_MAP = {
+    infinicore.float16: {"atol": 0, "rtol": 1e-2},
+    infinicore.float32: {"atol": 0, "rtol": 1e-4},
+    infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
+}
+
+_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
+
+
+def parse_test_cases():
+    test_cases = []
+    for data in _TEST_CASES_DATA:
+        shape = data[0]
+        in_strides = data[1] if len(data) > 1 else None
+
+        
supports_inplace = not is_broadcast(in_strides)
+
+        for dtype in _TENSOR_DTYPES:
+            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-4})
+            in_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
+
+            test_cases.append(
+                TestCase(
+                    inputs=[in_spec],
+                    kwargs={},
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=tol,
+                    description="fliplr - OUT_OF_PLACE",
+                )
+            )
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """FlipLR operator test with simplified implementation"""
+
+    def __init__(self):
+        super().__init__("FlipLR")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, **kwargs):
+        return torch.fliplr(*args, **kwargs)
+
+    # def infinicore_operator(self, *args, **kwargs):
+    #     """InfiniCore implementation (operator not yet available)."""
+    #     return infinicore.fliplr(*args, **kwargs)
+
+
+def main():
+    """Main entry point"""
+    runner = GenericTestRunner(OpTest)
+    runner.run_and_exit()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/infinicore/ops/flipud.py b/test/infinicore/ops/flipud.py
new file mode 100644
index 000000000..baf171f82
--- /dev/null
+++ b/test/infinicore/ops/flipud.py
@@ -0,0 +1,83 @@
+import sys
+import os
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+import torch
+import infinicore
+from framework.base import BaseOperatorTest, TensorSpec, TestCase
+from framework.runner import GenericTestRunner
+from framework.utils import is_broadcast
+
+# Test cases format: (shape, input_strides_or_None)
+# infinicore.flipud(input) flips up/down along dim 0 (input must be at least 1-D)
+
+_TEST_CASES_DATA = [
+    ((13, 4), None),
+    ((8, 16), (128, 1)),
+    ((2, 3, 4), None),
+    ((4, 5), None),
+    ((16, 64), None),
+    ((3, 4, 5), (60, 20, 4)),
+]
+
+_TOLERANCE_MAP = {
+    infinicore.float16: {"atol": 0, "rtol": 1e-2},
+    infinicore.float32: {"atol": 0, "rtol": 1e-4},
+    infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
+}
+
+_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, 
infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="flipud - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """FlipUD operator test with simplified implementation""" + + def __init__(self): + super().__init__("FlipUD") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.flipud(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.flipud(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/float_power.py b/test/infinicore/ops/float_power.py new file mode 100644 index 000000000..db94f6c6d --- /dev/null +++ b/test/infinicore/ops/float_power.py @@ -0,0 +1,123 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, exponent_scalar_or_None, exponent_tensor_shape_or_None) +# infinicore.float_power(input, exponent) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, 2.0, None), + ((1, 4, 8), (32, 8, 1), None, (1, 4, 8)), + ((3, 2, 5, 7), None, 3.0, None), + ((2, 1, 16), None, None, (2, 1, 16)), + 
((1, 8, 9, 11), (792, 99, 11, 1), 1.5, None), + ((2, 6, 10), None, None, (2, 6, 10)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + dtype_map = { + infinicore.float16: infinicore.float64, + infinicore.float32: infinicore.float64, + infinicore.complex64: infinicore.complex128, + } + + for shape, strides, exp_scalar, exp_tensor_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + input_spec = TensorSpec.from_tensor(shape, strides, dtype) + out_dtype = dtype_map.get(dtype, dtype) + + # exponent as scalar + if exp_scalar is not None: + kwargs = {} + cases.append( + TestCase( + inputs=[input_spec, exp_scalar], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="float_power_scalar_exp - OUT_OF_PLACE", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, out_dtype) + cases.append( + TestCase( + inputs=[input_spec, exp_scalar], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="float_power_scalar_exp - INPLACE(out)", + ) + ) + + # exponent as tensor + if exp_tensor_shape is not None: + exp_spec = TensorSpec.from_tensor(exp_tensor_shape, None, dtype) + cases.append( + TestCase( + inputs=[input_spec, exp_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="float_power_tensor_exp - OUT_OF_PLACE", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, out_dtype) + cases.append( + TestCase( + inputs=[input_spec, exp_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="float_power_tensor_exp_explicit_out", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """FloatPower operator test with simplified implementation""" + + def __init__(self): + 
super().__init__("FloatPower") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.float_power(*args, **kwargs) + + +# def infinicore_operator(self, *args, **kwargs): +# """InfiniCore implementation (operator not yet available).""" +# return infinicore.float_power(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/floor.py b/test/infinicore/ops/floor.py new file mode 100644 index 000000000..9f5ffa890 --- /dev/null +++ b/test/infinicore/ops/floor.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.floor(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0.0, "rtol": 0.0}, + infinicore.float32: {"atol": 0.0, "rtol": 0.0}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="floor - OUT_OF_PLACE", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="floor - 
INPLACE(out)", + ) + ) + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="floor - INPLACE(a)", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Floor operator test with simplified implementation""" + + def __init__(self): + super().__init__("Floor") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.floor(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.floor(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/floor_divide.py b/test/infinicore/ops/floor_divide.py new file mode 100644 index 000000000..2a360e439 --- /dev/null +++ b/test/infinicore/ops/floor_divide.py @@ -0,0 +1,113 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None) +# infinicore.floor_divide(a, b) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None), + ((1, 4, 8), (32, 8, 1), None), + ((3, 2, 5, 7), None, None), + ((2, 1, 16), None, None), + ((1, 8, 9, 11), (792, 99, 11, 1), None), + ((2, 6, 10), None, None), +] + +_TOLERANCE_MAP = { + infinicore.int32: {"atol": 0.0, "rtol": 0.0}, + infinicore.int64: {"atol": 0.0, "rtol": 0.0}, +} +_TENSOR_DTYPES = [infinicore.int32, infinicore.int64] + + +def parse_test_cases(): + cases = [] + for a_shape, a_strides, b_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] 
+ a_spec = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor( + a_shape if b_shape is None else b_shape, None, dtype + ) + + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="floor_divide - OUT_OF_PLACE", + ) + ) + out_spec = TensorSpec.from_tensor(a_shape, None, dtype) + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="floor_divide - INPLACE(out)", + ) + ) + + if not is_broadcast(a_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="floor_divide - INPLACE(a)", + ) + ) + if not is_broadcast(b_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="floor_divide - INPLACE(b)", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """FloorDivide operator test with simplified implementation""" + + def __init__(self): + super().__init__("FloorDivide") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.floor_divide(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.floor_divide(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/fmax.py b/test/infinicore/ops/fmax.py new file mode 100644 index 000000000..36e7ae25b --- /dev/null +++ b/test/infinicore/ops/fmax.py @@ -0,0 +1,109 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore 
+from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides, b_strides) +_TEST_CASES_DATA = [ + ((6, 8), None, None), + ((8, 4), (16, 1), None), + ((5, 5), None, (10, 1)), + ((3, 7), (14, 1), (14, 1)), + ((10, 3), None, None), + ((2, 16), (32, 1), (32, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides = data + + a_inplace = not is_broadcast(a_strides) + b_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="fmax - OUT_OF_PLACE", + ) + ) + + # In-place variations + if a_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="fmax - INPLACE(a)", + ) + ) + + if b_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="fmax - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """FMax operator test with simplified implementation""" + + def __init__(self): + super().__init__("FMax") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.fmax(*args, 
**kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.fmax(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/fmin.py b/test/infinicore/ops/fmin.py new file mode 100644 index 000000000..f81831ba5 --- /dev/null +++ b/test/infinicore/ops/fmin.py @@ -0,0 +1,107 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides, b_strides) +_TEST_CASES_DATA = [ + ((6, 8), None, None), + ((8, 4), (16, 1), None), + ((5, 5), None, (10, 1)), + ((3, 7), (14, 1), (14, 1)), + ((10, 3), None, None), + ((2, 16), (32, 1), (32, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides = data + + a_inplace = not is_broadcast(a_strides) + b_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="fmin - OUT", + ) + ) + + if a_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + 
comparison_target=0, + tolerance=tol, + description="fmin - INPLACE(a)", + ) + ) + + if b_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="fmin - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """FMin operator test with simplified implementation""" + + def __init__(self): + super().__init__("FMin") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.fmin(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.fmin(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/fmod.py b/test/infinicore/ops/fmod.py new file mode 100644 index 000000000..97762bda8 --- /dev/null +++ b/test/infinicore/ops/fmod.py @@ -0,0 +1,114 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, other_shape_or_None) +# infinicore.fmod(input, other) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None), + ((1, 4, 8), (32, 8, 1), None), + ((3, 2, 5, 7), None, None), + ((2, 1, 16), None, None), + ((1, 8, 9, 11), (792, 99, 11, 1), None), + ((2, 6, 10), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, other_shape in _TEST_CASES_DATA: + for dtype 
in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + a_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + b_spec = TensorSpec.from_tensor( + in_shape if other_shape is None else other_shape, None, dtype + ) + + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="fmod_out_of_place", + ) + ) + + out_spec = TensorSpec.from_tensor(in_shape, None, dtype) + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="fmod_explicit_out", + ) + ) + + if not is_broadcast(a_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="fmod_inplace_a", + ) + ) + if not is_broadcast(b_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="fmod_inplace_b", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Fmod operator test with simplified implementation""" + + def __init__(self): + super().__init__("Fmod") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.fmod(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.fmod(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/fold.py b/test/infinicore/ops/fold.py new file mode 100644 index 000000000..19db0a2ff --- /dev/null +++ b/test/infinicore/ops/fold.py @@ -0,0 +1,110 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from 
framework.base import BaseOperatorTest, TensorSpec, TestCase
+from framework.runner import GenericTestRunner
+
+# Test cases format: (in_shape, in_strides_or_None, output_size, kernel_size, dilation, padding, stride)
+# For fold/unfold: input is the output of unfold; fold reconstructs image from patches.
+
+_TEST_CASES_DATA = [
+    # ((2, 6, 9), None, (4, 4), (2, 2), 1, 0, (2, 2)),
+    # ((1, 8, 16), None, (8, 8), (4, 4), 1, 0, (2, 2)),
+    # ((3, 4, 12), None, (6, 6), (3, 2), 1, 1, (1, 2)),
+    # ((2, 2, 20), None, (10, 2), (2, 2), 1, 0, (2, 1)),
+    # ((1, 3, 25), None, (5, 5), (5, 5), 1, 0, (5, 5)),
+    # ((2, 5, 18), (90, 18, 1), (9, 2), (3, 2), 1, 0, (2, 1)),
+    # originally corresponded to ((2,3,8,8), None, (3,3), 1, 0, (1,1))
+    # computed: L=6*6=36, channels_for_fold = 3*3*3 = 27
+    ((2, 27, 36), None, (8, 8), (3, 3), 1, 0, (1, 1)),
+    # originally corresponded to ((1,4,10,12), None, (5,3), 1, 1, (2,1))
+    # L = 4 * 12 = 48, channels = 4*5*3 = 60
+    ((1, 60, 48), None, (10, 12), (5, 3), 1, 1, (2, 1)),
+    # originally corresponded to ((2,2,16,16), (512,256,16,1), (4,4), 1, 0, (4,4))
+    # L = 4 * 4 = 16, channels = 2*4*4 = 32
+    ((2, 32, 16), None, (16, 16), (4, 4), 1, 0, (4, 4)),
+    # originally corresponded to ((3,6,7,9), None, (3,2), 1, 0, (1,1))
+    # L = 5 * 8 = 40, channels = 6*3*2 = 36
+    ((3, 36, 40), None, (7, 9), (3, 2), 1, 0, (1, 1)),
+    # originally corresponded to ((1,8,9,11), None, (2,3), 1, 1, (1,2))
+    # L = 10 * 6 = 60, channels = 8*2*3 = 48
+    ((1, 48, 60), None, (9, 11), (2, 3), 1, 1, (1, 2)),
+    # originally corresponded to ((2,5,12,6), (360,72,6,1), (3,3), 1, 0, (2,1))
+    # L = 5 * 4 = 20, channels = 5*3*3 = 45
+    ((2, 45, 20), None, (12, 6), (3, 3), 1, 0, (2, 1)),
+]
+
+_TOLERANCE_MAP = {
+    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
+    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
+}
+
+_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
+
+
+def parse_test_cases():
+    cases = []
+    for (
+        in_shape,
+        in_strides,
+        output_size,
+        kernel_size,
+        dilation,
+        padding,
+        stride,
+    ) in _TEST_CASES_DATA:
+        for dtype in _TENSOR_DTYPES:
+            tol = _TOLERANCE_MAP.get(dtype)
+            in_spec = 
TensorSpec.from_tensor(in_shape, in_strides, dtype) + + kwargs = { + "output_size": output_size, + "kernel_size": kernel_size, + "dilation": dilation, + "padding": padding, + "stride": stride, + } + + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Fold - OUT_OF_PLACE", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Fold operator test with simplified implementation""" + + def __init__(self): + super().__init__("Fold") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.fold(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.fold(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/frac.py b/test/infinicore/ops/frac.py new file mode 100644 index 000000000..d3fc9abf2 --- /dev/null +++ b/test/infinicore/ops/frac.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.frac(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in 
_TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="frac_out", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="frac_out_explicit", + ) + ) + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="frac_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Frac operator test with simplified implementation""" + + def __init__(self): + super().__init__("Frac") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.frac(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.frac(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/fractional_max_pool2d.py b/test/infinicore/ops/fractional_max_pool2d.py new file mode 100644 index 000000000..6a81c7b01 --- /dev/null +++ b/test/infinicore/ops/fractional_max_pool2d.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None, kernel_size, output_size_or_None, return_indices) +# Note: fractional_max_pool may return values and indices; PyTorch 
accepts optional random samples. We avoid +# explicit _random_samples and focus on default behavior. Indices (if returned) form a separate output; tests +# here exercise the value-returning path. + +_TEST_CASES_DATA = [ + ((2, 3, 15, 15), None, (3, 3), (5, 5), False), + ((1, 4, 16, 14), (896, 224, 14, 1), (4, 3), (4, 5), False), + ((2, 2, 17, 19), None, (5, 5), (7, 6), False), + ((3, 6, 9, 11), None, (2, 2), (4, 5), False), + ((1, 8, 20, 20), (3200, 400, 20, 1), (3, 3), (6, 6), False), + ((2, 5, 12, 10), None, (4, 3), (3, 3), False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, kernel_size, out_size, return_indices in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = { + "kernel_size": kernel_size, + "output_size": out_size, + "return_indices": return_indices, + } + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="FractionalMaxPool2d - OUT_OF_PLACE", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """FractionalMaxPool2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("FractionalMaxPool2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.fractional_max_pool2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.fractional_max_pool2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git 
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner

# Test cases format: (in_shape, in_strides_or_None, kernel_size, output_size_or_None, return_indices)
# Note: PyTorch fractional_max_pool3d behaves similarly to fractional_max_pool2d; we avoid _random_samples.

_TEST_CASES_DATA = [
    ((2, 3, 9, 9, 9), None, (3, 3, 3), (4, 4, 4), False),
    ((1, 4, 8, 10, 12), None, (2, 3, 2), (4, 4, 6), False),
    ((2, 2, 7, 11, 5), (770, 110, 55, 5, 1), (3, 2, 3), (3, 4, 2), False),
    ((3, 6, 5, 6, 7), None, (2, 2, 2), (3, 3, 4), False),
    ((1, 8, 10, 10, 10), None, (4, 3, 2), (5, 4, 5), False),
    ((2, 5, 12, 8, 6), None, (3, 3, 2), (4, 3, 2), False),
]

# Per-dtype comparison tolerances.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Expand the raw case table across all supported dtypes (out-of-place only)."""
    return [
        TestCase(
            inputs=[TensorSpec.from_tensor(in_shape, in_strides, dtype)],
            kwargs={
                "kernel_size": kernel_size,
                "output_size": out_size,
                "return_indices": return_indices,
            },
            output_spec=None,
            comparison_target=None,
            tolerance=_TOLERANCE_MAP[dtype],
            description="FractionalMaxPool3d - OUT_OF_PLACE",
        )
        for in_shape, in_strides, kernel_size, out_size, return_indices in _TEST_CASES_DATA
        for dtype in _TENSOR_DTYPES
    ]


class OpTest(BaseOperatorTest):
    """FractionalMaxPool3d operator test with simplified implementation"""

    def __init__(self):
        super().__init__("FractionalMaxPool3d")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.fractional_max_pool3d(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.fractional_max_pool3d(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.datatypes import to_torch_dtype

# Test cases format: (shape, fill_value, dtype)
_TEST_CASES_DATA = [
    ((3, 4), 0.0, infinicore.float32),
    ((6, 2), 1.5, infinicore.float16),
    ((5, 5), -2.0, infinicore.float32),
    ((1, 7), 3.14, infinicore.bfloat16),
    ((8, 3), 42.0, infinicore.float32),
    ((2, 2, 2), 0.5, infinicore.float16),
]

_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}}
_TENSOR_DTYPES = [infinicore.float32, infinicore.float16, infinicore.bfloat16]


def parse_test_cases():
    """Build one explicit-out test case per (shape, fill_value, dtype) row.

    The infinicore dtype is forwarded in kwargs; torch_operator converts it
    to a torch dtype at call time.
    """
    test_cases = []
    for shape, fill_value, dtype in _TEST_CASES_DATA:
        test_cases.append(
            TestCase(
                inputs=[],
                kwargs={"fill_value": fill_value, "size": shape, "dtype": dtype},
                output_spec=TensorSpec.from_tensor(shape, None, dtype),
                comparison_target="out",
                tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
                description="full - OUT_OF_PLACE",
            )
        )
    return test_cases


class OpTest(BaseOperatorTest):
    """Full operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Full")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: torch.full with required kwargs validated up front."""
        for required in ("fill_value", "size", "dtype"):
            if required not in kwargs:
                raise TypeError(f"full test did not provide '{required}' parameter")

        fill_value = kwargs.pop("fill_value")
        size = kwargs.pop("size")
        dtype_torch = to_torch_dtype(kwargs.pop("dtype"))
        if dtype_torch is None:
            raise TypeError("full test provided unsupported 'dtype' parameter")

        # The test framework may inject an `out` tensor through kwargs.
        out = kwargs.pop("out", None)
        if out is None:
            return torch.full(tuple(size), fill_value, dtype=dtype_torch)
        return torch.full(tuple(size), fill_value, dtype=dtype_torch, out=out)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.full(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
# Test cases format: (base_shape, base_strides_or_None, fill_value, dtype_or_None)
_TEST_CASES_DATA = [
    ((3, 4), None, 0.0, None),
    ((6, 2), (12, 1), 1.5, infinicore.float16),
    ((5, 5), None, -2.0, infinicore.float32),
    ((1, 7), None, 3.14, infinicore.bfloat16),
    ((8, 3), (24, 1), 42.0, None),
    ((2, 2, 2), None, 0.5, infinicore.float16),
]

_TOLERANCE_MAP = {
    infinicore.float32: {"atol": 0, "rtol": 0},
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-3},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 1e-2},
}

_TENSOR_DTYPES = [infinicore.float32, infinicore.float16, infinicore.bfloat16]


def parse_test_cases():
    """Expand the case table across all input dtypes.

    FIX: tolerance is looked up on the *effective* output dtype — the explicit
    `dtype` when given, otherwise the input tensor's dtype. The previous code
    keyed the lookup on the possibly-None explicit `dtype`, so every
    `dtype=None` row silently fell back to the generic float32-ish default
    even for float16/bfloat16 inputs.
    """
    test_cases = []
    for base_shape, base_strides, val, dtype in _TEST_CASES_DATA:
        for input_dtype in _TENSOR_DTYPES:
            base_spec = TensorSpec.from_tensor(base_shape, base_strides, input_dtype)

            kwargs = {"fill_value": val}
            if dtype is not None:
                kwargs["dtype"] = dtype

            # full_like inherits the input dtype when no explicit dtype is given.
            effective_dtype = dtype if dtype is not None else input_dtype

            # torch.full_like does not accept an `out=` kwarg in most PyTorch
            # versions; call out-of-place and compare the return value instead.
            test_cases.append(
                TestCase(
                    inputs=[base_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(
                        effective_dtype, {"atol": 1e-5, "rtol": 1e-4}
                    ),
                    description="full_like - OUT_OF_PLACE",
                )
            )

    return test_cases


class OpTest(BaseOperatorTest):
    """FullLike operator test with simplified implementation"""

    def __init__(self):
        super().__init__("FullLike")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: torch.full_like(input, fill_value, dtype=...).

        Raises TypeError when the mandatory fill_value kwarg is missing;
        a missing dtype means "keep the input's dtype" (dtype=None).
        """
        if "fill_value" not in kwargs:
            raise TypeError("full_like test did not provide 'fill_value' parameter")
        fill_value = kwargs.pop("fill_value")

        if "dtype" not in kwargs:
            dtype_torch = None
        else:
            dtype_torch = to_torch_dtype(kwargs.pop("dtype"))
        return torch.full_like(*args, fill_value=fill_value, dtype=dtype_torch)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore full_like implementation (operator not yet available)."""
    #     return infinicore.full_like(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
+ # if "fill_value" not in kwargs: + # raise TypeError("full_like test did not provide 'fill_value' parameter") + # fill_value = kwargs.pop("fill_value") + # + # if "dtype" not in kwargs: + # dtype_infinicore = None + # else: + # # tests pass infinicore dtypes directly, so forward as-is + # dtype_infinicore = kwargs.pop("dtype") + # return infinicore.full_like(*args, fill_value=fill_value, dtype=dtype_infinicore) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/gather.py b/test/infinicore/ops/gather.py new file mode 100644 index 000000000..2496e0512 --- /dev/null +++ b/test/infinicore/ops/gather.py @@ -0,0 +1,96 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.tensor import TensorInitializer +from framework.utils import is_broadcast + +# Test cases format: (input_shape, input_strides_or_None, dim, index_shape) +_TEST_CASES_DATA = [ + ((3, 4), None, 1, (3, 2)), + ((5, 6), (30, 1), 0, (2, 6)), + ((2, 3, 4), None, 2, (2, 3, 2)), + ((4, 4), None, -1, (4, 2)), + ((6, 2), (12, 1), 1, (6, 1)), + ((3, 5), None, 0, (1, 5)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, dim, idx_shape in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + input_ndim = len(shape) + if dim < 0: + actual_dim = input_ndim + dim + else: + actual_dim = dim + + size_dim = shape[actual_dim] + + # index tensor spec:值必须在 [0, size_dim) + index_spec = TensorSpec.from_tensor( + idx_shape, + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=size_dim, # exclusive 
bound + ) + + # gather returns same dtype as input + kwargs = {"dim": dim} + + test_cases.append( + TestCase( + inputs=[input_spec, index_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"gather - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Gather operator test with simplified implementation""" + + def __init__(self): + super().__init__("Gather") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + if "dim" not in kwargs: + raise TypeError("gather test did not provide 'dim' parameter") + dim = kwargs.pop("dim") + input = args[0] + index = args[1] + + return torch.gather(input, dim, index) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.gather(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/gaussian_nll_loss.py b/test/infinicore/ops/gaussian_nll_loss.py new file mode 100644 index 000000000..2ac22f864 --- /dev/null +++ b/test/infinicore/ops/gaussian_nll_loss.py @@ -0,0 +1,89 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, var_present_bool, full_or_None, eps_or_None, input_strides_or_None) +# infinicore.nn.functional.gaussian_nll_loss(input, target, var, full=False, eps=1e-6, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), True, None, None, None), + ((8, 8), True, True, 1e-6, (512, 64)), + ((1, 10), True, False, 1e-3, None), + ((16, 100), True, None, None, None), + ((3, 7), True, True, 1e-5, None), + ((2, 2), True, 
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner

# Test cases format: (input_shape, var_present_bool, full_or_None, eps_or_None, input_strides_or_None)
# infinicore.nn.functional.gaussian_nll_loss(input, target, var, full=False, eps=1e-6, reduction='mean')

_TEST_CASES_DATA = [
    ((4, 5), True, None, None, None),
    ((8, 8), True, True, 1e-6, (512, 64)),
    ((1, 10), True, False, 1e-3, None),
    ((16, 100), True, None, None, None),
    ((3, 7), True, True, 1e-5, None),
    ((2, 2), True, False, None, None),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-1},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Expand the case table across dtypes; optional `full`/`eps` kwargs are
    included only when the row specifies them."""
    cases = []
    for shape, var_present, full, eps, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            # Positional order matches F.gaussian_nll_loss(input, target, var).
            specs = [
                TensorSpec.from_tensor(shape, strides, dtype),
                TensorSpec.from_tensor(shape, None, dtype),
            ]
            if var_present:
                specs.append(TensorSpec.from_tensor(shape, None, dtype))

            extra = {}
            if full is not None:
                extra["full"] = full
            if eps is not None:
                extra["eps"] = eps

            cases.append(
                TestCase(
                    inputs=specs,
                    kwargs=extra,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
                    description="gaussian_nll_loss - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """gaussian_nll_loss operator test with simplified implementation"""

    def __init__(self):
        super().__init__("gaussian_nll_loss")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.gaussian_nll_loss(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.gaussian_nll_loss(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (shape, a_strides, b_strides)
_TEST_CASES_DATA = [
    ((6, 8), None, None),
    ((8, 4), (16, 1), None),
    ((5, 5), None, (10, 1)),
    ((3, 7), (14, 1), (14, 1)),
    ((10, 3), None, None),
    ((2, 16), (32, 1), (32, 1)),
]

_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}}
_TENSOR_DTYPES = [infinicore.int64]


def parse_test_cases():
    """Build out-of-place plus (where strides permit) in-place gcd cases.

    An operand with broadcast strides cannot receive the result in place,
    so those variants are skipped per operand.
    """
    test_cases = []
    for shape, a_strides, b_strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype)
            a_spec = TensorSpec.from_tensor(shape, a_strides, dtype)
            b_spec = TensorSpec.from_tensor(shape, b_strides, dtype)

            def add(kwargs, target, description):
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs=kwargs,
                        output_spec=None,
                        comparison_target=target,
                        tolerance=tol,
                        description=description,
                    )
                )

            add({}, None, "gcd - OUT")
            if not is_broadcast(a_strides):
                add({"out": 0}, 0, "gcd - INPLACE(a)")
            if not is_broadcast(b_strides):
                add({"out": 1}, 1, "gcd - INPLACE(b)")

    return test_cases


class OpTest(BaseOperatorTest):
    """GCD operator test with simplified implementation"""

    def __init__(self):
        super().__init__("GCD")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.gcd(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.gcd(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (in_shape, in_strides_or_None, dim_or_None)

_TEST_CASES_DATA = [
    ((13, 4), None, -1),
    ((8, 6), (48, 1), -1),  # last dim divisible by 2
    ((4, 8, 6), (48, 8, 1), 1),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """GLU: infinicore.nn.functional.glu(input, dim=-1)

    GLU halves the split dimension, so every test shape keeps that
    dimension even. The functional API has no inplace flag, hence only
    out-of-place cases.
    """
    cases = []
    for row in _TEST_CASES_DATA:
        shape = row[0]
        strides = row[1] if len(row) > 1 else None
        split_dim = row[2] if len(row) > 2 else -1

        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"dim": split_dim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
                    description="GLU - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """GLU operator test with simplified implementation"""

    def __init__(self):
        super().__init__("GLU")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.glu(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.glu(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner

# Test cases format: (input_shape, input_strides_or_None, num_groups, weight_bias_present_bool, eps_or_None)
# infinicore.nn.functional.group_norm(input, num_groups, weight=None, bias=None, eps=1e-5)

_TEST_CASES_DATA = [
    ((4, 8, 16, 16), None, 4, True, None),
    ((2, 6, 8, 8), (768, 128, 1, 1), 3, False, 1e-3),
    ((1, 3, 10, 10), None, 3, True, None),
    ((8, 12, 6, 6), None, 6, True, 1e-4),
    ((6, 4, 7, 7), None, 2, False, None),
    ((3, 2, 9, 9), None, 1, True, None),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-1},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Expand the case table across dtypes.

    num_groups is passed positionally (inside `inputs`) to avoid a duplicate
    kwarg; weight/bias are per-channel tensors when present, else explicit
    None placeholders.
    """
    cases = []
    for shape, strides, num_groups, wb_present, eps in _TEST_CASES_DATA:
        channels = shape[1]
        for dtype in _TENSOR_DTYPES:
            call_inputs = [TensorSpec.from_tensor(shape, strides, dtype), num_groups]
            if wb_present:
                call_inputs.append(TensorSpec.from_tensor((channels,), None, dtype))
                call_inputs.append(TensorSpec.from_tensor((channels,), None, dtype))
            else:
                call_inputs.append(None)
                call_inputs.append(None)

            cases.append(
                TestCase(
                    inputs=call_inputs,
                    kwargs={} if eps is None else {"eps": eps},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
                    description="group_norm - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """group_norm operator test with simplified implementation"""

    def __init__(self):
        super().__init__("group_norm")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.group_norm(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.group_norm(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None)
# greater-than comparison

_TEST_CASES_DATA = [
    ((8, 8), None, None, None),
    ((8, 8), (16, 1), (16, 1), None),
    ((8, 8), None, (0, 1), None),
    ((1, 8), None, None, (8, 1)),
    ((2, 3, 4), None, None, None),
    ((32, 64), (128, 1), (128, 1), None),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0, "rtol": 1e-2},
    infinicore.float32: {"atol": 0, "rtol": 1e-3},
    infinicore.int32: {"atol": 0, "rtol": 0},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.int32]


def parse_test_cases():
    """Build out-of-place, explicit-out, and in-place gt test cases.

    Operands/outputs with broadcast strides cannot be written in place and
    skip their in-place variants. The explicit `out` tensor is bool, the
    result dtype of a comparison.
    """
    test_cases = []
    for shape, a_strides, b_strides, out_strides in _TEST_CASES_DATA:
        a_supports_inplace = not is_broadcast(a_strides)
        b_supports_inplace = not is_broadcast(b_strides)
        out_supports_inplace = not is_broadcast(out_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            a_spec = TensorSpec.from_tensor(shape, a_strides, dtype)
            b_spec = TensorSpec.from_tensor(shape, b_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool)

            test_cases.append(
                TestCase(
                    inputs=[a_spec, b_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="GT - OUT_OF_PLACE",
                )
            )

            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        # FIX: was `kwargs=None`; every other TestCase in the
                        # suite passes a dict (cf. the abs.py out-case), and a
                        # None kwargs breaks dict-style handling downstream.
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="GT - INPLACE(out)",
                    )
                )

            if a_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="GT - INPLACE(a)",
                    )
                )

            if b_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[a_spec, b_spec],
                        kwargs={"out": 1},
                        output_spec=None,
                        comparison_target=1,
                        tolerance=tol,
                        description="GT - INPLACE(b)",
                    )
                )

    return test_cases


class OpTest(BaseOperatorTest):
    """GT operator test with simplified implementation"""

    def __init__(self):
        super().__init__("GT")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.gt(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.gt(*args, **kwargs)


def main():
    """Main entry point"""
    runner = GenericTestRunner(OpTest)
    runner.run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (in_shape, in_strides_or_None, tau_or_None, hard_or_None, dim_or_None)

_TEST_CASES_DATA = [
    ((4, 10), None, 1.0, False, -1),
    ((8, 20), (160, 1), 0.5, False, -1),
    ((2, 5, 6), None, 1.5, True, 2),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """
    gumbel_softmax: infinicore.nn.functional.gumbel_softmax(input, tau=1, hard=False, eps=1e-10, dim=-1)

    NOTE(review): gumbel_softmax draws random Gumbel noise per call, so
    comparing two independent invocations only matches if the runner pins
    a shared RNG seed — confirm against the framework before enabling the
    infinicore comparison path.
    """
    cases = []
    for row in _TEST_CASES_DATA:
        shape = row[0]
        strides = row[1] if len(row) > 1 else None
        tau = row[2] if len(row) > 2 else 1.0
        hard = row[3] if len(row) > 3 else False
        dim = row[4] if len(row) > 4 else -1

        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"tau": tau, "hard": hard, "dim": dim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
                    description="GumbelSoftmax - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """GumbelSoftmax operator test with simplified implementation"""

    def __init__(self):
        super().__init__("GumbelSoftmax")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.gumbel_softmax(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.gumbel_softmax(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (in_shape, in_strides_or_None, lambd_or_None)

_TEST_CASES_DATA = [
    ((13, 4), None, None),
    ((13, 4), (10, 1), 0.5),
    ((8, 8, 8), None, 1.0),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """
    hardshrink(input, lambd=0.5)

    A None lambd in the case table means "use the operator's default", so
    the kwarg is only forwarded when explicitly specified.
    """
    cases = []
    for row in _TEST_CASES_DATA:
        shape = row[0]
        strides = row[1] if len(row) > 1 else None
        lambd = row[2] if len(row) > 2 else 0.5

        for dtype in _TENSOR_DTYPES:
            call_kwargs = {} if lambd is None else {"lambd": lambd}
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs=call_kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
                    description="Hardshrink - OUT_OF_PLACE",
                )
            )

    return cases


class OpTest(BaseOperatorTest):
    """Hardshrink operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Hardshrink")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.hardshrink(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.hardshrink(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast

# Test cases format: (in_shape, in_strides_or_None)

_TEST_CASES_DATA = [
    ((13, 4), None),
    ((13, 4), (10, 1)),
    ((8, 8, 8), None),
]

_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}

_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """hardsigmoid(input)

    The functional API has no inplace argument; only out-of-place tests.
    """
    return [
        TestCase(
            inputs=[
                TensorSpec.from_tensor(
                    row[0], row[1] if len(row) > 1 else None, dtype
                )
            ],
            kwargs={},
            output_spec=None,
            comparison_target=None,
            tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}),
            description="Hardsigmoid - OUT_OF_PLACE",
        )
        for row in _TEST_CASES_DATA
        for dtype in _TENSOR_DTYPES
    ]


class OpTest(BaseOperatorTest):
    """Hardsigmoid operator test with simplified implementation"""

    def __init__(self):
        super().__init__("Hardsigmoid")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.hardsigmoid(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.hardsigmoid(*args, **kwargs)


def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
infinicore.nn.functional.hardswish(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/hardtanh.py b/test/infinicore/ops/hardtanh.py new file mode 100644 index 000000000..2bc6c0529 --- /dev/null +++ b/test/infinicore/ops/hardtanh.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, min_val_or_None, max_val_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None, -1.0, 1.0), + ((13, 4), (10, 1), -0.5, 0.5), + ((8, 8, 8), None, -2.0, 2.0), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """hardtanh(input, min_val=-1.0, max_val=1.0, inplace=False)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + minv = data[2] if len(data) > 2 else -1.0 + maxv = data[3] if len(data) > 3 else 1.0 + + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"min_val": minv, "max_val": maxv} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"Hardtanh - OUT_OF_PLACE", + ) + ) + + if input_supports_inplace: + inplace_kwargs = {"min_val": minv, "max_val": 
maxv, "inplace": True} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=inplace_kwargs, + output_spec=None, + comparison_target=0, + tolerance=tolerance, + description=f"Hardtanh - INPLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Hardtanh operator test with simplified implementation""" + + def __init__(self): + super().__init__("Hardtanh") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.hardtanh(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.hardtanh(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/heaviside.py b/test/infinicore/ops/heaviside.py new file mode 100644 index 000000000..09d1ae9ca --- /dev/null +++ b/test/infinicore/ops/heaviside.py @@ -0,0 +1,131 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ======================================================================= +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None) +# heaviside is binary: heaviside(input, values) +# ======================================================================= + +_TEST_CASES_DATA = [ + ((13, 4), None, None, None), + ((13, 4), (10, 1), None, None), + ((13, 4), None, (10, 1), None), + ((8, 16), (40, 1), (40, 1), None), + ((2, 3, 4), None, None, None), + ((16, 5632), None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 
1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape = data[0] + a_strides = data[1] if len(data) > 1 else None + b_strides = data[2] if len(data) > 2 else None + out_strides = data[3] if len(data) > 3 else None + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="heaviside - OUT_OF_PLACE", + ) + ) + + # Explicit out + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="heaviside - INPLACE(out)", + ) + ) + + # In-place on first input + if a_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="heaviside - INPLACE(a)", + ) + ) + + # In-place on second input + if b_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="heaviside - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Heaviside operator test with simplified implementation""" + + def __init__(self): + super().__init__("Heaviside") + + def get_test_cases(self): + return parse_test_cases() + + def 
torch_operator(self, *args, **kwargs): + return torch.heaviside(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.heaviside(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/hinge_embedding_loss.py b/test/infinicore/ops/hinge_embedding_loss.py new file mode 100644 index 000000000..e26238bd2 --- /dev/null +++ b/test/infinicore/ops/hinge_embedding_loss.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, target_shape, input_strides_or_None, target_strides_or_None, margin_or_None) +# infinicore.nn.functional.hinge_embedding_loss(input, target, margin=1.0, reduction='mean') + +_TEST_CASES_DATA = [ + ((4,), (4,), None, None, None), + ((8,), (8,), None, None, 0.5), + ((1,), (1,), None, None, None), + ((16,), (16,), (4,), None, 1.0), + ((3,), (3,), None, None, 0.2), + ((2,), (2,), None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, tgt_shape, s1, s2, margin in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, s1, dtype) + tgt = TensorSpec.from_tensor(tgt_shape, s2, dtype) + + kwargs = {} + if margin is not None: + kwargs["margin"] = margin + + test_cases.append( + TestCase( + inputs=[inp, tgt], 
+ kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="hinge_embedding_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """hinge_embedding_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("hinge_embedding_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.hinge_embedding_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.hinge_embedding_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/histc.py b/test/infinicore/ops/histc.py new file mode 100644 index 000000000..ebd35dfd8 --- /dev/null +++ b/test/infinicore/ops/histc.py @@ -0,0 +1,74 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, bins, min_val, max_val) +_TEST_CASES_DATA = [ + ((100,), None, 10, 0.0, 1.0), + ((50,), None, 5, -1.0, 1.0), + ((20,), None, 8, 0.0, 2.0), + ((10,), None, 4, 0.0, 1.0), + ((1,), None, 3, 0.0, 1.0), + ((200,), None, 20, -2.0, 2.0), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, bins, minv, maxv = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype) + input_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"bins": bins, "min": minv, "max": maxv} + + 
test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"histc - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """HistC operator test with simplified implementation""" + + def __init__(self): + super().__init__("HistC") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.histc(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.histc(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/histogram.py b/test/infinicore/ops/histogram.py new file mode 100644 index 000000000..4e44ef210 --- /dev/null +++ b/test/infinicore/ops/histogram.py @@ -0,0 +1,75 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, bins_or_sequence, range_or_None) +_TEST_CASES_DATA = [ + ((100,), None, 10, (0.0, 1.0)), + ((20,), None, 5, (0.0, 2.0)), + ((10,), None, 4, (0.0, 1.0)), + ((200,), None, 20, (-1.0, 1.0)), + ((1,), None, 3, (0.0, 1.0)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, bins, rng = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype) + input_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"bins": bins} + if rng is not None: + kwargs["range"] = rng + + test_cases.append( + TestCase( + 
inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"histogram - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Histogram operator test with simplified implementation""" + + def __init__(self): + super().__init__("Histogram") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.histogram(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.histogram(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/hsplit.py b/test/infinicore/ops/hsplit.py new file mode 100644 index 000000000..b15b535a2 --- /dev/null +++ b/test/infinicore/ops/hsplit.py @@ -0,0 +1,101 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, sections_or_None) +# infinicore.hsplit(input, sections) +# Note: PyTorch hsplit is a convenience wrapper around split/reshape. We include both int and list sections. 
+ +_TEST_CASES_DATA = [ + ((4, 8), None, 2), + ((4, 9), None, [3, 6]), + ((2, 6, 12), None, 3), + ((1, 10), (10, 1), 5), + ((8, 4), None, [1, 3]), + ((6, 12), None, 4), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, sections in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + # infinicore.hsplit expects positional second arg (sections) rather than kw in this API; + # put sections into inputs when present. Convert list -> tuple. + if sections is not None: + # wrap sections in a small wrapper so TestCase.__init__ does not + # interpret the tuple as a Tensor shape + class Sections: + def __init__(self, v): + self.v = v + + def as_tuple(self): + return tuple(self.v) if isinstance(self.v, list) else self.v + + def __repr__(self): + return f"sections({self.v})" + + sec = Sections(sections) + test_inputs = [inp, sec] + else: + test_inputs = [inp] + + test_cases.append( + TestCase( + inputs=test_inputs, + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="hsplit - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """hsplit operator test with simplified implementation""" + + def __init__(self): + super().__init__("hsplit") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + # unwrap Sections wrapper if present + args = list(args) + if len(args) >= 2 and hasattr(args[1], "as_tuple"): + args[1] = args[1].as_tuple() + return torch.hsplit(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator 
not yet available).""" + # return infinicore.hsplit(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/huber_loss.py b/test/infinicore/ops/huber_loss.py new file mode 100644 index 000000000..6ada545de --- /dev/null +++ b/test/infinicore/ops/huber_loss.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, target_shape, input_strides_or_None, target_strides_or_None, delta_or_None) +# infinicore.nn.functional.huber_loss(input, target, reduction='mean', delta=1.0) + +_TEST_CASES_DATA = [ + ((4, 5), (4, 5), None, None, None), + ((8, 8), (8, 8), (512, 64), None, 1.0), + ((1, 10), (1, 10), None, None, 0.5), + ((16, 100), (16, 100), None, None, 2.0), + ((3, 7), (3, 7), None, (21, 7), None), + ((2, 2), (2, 2), None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, tgt_shape, s1, s2, delta in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, s1, dtype) + tgt = TensorSpec.from_tensor(tgt_shape, s2, dtype) + + kwargs = {} + if delta is not None: + kwargs["delta"] = delta + + test_cases.append( + TestCase( + inputs=[inp, tgt], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="huber_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class 
OpTest(BaseOperatorTest): + """huber_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("huber_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.huber_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.huber_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/hypot.py b/test/infinicore/ops/hypot.py new file mode 100644 index 000000000..57706ddea --- /dev/null +++ b/test/infinicore/ops/hypot.py @@ -0,0 +1,114 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None) +# infinicore.hypot(a, b) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None), + ((1, 4, 8), (32, 8, 1), None), + ((3, 2, 5, 7), None, None), + ((2, 1, 16), None, None), + ((1, 8, 9, 11), (792, 99, 11, 1), None), + ((2, 6, 10), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for a_shape, a_strides, b_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + a_spec = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor( + a_shape if b_shape is None else b_shape, None, dtype + ) + + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + 
output_spec=None, + comparison_target=None, + tolerance=tol, + description="hypot_out_of_place", + ) + ) + + out_spec = TensorSpec.from_tensor(a_shape, None, dtype) + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="hypot_explicit_out", + ) + ) + + if not is_broadcast(a_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="hypot_inplace_a", + ) + ) + if not is_broadcast(b_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="hypot_inplace_b", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Hypot operator test with simplified implementation""" + + def __init__(self): + super().__init__("Hypot") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.hypot(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.hypot(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/index_add.py b/test/infinicore/ops/index_add.py new file mode 100644 index 000000000..7650d8085 --- /dev/null +++ b/test/infinicore/ops/index_add.py @@ -0,0 +1,102 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (target_shape, target_strides_or_None, dim, index_shape, src_shape) +_TEST_CASES_DATA = [ + 
((5, 6), None, 1, (5, 2), (5, 2)), + ((4, 4), (16, 1), 0, (2, 4), (2, 4)), + ((3, 5), None, 1, (3, 3), (3, 3)), + ((2, 6), None, 1, (2, 2), (2, 2)), + ((6, 3), (18, 1), 0, (3, 3), (3, 3)), + ((4, 7), None, 1, (4, 2), (4, 2)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for target_shape, t_strides, dim, idx_shape, src_shape in _TEST_CASES_DATA: + target_spec = TensorSpec.from_tensor( + target_shape, t_strides, infinicore.float32 + ) + # index for index_add should be 1-D with length equal to source.size(dim) + index_len = src_shape[dim] + from framework.tensor import TensorInitializer + + index_spec = TensorSpec.from_tensor( + (index_len,), + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=target_shape[dim], + ) + src_spec = TensorSpec.from_tensor(src_shape, None, infinicore.float32) + + # out parameter can be used (explicit out) + out_supports = not is_broadcast(t_strides) + + # Out-of-place (return value) + # Use positional dim to match infinicore.index_add(input, dim, index, tensor) + test_cases.append( + TestCase( + inputs=[target_spec, dim, index_spec, src_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_add - OUT_OF_PLACE", + ) + ) + + # In-place on target (out=target) + if out_supports: + test_cases.append( + TestCase( + inputs=[target_spec, dim, index_spec, src_spec], + kwargs=None, + output_spec=target_spec, + comparison_target="out", + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_add - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IndexAdd operator test with simplified implementation""" + + def __init__(self): + super().__init__("IndexAdd") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return 
torch.index_add(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.index_add(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/index_copy.py b/test/infinicore/ops/index_copy.py new file mode 100644 index 000000000..21b4eb249 --- /dev/null +++ b/test/infinicore/ops/index_copy.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.tensor import TensorInitializer +from framework.utils import is_broadcast + +# Test cases format: (target_shape, target_strides_or_None, dim, index_shape, src_shape) +_TEST_CASES_DATA = [ + ((5, 6), None, 1, (5, 2), (5, 2)), + ((4, 4), (16, 1), 0, (2, 4), (2, 4)), + ((3, 5), None, 1, (3, 3), (3, 3)), + ((2, 6), None, 1, (2, 2), (2, 2)), + ((6, 3), (18, 1), 0, (3, 3), (3, 3)), + ((4, 7), None, 1, (4, 2), (4, 2)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for target_shape, t_strides, dim, idx_shape, src_shape in _TEST_CASES_DATA: + target_spec = TensorSpec.from_tensor( + target_shape, t_strides, infinicore.float32 + ) + # index for index_copy should be 1-D with length equal to source.size(dim) + index_len = src_shape[dim] + + index_spec = TensorSpec.from_tensor( + (index_len,), + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=target_shape[dim], + ) + src_spec = TensorSpec.from_tensor(src_shape, None, infinicore.float32) + + out_supports = not is_broadcast(t_strides) + + # Out-of-place: infinicore.index_copy(input, dim, 
index, source) + test_cases.append( + TestCase( + inputs=[target_spec, dim, index_spec, src_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_copy - OUT_OF_PLACE", + ) + ) + + # Explicit out: same ordering, output will be provided by framework + if out_supports: + test_cases.append( + TestCase( + inputs=[target_spec, dim, index_spec, src_spec], + kwargs=None, + output_spec=target_spec, + comparison_target="out", + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_copy - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IndexCopy operator test with simplified implementation""" + + def __init__(self): + super().__init__("IndexCopy") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.index_copy(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.index_copy(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/index_reduce.py b/test/infinicore/ops/index_reduce.py new file mode 100644 index 000000000..e7777c743 --- /dev/null +++ b/test/infinicore/ops/index_reduce.py @@ -0,0 +1,102 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (target_shape, target_strides_or_None, dim, index_shape, reduce) +_TEST_CASES_DATA = [ + ((5, 6), None, 1, (5, 2), "prod"), + ((4, 4), (16, 1), 0, (2, 4), "amax"), + ((3, 5), None, 1, (3, 3), "amin"), + ((2, 6), None, 1, (2, 2), "prod"), + 
((2, 6), None, 1, (2, 2), "amin"), + ((6, 3), (18, 1), 0, (3, 3), "mean"), + ((4, 7), None, 1, (4, 2), "prod"), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for tgt_shape, tgt_strides, dim, idx_shape, reduce in _TEST_CASES_DATA: + target_spec = TensorSpec.from_tensor(tgt_shape, tgt_strides, infinicore.float32) + # idx_shape here represents the source shape for index_reduce; index itself must be 1-D + src_shape = idx_shape + # determine index length from source along the reduction dim + index_len = src_shape[dim] + from framework.tensor import TensorInitializer + + index_spec = TensorSpec.from_tensor( + (index_len,), + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=tgt_shape[dim], + ) + + src_spec = TensorSpec.from_tensor(src_shape, None, infinicore.float32) + + out_supports = not is_broadcast(tgt_strides) + + kwargs = {"reduce": reduce} + + test_cases.append( + TestCase( + inputs=[target_spec, dim, index_spec, src_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_reduce - OUT_OF_PLACE", + ) + ) + + if out_supports: + test_cases.append( + TestCase( + inputs=[target_spec, dim, index_spec, src_spec], + kwargs=kwargs, + output_spec=target_spec, + comparison_target="out", + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_reduce - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IndexReduce operator test with simplified implementation""" + + def __init__(self): + super().__init__("IndexReduce") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.index_reduce(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return 
infinicore.index_reduce(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/index_select.py b/test/infinicore/ops/index_select.py new file mode 100644 index 000000000..208243e21 --- /dev/null +++ b/test/infinicore/ops/index_select.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, dim, index_shape) +_TEST_CASES_DATA = [ + ((3, 4), None, 1, (2,)), + ((5, 6), (30, 1), 0, (3,)), + ((2, 3, 4), None, 2, (2,)), + ((4, 4), None, -1, (1,)), + ((6, 2), (12, 1), 1, (2,)), + ((3, 5), None, 0, (1,)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, dim, idx_shape in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + # index_select requires a 1-D index tensor + index_len = idx_shape[0] + from framework.tensor import TensorInitializer + + index_spec = TensorSpec.from_tensor( + (index_len,), + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=shape[dim], + ) + + # Use positional dim to match infinicore.index_select(input, dim, index) + test_cases.append( + TestCase( + inputs=[input_spec, dim, index_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"index_select - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IndexSelect operator test with simplified implementation""" + + def __init__(self): + super().__init__("IndexSelect") + + def get_test_cases(self): 
+ return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.index_select(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.index_select(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/inner.py b/test/infinicore/ops/inner.py new file mode 100644 index 000000000..66a6aae43 --- /dev/null +++ b/test/infinicore/ops/inner.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (a_shape, b_shape, a_strides_or_None, b_strides_or_None) +# infinicore.inner(a, b) + +_TEST_CASES_DATA = [ + ((3,), (3,), None, None), + ((2, 3), (3,), None, None), + ((4, 5), (4, 5), (20, 5), (20, 5)), + ((6,), (6,), None, (0,)), + ((2, 3, 4), (4,), None, None), + ((8, 8), (8, 8), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for a_shape, b_shape, a_strides, b_strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b = TensorSpec.from_tensor(b_shape, b_strides, dtype) + + test_cases.append( + TestCase( + inputs=[a, b], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="inner - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class 
OpTest(BaseOperatorTest): + """inner operator test with simplified implementation""" + + def __init__(self): + super().__init__("inner") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.inner(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.inner(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/instance_norm.py b/test/infinicore/ops/instance_norm.py new file mode 100644 index 000000000..0d1eb30ed --- /dev/null +++ b/test/infinicore/ops/instance_norm.py @@ -0,0 +1,111 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, running_mean_present_bool, running_var_present_bool, weight_bias_present_bool, use_input_stats_or_None, momentum_or_None, eps_or_None) +# infinicore.nn.functional.instance_norm(input, running_mean=None, running_var=None, weight=None, bias=None, use_input_stats=False, momentum=0.1, eps=1e-5) + +_TEST_CASES_DATA = [ + ((4, 3, 8, 8), None, True, True, True, False, None, None), + ((2, 6, 4, 4), None, True, True, True, True, 0.2, 1e-5), + ((1, 3, 16, 16), None, True, True, False, False, None, None), + ((8, 5, 2, 2), None, True, True, False, True, 0.1, 1e-3), + ((6, 4, 7, 7), None, True, True, True, False, None, 1e-4), + ((3, 2, 9, 9), None, True, True, True, True, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-1}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = 
[infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for ( + shape, + strides, + mean_p, + var_p, + wb_p, + use_stats, + momentum, + eps, + ) in _TEST_CASES_DATA: + C = shape[1] + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + + running_mean = TensorSpec.from_tensor((C,), None, dtype) if mean_p else None + running_var = TensorSpec.from_tensor((C,), None, dtype) if var_p else None + + inputs = [inp] + # instance_norm signature expects running_mean, running_var next + inputs.append(running_mean) + inputs.append(running_var) + if wb_p: + weight = TensorSpec.from_tensor((C,), None, dtype) + bias = TensorSpec.from_tensor((C,), None, dtype) + inputs.append(weight) + inputs.append(bias) + else: + inputs.append(None) + inputs.append(None) + + kwargs = {} + if use_stats is not None: + kwargs["use_input_stats"] = use_stats + if momentum is not None: + kwargs["momentum"] = momentum + if eps is not None: + kwargs["eps"] = eps + + test_cases.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="instance_norm - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """instance_norm operator test with simplified implementation""" + + def __init__(self): + super().__init__("instance_norm") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.instance_norm(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.instance_norm(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git 
a/test/infinicore/ops/interpolate.py b/test/infinicore/ops/interpolate.py new file mode 100644 index 000000000..3a19876d5 --- /dev/null +++ b/test/infinicore/ops/interpolate.py @@ -0,0 +1,90 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, size_or_scale_factor, mode, align_corners_or_None, input_strides_or_None) +# infinicore.nn.functional.interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=None) + +_TEST_CASES_DATA = [ + ((1, 3, 16, 16), (32, 32), "bilinear", True, None), + ((2, 3, 8, 8), (16, 16), "nearest", None, (384, 128, 16, 1)), + ((1, 1, 10), 20, "linear", False, None), + ((2, 3, 6, 6), (12, 12), "area", None, None), + ((1, 3, 4, 4, 4), (8, 8, 8), "trilinear", True, None), + ((4, 3, 7, 7), 2.0, "bilinear", False, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, size_or_scale, mode, align, in_strides = data + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"mode": mode} + if isinstance(size_or_scale, tuple): + kwargs["size"] = size_or_scale + else: + kwargs["scale_factor"] = size_or_scale + if align is not None: + kwargs["align_corners"] = align + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + 
description=f"interpolate - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Interpolate operator test with simplified implementation""" + + def __init__(self): + super().__init__("Interpolate") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.interpolate(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.interpolate(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/inverse.py b/test/infinicore/ops/inverse.py new file mode 100644 index 000000000..c10db8344 --- /dev/null +++ b/test/infinicore/ops/inverse.py @@ -0,0 +1,75 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (matrix_shape, strides_or_None) +# inverse(input) — only out-of-place (no out parameter) + +_TEST_CASES_DATA = [ + ((1, 1), None), + ((2, 2), None), + ((3, 3), (3, 1)), + ((4, 4), None), + ((8, 8), (512, 1)), + ((16, 16), None), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + + test_cases.append( + TestCase( + inputs=[spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="inverse - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class 
OpTest(BaseOperatorTest): + """inverse operator test with simplified implementation""" + + def __init__(self): + super().__init__("inverse") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.inverse(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.inverse(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/isclose.py b/test/infinicore/ops/isclose.py new file mode 100644 index 000000000..a95b39a3d --- /dev/null +++ b/test/infinicore/ops/isclose.py @@ -0,0 +1,89 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, extra_kwargs_or_None) +# isclose compares two tensors with atol/rtol; we include kwargs combinations + +_TEST_CASES_DATA = [ + ((8, 8), None, None, {"rtol": 1e-05, "atol": 1e-08}), + ((8, 8), (16, 1), (16, 1), {"rtol": 1e-03, "atol": 1e-05}), + ((8, 8), None, (0, 1), {"rtol": 1e-02, "atol": 1e-03}), + ((2, 3, 4), None, None, {"rtol": 1e-02, "atol": 1e-03}), + ((1, 8), None, None, {"equal_nan": True}), + ((16, 64), (128, 1), (128, 1), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides, extra = data[0], data[1], 
data[2], data[3] + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + kwargs = {} + if extra is not None: + kwargs.update(extra) + + # Out-of-place not support 'out=' + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="IsClose - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IsClose operator test with simplified implementation""" + + def __init__(self): + super().__init__("IsClose") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.isclose(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.isclose(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/isfinite.py b/test/infinicore/ops/isfinite.py new file mode 100644 index 000000000..518b27582 --- /dev/null +++ b/test/infinicore/ops/isfinite.py @@ -0,0 +1,81 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides_or_None) +# isfinite checks each element for finiteness and returns boolean tensor + +_TEST_CASES_DATA = [ + ((8, 8), None), + ((8, 8), (16, 1)), + ((8, 8), None), + ((2, 3, 4), None), + ((1, 8), None), + ((16, 128), 
(256, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides = data[0], data[1] + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="IsFinite - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IsFinite operator test with simplified implementation""" + + def __init__(self): + super().__init__("IsFinite") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.isfinite(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.isfinite(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/isin.py b/test/infinicore/ops/isin.py new file mode 100644 index 000000000..c95a466e3 --- /dev/null +++ b/test/infinicore/ops/isin.py @@ -0,0 +1,101 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides_or_None, test_elements_strides_or_None, extra_or_None) +# isin checks membership of each element in 
provided test_elements (tensor or list) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), None, None), + ((8, 8), None, (1,), None), + ((2, 3, 4), None, None, None), + ((1, 8), None, None, None), + ((16, 64), (128, 1), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.int32: {"atol": 0, "rtol": 0}, +} + +_TENSOR_DTYPES = [infinicore.int32, infinicore.float32, infinicore.float16] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides, elements_strides, _ = data[0], data[1], data[2], data[3] + + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + # Build "test elements" as a tensor of small set of values (same dtype) + elements_spec = TensorSpec.from_tensor( + (4,), elements_strides if elements_strides else None, dtype + ) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec, elements_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="IsIn - OUT_OF_PLACE", + ) + ) + + # explicit out + out_spec = TensorSpec.from_tensor(shape, None, infinicore.bool) + test_cases.append( + TestCase( + inputs=[input_spec, elements_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="IsIn - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IsIn operator test with simplified implementation""" + + def __init__(self): + super().__init__("IsIn") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.isin(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return 
infinicore.isin(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/isneginf.py b/test/infinicore/ops/isneginf.py new file mode 100644 index 000000000..95274fef8 --- /dev/null +++ b/test/infinicore/ops/isneginf.py @@ -0,0 +1,96 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides_or_None, out_strides_or_None) +# isneginf checks for -inf values + +_TEST_CASES_DATA = [ + ((8, 8), None, None), + ((8, 8), (16, 1), None), + ((8, 8), None, (0, 1)), + ((2, 3, 4), None, None), + ((1, 8), None, None), + ((16, 128), (256, 1), (256, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides, out_strides = data[0], data[1], data[2] + + input_supports_inplace = not is_broadcast(in_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="IsNegInf - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + 
output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="IsNegInf - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IsNegInf operator test with simplified implementation""" + + def __init__(self): + super().__init__("IsNegInf") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.isneginf(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.isneginf(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/isposinf.py b/test/infinicore/ops/isposinf.py new file mode 100644 index 000000000..3f1ae6956 --- /dev/null +++ b/test/infinicore/ops/isposinf.py @@ -0,0 +1,96 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides_or_None, out_strides_or_None) +# isposinf checks for +inf values + +_TEST_CASES_DATA = [ + ((8, 8), None, None), + ((8, 8), (16, 1), None), + ((8, 8), None, (0, 1)), + ((2, 3, 4), None, None), + ((1, 8), None, None), + ((16, 128), (256, 1), (256, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides, out_strides = data[0], data[1], data[2] + + input_supports_inplace = not is_broadcast(in_strides) + 
out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="IsPosInf - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="IsPosInf - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """IsPosInf operator test with simplified implementation""" + + def __init__(self): + super().__init__("IsPosInf") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.isposinf(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.isposinf(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/kl_div.py b/test/infinicore/ops/kl_div.py new file mode 100644 index 000000000..b7ab6fd68 --- /dev/null +++ b/test/infinicore/ops/kl_div.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, reduction_or_None, log_target_bool_or_None) +# infinicore.nn.functional.kl_div(input, target, reduction='mean', log_target=False) + +_TEST_CASES_DATA = [ + ((4, 5), None, "batchmean", 
None), + ((8, 8), (512, 64), "sum", False), + ((1, 10), None, "batchmean", True), + ((16, 100), None, "batchmean", False), + ((3, 7), None, "batchmean", None), + ((2, 2), None, "sum", None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-1}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, reduction, log_target in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(shape, strides, dtype) + b = TensorSpec.from_tensor(shape, None, dtype) + + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + if log_target is not None: + kwargs["log_target"] = log_target + + test_cases.append( + TestCase( + inputs=[a, b], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="kl_div - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """kl_div operator test with simplified implementation""" + + def __init__(self): + super().__init__("kl_div") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.kl_div(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.kl_div(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/kron.py b/test/infinicore/ops/kron.py new file mode 100644 index 000000000..f285578a7 --- /dev/null +++ b/test/infinicore/ops/kron.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, 
os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (a_shape, b_shape, a_strides_or_None, b_strides_or_None) +# infinicore.kron(a, b) + +_TEST_CASES_DATA = [ + ((2, 3), (4, 1), None, None), + ((1,), (3,), None, None), + ((4, 4), (2, 2), (64, 16), (8, 1)), + ((6,), (6,), None, None), + ((3, 2), (2, 3), None, (12, 1)), + ((8, 1), (1, 8), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for a_shape, b_shape, a_strides, b_strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b = TensorSpec.from_tensor(b_shape, b_strides, dtype) + + test_cases.append( + TestCase( + inputs=[a, b], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="kron - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """kron operator test with simplified implementation""" + + def __init__(self): + super().__init__("kron") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.kron(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.kron(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/kthvalue.py b/test/infinicore/ops/kthvalue.py new file mode 100644 
index 000000000..a8ddfd748 --- /dev/null +++ b/test/infinicore/ops/kthvalue.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides, k, dim, keepdim) +_TEST_CASES_DATA = [ + ((6, 8), None, 1, 1, False), + ((8, 4), (16, 1), 2, 0, True), + ((5, 5), None, 3, -1, False), + ((3, 7), (14, 1), 2, 1, True), + ((10, 3), None, 1, 1, False), + ((2, 16), (32, 1), 5, 1, False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides, k, dim, keepdim = data + + out_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + # kthvalue returns (values, indices). We'll request out-of-place and explicit out for values. 
+ values_spec = TensorSpec.from_tensor(shape, None, dtype) + indices_spec = TensorSpec.from_tensor(shape, None, infinicore.int64) + + kwargs = {"k": k, "dim": dim, "keepdim": keepdim} + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"kthvalue - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """KthValue operator test with simplified implementation""" + + def __init__(self): + super().__init__("KthValue") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.kthvalue(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.kthvalue(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/l1_loss.py b/test/infinicore/ops/l1_loss.py new file mode 100644 index 000000000..22197ee90 --- /dev/null +++ b/test/infinicore/ops/l1_loss.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, target_shape, input_strides_or_None, reduction_or_None) +# infinicore.nn.functional.l1_loss(input, target, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), (4, 5), None, None), + ((8, 8), (8, 8), (512, 64), "sum"), + ((1, 10), (1, 10), None, "mean"), + ((16, 100), (16, 100), None, None), + ((3, 7), (3, 7), None, "none"), + ((2, 2), (2, 2), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + 
infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, tgt_shape, strides, reduction in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + tgt = TensorSpec.from_tensor(tgt_shape, None, dtype) + + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + + test_cases.append( + TestCase( + inputs=[inp, tgt], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="l1_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """l1_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("l1_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.l1_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.l1_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/ldexp.py b/test/infinicore/ops/ldexp.py new file mode 100644 index 000000000..b1c6236a1 --- /dev/null +++ b/test/infinicore/ops/ldexp.py @@ -0,0 +1,105 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# ldexp(input, other) computes input * (2**other) + +_TEST_CASES_DATA = [ + ((2, 3), (3,)), + ((1, 4, 8), None), + ((3, 2, 5, 7), (1,)), + ((2, 1, 16), None), + ((1, 8, 9, 11), None), + ((2, 6, 
10), (1,)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, other_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, None, dtype) + + if other_shape is None: + other_spec = TensorSpec.from_tensor((1,), None, infinicore.int32) + else: + other_spec = TensorSpec.from_tensor(other_shape, None, infinicore.int32) + + # out-of-place + cases.append( + TestCase( + inputs=[in_spec, other_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ldexp_out", + ) + ) + + # explicit out + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec, other_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="ldexp_explicit_out", + ) + ) + + # in-place + cases.append( + TestCase( + inputs=[in_spec, other_spec], + kwargs={}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="ldexp_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Ldexp operator test with simplified implementation""" + + def __init__(self): + super().__init__("Ldexp") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.ldexp(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.ldexp(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/leaky_relu.py b/test/infinicore/ops/leaky_relu.py new file mode 100644 index 000000000..cc53f45fb --- /dev/null +++ 
b/test/infinicore/ops/leaky_relu.py @@ -0,0 +1,103 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, negative_slope_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None, None), + ((13, 4), (10, 1), 0.01), + ((8, 8, 8), None, 0.2), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """ + leaky_relu(input, negative_slope=0.01, inplace=False) + """ + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + slope = data[2] if len(data) > 2 else 0.01 + + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {} + if slope is not None: + kwargs["negative_slope"] = slope + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"LeakyReLU - OUT_OF_PLACE", + ) + ) + + if input_supports_inplace: + inplace_kwargs = {"inplace": True} + if slope is not None: + inplace_kwargs["negative_slope"] = slope + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=inplace_kwargs, + output_spec=None, + comparison_target=0, + tolerance=tolerance, + description=f"LeakyReLU - INPLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LeakyReLU operator test with simplified implementation""" + + 
def __init__(self): + super().__init__("LeakyReLU") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.leaky_relu(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.leaky_relu(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/lerp.py b/test/infinicore/ops/lerp.py new file mode 100644 index 000000000..88c5e891b --- /dev/null +++ b/test/infinicore/ops/lerp.py @@ -0,0 +1,155 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (start_shape, start_strides_or_None, end_shape_or_None, weight_scalar_or_None, weight_tensor_shape_or_None) +# infinicore.lerp(start, end, weight) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None, 0.5, None), + ((1, 4, 8), (32, 8, 1), None, None, (1, 4, 8)), + ((3, 2, 5, 7), None, None, 0.25, None), + ((2, 1, 16), None, None, None, (2, 1, 16)), + ((1, 8, 9, 11), (792, 99, 11, 1), None, 0.75, None), + ((2, 6, 10), None, None, None, (2, 6, 10)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for ( + start_shape, + start_strides, + end_shape, + weight_scalar, + weight_tensor_shape, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + start_spec = TensorSpec.from_tensor(start_shape, start_strides, dtype) + end_spec = 
TensorSpec.from_tensor( + start_shape if end_shape is None else end_shape, None, dtype + ) + + if weight_scalar is not None: + weight = weight_scalar + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="lerp_scalar_weight_out", + ) + ) + out_spec = TensorSpec.from_tensor(start_shape, None, dtype) + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="lerp_scalar_weight_explicit_out", + ) + ) + if not is_broadcast(start_spec.strides): + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="lerp_scalar_inplace_start", + ) + ) + if not is_broadcast(end_spec.strides): + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="lerp_scalar_inplace_end", + ) + ) + + if weight_tensor_shape is not None: + weight_spec = TensorSpec.from_tensor(weight_tensor_shape, None, dtype) + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="lerp_tensor_weight_out", + ) + ) + out_spec = TensorSpec.from_tensor(start_shape, None, dtype) + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="lerp_tensor_weight_explicit_out", + ) + ) + if not is_broadcast(weight_spec.strides): + cases.append( + TestCase( + inputs=[start_spec, end_spec, weight_spec], + kwargs={"out": 2}, + output_spec=None, + comparison_target=2, + tolerance=tol, + description="lerp_inplace_weight", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Lerp operator 
test with simplified implementation""" + + def __init__(self): + super().__init__("Lerp") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.lerp(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.lerp(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/linear.py b/test/infinicore/ops/linear.py new file mode 100644 index 000000000..00a07a55a --- /dev/null +++ b/test/infinicore/ops/linear.py @@ -0,0 +1,89 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, weight_shape, input_strides_or_None, weight_strides_or_None, bias_present_bool) +# infinicore.nn.functional.linear(input, weight, bias=None) + +_TEST_CASES_DATA = [ + ((4, 3), (2, 3), None, None, True), + ((1, 6), (3, 6), None, None, False), + ((8, 10), (5, 10), (80, 10), None, True), + ((2, 4), (4, 4), None, (16, 4), True), + ((16, 8), (8, 8), None, None, False), + ((3, 1), (2, 1), None, None, True), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for ( + input_shape, + weight_shape, + in_strides, + w_strides, + bias_present, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(input_shape, in_strides, dtype) + 
weight = TensorSpec.from_tensor(weight_shape, w_strides, dtype) + + inputs = [inp, weight] + if bias_present: + bias_spec = TensorSpec.from_tensor((weight_shape[0],), None, dtype) + inputs.append(bias_spec) + + test_cases.append( + TestCase( + inputs=inputs, + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="linear - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """linear operator test with simplified implementation""" + + def __init__(self): + super().__init__("linear") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.linear(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.linear(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/local_response_norm.py b/test/infinicore/ops/local_response_norm.py new file mode 100644 index 000000000..58b053154 --- /dev/null +++ b/test/infinicore/ops/local_response_norm.py @@ -0,0 +1,83 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, size, alpha_or_None, beta_or_None, k_or_None) +# infinicore.nn.functional.local_response_norm(input, size, alpha=1e-4, beta=0.75, k=1.0) + +_TEST_CASES_DATA = [ + ((4, 3, 8, 8), None, 5, None, None, None), + ((2, 6, 4, 4), (384, 96, 1, 1), 3, 1e-4, 0.75, 1.0), + ((1, 3, 16, 16), None, 7, None, None, None), + ((8, 5, 2, 2), None, 1, 1e-3, 0.5, 0.0), + ((6, 4, 7, 7), None, 9, None, None, None), + ((3, 2, 9, 9), None, 4, 1e-5, 
0.9, 2.0), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, size, alpha, beta, k in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"size": size} + if alpha is not None: + kwargs["alpha"] = alpha + if beta is not None: + kwargs["beta"] = beta + if k is not None: + kwargs["k"] = k + + test_cases.append( + TestCase( + inputs=[inp], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="local_response_norm - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """local_response_norm operator test with simplified implementation""" + + def __init__(self): + super().__init__("local_response_norm") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.local_response_norm(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.local_response_norm(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/log.py b/test/infinicore/ops/log.py new file mode 100644 index 000000000..49a62026c --- /dev/null +++ b/test/infinicore/ops/log.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.log(input) + 
+_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="log_out", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="log_out_explicit", + ) + ) + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="log_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Log operator test with simplified implementation""" + + def __init__(self): + super().__init__("Log") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.log(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.log(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/log10.py b/test/infinicore/ops/log10.py new file mode 100644 index 000000000..e1a9a1110 --- /dev/null +++ b/test/infinicore/ops/log10.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, 
os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.log10(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="log10_out", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="log10_out_explicit", + ) + ) + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="log10_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Log10 operator test with simplified implementation""" + + def __init__(self): + super().__init__("Log10") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.log10(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.log10(*args, **kwargs) + + +def main(): + """Main entry 
point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/log1p.py b/test/infinicore/ops/log1p.py new file mode 100644 index 000000000..ae8879b7b --- /dev/null +++ b/test/infinicore/ops/log1p.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.log1p(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="log1p_out", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="log1p_out_explicit", + ) + ) + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="log1p_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Log1p operator test with simplified implementation""" + + def __init__(self): + super().__init__("Log1p") + + 
def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.log1p(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.log1p(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/log2.py b/test/infinicore/ops/log2.py new file mode 100644 index 000000000..cc16ab4c8 --- /dev/null +++ b/test/infinicore/ops/log2.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.log2(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="log2_out", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="log2_out_explicit", + ) + ) + if not is_broadcast(in_spec.strides): + 
cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="log2_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Log2 operator test with simplified implementation""" + + def __init__(self): + super().__init__("Log2") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.log2(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.log2(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/log_softmax.py b/test/infinicore/ops/log_softmax.py new file mode 100644 index 000000000..8c540073c --- /dev/null +++ b/test/infinicore/ops/log_softmax.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None) + +_TEST_CASES_DATA = [ + ((4, 10), None, -1), + ((2, 5, 8), (40, 8, 1), 1), + ((8, 20), None, 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """log_softmax(input, dim=None, dtype=None)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + dim = data[2] if len(data) > 2 else -1 + + for dtype in _TENSOR_DTYPES: + tolerance = 
_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"dim": dim} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"LogSoftmax - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogSoftmax operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogSoftmax") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.log_softmax(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.log_softmax(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logaddexp.py b/test/infinicore/ops/logaddexp.py new file mode 100644 index 000000000..5f02039da --- /dev/null +++ b/test/infinicore/ops/logaddexp.py @@ -0,0 +1,113 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None) +# infinicore.logaddexp(a, b) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None), + ((1, 4, 8), (32, 8, 1), None), + ((3, 2, 5, 7), None, None), + ((2, 1, 16), None, None), + ((1, 8, 9, 11), (792, 99, 11, 1), None), + ((2, 6, 10), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, 
infinicore.float32] + + +def parse_test_cases(): + cases = [] + for a_shape, a_strides, b_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + a_spec = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor( + a_shape if b_shape is None else b_shape, None, dtype + ) + + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="logaddexp_out", + ) + ) + out_spec = TensorSpec.from_tensor(a_shape, None, dtype) + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="logaddexp_out_explicit", + ) + ) + + if not is_broadcast(a_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="logaddexp_inplace_a", + ) + ) + if not is_broadcast(b_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="logaddexp_inplace_b", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """LogAddExp operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogAddExp") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logaddexp(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logaddexp(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logaddexp2.py b/test/infinicore/ops/logaddexp2.py new file mode 100644 index 000000000..6b63dc9a4 --- /dev/null +++ 
b/test/infinicore/ops/logaddexp2.py @@ -0,0 +1,113 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None) +# infinicore.logaddexp2(a, b) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None), + ((1, 4, 8), (32, 8, 1), None), + ((3, 2, 5, 7), None, None), + ((2, 1, 16), None, None), + ((1, 8, 9, 11), (792, 99, 11, 1), None), + ((2, 6, 10), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for a_shape, a_strides, b_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + a_spec = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor( + a_shape if b_shape is None else b_shape, None, dtype + ) + + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="logaddexp2_out", + ) + ) + out_spec = TensorSpec.from_tensor(a_shape, None, dtype) + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="logaddexp2_out_explicit", + ) + ) + + if not is_broadcast(a_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="logaddexp2_inplace_a", + ) + ) + if not is_broadcast(b_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + 
description="logaddexp2_inplace_b", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """LogAddExp2 operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogAddExp2") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logaddexp2(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logaddexp2(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logcumsumexp.py b/test/infinicore/ops/logcumsumexp.py new file mode 100644 index 000000000..9bd380ecc --- /dev/null +++ b/test/infinicore/ops/logcumsumexp.py @@ -0,0 +1,88 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, dim, input_strides_or_None) +# logcumsumexp computes log of cumulative sum of exponentials along dim. 
+ +_TEST_CASES_DATA = [ + ((13, 4), 1, None), + ((13, 4), 0, (10, 1)), + ((8, 16), 1, None), + ((2, 3, 5), 2, None), + ((16, 64), 1, None), + ((4, 5, 6), 0, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, dim = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + + # Out-of-place + kwargs = {"dim": dim} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"logcumsumexp - OUT_OF_PLACE", + ) + ) + + # NOTE: torch.logcumsumexp does accept out=; explicit-out tests are intentionally skipped here + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogCumsumExp operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogCumsumExp") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logcumsumexp(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logcumsumexp(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logdet.py b/test/infinicore/ops/logdet.py new file mode 100644 index 000000000..1b53d2344 --- /dev/null +++ b/test/infinicore/ops/logdet.py @@ 
-0,0 +1,77 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (matrix_shape, strides_or_None) +# logdet(input) — returns the log determinant; (sign, logabsdet) is returned by torch.slogdet, not logdet + +_TEST_CASES_DATA = [ + ((1, 1), None), + ((2, 2), None), + ((3, 3), (3, 1)), + ((4, 4), None), + ((8, 8), (512, 1)), + ((16, 16), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + + test_cases.append( + TestCase( + inputs=[spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="logdet - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """logdet operator test with simplified implementation""" + + def __init__(self): + super().__init__("logdet") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logdet(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logdet(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logical_and.py b/test/infinicore/ops/logical_and.py new file mode 100644 index 000000000..adcff0f01 --- /dev/null +++ b/test/infinicore/ops/logical_and.py @@ -0,0 +1,120 @@ +import sys +import
os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None) +# logical_and performs element-wise boolean AND; inputs may be bool or integer + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), (16, 1), None), + ((8, 8), None, (0, 1), None), + ((1, 8), None, None, (8, 1)), + ((2, 3, 4), None, None, None), + ((16, 128), (256, 1), (256, 1), None), +] + +_TOLERANCE_MAP = { + infinicore.bool: {"atol": 0, "rtol": 0}, +} + +_TENSOR_DTYPES = [infinicore.bool, infinicore.int32, infinicore.uint8] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides, out_strides = data[0], data[1], data[2], data[3] + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Logical AND - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Logical AND - INPLACE(out)", + ) + ) + + if a_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + 
description="Logical AND - INPLACE(a)", + ) + ) + + if b_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="Logical AND - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogicalAnd operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogicalAnd") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logical_and(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logical_and(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logical_not.py b/test/infinicore/ops/logical_not.py new file mode 100644 index 000000000..b821bc153 --- /dev/null +++ b/test/infinicore/ops/logical_not.py @@ -0,0 +1,104 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides_or_None, out_strides_or_None) +# logical_not negates boolean values + +_TEST_CASES_DATA = [ + ((8, 8), None, None), + ((8, 8), (16, 1), None), + ((8, 8), None, (0, 1)), + ((2, 3, 4), None, None), + ((1, 8), None, None), + ((16, 128), (256, 1), (256, 1)), +] + +_TOLERANCE_MAP = {infinicore.bool: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.bool, infinicore.int32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides, out_strides = data[0], data[1], data[2] + + input_supports_inplace = not 
is_broadcast(in_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Logical NOT - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Logical NOT - INPLACE(out)", + ) + ) + + if input_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="Logical NOT - INPLACE(input)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogicalNot operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogicalNot") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logical_not(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logical_not(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logical_or.py b/test/infinicore/ops/logical_or.py new file mode 100644 index 000000000..78620ef51 --- /dev/null +++ b/test/infinicore/ops/logical_or.py @@ -0,0 +1,118 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner 
import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None) +# logical_or performs element-wise boolean OR + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), (16, 1), None), + ((8, 8), None, (0, 1), None), + ((1, 8), None, None, (8, 1)), + ((2, 3, 4), None, None, None), + ((16, 128), (256, 1), (256, 1), None), +] + +_TOLERANCE_MAP = {infinicore.bool: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.bool, infinicore.int32, infinicore.uint8] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides, out_strides = data[0], data[1], data[2], data[3] + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Logical OR - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Logical OR - INPLACE(out)", + ) + ) + + if a_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="Logical OR - INPLACE(a)", + ) + ) + + if b_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="Logical OR - 
INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogicalOr operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogicalOr") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logical_or(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logical_or(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logical_xor.py b/test/infinicore/ops/logical_xor.py new file mode 100644 index 000000000..c206b1aa0 --- /dev/null +++ b/test/infinicore/ops/logical_xor.py @@ -0,0 +1,118 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides_or_None, b_strides_or_None, out_strides_or_None) +# logical_xor performs element-wise boolean XOR + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None), + ((8, 8), (16, 1), (16, 1), None), + ((8, 8), None, (0, 1), None), + ((1, 8), None, None, (8, 1)), + ((2, 3, 4), None, None, None), + ((16, 128), (256, 1), (256, 1), None), +] + +_TOLERANCE_MAP = {infinicore.bool: {"atol": 0, "rtol": 0}} + +_TENSOR_DTYPES = [infinicore.bool, infinicore.int32, infinicore.uint8] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides, out_strides = data[0], data[1], data[2], data[3] + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in 
_TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Logical XOR - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Logical XOR - INPLACE(out)", + ) + ) + + if a_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="Logical XOR - INPLACE(a)", + ) + ) + + if b_supports_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="Logical XOR - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogicalXor operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogicalXor") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logical_xor(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logical_xor(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logsigmoid.py b/test/infinicore/ops/logsigmoid.py new file mode 100644 index 000000000..021542c56 --- /dev/null +++ b/test/infinicore/ops/logsigmoid.py @@ -0,0 +1,79 @@ +import sys +import os + 
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 8, 8), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """logsigmoid(input)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"LogSigmoid - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogSigmoid operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogSigmoid") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.logsigmoid(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.logsigmoid(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/logsumexp.py b/test/infinicore/ops/logsumexp.py new file mode 100644 index 000000000..6bcd65925 
--- /dev/null +++ b/test/infinicore/ops/logsumexp.py @@ -0,0 +1,115 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim, keepdim_or_None, out_strides_or_None) +# logsumexp computes log(sum(exp(input), dim=dim)) with numerical stability + +_TEST_CASES_DATA = [ + ((8, 8), None, 1, None, None), + ((8, 8), (16, 1), 0, False, None), + ((2, 3, 4), None, 2, True, (0, 1, 1)), + ((1, 8), None, 0, False, None), + ((16, 64), (128, 1), 1, True, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + else: + dims = [dim] + + if dim is None: + return () + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"dim": dim} + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="LogSumExp - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) 
+ out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="LogSumExp - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """LogSumExp operator test with simplified implementation""" + + def __init__(self): + super().__init__("LogSumExp") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.logsumexp(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.logsumexp(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/lp_pool1d.py b/test/infinicore/ops/lp_pool1d.py new file mode 100644 index 000000000..10b9dcd7f --- /dev/null +++ b/test/infinicore/ops/lp_pool1d.py @@ -0,0 +1,76 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None, norm_type, kernel_size, stride_or_None, ceil_mode) + +_TEST_CASES_DATA = [ + ((2, 3, 16), None, 2.0, 3, None, False), + ((1, 4, 15), (60, 15, 1), 1.0, 5, 1, True), + ((2, 1, 32), None, 3.0, 2, 2, False), + ((3, 2, 7), None, 2.0, 3, None, True), + ((4, 6, 31), None, 1.5, 4, 2, False), + ((2, 8, 9), (72, 9, 1), 2.0, 3, 1, False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for in_shape, 
in_strides, p, k, s, ceil_mode in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = {"norm_type": p, "kernel_size": k, "ceil_mode": ceil_mode} + if s is not None: + kwargs["stride"] = s + tests.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="LpPool1d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """LpPool1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("LpPool1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.lp_pool1d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.lp_pool1d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/lp_pool2d.py b/test/infinicore/ops/lp_pool2d.py new file mode 100644 index 000000000..a21b7c43d --- /dev/null +++ b/test/infinicore/ops/lp_pool2d.py @@ -0,0 +1,76 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None, norm_type, kernel_size, stride_or_None, ceil_mode) + +_TEST_CASES_DATA = [ + ((2, 3, 16, 16), None, 2.0, (3, 3), None, False), + ((1, 4, 15, 17), (1020, 255, 17, 1), 1.0, (5, 4), (2, 2), True), + ((2, 1, 32, 32), None, 3.0, (2, 2), (2, 2), False), + ((3, 2, 7, 9), None, 2.0, (3, 3), None, True), + ((4, 6, 31, 29), None, 1.5, (4, 4), (2, 2), False), + ((2, 8, 9, 11), (1584, 
198, 11, 1), 2.0, (3, 2), 1, False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + tests = [] + for in_shape, in_strides, p, k, s, ceil_mode in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = {"norm_type": p, "kernel_size": k, "ceil_mode": ceil_mode} + if s is not None: + kwargs["stride"] = s + tests.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="LpPool2d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """LpPool2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("LpPool2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.lp_pool2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.lp_pool2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/lp_pool3d.py b/test/infinicore/ops/lp_pool3d.py new file mode 100644 index 000000000..12ea28485 --- /dev/null +++ b/test/infinicore/ops/lp_pool3d.py @@ -0,0 +1,73 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None, norm_type, kernel_size, stride_or_None, ceil_mode) + +_TEST_CASES_DATA = [ + ((1, 2, 8, 8, 
8), None, 2.0, (2, 2, 2), None, False), + ((2, 3, 7, 9, 5), None, 1.0, (3, 3, 2), (2, 2, 1), True), + ((1, 4, 16, 16, 6), None, 3.0, (4, 4, 2), (2, 2, 1), False), + ((2, 1, 9, 11, 7), None, 2.0, (3, 2, 3), None, True), + ((3, 2, 5, 6, 4), None, 1.5, (2, 2, 2), (1, 1, 1), False), + ((2, 6, 10, 9, 8), None, 2.0, (3, 3, 2), (2, 1, 2), False), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + tests = [] + for in_shape, in_strides, p, k, s, ceil_mode in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = {"norm_type": p, "kernel_size": k, "ceil_mode": ceil_mode} + if s is not None: + kwargs["stride"] = s + tests.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="LpPool3d - OUT_OF_PLACE", + ) + ) + + return tests + + +class OpTest(BaseOperatorTest): + """LpPool3d operator test with simplified implementation""" + + def __init__(self): + super().__init__("LpPool3d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.lp_pool3d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.lp_pool3d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/margin_ranking_loss.py b/test/infinicore/ops/margin_ranking_loss.py new file mode 100644 index 000000000..51b68c0b2 --- /dev/null +++ b/test/infinicore/ops/margin_ranking_loss.py @@ -0,0 +1,83 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore 
+from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input1_shape, input2_shape, target_shape, input1_strides_or_None, input2_strides_or_None, target_strides_or_None, margin_or_None) +# infinicore.nn.functional.margin_ranking_loss(input1, input2, target, margin=0, reduction='mean') + +_TEST_CASES_DATA = [ + ((4,), (4,), (4,), None, None, None, None), + ((8,), (8,), (8,), None, None, None, 1), + ((1,), (1,), (1,), None, None, None, None), + ((16,), (16,), (16,), (2,), None, None, 2), + ((3,), (3,), (3,), None, (1,), None, None), + ((2,), (2,), (2,), None, None, None, 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for s1, s2, st, st1, st2, stt, margin in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(s1, st1, dtype) + b = TensorSpec.from_tensor(s2, st2, dtype) + y = TensorSpec.from_tensor(st, stt, dtype) + + kwargs = {} + if margin is not None: + kwargs["margin"] = margin + + test_cases.append( + TestCase( + inputs=[a, b, y], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="margin_ranking_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """margin_ranking_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("margin_ranking_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.margin_ranking_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore
implementation (operator not yet available).""" + # return infinicore.nn.functional.margin_ranking_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/masked_select.py b/test/infinicore/ops/masked_select.py new file mode 100644 index 000000000..74774fb52 --- /dev/null +++ b/test/infinicore/ops/masked_select.py @@ -0,0 +1,69 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, mask_shape) +_TEST_CASES_DATA = [ + ((3, 4), None, (3, 4)), + ((5,), None, (5,)), + ((2, 2, 3), (12, 6, 2), (2, 2, 3)), + ((1, 6), None, (1, 6)), + ((4, 4), None, (4, 4)), + ((2, 3, 2), None, (2, 3, 2)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, mask_shape in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + mask_spec = TensorSpec.from_tensor(mask_shape, None, infinicore.bool) + + test_cases.append( + TestCase( + inputs=[input_spec, mask_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"masked_select - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """MaskedSelect operator test with simplified implementation""" + + def __init__(self): + super().__init__("MaskedSelect") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.masked_select(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation 
(operator not yet available).""" + # return infinicore.masked_select(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/matrix_exp.py b/test/infinicore/ops/matrix_exp.py new file mode 100644 index 000000000..afadd3048 --- /dev/null +++ b/test/infinicore/ops/matrix_exp.py @@ -0,0 +1,77 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (matrix_shape, strides_or_None) +# infinicore.matrix_exp(input) + +_TEST_CASES_DATA = [ + ((1, 1), None), + ((2, 2), None), + ((3, 3), (18, 6)), + ((4, 4), None), + ((6, 6), None), + ((8, 8), (512, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + + test_cases.append( + TestCase( + inputs=[spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="matrix_exp - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """matrix_exp operator test with simplified implementation""" + + def __init__(self): + super().__init__("matrix_exp") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.matrix_exp(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore 
implementation (operator not yet available).""" + # return infinicore.matrix_exp(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/matrix_power.py b/test/infinicore/ops/matrix_power.py new file mode 100644 index 000000000..d4d073829 --- /dev/null +++ b/test/infinicore/ops/matrix_power.py @@ -0,0 +1,79 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (matrix_shape, strides_or_None, n_or_None) +# infinicore.matrix_power(input, n) + +_TEST_CASES_DATA = [ + ((2, 2), None, 1), + ((3, 3), None, 2), + ((4, 4), (256, 64), 3), + ((1, 1), None, 5), + ((6, 6), None, 0), + ((8, 8), (512, 1), 2), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, n in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"n": n} if n is not None else {} + + test_cases.append( + TestCase( + inputs=[spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="matrix_power - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """matrix_power operator test with simplified implementation""" + + def __init__(self): + super().__init__("matrix_power") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return 
torch.matrix_power(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.matrix_power(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/max.py b/test/infinicore/ops/max.py new file mode 100644 index 000000000..5891918a0 --- /dev/null +++ b/test/infinicore/ops/max.py @@ -0,0 +1,122 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) +# max reduces by taking maximum along dim(s) or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, 0, False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] 
+ for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Max - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace and dim is None: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Max - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Max operator test with simplified implementation""" + + def __init__(self): + super().__init__("Max") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.max(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.max(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/max_pool1d.py b/test/infinicore/ops/max_pool1d.py new file mode 100644 index 000000000..0562d94d3 --- /dev/null +++ b/test/infinicore/ops/max_pool1d.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# 
Test cases format: (in_shape, in_strides_or_None, kernel_size, stride_or_None, padding, dilation, ceil_mode) + +_TEST_CASES_DATA = [ + ((2, 3, 16), None, 3, None, 0, 1, False), + ((1, 4, 15), (60, 15, 1), 5, 1, 2, 1, True), + ((2, 1, 32), None, 2, 2, 0, 1, False), + ((3, 2, 7), (14, 7, 1), 3, None, 1, 1, True), + ((4, 6, 31), None, 4, 2, 1, 1, False), + ((2, 8, 9), (72, 9, 1), 3, 1, 0, 1, False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0.0, "rtol": 0.0}, + infinicore.float32: {"atol": 0.0, "rtol": 0.0}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, k, s, p, d, ceil_mode in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = {"kernel_size": k, "dilation": d, "ceil_mode": ceil_mode} + if s is not None: + kwargs["stride"] = s + if p is not None: + kwargs["padding"] = p + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="MaxPool1d - OUT_OF_PLACE", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """MaxPool1d operator test with simplified implementation""" + + def __init__(self): + super().__init__("MaxPool1d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.max_pool1d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.max_pool1d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/max_pool2d.py b/test/infinicore/ops/max_pool2d.py new file mode 100644 index 000000000..933d938b8 --- /dev/null +++ 
b/test/infinicore/ops/max_pool2d.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None, kernel_size, stride_or_None, padding, dilation, ceil_mode) + +_TEST_CASES_DATA = [ + ((2, 3, 16, 16), None, (3, 3), None, (1, 1), (1, 1), False), + ((1, 4, 15, 17), (1020, 255, 17, 1), (5, 4), (2, 2), (2, 1), (1, 1), True), + ((2, 1, 32, 32), None, (2, 2), (2, 2), (0, 0), (1, 1), False), + ((3, 2, 7, 9), None, (3, 3), None, (1, 1), (1, 1), True), + ((4, 6, 31, 29), None, (4, 4), (2, 2), (1, 1), (1, 1), False), + ((2, 8, 9, 11), (1584, 198, 11, 1), (3, 2), 1, 0, (1, 1), False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0.0, "rtol": 0.0}, + infinicore.float32: {"atol": 0.0, "rtol": 0.0}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, k, s, p, d, ceil_mode in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = {"kernel_size": k, "dilation": d, "ceil_mode": ceil_mode} + if s is not None: + kwargs["stride"] = s + if p is not None: + kwargs["padding"] = p + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="MaxPool2d - OUT_OF_PLACE", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """MaxPool2d operator test with simplified implementation""" + + def __init__(self): + super().__init__("MaxPool2d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.max_pool2d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore 
implementation (operator not yet available).""" + # return infinicore.nn.functional.max_pool2d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/max_pool3d.py b/test/infinicore/ops/max_pool3d.py new file mode 100644 index 000000000..6e2564fea --- /dev/null +++ b/test/infinicore/ops/max_pool3d.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None, kernel_size, stride_or_None, padding, dilation, ceil_mode) + +_TEST_CASES_DATA = [ + ((1, 2, 8, 8, 8), None, (2, 2, 2), None, (0, 0, 0), (1, 1, 1), False), + ((2, 3, 7, 9, 5), None, (3, 2, 3), (2, 2, 1), (1, 1, 0), (1, 1, 1), True), + ((1, 4, 16, 16, 6), None, (4, 4, 2), (2, 2, 1), (0, 1, 0), (1, 1, 1), False), + ((2, 1, 9, 11, 7), None, (3, 3, 3), None, (1, 0, 1), (1, 1, 1), True), + ((3, 2, 5, 6, 4), None, (2, 2, 2), (1, 1, 1), 0, (1, 1, 1), False), + ((2, 6, 10, 9, 8), None, (3, 3, 2), (2, 1, 2), (1, 0, 1), (1, 1, 1), False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0.0, "rtol": 0.0}, + infinicore.float32: {"atol": 0.0, "rtol": 0.0}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for in_shape, in_strides, k, s, p, d, ceil_mode in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + kwargs = {"kernel_size": k, "dilation": d, "ceil_mode": ceil_mode} + if s is not None: + kwargs["stride"] = s + if p is not None: + kwargs["padding"] = p + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, 
+ description="MaxPool3d - OUT_OF_PLACE", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """MaxPool3d operator test with simplified implementation""" + + def __init__(self): + super().__init__("MaxPool3d") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.max_pool3d(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.max_pool3d(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/maximum.py b/test/infinicore/ops/maximum.py new file mode 100644 index 000000000..26931e2a6 --- /dev/null +++ b/test/infinicore/ops/maximum.py @@ -0,0 +1,109 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides, b_strides) +_TEST_CASES_DATA = [ + ((6, 8), None, None), + ((8, 4), (16, 1), None), + ((5, 5), None, (10, 1)), + ((3, 7), (14, 1), (14, 1)), + ((10, 3), None, None), + ((2, 16), (32, 1), (32, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides = data + + a_inplace = not is_broadcast(a_strides) + b_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + a_spec = 
TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="maximum - OUT_OF_PLACE", + ) + ) + + # In-place variations + if a_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="maximum - INPLACE(a)", + ) + ) + + if b_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="maximum - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Maximum operator test with simplified implementation""" + + def __init__(self): + super().__init__("Maximum") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.maximum(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.maximum(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/mean.py b/test/infinicore/ops/mean.py new file mode 100644 index 000000000..9cc81000c --- /dev/null +++ b/test/infinicore/ops/mean.py @@ -0,0 +1,123 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) +# mean computes average along dimension(s) or overall + 
+_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, (0,), False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Mean - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Mean - 
INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Mean operator test with simplified implementation""" + + def __init__(self): + super().__init__("Mean") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.mean(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.mean(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/median.py b/test/infinicore/ops/median.py new file mode 100644 index 000000000..193fc155f --- /dev/null +++ b/test/infinicore/ops/median.py @@ -0,0 +1,110 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) +# median can return median values (and indices when dim is provided) + +_TEST_CASES_DATA = [ + ((9,), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 5), None, 2, True, (0, 1, 1)), + ((1, 8), None, 0, False, None), + ((16, 63), (128, 1), None, None, None), + ((5, 6, 7), (210, 35, 5), 2, True, (35, 5, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return 
tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Median - OUT_OF_PLACE", + ) + ) + + # Note: infinicore.median returns (values, indices) when dim is provided. explicit out param for both outputs is not available in PyTorch. 
+ + return test_cases + + +class OpTest(BaseOperatorTest): + """Median operator test with simplified implementation""" + + def __init__(self): + super().__init__("Median") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.median(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.median(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/min.py b/test/infinicore/ops/min.py new file mode 100644 index 000000000..c5a023405 --- /dev/null +++ b/test/infinicore/ops/min.py @@ -0,0 +1,122 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) +# min reduces by taking minimum along dim(s) or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, (0, 1, 1)), + ((1, 8), None, 0, False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for 
i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Min - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace and dim is None: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Min - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Min operator test with simplified implementation""" + + def __init__(self): + super().__init__("Min") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.min(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.min(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/minimum.py b/test/infinicore/ops/minimum.py new file mode 100644 index 000000000..00af20195 --- /dev/null +++ b/test/infinicore/ops/minimum.py 
@@ -0,0 +1,107 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, a_strides, b_strides) +_TEST_CASES_DATA = [ + ((6, 8), None, None), + ((8, 4), (16, 1), None), + ((5, 5), None, (10, 1)), + ((3, 7), (14, 1), (14, 1)), + ((10, 3), None, None), + ((2, 16), (32, 1), (32, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, a_strides, b_strides = data + + a_inplace = not is_broadcast(a_strides) + b_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="minimum - OUT_OF_PLACE", + ) + ) + + if a_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="minimum - INPLACE(a)", + ) + ) + + if b_inplace: + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="minimum - INPLACE(b)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Minimum operator test with simplified implementation""" + + def __init__(self): + super().__init__("Minimum") + + def 
get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.minimum(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.minimum(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/mish.py b/test/infinicore/ops/mish.py new file mode 100644 index 000000000..f714d4377 --- /dev/null +++ b/test/infinicore/ops/mish.py @@ -0,0 +1,79 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 8, 8), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases():
    """mish(input)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description="Mish - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Mish operator test with simplified implementation""" + + def __init__(self): 
super().__init__("Mish") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.mish(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.mish(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/mse_loss.py b/test/infinicore/ops/mse_loss.py new file mode 100644 index 000000000..69cb4802b --- /dev/null +++ b/test/infinicore/ops/mse_loss.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, target_shape, input_strides_or_None, reduction_or_None) +# infinicore.nn.functional.mse_loss(input, target, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), (4, 5), None, None), + ((8, 8), (8, 8), (512, 64), "sum"), + ((1, 10), (1, 10), None, "mean"), + ((16, 100), (16, 100), None, None), + ((3, 7), (3, 7), None, "none"), + ((2, 2), (2, 2), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, tgt_shape, strides, reduction in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + tgt = TensorSpec.from_tensor(tgt_shape, None, dtype) + + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction 
+ + test_cases.append( + TestCase( + inputs=[inp, tgt], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="mse_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """mse_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("mse_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.mse_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.mse_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/msort.py b/test/infinicore/ops/msort.py new file mode 100644 index 000000000..b5c0aa77a --- /dev/null +++ b/test/infinicore/ops/msort.py @@ -0,0 +1,95 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides) +_TEST_CASES_DATA = [ + ((6, 8), None), + ((8, 4), (16, 1)), + ((5, 5), None), + ((3, 7), (14, 1)), + ((10, 3), None), + ((2, 16), (32, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-3}, + infinicore.float32: {"atol": 0, "rtol": 1e-5}, + infinicore.bfloat16: {"atol": 0, "rtol": 1e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, in_strides = data + + out_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, 
{"atol": 0, "rtol": 1e-5}) + + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="msort - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + out_spec = TensorSpec.from_tensor(shape, None, dtype) + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=None, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="msort - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """MSort operator test with simplified implementation""" + + def __init__(self): + super().__init__("MSort") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.msort(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.msort(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/multilabel_margin_loss.py b/test/infinicore/ops/multilabel_margin_loss.py new file mode 100644 index 000000000..d35ea55dd --- /dev/null +++ b/test/infinicore/ops/multilabel_margin_loss.py @@ -0,0 +1,88 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.tensor import TensorInitializer + +# Test cases format: (input_shape_N_C, target_shape_N_C, input_strides_or_None, reduction_or_None) +# infinicore.nn.functional.multilabel_margin_loss(input, target, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), (4, 5), None, None), + ((8, 6), (8, 6), None, "sum"), + ((1, 3), (1, 
3), None, "mean"), + ((16, 10), (16, 10), None, None), + ((3, 4), (3, 4), None, "none"), + ((2, 2), (2, 2), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for inp_shape, tgt_shape, strides, reduction in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(inp_shape, strides, dtype) + tgt = TensorSpec.from_tensor( + tgt_shape, + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=inp_shape[1], + ) + + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + + test_cases.append( + TestCase( + inputs=[inp, tgt], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="multilabel_margin_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """multilabel_margin_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("multilabel_margin_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.multilabel_margin_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.multilabel_margin_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/multilabel_soft_margin_loss.py b/test/infinicore/ops/multilabel_soft_margin_loss.py new file mode 100644 index 000000000..ccf666fba --- /dev/null +++ b/test/infinicore/ops/multilabel_soft_margin_loss.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + 
+import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, target_shape, input_strides_or_None, reduction_or_None) +# infinicore.nn.functional.multilabel_soft_margin_loss(input, target, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), (4, 5), None, None), + ((8, 6), (8, 6), None, "sum"), + ((1, 3), (1, 3), None, "mean"), + ((16, 10), (16, 10), None, None), + ((3, 4), (3, 4), None, "none"), + ((2, 2), (2, 2), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-1}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for inp_shape, tgt_shape, strides, reduction in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(inp_shape, strides, dtype) + tgt = TensorSpec.from_tensor(tgt_shape, None, dtype) + + kwargs = {} + if reduction is not None: + kwargs["reduction"] = reduction + + test_cases.append( + TestCase( + inputs=[inp, tgt], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="multilabel_soft_margin_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """multilabel_soft_margin_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("multilabel_soft_margin_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.multilabel_soft_margin_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return 
infinicore.nn.functional.multilabel_soft_margin_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/mv.py b/test/infinicore/ops/mv.py new file mode 100644 index 000000000..d2b60052b --- /dev/null +++ b/test/infinicore/ops/mv.py @@ -0,0 +1,93 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (mat_shape, vec_shape, mat_strides_or_None, vec_strides_or_None) +# mv(mat, vec, out=None) + +_TEST_CASES_DATA = [ + ((3, 4), (4,), None, None), + ((8, 8), (8,), (512, 1), None), + ((1, 5), (5,), None, None), + ((6, 6), (6,), None, (0,)), + ((12, 12), (12,), (144, 12), None), + ((16, 8), (8,), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for m_shape, v_shape, m_strides, v_strides in _TEST_CASES_DATA: + out_supports_inplace = not is_broadcast(v_strides) + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + m = TensorSpec.from_tensor(m_shape, m_strides, dtype) + v = TensorSpec.from_tensor(v_shape, v_strides, dtype) + + test_cases.append( + TestCase( + inputs=[m, v], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="mv - OUT_OF_PLACE", + ) + ) + + if out_supports_inplace: + out_spec = TensorSpec.from_tensor((m_shape[0],), None, dtype) + test_cases.append( + TestCase( + inputs=[m, v], + kwargs=None, + output_spec=out_spec, + 
comparison_target="out", + tolerance=tol, + description="mv - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """mv operator test with simplified implementation""" + + def __init__(self): + super().__init__("mv") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.mv(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.mv(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/narrow.py b/test/infinicore/ops/narrow.py new file mode 100644 index 000000000..953d388d4 --- /dev/null +++ b/test/infinicore/ops/narrow.py @@ -0,0 +1,70 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, dim, start, length) +_TEST_CASES_DATA = [ + ((5, 6), None, 1, 0, 3), + ((4, 4), (16, 1), 0, 1, 2), + ((3, 5), None, 1, 1, 2), + ((2, 6), None, 1, 0, 1), + ((6, 3), None, 0, 2, 2), + ((4, 7), (28, 1), 1, 1, 3), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, dim, start, length in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + + kwargs = {"dim": dim, "start": start, "length": length} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"narrow - OUT_OF_PLACE", + ) + ) + + return test_cases + + 
+class OpTest(BaseOperatorTest): + """Narrow operator test with simplified implementation""" + + def __init__(self): + super().__init__("Narrow") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.narrow(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.narrow(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/narrow_copy.py b/test/infinicore/ops/narrow_copy.py new file mode 100644 index 000000000..1d8f84249 --- /dev/null +++ b/test/infinicore/ops/narrow_copy.py @@ -0,0 +1,83 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (input_shape, input_strides_or_None, dim, start, length) +_TEST_CASES_DATA = [ + ((5, 6), None, 1, 0, 3), + ((4, 4), (16, 1), 0, 1, 2), + ((3, 5), None, 1, 1, 2), + ((2, 6), None, 1, 0, 1), + ((6, 3), None, 0, 2, 2), + ((4, 7), (28, 1), 1, 1, 3), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, dim, start, length in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + + out_spec = ( + TensorSpec.from_tensor( + (length,) + + tuple(s for s in shape if s is not None and s != shape[dim]), + None, + infinicore.float32, + ) + if False + else None + ) + # Above out_spec construction is conservative and not used; we let test framework compare outputs directly. 
+ + kwargs = {"dim": dim, "start": start, "length": length} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"narrow_copy - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """NarrowCopy operator test with simplified implementation""" + + def __init__(self): + super().__init__("NarrowCopy") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.narrow_copy(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.narrow_copy(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/nll_loss.py b/test/infinicore/ops/nll_loss.py new file mode 100644 index 000000000..e7c1a897d --- /dev/null +++ b/test/infinicore/ops/nll_loss.py @@ -0,0 +1,103 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.tensor import TensorInitializer + +# Test cases format: (input_shape_N_C, target_shape_N, input_strides_or_None, weight_present_bool, ignore_index_or_None) +# infinicore.nn.functional.nll_loss(input, target, weight=None, ignore_index=-100, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 5), (4,), None, False, None), + ((8, 10), (8,), None, True, -1), + ((1, 3), (1,), None, False, None), + ((16, 6), (16,), (96, 6), True, None), + ((3, 7), (3,), None, False, None), + ((2, 2), (2,), None, True, -100), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, 
"rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for ( + logits_shape, + target_shape, + logits_strides, + weight_present, + ignore_index, + ) in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + logits = TensorSpec.from_tensor(logits_shape, logits_strides, dtype) + target = TensorSpec.from_tensor(target_shape, None, infinicore.int64) + target = TensorSpec.from_tensor( + target_shape, + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=logits_shape[ + 1 + ], # high 是 exclusive 上限 => 0 ≤ target < num_classes + ) + + inputs = [logits, target] + kwargs = {} + if weight_present: + weight_spec = TensorSpec.from_tensor((logits_shape[1],), None, dtype) + inputs.append(weight_spec) + if ignore_index is not None: + kwargs["ignore_index"] = ignore_index + + test_cases.append( + TestCase( + inputs=inputs, + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="nll_loss - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """nll_loss operator test with simplified implementation""" + + def __init__(self): + super().__init__("nll_loss") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.nll_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.nll_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/nonzero.py b/test/infinicore/ops/nonzero.py new file mode 100644 index 000000000..95bde3277 --- /dev/null +++ 
b/test/infinicore/ops/nonzero.py @@ -0,0 +1,69 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None) +# nonzero returns indices of non-zero elements; output is N x ndim indices +_TEST_CASES_DATA = [ + ((3, 4), None), + ((5,), None), + ((2, 2, 3), (12, 6, 2)), + ((1, 6), None), + ((4, 4), None), + ((2, 3, 2), None), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.int64], + description=f"nonzero - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """NonZero operator test with simplified implementation""" + + def __init__(self): + super().__init__("NonZero") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nonzero(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nonzero(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/normalize.py b/test/infinicore/ops/normalize.py new file mode 100644 index 000000000..5c35243df --- /dev/null +++ b/test/infinicore/ops/normalize.py @@ -0,0 +1,85 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import 
infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, p_or_None, dim_or_None, eps_or_None) +# infinicore.nn.functional.normalize(input, p=2.0, dim=1, eps=1e-12) + +_TEST_CASES_DATA = [ + ((4, 3), None, None, None, None), + ((8, 5), (40, 5), 1.0, 1, 1e-12), + ((1, 10), None, 2.0, 1, 1e-6), + ((16, 100), None, float("inf"), 1, None), + ((3, 7), None, 0.5, 1, None), + ((2, 2), None, None, 0, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, p, dim, eps in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if p is not None: + kwargs["p"] = p + if dim is not None: + kwargs["dim"] = dim + if eps is not None: + kwargs["eps"] = eps + + test_cases.append( + TestCase( + inputs=[inp], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="normalize - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """normalize operator test with simplified implementation""" + + def __init__(self): + super().__init__("normalize") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.normalize(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.normalize(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + 
+ +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/one_hot.py b/test/infinicore/ops/one_hot.py new file mode 100644 index 000000000..2d7abdff9 --- /dev/null +++ b/test/infinicore/ops/one_hot.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.tensor import TensorInitializer +from framework.runner import GenericTestRunner + +# Test cases format: (indices_shape, indices_strides_or_None, num_classes_or_None) +_TEST_CASES_DATA = [ + ((5,), None, 10), + ((3, 4), None, 8), + ((2, 2), None, None), + ((1,), None, 3), + ((6,), None, 6), + ((4, 3), None, 12), +] + +_TOLERANCE_MAP = {infinicore.int64: {"atol": 0, "rtol": 0}} +_TENSOR_DTYPES = [infinicore.int64] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, num_classes in _TEST_CASES_DATA: + # ensure indices are non-negative and within [0, num_classes) when provided + if num_classes is not None: + high = num_classes + else: + high = max(1, shape[0]) + idx_spec = TensorSpec.from_tensor( + shape, + strides, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=high, + ) + + kwargs = {} + if num_classes is not None: + kwargs["num_classes"] = num_classes + + out_spec = None + + test_cases.append( + TestCase( + inputs=[idx_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target=None, + tolerance=_TOLERANCE_MAP.get(infinicore.int64), + description=f"one_hot - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """OneHot operator test with simplified implementation""" + + def __init__(self): + super().__init__("OneHot") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.one_hot(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore 
implementation (operator not yet available).""" + # return infinicore.nn.functional.one_hot(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/pad.py b/test/infinicore/ops/pad.py new file mode 100644 index 000000000..8b834eed7 --- /dev/null +++ b/test/infinicore/ops/pad.py @@ -0,0 +1,86 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, pad_tuple, mode, value_or_None, input_strides_or_None) +# infinicore.nn.functional.pad(input, pad, mode='constant', value=0) + +_TEST_CASES_DATA = [ + ((1, 3, 4, 4), (1, 1, 2, 2), "constant", 0.0, None), + ((2, 3, 8, 8), (2, 2, 2, 2), "reflect", None, (384, 128, 16, 1)), + ((1, 1, 10), (3, 3), "replicate", None, None), + ((2, 3, 6, 6), (1, 0, 1, 0), "constant", 1.5, None), + ((3, 4, 5), (2, 2, 1, 1), "circular", None, None), + ((4, 5), (1, 2), "constant", -1.0, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, pad_t, mode, value, in_strides = data + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"pad": pad_t, "mode": mode} + if value is not None: + kwargs["value"] = value + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + 
output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"pad - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Pad operator test with simplified implementation""" + + def __init__(self): + super().__init__("Pad") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.pad(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.pad(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/pairwise_distance.py b/test/infinicore/ops/pairwise_distance.py new file mode 100644 index 000000000..e2a913b82 --- /dev/null +++ b/test/infinicore/ops/pairwise_distance.py @@ -0,0 +1,88 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, p, eps, keepdim, a_strides_or_None, b_strides_or_None) +# infinicore.nn.functional.pairwise_distance(x1, x2, p=2.0, eps=1e-6, keepdim=False) + +_TEST_CASES_DATA = [ + ((8, 16), 2.0, 1e-6, False, None, None), + ((8, 16), 1.0, 1e-6, False, (128, 1), (128, 1)), + ((2, 3, 4), 2.0, 1e-6, True, None, None), + ((16, 64), 3.0, 1e-6, False, None, None), + ((4, 5, 6), 2.0, 1e-6, False, None, None), + ((3, 4, 5), 2.0, 1e-6, True, (60, 20, 4), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + 
+ +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, p, eps, keepdim = data[0], data[1], data[2], data[3] + a_strides = data[4] if len(data) > 4 else None + b_strides = data[5] if len(data) > 5 else None + + a_supports_inplace = not is_broadcast(a_strides) + b_supports_inplace = not is_broadcast(b_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a_spec = TensorSpec.from_tensor(shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor(shape, b_strides, dtype) + + kwargs = {"p": p, "eps": eps, "keepdim": keepdim} + + test_cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"pairwise_distance - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """PairwiseDistance operator test with simplified implementation""" + + def __init__(self): + super().__init__("PairwiseDistance") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.pairwise_distance(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.pairwise_distance(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/pdist.py b/test/infinicore/ops/pdist.py new file mode 100644 index 000000000..e86f5e9cf --- /dev/null +++ b/test/infinicore/ops/pdist.py @@ -0,0 +1,80 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases 
format: (shape, p, a_strides_or_None) +# infinicore.pdist(input, p=2.0) computes pairwise distances between rows of input + +_TEST_CASES_DATA = [ + ((8, 16), 2.0, None), + ((10, 5), 2.0, (80, 8)), + ((4, 3), 1.0, None), + ((6, 8), 3.0, None), + ((2, 16), 2.0, None), + ((12, 4), 2.0, None), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, p = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"p": p} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"pdist - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Pdist operator test with simplified implementation""" + + def __init__(self): + super().__init__("Pdist") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.pdist(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.pdist(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/pixel_shuffle.py b/test/infinicore/ops/pixel_shuffle.py new file mode 100644 index 000000000..5c1b08b80 --- /dev/null +++ b/test/infinicore/ops/pixel_shuffle.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import 
GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, upscale_factor, input_strides_or_None) +# infinicore.nn.functional.pixel_shuffle(input, upscale_factor) + +_TEST_CASES_DATA = [ + ((1, 4, 8, 8), 2, None), + ((2, 9, 4, 4), 3, (288, 144, 36, 9)), + ((1, 16, 4, 4), 4, None), + ((3, 8, 6, 6), 2, None), + ((2, 12, 3, 3), 2, None), + ((4, 27, 2, 2), 3, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, factor = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"upscale_factor": factor} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"pixel_shuffle - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """PixelShuffle operator test with simplified implementation""" + + def __init__(self): + super().__init__("PixelShuffle") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.pixel_shuffle(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.pixel_shuffle(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/pixel_unshuffle.py 
b/test/infinicore/ops/pixel_unshuffle.py new file mode 100644 index 000000000..a3a0dab29 --- /dev/null +++ b/test/infinicore/ops/pixel_unshuffle.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, downscale_factor, input_strides_or_None) +# infinicore.nn.functional.pixel_unshuffle(input, downscale_factor) + +_TEST_CASES_DATA = [ + ((1, 4, 8, 8), 2, None), + ((2, 9, 6, 6), 3, (648, 72, 12, 2)), + ((1, 16, 4, 4), 4, None), + ((3, 8, 6, 6), 2, None), + ((2, 12, 4, 4), 2, None), + ((4, 27, 6, 6), 3, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, factor = data[0], data[1] + in_strides = data[2] if len(data) > 2 else None + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"downscale_factor": factor} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"pixel_unshuffle - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """PixelUnshuffle operator test with simplified implementation""" + + def __init__(self): + super().__init__("PixelUnshuffle") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return 
torch.nn.functional.pixel_unshuffle(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.pixel_unshuffle(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/prelu.py b/test/infinicore/ops/prelu.py new file mode 100644 index 000000000..9c7731f04 --- /dev/null +++ b/test/infinicore/ops/prelu.py @@ -0,0 +1,92 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, weight_shape_or_None) +# Note: PReLU requires a weight parameter of shape (C,) or (1,), we create a per-channel weight when possible. 
+ +_TEST_CASES_DATA = [ + ((4, 4), None, None), + ((8, 4, 4), (128, 32, 1), (4,)), + ((2, 3, 6), None, (3,)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """prelu(input, weight)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + weight_shape = data[2] if len(data) > 2 and data[2] is not None else None + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + # Determine default weight shape: channel dimension if available + if weight_shape is None: + if len(shape) >= 2: + c = shape[1] + weight_spec = TensorSpec.from_tensor((c,), None, dtype) + else: + weight_spec = TensorSpec.from_tensor((1,), None, dtype) + else: + weight_spec = TensorSpec.from_tensor(weight_shape, None, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec, weight_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"PReLU - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """PReLU operator test with simplified implementation""" + + def __init__(self): + super().__init__("PReLU") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.prelu(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.prelu(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git 
a/test/infinicore/ops/prod.py b/test/infinicore/ops/prod.py new file mode 100644 index 000000000..72392a4d1 --- /dev/null +++ b/test/infinicore/ops/prod.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, dtype_or_None) +# prod computes product along dim(s) or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, None), + ((1, 8), None, 0, False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, dtype_param = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + if dtype_param is not None: + kwargs["dtype"] = dtype_param + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Prod - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Prod operator test with simplified implementation""" + + def __init__(self): + super().__init__("Prod") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.prod(*args, 
**kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.prod(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/quantile.py b/test/infinicore/ops/quantile.py new file mode 100644 index 000000000..753f52afe --- /dev/null +++ b/test/infinicore/ops/quantile.py @@ -0,0 +1,123 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, q_or_None, dim_or_None, keepdim_or_None, out_strides_or_None) +# quantile computes quantiles along dim or overall. q may be float or tensor + +_TEST_CASES_DATA = [ + ((8, 8), None, 0.5, None, None, None), + ((8, 8), (16, 1), 0.25, 1, False, None), + ((2, 3, 4), None, 0.75, 2, True, (0, 1, 1)), + ((1, 8), None, torch.tensor([0.1, 0.9]), 0, False, None), + ((16, 64), (128, 1), 0.5, None, None, None), + ((4, 5, 6), (60, 12, 2), 0.5, 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim, q_is_tensor=False): + # if q is tensor with len>1, output shape may include q dim; keep simple: when q is tensor, return (len(q), ...) 
prefix + if dim is None: + base = () + else: + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + else: + dims = [dim] + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + base = tuple(out) + else: + base = tuple(s for i, s in enumerate(shape) if i not in dims) + + if q_is_tensor: + # Prepend q-length as first dim + return (2,) + base + return base + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, q, dim, keepdim, out_strides = data + q_is_tensor = isinstance(q, torch.Tensor) + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {"q": q} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Quantile - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim, q_is_tensor=q_is_tensor) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Quantile - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Quantile operator test with simplified implementation""" + + def __init__(self): + super().__init__("Quantile") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.quantile(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.quantile(*args, **kwargs) + + +def main(): + """Main entry point""" + 
runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/random_sample.py b/test/infinicore/ops/random_sample.py index f5e993f75..faa625134 100644 --- a/test/infinicore/ops/random_sample.py +++ b/test/infinicore/ops/random_sample.py @@ -80,7 +80,7 @@ def parse_test_cases(): ), comparison_target="out", tolerance=tolerance, - description=f"RandomSample - OUT", + description=f"RandomSample - INPLACE(out)", ) ) diff --git a/test/infinicore/ops/reciprocal.py b/test/infinicore/ops/reciprocal.py new file mode 100644 index 000000000..2f2c4885e --- /dev/null +++ b/test/infinicore/ops/reciprocal.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.reciprocal(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="reciprocal_out_of_place", + ) + ) + + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + 
description="reciprocal_explicit_out", + ) + ) + + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="reciprocal_inplace_input0", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Reciprocal operator test with simplified implementation""" + + def __init__(self): + super().__init__("Reciprocal") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.reciprocal(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.reciprocal(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/relu6.py b/test/infinicore/ops/relu6.py new file mode 100644 index 000000000..8ea21fc78 --- /dev/null +++ b/test/infinicore/ops/relu6.py @@ -0,0 +1,93 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 8, 8), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """relu6(input, inplace=False)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + input_supports_inplace = 
not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"ReLU6 - OUT_OF_PLACE", + ) + ) + + if input_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={"inplace": True}, + output_spec=None, + comparison_target=0, + tolerance=tolerance, + description=f"ReLU6 - INPLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """ReLU6 operator test with simplified implementation""" + + def __init__(self): + super().__init__("ReLU6") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.relu6(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.relu6(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/remainder.py b/test/infinicore/ops/remainder.py new file mode 100644 index 000000000..816b626ad --- /dev/null +++ b/test/infinicore/ops/remainder.py @@ -0,0 +1,114 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None) +# infinicore.remainder(a, b) + +_TEST_CASES_DATA = [ + ((2, 3, 4), None, None), + ((1, 4, 8), (32, 8, 1), None), + ((3, 2, 5, 7), None, None), + ((2, 1, 16), None, None), + ((1, 8, 9, 11), 
(792, 99, 11, 1), None), + ((2, 6, 10), None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for a_shape, a_strides, b_shape in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + a_spec = TensorSpec.from_tensor(a_shape, a_strides, dtype) + b_spec = TensorSpec.from_tensor( + a_shape if b_shape is None else b_shape, None, dtype + ) + + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="remainder_out_of_place", + ) + ) + + out_spec = TensorSpec.from_tensor(a_shape, None, dtype) + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="remainder_explicit_out", + ) + ) + + if not is_broadcast(a_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="remainder_inplace_a", + ) + ) + if not is_broadcast(b_spec.strides): + cases.append( + TestCase( + inputs=[a_spec, b_spec], + kwargs={"out": 1}, + output_spec=None, + comparison_target=1, + tolerance=tol, + description="remainder_inplace_b", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Remainder operator test with simplified implementation""" + + def __init__(self): + super().__init__("Remainder") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.remainder(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.remainder(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + 
runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/reshape.py b/test/infinicore/ops/reshape.py new file mode 100644 index 000000000..dd9291839 --- /dev/null +++ b/test/infinicore/ops/reshape.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, new_shape) +# reshape can change shape; out parameter is not used in infinicore.reshape API (returns view or tensor) + +_TEST_CASES_DATA = [ + ((2, 6), None, (3, 4)), + ((3, 4), (12, 1), (12,)), + ((4, 2, 3), None, (2, 12)), + ((2, 3, 4), (48, 16, 4), (6, 4)), + ((16, 64), None, (64, 16)), + ((1, 24), None, (2, 12)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + in_shape, in_strides, new_shape = data[0], data[1], data[2] + + supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype) + + # Out-of-place (reshape returns tensor) + # Following reference pattern: pass new shape as positional arg to infinicore.reshape + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs={"shape": new_shape}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"reshape - OUT_OF_PLACE", + ) + ) + + # In-place reshape (view-based) is not an explicit API; skip INPLACE cases. 
+ # Note: infinicore.reshape may return a view; framework will compare returned tensor. + + return test_cases + + +class OpTest(BaseOperatorTest): + """Reshape operator test with simplified implementation""" + + def __init__(self): + super().__init__("Reshape") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.reshape(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.reshape(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/rot90.py b/test/infinicore/ops/rot90.py new file mode 100644 index 000000000..0556311a3 --- /dev/null +++ b/test/infinicore/ops/rot90.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, k, dims_tuple, input_strides_or_None) +# infinicore.rot90(input, k=1, dims=(0,1)) + +_TEST_CASES_DATA = [ + ((3, 4), 1, (0, 1), None), + ((2, 3, 4), 2, (1, 2), (24, 8, 2)), + ((4, 5, 6, 7), 3, (2, 3), None), + ((6, 7), 1, (0, 1), None), + ((2, 2, 3, 4), 2, (1, 3), None), + ((5, 6, 7), 1, (0, 2), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, k, dims = data[0], data[1], data[2] + in_strides = data[3] if len(data) > 3 else None + + supports_inplace = not 
is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-4}) + in_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"k": k, "dims": dims} + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"rot90 - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Rot90 operator test with simplified implementation""" + + def __init__(self): + super().__init__("Rot90") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.rot90(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.rot90(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/round.py b/test/infinicore/ops/round.py new file mode 100644 index 000000000..2a371d7d1 --- /dev/null +++ b/test/infinicore/ops/round.py @@ -0,0 +1,104 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# round(input, decimals=0) +# We'll test with various decimals including negative values and None. 
+ +_TEST_CASES_DATA = [ + ((2, 3), None, 0), + ((1, 4, 8), None, 1), + ((3, 2, 5, 7), None, -1), + ((2, 1, 16), None, 2), + ((1, 8, 9, 11), None, 0), + ((2, 6, 10), None, 3), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides, decimals in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + # out-of-place + kwargs = {"decimals": decimals} if decimals is not None else {} + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="round_out", + ) + ) + + # explicit out + out_spec = TensorSpec.from_tensor(shape, strides, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="round_explicit_out", + ) + ) + + # in-place when not broadcast + if not is_broadcast(strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={**kwargs, "out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="round_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Round operator test with simplified implementation""" + + def __init__(self): + super().__init__("Round") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.round(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.round(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/rrelu.py 
b/test/infinicore/ops/rrelu.py new file mode 100644 index 000000000..2ae5de544 --- /dev/null +++ b/test/infinicore/ops/rrelu.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, lower_or_None, upper_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None, 0.125, 0.333), + ((13, 4), (10, 1), 0.1, 0.3), + ((8, 8, 8), None, 0.05, 0.2), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """rrelu(input, lower=0.125, upper=0.333..., training=False, inplace=False)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + lower = data[2] if len(data) > 2 else 0.125 + upper = data[3] if len(data) > 3 else 0.333 + + input_supports_inplace = not is_broadcast(in_strides) + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {"lower": lower, "upper": upper} + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"RReLU - OUT_OF_PLACE", + ) + ) + + if input_supports_inplace: + inplace_kwargs = {"lower": lower, "upper": upper, "inplace": True} + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=inplace_kwargs, + output_spec=None, + comparison_target=0, + tolerance=tolerance, + description=f"RReLU - INPLACE", + ) + ) + + return test_cases + 
+ +class OpTest(BaseOperatorTest): + """RReLU operator test with simplified implementation""" + + def __init__(self): + super().__init__("RReLU") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.rrelu(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.rrelu(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/scaled_dot_product_attention.py b/test/infinicore/ops/scaled_dot_product_attention.py new file mode 100644 index 000000000..e907d0250 --- /dev/null +++ b/test/infinicore/ops/scaled_dot_product_attention.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (q_shape, k_shape, v_shape, attn_mask_or_None, dropout_p, is_causal) +# q/k/v typically have shape (..., seq_len, head_dim) or (batch, seq_len, num_heads, head_dim) + +_TEST_CASES_DATA = [ + ((2, 8, 16), (2, 8, 16), (2, 8, 16), None, 0.0, False), + ((1, 4, 32), (1, 4, 32), (1, 4, 32), None, 0.0, False), + ((2, 6, 12), (2, 6, 12), (2, 6, 12), None, 0.0, True), + ((3, 8, 8), (3, 8, 8), (3, 8, 8), None, 0.0, False), + ((2, 4, 16), (2, 4, 16), (2, 4, 16), None, 0.0, True), + ((1, 2, 64), (1, 2, 64), (1, 2, 64), None, 0.0, False), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-4, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for q_shape, k_shape, v_shape, attn_mask, dropout_p, is_causal in _TEST_CASES_DATA: 
+ for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + q_spec = TensorSpec.from_tensor(q_shape, None, dtype) + k_spec = TensorSpec.from_tensor(k_shape, None, dtype) + v_spec = TensorSpec.from_tensor(v_shape, None, dtype) + kwargs = { + "attn_mask": attn_mask, + "dropout_p": dropout_p, + "is_causal": is_causal, + } + # remove None keys + kwargs = {k: v for k, v in kwargs.items() if v is not None} + + cases.append( + TestCase( + inputs=[q_spec, k_spec, v_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="ScaledDotProductAttention", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """ScaledDotProductAttention operator test with simplified implementation""" + + def __init__(self): + super().__init__("ScaledDotProductAttention") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.scaled_dot_product_attention(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.scaled_dot_product_attention(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/scatter.py b/test/infinicore/ops/scatter.py new file mode 100644 index 000000000..2cf0652ab --- /dev/null +++ b/test/infinicore/ops/scatter.py @@ -0,0 +1,96 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.tensor import TensorInitializer +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (input_shape, input_strides_or_None, dim, index_shape, src_shape) +_TEST_CASES_DATA = [ + ((5, 6), None, 1, (5, 2), 
(5, 2)), + ((4, 4), (16, 1), 0, (2, 4), (2, 4)), + ((3, 5), None, 1, (3, 3), (3, 3)), + ((2, 6), None, 1, (2, 2), (2, 2)), + ((6, 3), (18, 1), 0, (3, 3), (3, 3)), + ((4, 7), None, 1, (4, 2), (4, 2)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for tgt_shape, tgt_strides, dim, idx_shape, src_shape in _TEST_CASES_DATA: + tgt_spec = TensorSpec.from_tensor(tgt_shape, tgt_strides, infinicore.float32) + # initialize index tensor within [0, size) for the target dim to avoid OOB + effective_dim = dim if dim >= 0 else (dim + len(tgt_shape)) + max_index = tgt_shape[effective_dim] + idx_spec = TensorSpec.from_tensor( + idx_shape, + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=max_index, + ) + src_spec = TensorSpec.from_tensor(src_shape, None, infinicore.float32) + + out_supports = not is_broadcast(tgt_strides) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[tgt_spec, idx_spec, src_spec], + kwargs={"dim": dim}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"scatter - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Scatter operator test with simplified implementation""" + + def __init__(self): + super().__init__("Scatter") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + if len(args) < 3: + raise TypeError("scatter requires input, index, src as positional args") + inp, idx, src = args[0], args[1], args[2] + + dim = None + if kwargs: + dim = kwargs.get("dim", None) + + if dim is None: + raise TypeError("scatter test did not provide 'dim' parameter") + + return torch.scatter(inp, dim, idx, src) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.scatter(*args, **kwargs) 
+ + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/scatter_add.py b/test/infinicore/ops/scatter_add.py new file mode 100644 index 000000000..02310430d --- /dev/null +++ b/test/infinicore/ops/scatter_add.py @@ -0,0 +1,124 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.tensor import TensorInitializer +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (shape, input_strides, index_strides, src_strides, dim) +_TEST_CASES_DATA = [ + ((6, 8), None, None, None, 1), + ((8, 4), (16, 1), None, None, 0), + ((5, 5), None, None, (10, 1), 1), + ((3, 7), None, (14, 1), None, 1), + ((10, 3), (30, 1), (30, 1), (30, 1), 0), + ((2, 16), None, None, None, 1), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 0, "rtol": 1e-2}, + infinicore.float32: {"atol": 0, "rtol": 1e-3}, + infinicore.bfloat16: {"atol": 0, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """Format: (shape, input_strides, index_strides, src_strides, dim)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape, in_strides, idx_strides, src_strides, dim = data + + in_supports_inplace = not is_broadcast(in_strides) + out_supports_inplace = not is_broadcast(src_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3}) + + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + # index tensor spec: 必须生成 [0, shape[dim]-1] 的 int64 值 + high = max(1, shape[dim]) + index_spec = TensorSpec.from_tensor( + shape, + idx_strides, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=high, + ) + src_spec = 
TensorSpec.from_tensor(shape, src_strides, dtype) + + # Out-of-place + test_cases.append( + TestCase( + inputs=[input_spec, dim, index_spec, src_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description=f"scatter_add - OUT_OF_PLACE", + ) + ) + + # In-place on input + if in_supports_inplace: + test_cases.append( + TestCase( + inputs=[input_spec, dim, index_spec, src_spec], + kwargs={}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description=f"scatter_add - INPLACE(input)", + ) + ) + + # Out as explicit tensor + if out_supports_inplace: + out_spec = TensorSpec.from_tensor(shape, None, dtype) + test_cases.append( + TestCase( + inputs=[input_spec, dim, index_spec, src_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description=f"scatter_add - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """ScatterAdd operator test with simplified implementation""" + + def __init__(self): + super().__init__("ScatterAdd") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.scatter_add(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.scatter_add(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/scatter_reduce.py b/test/infinicore/ops/scatter_reduce.py new file mode 100644 index 000000000..927a3d13f --- /dev/null +++ b/test/infinicore/ops/scatter_reduce.py @@ -0,0 +1,125 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.tensor import TensorInitializer +from framework.runner 
def parse_test_cases():
    """Build scatter_reduce test cases.

    Data format: (shape, input_strides, index_strides, src_strides, dim, reduce).
    For each row, emits an out-of-place case, an in-place-on-input case,
    and an explicit-out case across all tested dtypes.
    """
    test_cases = []

    for data in _TEST_CASES_DATA:
        shape, in_strides, idx_strides, src_strides, dim, reduce = data

        # Broadcast (aliasing) layouts cannot be written in place.
        in_supports_inplace = not is_broadcast(in_strides)
        out_supports_inplace = not is_broadcast(src_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})

            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            # Index tensor spec: must hold int64 values in [0, shape[dim] - 1].
            high = max(1, shape[dim])
            index_spec = TensorSpec.from_tensor(
                shape,
                idx_strides,
                infinicore.int64,
                init_mode=TensorInitializer.RANDINT,
                low=0,
                high=high,
            )
            src_spec = TensorSpec.from_tensor(shape, src_strides, dtype)

            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[input_spec, dim, index_spec, src_spec],
                    kwargs={"reduce": reduce},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="scatter_reduce - OUT_OF_PLACE",
                )
            )

            # In-place on input (result routed into inputs[0] via "out": 0)
            if in_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec, dim, index_spec, src_spec],
                        kwargs={"reduce": reduce, "out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="scatter_reduce - INPLACE(input)",
                    )
                )

            # Explicit out tensor
            if out_supports_inplace:
                out_spec = TensorSpec.from_tensor(shape, None, dtype)
                test_cases.append(
                    TestCase(
                        inputs=[input_spec, dim, index_spec, src_spec],
                        kwargs={"reduce": reduce},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="scatter_reduce - INPLACE(out)",
                    )
                )

    return test_cases
class OpTest(BaseOperatorTest):
    """Test driver for the Select operator (wraps ``torch.select``)."""

    def __init__(self):
        super().__init__("Select")

    def get_test_cases(self):
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """Reference implementation via PyTorch."""
        return torch.select(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.select(*args, **kwargs)
def parse_test_cases():
    """Build select_scatter test cases.

    Data format: (shape, input_strides, index_strides, src_strides, dim).
    Only the out-of-place form is exercised: torch.select_scatter takes
    (input, src, dim, index) and returns a new tensor.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape, in_strides, idx_strides, src_strides, dim = data

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})

            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            # src has the input's shape with `dim` removed; reuse the
            # configured strides only when their rank matches that shape.
            src_shape = tuple(s for i, s in enumerate(shape) if i != dim)
            src_strides_for_src = (
                src_strides
                if (src_strides and len(src_strides) == len(src_shape))
                else None
            )
            src_spec = TensorSpec.from_tensor(src_shape, src_strides_for_src, dtype)

            # A fixed, always-valid index along `dim`.
            index_val = 0 if shape[dim] <= 1 else (shape[dim] // 2)

            test_cases.append(
                TestCase(
                    inputs=[input_spec, src_spec, dim, index_val],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="select_scatter - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """selu(input)

    The functional SELU API has no ``out=``/in-place variant, so only the
    out-of-place form is tested.
    """
    test_cases = []

    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None

        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="SELU - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """Emit out-of-place, explicit-out and in-place cases for sgn."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            in_spec = TensorSpec.from_tensor(shape, strides, dtype)

            # Out-of-place: fresh result tensor.
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="sgn_out_of_place",
                )
            )

            # Explicit pre-allocated output.
            out_spec = TensorSpec.from_tensor(shape, None, dtype)
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="sgn_explicit_out",
                )
            )

            # In-place on the input tensor (result routed into inputs[0]).
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={"out": 0},
                    output_spec=None,
                    comparison_target=0,
                    tolerance=tol,
                    description="sgn_inplace_out0",
                )
            )

    return cases
def parse_test_cases():
    """Out-of-place, explicit-out and in-place test cases for sign."""
    cases = []

    def add(in_spec, tol, **kw):
        # Small helper so each variant below reads as one call.
        cases.append(TestCase(inputs=[in_spec], tolerance=tol, **kw))

    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            in_spec = TensorSpec.from_tensor(shape, strides, dtype)

            add(
                in_spec,
                tol,
                kwargs={},
                output_spec=None,
                comparison_target=None,
                description="sign_out_of_place",
            )
            add(
                in_spec,
                tol,
                kwargs={},
                output_spec=TensorSpec.from_tensor(shape, None, dtype),
                comparison_target="out",
                description="sign_explicit_out",
            )
            add(
                in_spec,
                tol,
                kwargs={"out": 0},
                output_spec=None,
                comparison_target=0,
                description="sign_inplace_out0",
            )

    return cases
simplified implementation""" + + def __init__(self): + super().__init__("Sign") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.sign(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.sign(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/signbit.py b/test/infinicore/ops/signbit.py new file mode 100644 index 000000000..751510ba7 --- /dev/null +++ b/test/infinicore/ops/signbit.py @@ -0,0 +1,89 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (in_shape, in_strides_or_None) +# signbit returns boolean-like tensor; tolerances are exact. 
def parse_test_cases():
    """signbit produces a boolean mask, so comparisons are exact.

    No in-place variant exists: an in-place signbit would have to change
    the input's dtype to bool, which PyTorch does not support.
    """
    cases = []
    tol = _TOLERANCE_MAP[infinicore.bool]

    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            in_spec = TensorSpec.from_tensor(shape, strides, dtype)

            # Out-of-place: result is a fresh boolean tensor.
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="signbit_out_of_place",
                )
            )

            # Explicit pre-allocated bool output.
            bool_out = TensorSpec.from_tensor(shape, None, infinicore.bool)
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={},
                    output_spec=bool_out,
                    comparison_target="out",
                    tolerance=tol,
                    description="signbit_explicit_out",
                )
            )

    return cases
class OpTest(BaseOperatorTest):
    """Test driver for the Sinh operator (wraps ``torch.sinh``)."""

    def __init__(self):
        super().__init__("Sinh")

    def get_test_cases(self):
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """Reference implementation via PyTorch."""
        return torch.sinh(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.sinh(*args, **kwargs)
def parse_test_cases():
    """Build slice_scatter test cases.

    Data format: (input_shape, input_strides, src_shape, src_strides,
    dim, start, end, step).
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        input_shape, in_strides, src_shape, src_strides, dim, start, end, step = data

        # Broadcast (aliasing) layouts cannot be written in place.
        in_supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})

            input_spec = TensorSpec.from_tensor(input_shape, in_strides, dtype)
            src_spec = TensorSpec.from_tensor(src_shape, src_strides, dtype)

            # Forward only the explicitly-configured slice options.
            slice_kwargs = {"dim": dim}
            if start is not None:
                slice_kwargs["start"] = start
            if end is not None:
                slice_kwargs["end"] = end
            if step is not None:
                slice_kwargs["step"] = step

            # Each TestCase gets its own kwargs dict so a mutation by the
            # runner (e.g. popping "out") cannot leak between cases.
            test_cases.append(
                TestCase(
                    inputs=[input_spec, src_spec],
                    kwargs=dict(slice_kwargs),
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="slice_scatter - OUT_OF_PLACE",
                )
            )

            # In-place on input: route the result into inputs[0] so that
            # comparison_target=0 observes the mutation (matches the
            # {"out": 0} convention used by the sibling op tests).
            if in_supports_inplace:
                inplace_kwargs = dict(slice_kwargs)
                inplace_kwargs["out"] = 0
                test_cases.append(
                    TestCase(
                        inputs=[input_spec, src_spec],
                        kwargs=inplace_kwargs,
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="slice_scatter - INPLACE(input)",
                    )
                )

    return test_cases
def parse_test_cases():
    """One out-of-place slogdet case per (matrix shape, dtype) combination."""
    default_tol = {"atol": 1e-5, "rtol": 1e-4}
    return [
        TestCase(
            inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
            kwargs={},
            output_spec=None,
            comparison_target=None,
            tolerance=_TOLERANCE_MAP.get(dtype, default_tol),
            description="slogdet - OUT_OF_PLACE",
        )
        for shape, strides in _TEST_CASES_DATA
        for dtype in _TENSOR_DTYPES
    ]
def parse_test_cases():
    """One out-of-place case per data row and dtype for smooth_l1_loss."""
    fallback_tol = {"atol": 1e-5, "rtol": 1e-4}
    test_cases = []

    for shape, tgt_shape, strides, beta, reduction in _TEST_CASES_DATA:
        # Only forward explicitly-configured options so the library
        # defaults (reduction='mean', beta=1.0) are exercised when unset.
        opts = {}
        if beta is not None:
            opts["beta"] = beta
        if reduction is not None:
            opts["reduction"] = reduction

        for dtype in _TENSOR_DTYPES:
            test_cases.append(
                TestCase(
                    inputs=[
                        TensorSpec.from_tensor(shape, strides, dtype),
                        TensorSpec.from_tensor(tgt_shape, None, dtype),
                    ],
                    kwargs=dict(opts),
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, fallback_tol),
                    description="smooth_l1_loss - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """One out-of-place case per data row and dtype for soft_margin_loss."""
    fallback_tol = {"atol": 1e-5, "rtol": 1e-4}
    test_cases = []

    for shape, tgt_shape, strides, reduction in _TEST_CASES_DATA:
        # Omit "reduction" entirely when unset so the default ('mean') is used.
        reduction_kwargs = {} if reduction is None else {"reduction": reduction}

        for dtype in _TENSOR_DTYPES:
            inp = TensorSpec.from_tensor(shape, strides, dtype)
            tgt = TensorSpec.from_tensor(tgt_shape, None, dtype)
            test_cases.append(
                TestCase(
                    inputs=[inp, tgt],
                    kwargs=dict(reduction_kwargs),
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, fallback_tol),
                    description="soft_margin_loss - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """softmin(input, dim=None, dtype=None)

    Data format: (shape, input_strides_or_None, dim_or_None); dim falls
    back to -1 when a row omits it.
    """
    test_cases = []

    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None
        dim = data[2] if len(data) > 2 else -1

        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"dim": dim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="Softmin - OUT_OF_PLACE",
                )
            )

    return test_cases
framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, beta_or_None, threshold_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None, None, None), + ((13, 4), (10, 1), 1.0, 20.0), + ((8, 8, 8), None, 0.5, 10.0), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """softplus(input, beta=1, threshold=20)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + beta = data[2] if len(data) > 2 else 1 + threshold = data[3] if len(data) > 3 else 20 + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {} + if beta is not None: + kwargs["beta"] = beta + if threshold is not None: + kwargs["threshold"] = threshold + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"Softplus - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Softplus operator test with simplified implementation""" + + def __init__(self): + super().__init__("Softplus") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.softplus(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.softplus(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/softshrink.py 
b/test/infinicore/ops/softshrink.py new file mode 100644 index 000000000..25a679d55 --- /dev/null +++ b/test/infinicore/ops/softshrink.py @@ -0,0 +1,84 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, lambd_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None, None), + ((13, 4), (10, 1), 0.5), + ((8, 8, 8), None, 1.0), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """softshrink(input, lambd=0.5)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + lambd = data[2] if len(data) > 2 else 0.5 + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + kwargs = {} + if lambd is not None: + kwargs["lambd"] = lambd + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"Softshrink - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Softshrink operator test with simplified implementation""" + + def __init__(self): + super().__init__("Softshrink") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.softshrink(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" 
+ # return infinicore.nn.functional.softshrink(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/softsign.py b/test/infinicore/ops/softsign.py new file mode 100644 index 000000000..da6d64b68 --- /dev/null +++ b/test/infinicore/ops/softsign.py @@ -0,0 +1,79 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) + +_TEST_CASES_DATA = [ + ((13, 4), None), + ((13, 4), (10, 1)), + ((8, 8, 8), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + """softsign(input)""" + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + in_strides = data[1] if len(data) > 1 else None + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + input_spec = TensorSpec.from_tensor(shape, in_strides, dtype) + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tolerance, + description=f"Softsign - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Softsign operator test with simplified implementation""" + + def __init__(self): + super().__init__("Softsign") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.nn.functional.softsign(*args, **kwargs) + + # def infinicore_operator(self, 
*args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.softsign(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/sort.py b/test/infinicore/ops/sort.py new file mode 100644 index 000000000..87ce55d2c --- /dev/null +++ b/test/infinicore/ops/sort.py @@ -0,0 +1,180 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# ============================================================================== +# Operator-specific configuration for sort +# ============================================================================== + +# Test cases format: (shape, dim, descending, input_strides, values_strides, indices_strides) +_TEST_CASES_DATA = [ + # Basic cases + ((13, 4), None, False, None, None, None), + ((13, 4), 0, False, None, None, None), + ((13, 4), 1, False, None, None, None), + ((13, 4), -1, False, None, None, None), + # Descending + ((13, 4), 1, True, None, None, None), + # Stable flag (PyTorch 1.8+ supports stable sort; include it to match 2.9 signature) + ((4, 5, 6), 1, False, None, None, None), + ((4, 5, 6), -1, True, None, None, None), + # 3D in-place cases + ((4, 5, 6), 1, False, None, (4, 1, 6), (4, 1, 6)), + ((4, 5, 6), -1, False, (30, 6, 1), (64, 1, 5), (64, 1, 5)), + # Strided inputs and outputs + ((13, 4), None, False, (4, 1), (12, 1), (24, 1)), + ((13, 4), 0, False, (1, 4), (64, 1), (1, 4)), + ((13, 4), 1, False, (1, 4), (64, 1), (1, 4)), +] + +# Tolerance configuration +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: 
{"atol": 1e-2, "rtol": 5e-2}, +} + +# Data types to test +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def calculate_output_shape(input_shape, dim): + """ + Calculate the output shape for sort (values and indices share the same shape) + """ + if dim is None: + # Default behavior: sort on last dimension + dim = len(input_shape) - 1 if len(input_shape) > 0 else 0 + # normalize negative dim + if dim < 0: + dim = dim + len(input_shape) + output_shape = list(input_shape) + return tuple(output_shape) + + +def parse_test_cases(): + """ + Parse sort test cases including both out-of-place and in-place (out=...) variants. + torch.sort(input, dim=-1, descending=False, stable=False, out=None) + returns (values, indices) + """ + test_cases = [] + + for data in _TEST_CASES_DATA: + shape = data[0] + dim = data[1] if len(data) > 1 else None + descending = data[2] if len(data) > 2 else False + input_strides = data[3] if len(data) > 3 else None + values_strides = data[4] if len(data) > 4 else None + indices_strides = data[5] if len(data) > 5 else None + + for dtype in _TENSOR_DTYPES: + tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + + # Create tensor specs + input_spec = TensorSpec.from_tensor(shape, input_strides, dtype) + + # Build description + description_parts = ["sort"] + if dim is not None: + description_parts.append(f"dim={dim}") + if descending: + description_parts.append("descending=True") + if input_strides is not None: + description_parts.append(f"input_strides={input_strides}") + + base_description = " - ".join(description_parts) + + # Common kwargs + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + kwargs["descending"] = descending + # stable is available in newer PyTorch; keep default False + + # ================================================================== + # Test Case 1: Out-of-place (return values) + # ================================================================== + test_cases.append( + 
TestCase( + inputs=[input_spec], + kwargs=kwargs, + output_spec=None, # return values will be compared + comparison_target=None, + tolerance=tolerance, + description=f"{base_description} - OUT_OF_PLACE", + output_count=2, # (values, indices) + ) + ) + + # ================================================================== + # Test Case 2: In-place with explicit output tensors (out=(values, indices)) + # ================================================================== + output_shape = calculate_output_shape(shape, dim) + + # Create output specs if strides provided; otherwise None + values_spec = TensorSpec.from_tensor(output_shape, values_strides, dtype) + # indices are integer type (long) in PyTorch + indices_spec = TensorSpec.from_tensor( + output_shape, indices_strides, infinicore.int64 + ) + + values_supports_inplace = not is_broadcast( + getattr(values_spec, "strides", None) + ) + indices_supports_inplace = not is_broadcast( + getattr(indices_spec, "strides", None) + ) + + if values_supports_inplace and indices_supports_inplace: + inplace_kwargs = kwargs.copy() + + test_cases.append( + TestCase( + inputs=[input_spec], + kwargs=inplace_kwargs, + output_specs=[values_spec, indices_spec], + comparison_target="out", + tolerance=tolerance, + description=f"{base_description} - INPLACE(out)", + output_count=2, + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Sort operator test with multiple outputs (values, indices)""" + + def __init__(self): + super().__init__("sort") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator( + self, x, dim=-1, descending=False, stable=False, out=None, **kwargs + ): + # forward to torch.sort; stable kwarg included for compatibility + return torch.sort(x, dim=dim, descending=descending, stable=stable, out=out) + + # def infinicore_operator(self, x, dim=-1, descending=False, stable=False, out=None, **kwargs): + # # assume infinicore provides a similar API + # return infinicore.sort(x, 
dim=dim, descending=descending, stable=stable, out=out) + + +def main(): + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/split.py b/test/infinicore/ops/split.py new file mode 100644 index 000000000..e7267c7c2 --- /dev/null +++ b/test/infinicore/ops/split.py @@ -0,0 +1,82 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, split_size_or_sections, dim_or_None) +# infinicore.split(tensor, split_size_or_sections, dim=0) + +_TEST_CASES_DATA = [ + ((8, 6), None, 2, 0), + ((4, 9), None, [3, 6], 1), + ((6, 12, 3), None, 4, 1), + ((10,), None, 5, 0), + ((3, 8), (24, 8), [2, 1], 0), + ((12, 4), None, 3, 0), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, sections, dim in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + inp = TensorSpec.from_tensor(shape, strides, dtype) + kwargs = {} + kwargs["split_size_or_sections"] = sections + if dim is not None: + kwargs["dim"] = dim + + test_cases.append( + TestCase( + inputs=[inp], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="split - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """split operator test with simplified implementation""" + + def __init__(self): + super().__init__("split") + + def get_test_cases(self): + return parse_test_cases() + + def 
torch_operator(self, *args, **kwargs): + # infinicore.split signature differs; test runner will map kwargs accordingly + return torch.split(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.split(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/sqrt.py b/test/infinicore/ops/sqrt.py new file mode 100644 index 000000000..a11268a36 --- /dev/null +++ b/test/infinicore/ops/sqrt.py @@ -0,0 +1,98 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.sqrt(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="sqrt_out", + ) + ) + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="sqrt_out_explicit", + ) + ) + if not 
is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="sqrt_inplace", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Sqrt operator test with simplified implementation""" + + def __init__(self): + super().__init__("Sqrt") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.sqrt(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.sqrt(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/square.py b/test/infinicore/ops/square.py new file mode 100644 index 000000000..168a6c0dc --- /dev/null +++ b/test/infinicore/ops/square.py @@ -0,0 +1,100 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None) +# infinicore.square(input) + +_TEST_CASES_DATA = [ + ((2, 3), None), + ((1, 4, 8), (32, 8, 1)), + ((3, 2, 5, 7), None), + ((2, 1, 16), None), + ((1, 8, 9, 11), (792, 99, 11, 1)), + ((2, 6, 10), None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + cases = [] + for shape, strides in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP[dtype] + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, 
+ output_spec=None, + comparison_target=None, + tolerance=tol, + description="square_out_of_place", + ) + ) + + out_spec = TensorSpec.from_tensor(shape, None, dtype) + cases.append( + TestCase( + inputs=[in_spec], + kwargs={}, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="square_explicit_out", + ) + ) + + if not is_broadcast(in_spec.strides): + cases.append( + TestCase( + inputs=[in_spec], + kwargs={"out": 0}, + output_spec=None, + comparison_target=0, + tolerance=tol, + description="square_inplace_input0", + ) + ) + + return cases + + +class OpTest(BaseOperatorTest): + """Square operator test with simplified implementation""" + + def __init__(self): + super().__init__("Square") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.square(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.square(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/std.py b/test/infinicore/ops/std.py new file mode 100644 index 000000000..7d1308f5f --- /dev/null +++ b/test/infinicore/ops/std.py @@ -0,0 +1,123 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: +# (in_shape, in_strides_or_None, dim_or_None, correction_or_None, keepdim_or_None, out_strides_or_None) +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None, None), + ((8, 8), (16, 1), 1, 1, False, None), + ((2, 3, 4), None, 0, 0, True, (48, 6, 1)), + ((4, 8), None, 0, 1, False, None), + ((16, 64), (128, 1), None, 0, None, 
None), + ((4, 5, 6), (60, 12, 2), 2, 1, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, correction, keepdim, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if correction is not None: + kwargs["correction"] = correction + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Std - OUT_OF_PLACE", + ) + ) + + out_shape = _compute_out_shape(shape, dim, keepdim) + out_spec = TensorSpec.from_tensor(out_shape, out_strides, dtype) + if out_supports_inplace: + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=out_spec, + comparison_target="out", + tolerance=tol, + description="Std - INPLACE(out)", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Std operator test with simplified implementation""" + + def __init__(self): + super().__init__("Std") + + def 
get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.std(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.std(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/std_mean.py b/test/infinicore/ops/std_mean.py new file mode 100644 index 000000000..dc4aa8308 --- /dev/null +++ b/test/infinicore/ops/std_mean.py @@ -0,0 +1,110 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, unbiased_or_None, out_strides_or_None) +# std_mean returns (std, mean) along dim(s) or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, True, None), + ((2, 3, 4), None, 0, False, (0, 1, 1)), + ((4, 8), None, 0, True, None), + ((16, 64), (128, 1), None, False, None), + ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def _compute_out_shape(shape, dim, keepdim): + if dim is None: + return () + if isinstance(dim, tuple): + dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim]) + if keepdim: + out = list(shape) + for d in dims: + out[d] = 1 + return tuple(out) + else: + return tuple(s for i, s in enumerate(shape) if i not in dims) + else: + d = dim if dim >= 0 else len(shape) + dim + if keepdim: + out = list(shape) + out[d] = 1 + return tuple(out) + else: + 
return tuple(s for i, s in enumerate(shape) if i != d) + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, unbiased, out_strides = data + out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if unbiased is not None: + kwargs["unbiased"] = unbiased + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="StdMean - OUT_OF_PLACE", + ) + ) + + # Note: std_mean returns a tuple (std, mean) - PyTorch does not support explicit out for tuple returns + + return test_cases + + +class OpTest(BaseOperatorTest): + """StdMean operator test with simplified implementation""" + + def __init__(self): + super().__init__("StdMean") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.std_mean(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.std_mean(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/sum.py b/test/infinicore/ops/sum.py new file mode 100644 index 000000000..4653e899a --- /dev/null +++ b/test/infinicore/ops/sum.py @@ -0,0 +1,88 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, 
dtype_or_None) +# sum computes the sum along dim(s) or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, False, None), + ((2, 3, 4), None, 0, True, None), + ((1, 8), None, (0,), False, None), + ((16, 64), (128, 1), None, None, None), + ((4, 5, 6), (60, 12, 2), 2, True, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, keepdim, dtype_param = data + # out_supports_inplace = not is_broadcast(out_strides) + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if keepdim is not None: + kwargs["keepdim"] = keepdim + if dtype_param is not None: + kwargs["dtype"] = dtype_param + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Sum - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Sum operator test with simplified implementation""" + + def __init__(self): + super().__init__("Sum") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.sum(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.sum(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/svd.py b/test/infinicore/ops/svd.py new file mode 100644 index 000000000..ea4b6d253 --- /dev/null +++ b/test/infinicore/ops/svd.py @@ -0,0 
+1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (matrix_shape, strides_or_None, compute_uv_or_None) +# infinicore.svd(a, some=True, compute_uv=True) — different return shapes depending on flags + +_TEST_CASES_DATA = [ + ((3, 3), None, True), + ((4, 2), None, True), + ((6, 6), (360, 60), True), + ((2, 4), None, False), + ((8, 8), None, True), + ((1, 1), None, True), +] + +_TOLERANCE_MAP = { + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, compute_uv in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + spec = TensorSpec.from_tensor(shape, strides, dtype) + kwargs = {} + if compute_uv is not None: + kwargs["compute_uv"] = compute_uv + + test_cases.append( + TestCase( + inputs=[spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="svd - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """svd operator test with simplified implementation""" + + def __init__(self): + super().__init__("svd") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.svd(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.svd(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/take.py b/test/infinicore/ops/take.py new file mode 100644 index 000000000..11db98f3e --- /dev/null +++ 
b/test/infinicore/ops/take.py @@ -0,0 +1,83 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.tensor import TensorInitializer +from framework.runner import GenericTestRunner + +# Test cases format: (input_shape, input_strides_or_None, indices_shape) +_TEST_CASES_DATA = [ + ((3, 4), None, (6,)), + ((5,), None, (3,)), + ((2, 3, 4), (24, 8, 2), (4,)), + ((1, 6), None, (2,)), + ((4, 4), None, (8,)), + ((2, 3, 2), None, (3,)), +] + +_TOLERANCE_MAP = {infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}} +_TENSOR_DTYPES = [infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for shape, strides, idx_shape in _TEST_CASES_DATA: + input_spec = TensorSpec.from_tensor(shape, strides, infinicore.float32) + + # indices for infinicore.take are flattened indices in [0, input.numel()) + prod = 1 + for s in shape: + prod *= s + + indices_spec = TensorSpec.from_tensor( + idx_shape, + None, + infinicore.int64, + init_mode=TensorInitializer.RANDINT, + low=0, + high=max(1, prod), + ) + + test_cases.append( + TestCase( + inputs=[input_spec, indices_spec], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=_TOLERANCE_MAP[infinicore.float32], + description=f"take - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Take operator test with simplified implementation""" + + def __init__(self): + super().__init__("Take") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.take(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.take(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff 
def parse_test_cases():
    """Build tan test cases: out-of-place, explicit `out`, and in-place.

    Returns a list of TestCase objects covering every (shape, strides)
    row in _TEST_CASES_DATA for each dtype in _TENSOR_DTYPES.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None

        # Writing back into the input is only valid when the strides do
        # not describe a broadcast view.
        supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            out_spec = TensorSpec.from_tensor(shape, None, dtype)

            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="tan - OUT_OF_PLACE",
                )
            )

            # Explicit out tensor.
            # BUG FIX: this case previously passed kwargs=None; every
            # sibling op test passes an empty dict, and None breaks
            # `**kwargs` expansion in the runner.
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="tan - INPLACE(out)",
                )
            )

            # In-place on the input tensor (out aliases input 0).
            if supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="tan - INPLACE(input)",
                    )
                )

    return test_cases
def parse_test_cases():
    """Build test cases for threshold(input, threshold, value, inplace=False)."""
    test_cases = []

    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None
        thr = data[2] if len(data) > 2 else 0.0
        val = data[3] if len(data) > 3 else 0.0

        # inplace=True writes into the input tensor, which is only valid
        # when the strides do not describe a broadcast view.
        input_supports_inplace = not is_broadcast(in_strides)

        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            # Out-of-place (plain string: descriptions have no placeholders)
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"threshold": thr, "value": val},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="Threshold - OUT_OF_PLACE",
                )
            )

            # In-place variant, compared against input 0.
            if input_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"threshold": thr, "value": val, "inplace": True},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="Threshold - INPLACE",
                    )
                )

    return test_cases
def parse_test_cases():
    """Build out-of-place test cases for topk(input, k, dim, largest, sorted).

    topk returns a (values, indices) pair, and the harness supports only a
    single TensorSpec as `output_spec`, so no explicit-`out`/in-place
    variants are generated here.
    """
    test_cases = []
    for shape, in_strides, k, dim, largest, sorted_ in _TEST_CASES_DATA:
        # NOTE: the unused `out_supports_inplace` computation was removed;
        # no in-place case is ever built for topk (see docstring).
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"k": k, "dim": dim, "largest": largest, "sorted": sorted_},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="topk - OUT_OF_PLACE",
                )
            )

    return test_cases
class OpTest(BaseOperatorTest):
    """Transpose operator test (PyTorch reference implementation only)."""

    def __init__(self):
        super().__init__("Transpose")

    def get_test_cases(self):
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """Reference op; kwargs carry dim0/dim1 from the test case."""
        # Removed commented-out dead code that misleadingly called
        # infinicore.transpose inside the *torch* reference path.
        return torch.transpose(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.transpose(*args, **kwargs)
def parse_test_cases():
    """Build out-of-place test cases for F.triplet_margin_loss."""
    test_cases = []
    for a_shape, p_shape, n_shape, strides, margin, p, eps, swap in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            anchor_spec = TensorSpec.from_tensor(a_shape, strides, dtype)
            positive_spec = TensorSpec.from_tensor(p_shape, strides, dtype)
            negative_spec = TensorSpec.from_tensor(n_shape, strides, dtype)

            # Forward only options the data row actually sets; None falls
            # back to the framework defaults.
            optional = {"margin": margin, "p": p, "eps": eps, "swap": swap}
            kwargs = {name: value for name, value in optional.items() if value is not None}

            test_cases.append(
                TestCase(
                    inputs=[anchor_spec, positive_spec, negative_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="triplet_margin_loss - OUT_OF_PLACE",
                )
            )

    return test_cases
**kwargs): + return torch.nn.functional.triplet_margin_loss(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.nn.functional.triplet_margin_loss(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/triplet_margin_with_distance_loss.py b/test/infinicore/ops/triplet_margin_with_distance_loss.py new file mode 100644 index 000000000..16655372f --- /dev/null +++ b/test/infinicore/ops/triplet_margin_with_distance_loss.py @@ -0,0 +1,86 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (anchor_shape, positive_shape, negative_shape, strides_or_None, margin_or_None, swap_or_None) +# infinicore.nn.functional.triplet_margin_with_distance_loss(anchor, positive, negative, distance_function=None, margin=1.0, swap=False, reduction='mean') + +_TEST_CASES_DATA = [ + ((4, 3), (4, 3), (4, 3), None, None, None), + ((8, 5), (8, 5), (8, 5), (40, 5), 1.0, False), + ((1, 10), (1, 10), (1, 10), None, 0.5, None), + ((16, 12), (16, 12), (16, 12), None, 2.0, True), + ((3, 7), (3, 7), (3, 7), None, None, None), + ((2, 4), (2, 4), (2, 4), None, None, None), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-2, "rtol": 1e-1}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for a_shape, p_shape, n_shape, strides, margin, swap in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a 
def parse_test_cases():
    """Build trunc test cases: out-of-place, explicit `out`, and in-place."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            in_spec = TensorSpec.from_tensor(shape, strides, dtype)

            # Out-of-place
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="trunc_out",
                )
            )

            # Explicit out tensor.
            # NOTE(review): the out spec reuses the input strides, while
            # sibling op tests (abs) pass None here — confirm intent.
            out_spec = TensorSpec.from_tensor(shape, strides, dtype)
            cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="trunc_explicit_out",
                )
            )

            # In-place on the input when strides do not broadcast.
            # BUG FIX: kwargs was {} although comparison_target=0; the
            # sibling op tests (abs, tan) request in-place on input 0 via
            # kwargs={"out": 0}, so do the same here.
            if not is_broadcast(strides):
                cases.append(
                    TestCase(
                        inputs=[in_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="trunc_inplace",
                    )
                )

    return cases
def parse_test_cases():
    """Build out-of-place test cases for F.unfold (sliding local blocks)."""
    cases = []
    for in_shape, in_strides, kernel_size, dilation, padding, stride in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            cases.append(
                TestCase(
                    inputs=[spec],
                    kwargs={
                        "kernel_size": kernel_size,
                        "dilation": dilation,
                        "padding": padding,
                        "stride": stride,
                    },
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype),
                    description="Unfold - OUT_OF_PLACE",
                )
            )

    return cases
def parse_test_cases():
    """Build out-of-place test cases for torch.unique."""
    test_cases = []
    for shape, strides, sorted_flag, return_inverse, return_counts in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            spec = TensorSpec.from_tensor(shape, strides, dtype)

            # `sorted` is forwarded whenever set (even False); the two
            # return_* flags are forwarded only when truthy, matching the
            # framework defaults otherwise.
            kwargs = {}
            if sorted_flag is not None:
                kwargs["sorted"] = sorted_flag
            kwargs.update(
                {
                    name: True
                    for name, flag in (
                        ("return_inverse", return_inverse),
                        ("return_counts", return_counts),
                    )
                    if flag
                }
            )

            test_cases.append(
                TestCase(
                    inputs=[spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}),
                    description="Unique - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """Build out-of-place interpolate/upsample test cases.

    Each data row supplies either an explicit output `size` (tuple) or a
    scalar `scale_factor`, plus an optional align_corners flag.
    """
    test_cases = []
    for shape, size_or_scale, mode, align, in_strides in _TEST_CASES_DATA:
        # NOTE: the unused `supports_inplace` computation was removed; no
        # in-place case is ever generated for interpolate.
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            in_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            kwargs = {"mode": mode}
            # Tuples are explicit output sizes; scalars are scale factors.
            if isinstance(size_or_scale, tuple):
                kwargs["size"] = size_or_scale
            else:
                kwargs["scale_factor"] = size_or_scale
            # align_corners is forwarded only when the row sets it (it is
            # only accepted by the linear-family modes).
            if align is not None:
                kwargs["align_corners"] = align

            test_cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="upsample - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """Build out-of-place bilinear-interpolation test cases."""
    test_cases = []
    for shape, size_or_scale, align, in_strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            in_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            kwargs = {"mode": "bilinear"}
            # Tuples are explicit output sizes; scalars are scale factors.
            if isinstance(size_or_scale, tuple):
                kwargs["size"] = size_or_scale
            else:
                kwargs["scale_factor"] = size_or_scale
            if align is not None:
                kwargs["align_corners"] = align

            # Plain string description: no placeholders, so no f-prefix.
            test_cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="upsample_bilinear - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """Build out-of-place nearest-neighbor interpolation test cases."""
    test_cases = []
    for shape, size_or_scale, in_strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            in_spec = TensorSpec.from_tensor(shape, in_strides, dtype)

            kwargs = {"mode": "nearest"}
            # Tuples are explicit output sizes; scalars are scale factors.
            if isinstance(size_or_scale, tuple):
                kwargs["size"] = size_or_scale
            else:
                kwargs["scale_factor"] = size_or_scale

            # Plain string description: no placeholders, so no f-prefix.
            test_cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="upsample_nearest - OUT_OF_PLACE",
                )
            )

    return test_cases
def parse_test_cases():
    """Build out-of-place test cases for torch.vander."""
    test_cases = []
    for shape, strides, n_cols in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)

            # N=None means "use the framework default number of columns".
            kwargs = {} if n_cols is None else {"N": n_cols}

            # Plain string description: no placeholders, so no f-prefix.
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype),
                    description="vander - OUT_OF_PLACE",
                )
            )

    return test_cases
+def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/var.py b/test/infinicore/ops/var.py new file mode 100644 index 000000000..2d3106d89 --- /dev/null +++ b/test/infinicore/ops/var.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, unbiased_or_None, keepdim_or_None) +# var computes variance along dim(s) or overall + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, True, False), + ((2, 3, 4), None, 0, False, True), + ((4, 8), None, 0, True, False), + ((16, 64), (128, 1), None, False, None), + ((4, 5, 6), (60, 12, 2), 2, True, True), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, unbiased, keepdim = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if unbiased is not None: + kwargs["unbiased"] = unbiased + if keepdim is not None: + kwargs["keepdim"] = keepdim + + test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="Var - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """Var operator test with simplified implementation""" + + def __init__(self): + 
super().__init__("Var") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.var(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.var(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/var_mean.py b/test/infinicore/ops/var_mean.py new file mode 100644 index 000000000..c249fba09 --- /dev/null +++ b/test/infinicore/ops/var_mean.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner +from framework.utils import is_broadcast + +# Test cases format: (in_shape, in_strides_or_None, dim_or_None, unbiased_or_None, keepdim_or_None) +# var_mean returns (var, mean) + +_TEST_CASES_DATA = [ + ((8, 8), None, None, None, None), + ((8, 8), (16, 1), 1, True, False), + ((2, 3, 4), None, 0, False, True), + ((4, 8), None, 0, True, False), + ((16, 64), (128, 1), None, False, None), + ((4, 5, 6), (60, 12, 2), 2, True, True), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for data in _TEST_CASES_DATA: + shape, strides, dim, unbiased, keepdim = data + + for dtype in _TENSOR_DTYPES: + tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}) + in_spec = TensorSpec.from_tensor(shape, strides, dtype) + + kwargs = {} + if dim is not None: + kwargs["dim"] = dim + if unbiased is not None: + kwargs["unbiased"] = unbiased + if keepdim is not None: + kwargs["keepdim"] = keepdim + + 
test_cases.append( + TestCase( + inputs=[in_spec], + kwargs=kwargs, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="VarMean - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """VarMean operator test with simplified implementation""" + + def __init__(self): + super().__init__("VarMean") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.var_mean(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.var_mean(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/vdot.py b/test/infinicore/ops/vdot.py new file mode 100644 index 000000000..7d26735d7 --- /dev/null +++ b/test/infinicore/ops/vdot.py @@ -0,0 +1,78 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (vec1_shape, vec2_shape, vec1_strides_or_None, vec2_strides_or_None) +# vdot(a, b) — conjugate dot product for 1-D vectors + +_TEST_CASES_DATA = [ + ((3,), (3,), None, None), + ((8,), (8,), (0,), None), + ((1,), (1,), None, None), + ((16,), (16,), None, (256,)), + ((5,), (5,), None, None), + ((32,), (32,), (64,), (64,)), +] + +_TOLERANCE_MAP = { + infinicore.float16: {"atol": 1e-3, "rtol": 1e-2}, + infinicore.float32: {"atol": 1e-5, "rtol": 1e-4}, + infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2}, +} + +_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32] + + +def parse_test_cases(): + test_cases = [] + for s1, s2, st1, st2 in _TEST_CASES_DATA: + for dtype in _TENSOR_DTYPES: + tol = 
_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4}) + a = TensorSpec.from_tensor(s1, st1, dtype) + b = TensorSpec.from_tensor(s2, st2, dtype) + + test_cases.append( + TestCase( + inputs=[a, b], + kwargs={}, + output_spec=None, + comparison_target=None, + tolerance=tol, + description="vdot - OUT_OF_PLACE", + ) + ) + + return test_cases + + +class OpTest(BaseOperatorTest): + """vdot operator test with simplified implementation""" + + def __init__(self): + super().__init__("vdot") + + def get_test_cases(self): + return parse_test_cases() + + def torch_operator(self, *args, **kwargs): + return torch.vdot(*args, **kwargs) + + # def infinicore_operator(self, *args, **kwargs): + # """InfiniCore implementation (operator not yet available).""" + # return infinicore.vdot(*args, **kwargs) + + +def main(): + """Main entry point""" + runner = GenericTestRunner(OpTest) + runner.run_and_exit() + + +if __name__ == "__main__": + main() diff --git a/test/infinicore/ops/where.py b/test/infinicore/ops/where.py new file mode 100644 index 000000000..ff07d791a --- /dev/null +++ b/test/infinicore/ops/where.py @@ -0,0 +1,87 @@ +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) + +import torch +import infinicore +from framework.base import BaseOperatorTest, TensorSpec, TestCase +from framework.runner import GenericTestRunner + +# Test cases format: (condition_shape, cond_strides_or_None, x_shape_or_None, y_shape_or_None) +# infinicore.where can be used as where(condition, x, y) or where(condition) returning indices. 
+_TEST_CASES_DATA = [
+    ((3, 4), None, (3, 4), (3, 4)),
+    ((5,), None, (5,), (5,)),
+    ((2, 2, 3), (12, 6, 2), None, None),
+    ((1, 6), None, (1, 6), (1, 6)),
+    ((4, 4), None, None, None),
+    ((2, 3, 2), None, (2, 3, 2), (2, 3, 2)),
+]
+
+_TOLERANCE_MAP = {
+    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
+    infinicore.int64: {"atol": 0, "rtol": 0},
+}
+_TENSOR_DTYPES = [infinicore.float32]
+
+
+def parse_test_cases():
+    test_cases = []
+    for cond_shape, cond_strides, x_shape, y_shape in _TEST_CASES_DATA:
+        cond_spec = TensorSpec.from_tensor(cond_shape, cond_strides, infinicore.bool)
+
+        if x_shape is None or y_shape is None:
+            # where(condition) -> returns indices
+            test_cases.append(
+                TestCase(
+                    inputs=[cond_spec],
+                    kwargs={},
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=_TOLERANCE_MAP[infinicore.int64],
+                    description=f"where - condition_only shape={cond_shape}",
+                )
+            )
+        else:
+            x_spec = TensorSpec.from_tensor(x_shape, None, infinicore.float32)
+            y_spec = TensorSpec.from_tensor(y_shape, None, infinicore.float32)
+            test_cases.append(
+                TestCase(
+                    inputs=[cond_spec, x_spec, y_spec],
+                    kwargs={},
+                    output_spec=None,
+                    comparison_target=None,
+                    tolerance=_TOLERANCE_MAP[infinicore.float32],
+                    description=f"where - select shape={cond_shape}",
+                )
+            )
+
+    return test_cases
+
+
+class OpTest(BaseOperatorTest):
+    """Where operator test with simplified implementation"""
+
+    def __init__(self):
+        super().__init__("Where")
+
+    def get_test_cases(self):
+        return parse_test_cases()
+
+    def torch_operator(self, *args, **kwargs):
+        return torch.where(*args, **kwargs)
+
+    # def infinicore_operator(self, *args, **kwargs):
+    #     """InfiniCore implementation (operator not yet available)."""
+    #     return infinicore.where(*args, **kwargs)
+
+
+def main():
+    """Main entry point"""
+    runner = GenericTestRunner(OpTest)
+    runner.run_and_exit()
+
+
+if __name__ == "__main__":
+    main()