Commit c2bab64

pow backward test is added
1 parent f9f5878 commit c2bab64

File tree

2 files changed: +174, -0 lines changed


tests/Backward/test_pow_backward.c

Lines changed: 171 additions & 0 deletions
@@ -0,0 +1,171 @@
#include "../../include/cten.h"
#include "../test_utils.h"
#include "../csv_reporter.h"
#include "../test_config.h"
#include <stdio.h>
#include <math.h>

void test_pow_backward() {
    const char* op_name = "pow_backward";
    PoolId pool_id = 0;
    cten_begin_malloc(pool_id);

    // Test Case 1: Simple element-wise vector power
    {
        const char* tc_name = "pow_vectors_backward";
        TensorShape shape = {2};
        float x_data[] = {3.3774f, 0.6125f};
        float y_data[] = {1.4626f, 1.2812f};

        // z = x^y = [3.3774^1.4626, 0.6125^1.2812] = [5.9307, 0.5336]
        // loss = sum(z) = 6.4643
        float exp_grad_x[] = {2.568312f, 1.116224f};
        float exp_grad_y[] = {7.218272f, -0.261589f};

        Tensor x = create_test_tensor(shape, x_data, true);
        Tensor y = create_test_tensor(shape, y_data, true);

        Tensor z = Tensor_pow(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 2: Broadcasting a vector base with a scalar exponent
    {
        const char* tc_name = "pow_broadcast_vec_scalar_backward";
        TensorShape x_shape = {2};
        TensorShape y_shape = {1};
        float x_data[] = {3.8141f, 3.5451f};
        float y_data[] = {3.6226f};

        // z = [3.8141^3.6226, 3.5451^3.6226] = [127.6882, 97.9680]
        // loss = sum(z) = 225.6562
        float exp_grad_x[] = {121.277222f, 100.109677f};
        float exp_grad_y[] = {294.921848f};

        Tensor x = create_test_tensor(x_shape, x_data, true);
        Tensor y = create_test_tensor(y_shape, y_data, true);

        Tensor z = Tensor_pow(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(x_shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(y_shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 3: Broadcasting a scalar base with a vector exponent
    {
        const char* tc_name = "pow_broadcast_scalar_vec_backward";
        TensorShape x_shape = {1};
        TensorShape y_shape = {2};
        float x_data[] = {0.8912f};
        float y_data[] = {1.9767f, 0.6043f};

        // z = [0.8912^1.9767, 0.8912^0.6043] = [0.7964, 0.9328]
        // loss = sum(z) = 1.7291
        float exp_grad_x[] = {2.39885f};
        float exp_grad_y[] = {-0.091731f, -0.107441f};

        Tensor x = create_test_tensor(x_shape, x_data, true);
        Tensor y = create_test_tensor(y_shape, y_data, true);

        Tensor z = Tensor_pow(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(x_shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(y_shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 4: Edge cases with x = 0 and x < 0
    {
        const char* tc_name = "pow_edge_cases_backward";
        TensorShape shape = {2};
        float x_data[] = {0.0f, -2.0f};
        float y_data[] = {2.0f, 3.0f}; // integer exponents, so (-2)^3 is well-defined

        // z = [0^2, (-2)^3] = [0.0, -8.0]
        // loss = sum(z) = -8.0
        // d(loss)/dx:
        //   for x=0, y=2: grad is 0 (special case in GradFn_pow)
        //   for x=-2, y=3: y*x^(y-1) = 3*(-2)^2 = 12.0
        float exp_grad_x[] = {0.0f, 12.0f};
        // d(loss)/dy:
        //   for x=0: grad is 0 (special case in GradFn_pow)
        //   for x=-2: grad is 0 (ln(-2) is undefined, special case)
        float exp_grad_y[] = {0.0f, 0.0f};

        Tensor x = create_test_tensor(shape, x_data, true);
        Tensor y = create_test_tensor(shape, y_data, true);

        Tensor z = Tensor_pow(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 5: Complex computation graph (z = (a^b) * c)
    {
        const char* tc_name = "pow_complex_graph_backward";
        TensorShape shape = {1};
        float a_data[] = {1.4839f};
        float b_data[] = {2.2687f};
        float c_data[] = {0.6194f};

        // Let d = a^b. Then z = d * c.
        // Forward: d = 1.4839^2.2687 = 2.4483; z = 2.4483 * 0.6194 = 1.5164
        // Backward pass (upstream grad for d is c = 0.6194):
        // dz/da = (dz/dd) * (dd/da) = c * (b*a^(b-1)) = 0.6194 * (2.2687*1.4839^1.2687)
        float exp_grad_a[] = {2.318512f};
        // dz/db = (dz/dd) * (dd/db) = c * (a^b*ln(a)) = 0.6194 * (2.4483*ln(1.4839))
        float exp_grad_b[] = {0.598515f};
        // dz/dc = d = a^b = 2.4483
        float exp_grad_c[] = {2.448306f};

        Tensor a = create_test_tensor(shape, a_data, true);
        Tensor b = create_test_tensor(shape, b_data, true);
        Tensor c = create_test_tensor(shape, c_data, true);

        Tensor d = Tensor_pow(a, b);
        Tensor z = Tensor_mul(d, c);

        Tensor grad_dummy = {0};
        Tensor_backward(z, grad_dummy);

        Tensor expected_grad_a_tensor = create_test_tensor(shape, exp_grad_a, false);
        Tensor expected_grad_b_tensor = create_test_tensor(shape, exp_grad_b, false);
        Tensor expected_grad_c_tensor = create_test_tensor(shape, exp_grad_c, false);

        compare_tensors(&a.node->grad, &expected_grad_a_tensor, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&b.node->grad, &expected_grad_b_tensor, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
        compare_tensors(&c.node->grad, &expected_grad_c_tensor, op_name, tc_name, 3, TEST_FLOAT_TOLERANCE);
    }

    cten_free(pool_id);
}
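All five cases check the same analytic derivatives of z = x^y, namely dz/dx = y*x^(y-1) and dz/dy = x^y*ln(x), with the x = 0 and x < 0 branches zeroed out as described in the Test Case 4 comments about GradFn_pow. As a quick way to reproduce the hardcoded expected constants, here is a minimal standalone check for Test Case 1 using only math.h; it is illustrative only, not part of the commit, and assumes nothing about cten's internals:

#include <math.h>
#include <stdio.h>

int main(void) {
    const float x[] = {3.3774f, 0.6125f};
    const float y[] = {1.4626f, 1.2812f};
    for (int i = 0; i < 2; i++) {
        float z = powf(x[i], y[i]);                /* forward value z_i = x_i^y_i */
        float gx = y[i] * powf(x[i], y[i] - 1.0f); /* d(sum z)/dx_i = y*x^(y-1) */
        float gy = z * logf(x[i]);                 /* d(sum z)/dy_i = x^y*ln(x) */
        printf("z=%.4f  grad_x=%.6f  grad_y=%.6f\n", z, gx, gy);
    }
    return 0;
}

Compiled with any C compiler (linking the math library, e.g. -lm), this prints grad_x values of 2.568312 and 1.116224 and grad_y values of 7.218272 and -0.261589, matching exp_grad_x and exp_grad_y above to within float precision.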

tests/cten_tests.c

Lines changed: 3 additions & 0 deletions
@@ -40,6 +40,7 @@ void test_max_backward();
 void test_sum_backward();
 void test_mean_backward();
 void test_div_backward();
+void test_pow_backward();

 int main() {
     printf("Starting cTensor Test Suite on %s...\n", PLATFORM_NAME);
@@ -143,6 +144,8 @@ int main() {
     test_div_backward();
     printf("Div backward tests finished.\n");

+    test_pow_backward();
+    printf("Pow backward tests finished.\n");
     // other tests

     csv_reporter_close();
