#include "../../include/cten.h"
#include "../test_utils.h"
#include "../csv_reporter.h"
#include "../test_config.h"
#include <stdio.h>

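/* Backward-pass tests for Tensor_div: element-wise division, vector/scalar
 * broadcasting in both directions, and a composite graph (a / b) * c. */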
void test_div_backward() {
    const char* op_name = "div_backward";
    PoolId pool_id = 0;
    cten_begin_malloc(pool_id);

    // Test Case 1: Simple element-wise vector division
    {
        const char* tc_name = "div_vectors_backward";
        TensorShape shape = {3};
        float x_data[] = {6.7548f, 3.4753f, -7.6282f};
        float y_data[] = {4.5687f, 2.6877f, -1.8746f};

        // z = x / y = [1.4785, 1.2930, 4.0692]
        // loss = sum(z) = 6.8407
        float exp_grad_x[] = {0.218881f, 0.372065f, -0.533447f};
        float exp_grad_y[] = {-0.323614f, -0.481095f, 2.170725f};
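        // Derivation sketch: the expected values are assumed to follow the
        // standard quotient-rule gradients, applied element-wise:
        //   dL/dx_i = 1 / y_i         e.g.  1 / 4.5687         =  0.218881
        //   dL/dy_i = -x_i / y_i^2    e.g. -6.7548 / 4.5687^2  = -0.323614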

        Tensor x = create_test_tensor(shape, x_data, true);
        Tensor y = create_test_tensor(shape, y_data, true);

        Tensor z = Tensor_div(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 2: Broadcasting a vector by a scalar
    {
        const char* tc_name = "div_broadcast_vec_scalar_backward";
        TensorShape x_shape = {2};
        TensorShape y_shape = {1};
        float x_data[] = {1.2388f, -6.849f};
        float y_data[] = {-1.8818f};

        // z = x / y = [-0.6583, 3.6396]
        // loss = sum(z) = 2.9813
        float exp_grad_x[] = {-0.531406f, -0.531406f};
        float exp_grad_y[] = {1.584278f};
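        // The scalar y is broadcast across x, so its gradient accumulates the
        // per-element contributions (assuming the backward pass sums over the
        // broadcast dimension):
        //   dL/dx_i = 1 / y = 1 / (-1.8818) = -0.531406
        //   dL/dy   = sum_i(-x_i / y^2) = -(1.2388 - 6.849) / 1.8818^2 = 1.584278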

        Tensor x = create_test_tensor(x_shape, x_data, true);
        Tensor y = create_test_tensor(y_shape, y_data, true);

        Tensor z = Tensor_div(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(x_shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(y_shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 3: Broadcasting a scalar by a vector
    {
        const char* tc_name = "div_broadcast_scalar_vec_backward";
        TensorShape x_shape = {1};
        TensorShape y_shape = {3};
        float x_data[] = {8.2849f};
        float y_data[] = {-4.2233f, 2.361f, 4.8289f};

        // z = x / y = [-1.9617, 3.5091, 1.7157]
        // loss = sum(z) = 3.2631
        float exp_grad_x[] = {0.393854f};
        float exp_grad_y[] = {-0.464498f, -1.486262f, -0.355296f};
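        // Here the scalar numerator x is broadcast, so its gradient accumulates
        // over all elements of y (again assuming a sum-reduction over the
        // broadcast dimension):
        //   dL/dx   = sum_i(1 / y_i) = 1/(-4.2233) + 1/2.361 + 1/4.8289 = 0.393854
        //   dL/dy_i = -x / y_i^2     e.g. -8.2849 / 4.2233^2 = -0.464498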

        Tensor x = create_test_tensor(x_shape, x_data, true);
        Tensor y = create_test_tensor(y_shape, y_data, true);

        Tensor z = Tensor_div(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(x_shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(y_shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 4: Matrix division with negative values
    {
        const char* tc_name = "div_matrices_neg_vals_backward";
        TensorShape shape = {2, 2};
        float x_data[] = {1.8347f, -8.6274f, -8.2642f, -5.8261f};
        float y_data[] = {-2.5141f, -4.3176f, -4.4468f, 3.8183f};

        // z = x / y = [-0.7298, 1.9982, 1.8585, -1.5258]
        // loss = sum(z) = 1.6011
        float exp_grad_x[] = {-0.397757f, -0.23161f, -0.224881f, 0.261897f};
        float exp_grad_y[] = {-0.290269f, 0.462802f, 0.417932f, 0.399611f};
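        // Same element-wise rule as Test Case 1, exercised with mixed signs:
        //   dL/dx_i = 1 / y_i         e.g.  1 / (-2.5141)          = -0.397757
        //   dL/dy_i = -x_i / y_i^2    e.g. -1.8347 / (-2.5141)^2   = -0.290269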

        Tensor x = create_test_tensor(shape, x_data, true);
        Tensor y = create_test_tensor(shape, y_data, true);

        Tensor z = Tensor_div(x, y);
        Tensor loss = Tensor_sum(z);

        Tensor grad_dummy = {0};
        Tensor_backward(loss, grad_dummy);

        Tensor expected_grad_x = create_test_tensor(shape, exp_grad_x, false);
        Tensor expected_grad_y = create_test_tensor(shape, exp_grad_y, false);

        compare_tensors(&x.node->grad, &expected_grad_x, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&y.node->grad, &expected_grad_y, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
    }

    // Test Case 5: Complex computation graph (z = (a/b) * c)
    {
        const char* tc_name = "div_complex_graph_backward";
        TensorShape shape = {1};
        float a_data[] = {3.0511f};
        float b_data[] = {1.3192f};
        float c_data[] = {1.404f};

        // Let d = a / b. Then z = d * c.
        // Forward: d = 3.0511 / 1.3192 = 2.3129. z = 2.3129 * 1.404 = 3.2472
        // Backward pass:
        // dz/dc = d = 2.312841
        float exp_grad_c[] = {2.312841f};

        // dz/d(d) = c = 1.404 (This is the upstream gradient for the div op)
        // dz/da = (dz/dd) * (dd/da) = c * (1/b) = 1.404 * (1/1.3192) = 1.064281
        float exp_grad_a[] = {1.064281f};
        // dz/db = (dz/dd) * (dd/db) = c * (-a/b²) = 1.404 * (-3.0511/(1.3192*1.3192)) = -2.461514
        float exp_grad_b[] = {-2.461514f};

        Tensor a = create_test_tensor(shape, a_data, true);
        Tensor b = create_test_tensor(shape, b_data, true);
        Tensor c = create_test_tensor(shape, c_data, true);

        Tensor d = Tensor_div(a, b);
        Tensor z = Tensor_mul(d, c);

        Tensor grad_dummy = {0};
        Tensor_backward(z, grad_dummy);

        Tensor expected_grad_a_tensor = create_test_tensor(shape, exp_grad_a, false);
        Tensor expected_grad_b_tensor = create_test_tensor(shape, exp_grad_b, false);
        Tensor expected_grad_c_tensor = create_test_tensor(shape, exp_grad_c, false);

        compare_tensors(&a.node->grad, &expected_grad_a_tensor, op_name, tc_name, 1, TEST_FLOAT_TOLERANCE);
        compare_tensors(&b.node->grad, &expected_grad_b_tensor, op_name, tc_name, 2, TEST_FLOAT_TOLERANCE);
        compare_tensors(&c.node->grad, &expected_grad_c_tensor, op_name, tc_name, 3, TEST_FLOAT_TOLERANCE);
    }

    cten_free(pool_id);
}