@@ -447,3 +447,64 @@ TEST_F(OpLogSoftmaxOutTest, DynamicShapeUnbound) {
   Tensor ret = op_log_softmax_out(x, 1, false, out);
   EXPECT_TENSOR_CLOSE(out, expected_result);
 }
+
+TEST_F(OpLogSoftmaxOutTest, DoubleCase) {
+  TensorFactory<ScalarType::Double> tf;
+
+  // Test case with specific inputs:
+  // Input tensor: torch.float64 (8, 5, 7)
+  // Dim: 2
+  // half_to_float: False
+  Tensor input = tf.zeros({8, 5, 7});
+  auto in_data = input.mutable_data_ptr<double>();
+
+  // Fill with some test data (sequential values scaled)
+  for (int i = 0; i < 8 * 5 * 7; i++) {
+    in_data[i] = static_cast<double>(i) * 0.01;
+  }
+
+  // Output tensor with same shape
+  Tensor out = tf.zeros({8, 5, 7});
+
+  // Apply log_softmax along dimension 2 (the last dimension with size 7)
+  op_log_softmax_out(input, /*dim=*/2, /*half_to_float=*/false, out);
+
+  if (!SupportedFeatures::get()->op_log_softmax_dtype_double) {
+    // For optimized kernels, we expect the call above to fail gracefully
+    expect_failure();
+    GTEST_SKIP() << "This kernel does not support dtype double";
+  }
+
+  // Verify output dimensions
+  EXPECT_EQ(out.size(0), 8);
+  EXPECT_EQ(out.size(1), 5);
+  EXPECT_EQ(out.size(2), 7);
+
+  // Verify that output has reasonable values
+  auto out_data = out.const_data_ptr<double>();
+
+  // Check for NaN or Inf values
+  for (int i = 0; i < 8 * 5 * 7; i++) {
+    EXPECT_FALSE(std::isnan(out_data[i])) << "Output should not contain NaN at index " << i;
+    EXPECT_FALSE(std::isinf(out_data[i])) << "Output should not contain Inf at index " << i;
+  }
+
+  // For log_softmax, all values should be <= 0 (since softmax values are <= 1, log is <= 0)
+  for (int i = 0; i < 8 * 5 * 7; i++) {
+    EXPECT_LE(out_data[i], 0.0) << "Log softmax values should be <= 0 at index " << i;
+  }
+
+  // Verify that each slice along dimension 2 sums to approximately 1 when exp'd
+  // This tests the core property of softmax: sum(softmax(x)) = 1
+  for (int batch = 0; batch < 8; batch++) {
+    for (int channel = 0; channel < 5; channel++) {
+      double sum_exp = 0.0;
+      for (int dim2 = 0; dim2 < 7; dim2++) {
+        int idx = batch * 5 * 7 + channel * 7 + dim2;
+        sum_exp += std::exp(out_data[idx]);
+      }
+      EXPECT_NEAR(sum_exp, 1.0, 1e-6)
+          << "Sum of exp(log_softmax) should be 1.0 for batch=" << batch << ", channel=" << channel;
+    }
+  }
+}
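
For context on the final assertion block: exponentiating a log-softmax output recovers the softmax values, which by construction sum to 1 along the reduced dimension, and each log-softmax value is therefore <= 0. Below is a minimal standalone sketch of that property, not part of this PR; `log_softmax_row` is a hypothetical reference helper, not the kernel under test, and the input slice is filled the same way as one 7-element row of the test data.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Numerically stable log-softmax over one row:
//   log_softmax(x)_i = (x_i - m) - log(sum_j exp(x_j - m)),  where m = max_j x_j
std::vector<double> log_softmax_row(const std::vector<double>& x) {
  const double m = *std::max_element(x.begin(), x.end());
  double sum = 0.0;
  for (double v : x) {
    sum += std::exp(v - m);
  }
  const double log_sum = std::log(sum);
  std::vector<double> out(x.size());
  for (std::size_t i = 0; i < x.size(); i++) {
    out[i] = (x[i] - m) - log_sum;  // always <= 0
  }
  return out;
}

int main() {
  // One 7-element slice, filled like the test input: sequential values * 0.01.
  std::vector<double> row(7);
  for (int i = 0; i < 7; i++) {
    row[i] = i * 0.01;
  }
  double sum_exp = 0.0;
  for (double v : log_softmax_row(row)) {
    sum_exp += std::exp(v);  // exp undoes the log; these terms are the softmax values
  }
  std::printf("sum of exp(log_softmax) = %.12f\n", sum_exp);  // prints ~1.0
  return 0;
}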