@@ -465,6 +465,205 @@ def test_axis(self):
465465 self .assertAlmostEqual (loss , expected_loss , 3 )
466466
467467
class HuberLossTest(testing.TestCase):
    def huber_loss(self, y_true, y_pred, delta=1.0):
        """Reference Huber loss, computed elementwise with numpy.

        Quadratic for errors within `delta`, linear beyond it.
        """
        diff = y_pred - y_true
        abs_diff = np.abs(diff)
        # Split the error into a clipped (quadratic) part and the overflow
        # (linear) part beyond the delta threshold.
        clipped = np.minimum(abs_diff, delta)
        overflow = abs_diff - clipped
        return 0.5 * (clipped * clipped) + delta * overflow

    def setup(self, delta=1.0):
        """Populate fixture targets, predictions, and expected losses."""
        self.np_y_pred = np.array([[0.9, 0.2, 0.2], [0.8, 0.4, 0.6]])
        self.np_y_true = np.array([[1.0, 0.0, 1.0], [1.0, 0.0, 0.0]])

        self.batch_size = 6
        self.expected_losses = self.huber_loss(
            self.np_y_true, self.np_y_pred, delta
        )

        self.y_pred = self.np_y_pred
        self.y_true = self.np_y_true

    def test_config(self):
        # Constructor arguments must round-trip onto the loss object.
        huber = losses.Huber(reduction="sum", name="huber")
        self.assertEqual(huber.name, "huber")
        self.assertEqual(huber.reduction, "sum")

    def test_all_correct(self):
        self.setup()
        huber = losses.Huber()
        # Identical targets and predictions give zero loss.
        loss = huber(self.y_true, self.y_true)
        self.assertAlmostEqual(loss, 0.0, 3)

    def test_unweighted(self):
        self.setup()
        huber = losses.Huber()
        loss = huber(self.y_true, self.y_pred)
        expected = np.sum(self.expected_losses) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_scalar_weighted(self):
        self.setup()
        huber = losses.Huber()
        weight = 2.3
        loss = huber(self.y_true, self.y_pred, sample_weight=weight)
        expected = weight * np.sum(self.expected_losses) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

        # Verify we get the same output when the same input is given
        repeat = huber(self.y_true, self.y_pred, sample_weight=weight)
        self.assertAlmostEqual(loss, repeat, 3)

    def test_sample_weighted(self):
        self.setup()
        huber = losses.Huber()
        weight = np.array([[1.2], [3.4]])

        loss = huber(self.y_true, self.y_pred, sample_weight=weight)
        # Broadcast the per-sample weights across the feature axis.
        per_element = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
        expected = np.sum(self.expected_losses * per_element) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_timestep_weighted(self):
        self.setup()
        huber = losses.Huber()
        # Add a trailing timestep axis so each element gets its own weight.
        y_pred = self.np_y_pred.reshape((2, 3, 1))
        y_true = self.np_y_true.reshape((2, 3, 1))
        reference = self.huber_loss(y_true, y_pred)

        weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
        loss = huber(y_true, y_pred, sample_weight=weight)
        expected = np.sum(reference * weight) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_zero_weighted(self):
        self.setup()
        huber = losses.Huber()
        # A zero sample weight must zero out the reduced loss.
        loss = huber(self.y_true, self.y_pred, sample_weight=0)
        self.assertAlmostEqual(loss, 0.0, 3)

    def test_non_default_delta(self):
        self.setup(delta=0.8)
        huber = losses.Huber(delta=0.8)
        weight = 2.3
        loss = huber(self.y_true, self.y_pred, sample_weight=weight)
        expected = weight * np.sum(self.expected_losses) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_loss_with_non_default_dtype(self):
        # Test case for GitHub issue:
        # https://github.com/tensorflow/tensorflow/issues/39004
        # TODO
        pass
576+
577+
class LogCoshTest(testing.TestCase):
    def setup(self):
        """Populate fixture targets, predictions, and expected losses."""
        targets = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
        preds = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)

        self.batch_size = 6
        diff = preds - targets
        # log(cosh(x)) written out as log((e^x + e^-x) / 2).
        self.expected_losses = np.log((np.exp(diff) + np.exp(-diff)) / 2)

        self.y_true = targets
        self.y_pred = preds

    def test_config(self):
        # Constructor arguments must round-trip onto the loss object.
        logcosh = losses.LogCosh(reduction="sum", name="logcosh_loss")
        self.assertEqual(logcosh.name, "logcosh_loss")
        self.assertEqual(logcosh.reduction, "sum")

    def test_unweighted(self):
        self.setup()
        logcosh = losses.LogCosh()

        loss = logcosh(self.y_true, self.y_pred)
        expected = np.sum(self.expected_losses) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_scalar_weighted(self):
        self.setup()
        logcosh = losses.LogCosh()
        weight = 2.3

        loss = logcosh(self.y_true, self.y_pred, sample_weight=weight)
        expected = weight * np.sum(self.expected_losses) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

        # Verify we get the same output when the same input is given
        repeat = logcosh(self.y_true, self.y_pred, sample_weight=weight)
        self.assertAlmostEqual(loss, repeat, 3)

    def test_sample_weighted(self):
        self.setup()
        logcosh = losses.LogCosh()

        weight = np.asarray([1.2, 3.4])
        loss = logcosh(self.y_true, self.y_pred, sample_weight=weight)

        # Broadcast the per-sample weights across the feature axis.
        per_element = np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3))
        expected = np.sum(self.expected_losses * per_element) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_timestep_weighted(self):
        self.setup()
        logcosh = losses.LogCosh()
        # Trailing timestep axis so each element gets its own weight.
        targets = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
        preds = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
        diff = preds - targets
        reference = np.log((np.exp(diff) + np.exp(-diff)) / 2)
        weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))

        loss = logcosh(targets, preds, sample_weight=weight)
        expected = np.sum(reference * weight) / self.batch_size
        self.assertAlmostEqual(loss, expected, 3)

    def test_zero_weighted(self):
        self.setup()
        logcosh = losses.LogCosh()
        # A zero sample weight must zero out the reduced loss.
        loss = logcosh(self.y_true, self.y_pred, sample_weight=0)
        self.assertAlmostEqual(loss, 0.0, 3)
665+
666+
468667class KLDivergenceTest (testing .TestCase ):
469668 def setup (self ):
470669 self .y_pred = np .asarray (
0 commit comments