@@ -431,9 +431,10 @@ public void EvalLossSequence()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
 
             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
+            var loss = NN.LossFunction.MSE(NN.Reduction.Sum);
+            var output = loss(eval, y);
 
-            var result = loss.DataItem<float>();
+            var result = output.DataItem<float>();
 
             Assert.IsNotNull(result);
         }
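The change above turns MSE from a one-shot computation into a factory: the reduction is bound once, and the returned loss is then applied to tensors. A minimal sketch of the resulting pattern, restating only the API surface visible in this diff:

    // Bind the configuration once; the factory returns a callable loss.
    var loss = NN.LossFunction.MSE(NN.Reduction.Sum);
    // Apply it to (prediction, target); the result is a tensor.
    var output = loss(eval, y);
    var result = output.DataItem<float>();  // read back the scalar loss value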
@@ -444,9 +445,9 @@ public void TestPoissonNLLLoss()
             using (TorchTensor target = FloatTensor.From(new float[] { 1f, 2f, 3f }))
             {
                 var componentWiseLoss = ((TorchTensor)input.Exp()) - target * input;
-                Assert.IsTrue(componentWiseLoss.Equal(NN.LossFunction.PoissonNLL(input, target, reduction: NN.Reduction.None)));
-                Assert.IsTrue(componentWiseLoss.Sum().Equal(NN.LossFunction.PoissonNLL(input, target, reduction: NN.Reduction.Sum)));
-                Assert.IsTrue(componentWiseLoss.Mean().Equal(NN.LossFunction.PoissonNLL(input, target, reduction: NN.Reduction.Mean)));
+                Assert.IsTrue(componentWiseLoss.Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.None)(input, target)));
+                Assert.IsTrue(componentWiseLoss.Sum().Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.Sum)(input, target)));
+                Assert.IsTrue(componentWiseLoss.Mean().Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.Mean)(input, target)));
             }
         }
 
@@ -456,7 +457,7 @@ public void TestPoissonNLLLoss2()
             using (TorchTensor input = FloatTensor.Random(new long[] { 5, 2 }))
             using (TorchTensor target = FloatTensor.Random(new long[] { 5, 2 }))
             {
-                Assert.IsNotNull(NN.LossFunction.PoissonNLL(input, target, true, true));
+                Assert.IsNotNull(NN.LossFunction.PoissonNLL(true, true)(input, target));
             }
         }
 
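The PoissonNLL tests follow the same factory pattern: the configuration (the reduction, or the two boolean flags in the second test) goes to the factory, and the returned function takes the tensors, so a one-off use becomes an immediate invocation. A sketch, mirroring the assertions above:

    // One-off use: the factory result is invoked directly on the tensors.
    var perElement = NN.LossFunction.PoissonNLL(reduction: NN.Reduction.None)(input, target);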
@@ -481,11 +482,12 @@ public void TestBackward()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
 
             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.None);
+            var loss = NN.LossFunction.MSE(NN.Reduction.None);
+            var output = loss(eval, y);
 
             seq.ZeroGrad();
 
-            loss.Backward();
+            output.Backward();
         }
 
         [TestMethod]
@@ -499,11 +501,12 @@ public void TestGettingParameters()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
 
             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.None);
+            var loss = NN.LossFunction.MSE(NN.Reduction.None);
+            var output = loss(eval, y);
 
             seq.ZeroGrad();
 
-            loss.Backward();
+            output.Backward();
 
             foreach (var parm in seq.Parameters())
             {
@@ -522,11 +525,12 @@ public void TestGrad()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
 
             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.None);
+            var loss = NN.LossFunction.MSE(NN.Reduction.None);
+            var output = loss(eval, y);
 
             seq.ZeroGrad();
 
-            loss.Backward();
+            output.Backward();
 
             foreach (var parm in seq.Parameters())
             {
@@ -658,19 +662,20 @@ public void TestTraining()
 
             float learning_rate = 0.00004f;
             float prevLoss = float.MaxValue;
+            var loss = NN.LossFunction.MSE(NN.Reduction.Sum);
 
             for (int i = 0; i < 10; i++)
             {
                 var eval = seq.Forward(x);
-                var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
-                var lossVal = loss.DataItem<float>();
+                var output = loss(eval, y);
+                var lossVal = output.DataItem<float>();
 
                 Assert.IsTrue(lossVal < prevLoss);
                 prevLoss = lossVal;
 
                 seq.ZeroGrad();
 
-                loss.Backward();
+                output.Backward();
 
                 using (var noGrad = new AutoGradMode(false))
                 {
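Because the loss callable no longer captures the tensors, TestTraining now builds it once before the loop; only the per-iteration application and backward pass stay inside. A sketch of the resulting loop shape, using only calls that appear in this diff:

    var loss = NN.LossFunction.MSE(NN.Reduction.Sum);  // hoisted out of the loop
    for (int i = 0; i < 10; i++)
    {
        var eval = seq.Forward(x);
        var output = loss(eval, y);   // fresh loss tensor each iteration
        seq.ZeroGrad();
        output.Backward();            // backprop through this iteration's graph
    }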
@@ -712,37 +717,6 @@ public void TestTrainingAdam()
             var x = FloatTensor.RandomN(new long[] { 64, 1000 }, device: "cpu:0");
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
 
-            double learning_rate = 0.00004f;
-            float prevLoss = float.MaxValue;
-            var optimizer = NN.Optimizer.Adam(seq.Parameters(), learning_rate);
-
-            for (int i = 0; i < 10; i++)
-            {
-                var eval = seq.Forward(x);
-                var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
-                var lossVal = loss.DataItem<float>();
-
-                Assert.IsTrue(lossVal < prevLoss);
-                prevLoss = lossVal;
-
-                optimizer.ZeroGrad();
-
-                loss.Backward();
-
-                optimizer.Step();
-            }
-        }
-
-        [TestMethod]
-        public void TestTrainingAdam2()
-        {
-            var lin1 = NN.Module.Linear(1000, 100);
-            var lin2 = NN.Module.Linear(100, 10);
-            var seq = NN.Module.Sequential(lin1, NN.Module.Relu(), lin2);
-
-            var x = FloatTensor.RandomN(new long[] { 64, 1000 }, device: "cpu:0");
-            var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");
-
             double learning_rate = 0.00004f;
             float prevLoss = float.MaxValue;
             var optimizer = NN.Optimizer.Adam(seq.Parameters(), learning_rate);
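The final hunk deletes a duplicated body: the old TestTrainingAdam loop (still written against the one-shot MSE call) and the header and setup of TestTrainingAdam2 are removed, leaving a single Adam-based test that resumes at the surviving context lines. The rest of that method is cut off in this excerpt; assuming it mirrors the TestTraining changes above, the surviving loop would look roughly like:

    var optimizer = NN.Optimizer.Adam(seq.Parameters(), learning_rate);
    var loss = NN.LossFunction.MSE(NN.Reduction.Sum);
    for (int i = 0; i < 10; i++)
    {
        var eval = seq.Forward(x);
        var output = loss(eval, y);
        optimizer.ZeroGrad();
        output.Backward();
        optimizer.Step();  // Adam update in place of a manual weight update
    }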