Commit 138b0e8

Adds Poisson, KL Divergence Probabilistic Metric / Loss (keras-team#62)
* Adds Poisson, KL Divergence Probabilistic Metric / Loss
* Update format and docstring
1 parent 401d536 commit 138b0e8

File tree

6 files changed: +496 −0 lines changed


keras_core/losses/__init__.py

Lines changed: 8 additions & 0 deletions
@@ -2,36 +2,44 @@
 from keras_core.losses.loss import Loss
 from keras_core.losses.losses import CategoricalHinge
 from keras_core.losses.losses import Hinge
+from keras_core.losses.losses import KLDivergence
 from keras_core.losses.losses import LossFunctionWrapper
 from keras_core.losses.losses import MeanAbsoluteError
 from keras_core.losses.losses import MeanAbsolutePercentageError
 from keras_core.losses.losses import MeanSquaredError
 from keras_core.losses.losses import MeanSquaredLogarithmicError
+from keras_core.losses.losses import Poisson
 from keras_core.losses.losses import SquaredHinge
 from keras_core.losses.losses import categorical_hinge
 from keras_core.losses.losses import hinge
+from keras_core.losses.losses import kl_divergence
 from keras_core.losses.losses import mean_absolute_error
 from keras_core.losses.losses import mean_absolute_percentage_error
 from keras_core.losses.losses import mean_squared_error
 from keras_core.losses.losses import mean_squared_logarithmic_error
+from keras_core.losses.losses import poisson
 from keras_core.losses.losses import squared_hinge
 from keras_core.saving import serialization_lib

 ALL_OBJECTS = {
     Loss,
     LossFunctionWrapper,
+    KLDivergence,
     MeanSquaredError,
     MeanAbsoluteError,
     MeanAbsolutePercentageError,
     MeanSquaredLogarithmicError,
     Hinge,
+    Poisson,
     SquaredHinge,
     CategoricalHinge,
+    kl_divergence,
     mean_squared_error,
     mean_absolute_error,
     mean_absolute_percentage_error,
     mean_squared_logarithmic_error,
     hinge,
+    poisson,
     squared_hinge,
     categorical_hinge,
 }
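
With these re-exports in place, both the class-based and the functional forms are importable straight from `keras_core.losses`. A minimal sketch (the sample data is made up for illustration):

```python
import numpy as np

from keras_core.losses import kl_divergence, poisson

y_true = np.array([[0.5, 0.8, 0.12], [0.7, 0.43, 0.8]], dtype="float32")
y_pred = np.array([[0.4, 0.9, 0.12], [0.36, 0.3, 0.4]], dtype="float32")

# The functional forms return one unreduced loss value per sample.
print(kl_divergence(y_true, y_pred))  # shape (2,)
print(poisson(y_true, y_pred))        # shape (2,)
```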

keras_core/losses/losses.py

Lines changed: 127 additions & 0 deletions
@@ -264,6 +264,54 @@ def get_config(self):
         return Loss.get_config(self)


+@keras_core_export("keras_core.losses.KLDivergence")
+class KLDivergence(LossFunctionWrapper):
+    """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
+
+    Formula:
+
+    ```python
+    loss = y_true * log(y_true / y_pred)
+    ```
+
+    Args:
+        reduction: Type of reduction to apply to the loss. In almost all
+            cases this should be `"sum_over_batch_size"`. Supported options
+            are `"sum"`, `"sum_over_batch_size"` or `None`.
+        name: Optional name for the instance. Defaults to `"kl_divergence"`.
+    """
+
+    def __init__(self, reduction="sum_over_batch_size", name="kl_divergence"):
+        super().__init__(kl_divergence, reduction=reduction, name=name)
+
+    def get_config(self):
+        return Loss.get_config(self)
+
+
+@keras_core_export("keras_core.losses.Poisson")
+class Poisson(LossFunctionWrapper):
+    """Computes the Poisson loss between `y_true` & `y_pred`.
+
+    Formula:
+
+    ```python
+    loss = y_pred - y_true * log(y_pred)
+    ```
+
+    Args:
+        reduction: Type of reduction to apply to the loss. In almost all
+            cases this should be `"sum_over_batch_size"`. Supported options
+            are `"sum"`, `"sum_over_batch_size"` or `None`.
+        name: Optional name for the instance. Defaults to `"poisson"`.
+    """
+
+    def __init__(self, reduction="sum_over_batch_size", name="poisson"):
+        super().__init__(poisson, reduction=reduction, name=name)
+
+    def get_config(self):
+        return Loss.get_config(self)
+
+
 def convert_binary_labels_to_hinge(y_true):
     """Converts binary labels into -1/1 for hinge loss/metric calculation."""
     are_zeros = ops.equal(y_true, 0)
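
Both classes are thin `LossFunctionWrapper` subclasses around the functional forms added later in this file; the wrapper contributes only the reduction behavior. A small sketch of the documented reduction options (sample data made up for illustration):

```python
import numpy as np

from keras_core import losses

y_true = np.array([[0.5, 0.8, 0.12], [0.7, 0.43, 0.8]], dtype="float32")
y_pred = np.array([[0.4, 0.9, 0.12], [0.36, 0.3, 0.4]], dtype="float32")

# "sum_over_batch_size" (the default): mean of the per-sample losses.
print(losses.KLDivergence()(y_true, y_pred))
# "sum": total of the per-sample losses.
print(losses.KLDivergence(reduction="sum")(y_true, y_pred))
# None: the per-sample losses, unreduced.
print(losses.KLDivergence(reduction=None)(y_true, y_pred))
```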
@@ -582,3 +630,82 @@ def cosine_similarity(y_true, y_pred, axis=-1):
     y_pred = normalize(y_pred, axis=axis)
     y_true = normalize(y_true, axis=axis)
     return -ops.sum(y_true * y_pred, axis=axis)
+
+
+@keras_core_export(
+    [
+        "keras_core.metrics.kl_divergence",
+        "keras_core.losses.kl_divergence",
+    ]
+)
+def kl_divergence(y_true, y_pred):
+    """Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
+
+    Formula:
+
+    ```python
+    loss = y_true * log(y_true / y_pred)
+    ```
+
+    Standalone usage:
+
+    >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32)
+    >>> y_pred = np.random.random(size=(2, 3))
+    >>> loss = keras_core.losses.kl_divergence(y_true, y_pred)
+    >>> assert loss.shape == (2,)
+    >>> y_true = ops.clip(y_true, 1e-7, 1)
+    >>> y_pred = ops.clip(y_pred, 1e-7, 1)
+    >>> assert np.array_equal(
+    ...     loss, np.sum(y_true * np.log(y_true / y_pred), axis=-1))
+
+    Args:
+        y_true: Tensor of true targets.
+        y_pred: Tensor of predicted targets.
+
+    Returns:
+        KL Divergence loss values with shape = `[batch_size, d0, .. dN-1]`.
+    """
+    y_pred = ops.convert_to_tensor(y_pred)
+    y_true = ops.convert_to_tensor(y_true, y_pred.dtype)
+    y_true = ops.clip(y_true, backend.epsilon(), 1)
+    y_pred = ops.clip(y_pred, backend.epsilon(), 1)
+    return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1)
+
+
+@keras_core_export(
+    [
+        "keras_core.metrics.poisson",
+        "keras_core.losses.poisson",
+    ]
+)
+def poisson(y_true, y_pred):
+    """Computes the Poisson loss between `y_true` and `y_pred`.
+
+    Formula:
+
+    ```python
+    loss = y_pred - y_true * log(y_pred)
+    ```
+
+    Standalone usage:
+
+    >>> y_true = np.random.randint(0, 2, size=(2, 3))
+    >>> y_pred = np.random.random(size=(2, 3))
+    >>> loss = keras_core.losses.poisson(y_true, y_pred)
+    >>> assert loss.shape == (2,)
+    >>> y_pred = y_pred + 1e-7
+    >>> assert np.allclose(
+    ...     loss, np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
+    ...     atol=1e-5)
+
+    Args:
+        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
+        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
+
+    Returns:
+        Poisson loss values with shape = `[batch_size, d0, .. dN-1]`.
+    """
+    y_pred = ops.convert_to_tensor(y_pred)
+    y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
+    epsilon = ops.convert_to_tensor(backend.epsilon())
+    return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1)
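
The two functions handle numerical safety differently: `kl_divergence` clips both inputs into `[epsilon, 1]`, while `poisson` only shifts `y_pred` by epsilon inside the log. A NumPy re-implementation, useful for sanity-checking (a sketch; `EPSILON` assumes the Keras default of `1e-7` for `backend.epsilon()`):

```python
import numpy as np

EPSILON = 1e-7  # assumed to match backend.epsilon(); Keras defaults to 1e-7

def kl_divergence_np(y_true, y_pred):
    # Mirrors the clipping above: both tensors are clipped into [EPSILON, 1]
    # before the log, so zero-valued targets contribute near-zero loss.
    y_true = np.clip(y_true, EPSILON, 1.0)
    y_pred = np.clip(y_pred, EPSILON, 1.0)
    return np.sum(y_true * np.log(y_true / y_pred), axis=-1)

def poisson_np(y_true, y_pred):
    # Mirrors the code above: only y_pred is shifted by EPSILON inside the log.
    return np.mean(y_pred - y_true * np.log(y_pred + EPSILON), axis=-1)

y_true = np.array([[4.0, 8.0, 12.0], [8.0, 1.0, 3.0]])
y_pred = np.array([[1.0, 9.0, 2.0], [5.0, 2.0, 6.0]])
print(poisson_np(y_true, y_pred))  # one unreduced loss value per sample
```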

keras_core/losses/losses_test.py

Lines changed: 168 additions & 0 deletions
@@ -461,3 +461,171 @@ def test_axis(self):
         loss = cosine_obj(self.y_true, self.y_pred)
         expected_loss = -np.mean(self.expected_loss)
         self.assertAlmostEqual(loss, expected_loss, 3)
+
+
+class KLDivergenceTest(testing.TestCase):
+    def setup(self):
+        self.y_pred = np.asarray(
+            [0.4, 0.9, 0.12, 0.36, 0.3, 0.4], dtype=np.float32
+        ).reshape((2, 3))
+        self.y_true = np.asarray(
+            [0.5, 0.8, 0.12, 0.7, 0.43, 0.8], dtype=np.float32
+        ).reshape((2, 3))
+
+        self.batch_size = 2
+        self.expected_losses = np.multiply(
+            self.y_true, np.log(self.y_true / self.y_pred)
+        )
+
+    def test_config(self):
+        k_obj = losses.KLDivergence(reduction="sum", name="kld")
+        self.assertEqual(k_obj.name, "kld")
+        self.assertEqual(k_obj.reduction, "sum")
+
+    def test_unweighted(self):
+        self.setup()
+        k_obj = losses.KLDivergence()
+
+        loss = k_obj(self.y_true, self.y_pred)
+        expected_loss = np.sum(self.expected_losses) / self.batch_size
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+    def test_scalar_weighted(self):
+        self.setup()
+        k_obj = losses.KLDivergence()
+        sample_weight = 2.3
+
+        loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
+        expected_loss = (
+            sample_weight * np.sum(self.expected_losses) / self.batch_size
+        )
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+        # Verify we get the same output when the same input is given
+        loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
+        self.assertAlmostEqual(loss, loss_2, 3)
+
+    def test_sample_weighted(self):
+        self.setup()
+        k_obj = losses.KLDivergence()
+        sample_weight = np.asarray([1.2, 3.4], dtype=np.float32).reshape((2, 1))
+        loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
+
+        expected_loss = np.multiply(
+            self.expected_losses,
+            np.asarray(
+                [1.2, 1.2, 1.2, 3.4, 3.4, 3.4], dtype=np.float32
+            ).reshape(2, 3),
+        )
+        expected_loss = np.sum(expected_loss) / self.batch_size
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+    def test_timestep_weighted(self):
+        self.setup()
+        k_obj = losses.KLDivergence()
+        y_true = self.y_true.reshape(2, 3, 1)
+        y_pred = self.y_pred.reshape(2, 3, 1)
+        sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3)
+        expected_losses = np.sum(
+            np.multiply(y_true, np.log(y_true / y_pred)), axis=-1
+        )
+        loss = k_obj(y_true, y_pred, sample_weight=sample_weight)
+
+        num_timesteps = 3
+        expected_loss = np.sum(expected_losses * sample_weight) / (
+            self.batch_size * num_timesteps
+        )
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+    def test_zero_weighted(self):
+        self.setup()
+        k_obj = losses.KLDivergence()
+        loss = k_obj(self.y_true, self.y_pred, sample_weight=0)
+        self.assertAlmostEqual(loss, 0.0, 3)
+
+
+class PoissonTest(testing.TestCase):
+    def setup(self):
+        self.y_pred = np.asarray([1, 9, 2, 5, 2, 6], dtype=np.float32).reshape(
+            (2, 3)
+        )
+        self.y_true = np.asarray([4, 8, 12, 8, 1, 3], dtype=np.float32).reshape(
+            (2, 3)
+        )
+
+        self.batch_size = 6
+        self.expected_losses = self.y_pred - np.multiply(
+            self.y_true, np.log(self.y_pred)
+        )
+
+    def test_config(self):
+        poisson_obj = losses.Poisson(reduction="sum", name="poisson")
+        self.assertEqual(poisson_obj.name, "poisson")
+        self.assertEqual(poisson_obj.reduction, "sum")
+
+    def test_unweighted(self):
+        self.setup()
+        poisson_obj = losses.Poisson()
+
+        loss = poisson_obj(self.y_true, self.y_pred)
+        expected_loss = np.sum(self.expected_losses) / self.batch_size
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+    def test_scalar_weighted(self):
+        self.setup()
+        poisson_obj = losses.Poisson()
+        sample_weight = 2.3
+        loss = poisson_obj(
+            self.y_true, self.y_pred, sample_weight=sample_weight
+        )
+        expected_loss = (
+            sample_weight * np.sum(self.expected_losses) / self.batch_size
+        )
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+        # Verify we get the same output when the same input is given
+        loss_2 = poisson_obj(
+            self.y_true, self.y_pred, sample_weight=sample_weight
+        )
+        self.assertAlmostEqual(loss, loss_2, 3)
+
+    def test_sample_weighted(self):
+        self.setup()
+        poisson_obj = losses.Poisson()
+
+        sample_weight = np.asarray([1.2, 3.4]).reshape((2, 1))
+        loss = poisson_obj(
+            self.y_true, self.y_pred, sample_weight=sample_weight
+        )
+
+        expected_loss = np.multiply(
+            self.expected_losses,
+            np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)),
+        )
+        expected_loss = np.sum(expected_loss) / self.batch_size
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+    def test_timestep_weighted(self):
+        self.setup()
+        poisson_obj = losses.Poisson()
+        y_true = self.y_true.reshape(2, 3, 1)
+        y_pred = self.y_pred.reshape(2, 3, 1)
+        sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
+        expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))
+
+        loss = poisson_obj(
+            y_true,
+            y_pred,
+            sample_weight=np.asarray(sample_weight).reshape((2, 3)),
+        )
+        expected_loss = (
+            np.sum(expected_losses * sample_weight) / self.batch_size
+        )
+        self.assertAlmostEqual(loss, expected_loss, 3)
+
+    def test_zero_weighted(self):
+        self.setup()
+        poisson_obj = losses.Poisson()
+        loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0)
+        self.assertAlmostEqual(loss, 0.0, 3)

keras_core/metrics/__init__.py

Lines changed: 4 additions & 0 deletions
@@ -8,6 +8,8 @@
 from keras_core.metrics.hinge_metrics import Hinge
 from keras_core.metrics.hinge_metrics import SquaredHinge
 from keras_core.metrics.metric import Metric
+from keras_core.metrics.probabilistic_metrics import KLDivergence
+from keras_core.metrics.probabilistic_metrics import Poisson
 from keras_core.metrics.reduction_metrics import Mean
 from keras_core.metrics.reduction_metrics import MeanMetricWrapper
 from keras_core.metrics.reduction_metrics import Sum
@@ -26,6 +28,8 @@
     Hinge,
     SquaredHinge,
     CategoricalHinge,
+    KLDivergence,
+    Poisson,
 }
 ALL_OBJECTS_DICT = {cls.__name__: cls for cls in ALL_OBJECTS}
 ALL_OBJECTS_DICT.update(
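
These classes come from the new `probabilistic_metrics` module and wrap the same functions as stateful metrics. A minimal usage sketch (sample data made up; assumes the standard Keras `Metric` API of `update_state`/`result`/`reset_state`):

```python
import numpy as np

from keras_core import metrics

y_true = np.array([[0.5, 0.8, 0.12], [0.7, 0.43, 0.8]], dtype="float32")
y_pred = np.array([[0.4, 0.9, 0.12], [0.36, 0.3, 0.4]], dtype="float32")

m = metrics.KLDivergence()
m.update_state(y_true, y_pred)  # accumulates a running mean over batches
print(float(m.result()))

m.reset_state()  # clears the accumulated state, e.g. between epochs
```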
