|
1 | 1 | import numpy as np |
| 2 | +from logging import warning |
2 | 3 |
|
3 | 4 |
|
def unary_from_labels(labels, n_labels, gt_prob, zero_unsure=True):
    """Convert a label map into unary potentials (negative log probabilities).

    Simple classifier that is `gt_prob` certain that the annotation is
    correct (same as in the inference example).

    Parameters
    ----------
    labels: numpy.array
        The label-map, i.e. an array of your data's shape where each unique
        value corresponds to a label.
    n_labels: int
        The total number of labels there are.
        If `zero_unsure` is True (the default), this number should not include
        `0` in counting the labels, since `0` is not a label!
    gt_prob: float
        The certainty of the ground-truth (must be within (0,1)).
    zero_unsure: bool
        If `True`, treat the label value `0` as meaning "could be anything",
        i.e. entries with this value will get uniform unary probability.
        If `False`, do not treat the value `0` specially, but just as any
        other class.

    Returns
    -------
    numpy.array
        Float32 array of shape `(n_labels, N)` of unary energies, where `N`
        is the number of entries in `labels`.
    """
    assert 0 < gt_prob < 1, "`gt_prob` must be in (0,1)."

    labels = labels.flatten()

    # The remaining (1 - gt_prob) probability mass is shared equally among
    # the other (n_labels - 1) labels; energies are negative log-probs.
    n_energy = -np.log((1.0 - gt_prob) / (n_labels - 1))
    p_energy = -np.log(gt_prob)

    # Note that the order of the following operations is important.
    # That's because the later ones overwrite part of the former ones, and only
    # after all of them is `U` correct!
    U = np.full((n_labels, len(labels)), n_energy, dtype='float32')
    U[labels - 1 if zero_unsure else labels, np.arange(U.shape[1])] = p_energy

    # Overwrite 0-labels using uniform probability, i.e. "unsure".
    if zero_unsure:
        U[:, labels == 0] = -np.log(1.0 / n_labels)

    return U
36 | 46 |
|
37 | 47 |
|
38 | | -def softmax_to_unary(sm, GT_PROB=1): |
39 | | - """ |
40 | | - Util function that converts softmax scores (classwise probabilities) to |
41 | | - unary potentials (the negative log likelihood per node). |
def compute_unary(labels, M, GT_PROB=0.5):
    """Deprecated, use `unary_from_labels` instead."""
    # Thin backwards-compatibility shim: forwards to the renamed
    # implementation, keeping the historical 0-means-unsure behaviour
    # (zero_unsure defaults to True there).
    warning("pydensecrf.compute_unary is deprecated, use unary_from_labels instead.")
    return unary_from_labels(labels, n_labels=M, gt_prob=GT_PROB)
| 52 | + |
| 53 | + |
def unary_from_softmax(sm, scale=None, clip=1e-5):
    """Converts softmax class-probabilities to unary potentials (NLL per node).

    Parameters
    ----------
    sm: numpy.array
        Output of a softmax where the first dimension is the classes,
        all others will be flattend. This means `sm.shape[0] == n_classes`.
    scale: float
        The certainty of the softmax output (default is None).
        If not None, the softmax outputs are scaled to range from uniform
        probability for 0 outputs to `scale` probability for 1 outputs.
    clip: float
        Minimum value to which probability should be clipped.
        This is because the unary is the negative log of the probability, and
        log(0) = inf, so we need to clip 0 probabilities to a positive value.
    """
    n_classes = sm.shape[0]
    probs = sm
    if scale is not None:
        assert 0 < scale <= 1, "`scale` needs to be in (0,1]"
        # Blend towards uniform: an output of 1 becomes `scale`,
        # an output of 0 becomes the uniform probability 1/n_classes.
        uniform = np.ones(probs.shape) / n_classes
        probs = scale * probs + (1 - scale) * uniform
    if clip is not None:
        # Guard against log(0) = inf by flooring the probabilities.
        probs = np.clip(probs, clip, 1.0)
    nll = -np.log(probs)
    return nll.reshape([n_classes, -1]).astype(np.float32)
57 | 79 |
|
58 | 80 |
|
def softmax_to_unary(sm, GT_PROB=1):
    """Deprecated, use `unary_from_softmax` instead."""
    warning("pydensecrf.softmax_to_unary is deprecated, use unary_from_softmax instead.")
    # The historical API used GT_PROB == 1 to mean "no rescaling"; the new
    # API expresses that as scale=None. Clipping was not part of the old
    # behaviour, so it is disabled here.
    if GT_PROB == 1:
        scale = None
    else:
        scale = GT_PROB
    return unary_from_softmax(sm, scale, clip=None)
| 86 | + |
| 87 | + |
59 | 88 | def create_pairwise_gaussian(sdims, shape): |
60 | 89 | """ |
61 | 90 | Util function that create pairwise gaussian potentials. This works for all |
|
0 commit comments