|
20 | 20 | # limitations under the License. |
21 | 21 | # |
22 | 22 | import itertools |
23 | | -from typing import List |
| 23 | +from typing import List, Tuple |
24 | 24 |
|
25 | 25 | import torch |
26 | 26 | from torch import nn |
@@ -106,6 +106,140 @@ def forward(self, inp): |
106 | 106 | return inp / torch.norm(inp, dim=1, keepdim=True) |
107 | 107 |
|
108 | 108 |
|
class LegacyMultiobjectiveModel(nn.Module):
    """Wrapper around contrastive learning models to allow training with multiple objectives.

    Multi-objective training splits the last layer's feature representation into multiple
    chunks, which are then used for individual training objectives.

    Args:
        module: The module to wrap. Must be a subclass of ``cebra.models.Model``.
        dimensions: A tuple of dimension values to extract from the model's feature embedding.
        renormalize: If True, the individual feature slices will be re-normalized before
            getting returned---this option only makes sense in conjunction with a loss based
            on the cosine distance or dot product.
        output_mode: A mode as defined in ``LegacyMultiobjectiveModel.Mode``. Overlapping means
            that when ``dimensions`` are set to ``(x0, x1, ...)``, features will be extracted
            from ``0:x0, 0:x1, ...``. When mode is set to separate, features are extracted from
            ``x0:x1, x1:x2, ...``.
        append_last_dimension: Defaults to False. If set to True, the model's output
            dimension is appended to ``dimensions``, which allows to omit the last
            dimension in the ``dimensions`` argument (which should be equal to the
            output dimension of the given model).

    TODO:
        - Update nn.Module type annotation for ``module`` to cebra.models.Model
    """

    class Mode:
        """Mode for slicing and potentially normalizing the output embedding.

        The options are:

        - ``OVERLAPPING``: When ``dimensions`` are set to ``(x0, x1, ...)``, features will be
          extracted from ``0:x0, 0:x1, ...``.
        - ``SEPARATE``: Features are extracted from ``x0:x1, x1:x2, ...``

        """

        OVERLAPPING = "overlapping"
        SEPARATE = "separate"
        _ALL = {OVERLAPPING, SEPARATE}

        def is_valid(self, mode):
            """Check if a given string representation is valid.

            Args:
                mode: String representation of the mode.

            Returns:
                ``True`` for a valid representation, ``False`` otherwise.
            """
            # Bug fix: previously referenced the bare name ``_ALL`` (flagged with
            # ``# noqa: F821``), which raised a NameError when called. Class-level
            # attributes must be accessed through ``self`` (or the class).
            return mode in self._ALL

    def __init__(
        self,
        module: nn.Module,
        dimensions: Tuple[int, ...],
        renormalize: bool = False,
        output_mode: str = "overlapping",
        append_last_dimension: bool = False,
    ):
        super().__init__()

        if not isinstance(module, cebra.models.Model):
            raise ValueError("Can only wrap models that are subclassing the "
                             "cebra.models.Model abstract base class. "
                             f"Got a model of type {type(module)}.")

        self.module = module
        self.renormalize = renormalize
        self.output_mode = output_mode

        self._norm = _Norm()
        self._compute_slices(dimensions, append_last_dimension)

    @property
    def get_offset(self):
        """See :py:meth:`cebra.models.model.Model.get_offset`."""
        return self.module.get_offset

    @property
    def num_output(self):
        """See :py:attr:`cebra.models.model.Model.num_output`."""
        return self.module.num_output

    def _compute_slices(self, dimensions, append_last_dimension):
        """Validate ``dimensions`` and store the ``feature_ranges`` slice tuple."""

        def _valid_dimensions(dimensions):
            # The largest requested dimension must match the encoder's output size.
            return max(dimensions) == self.num_output

        if append_last_dimension:
            if _valid_dimensions(dimensions):
                raise ValueError(
                    f"append_last_dimension should only be used if extra values are "
                    f"available. Last requested dimensionality is already {dimensions[-1]}."
                )
            dimensions += (self.num_output,)
        if not _valid_dimensions(dimensions):
            raise ValueError(
                f"Max of given dimensions needs to match the number of outputs "
                f"in the encoder network. Got {dimensions} and expected a "
                f"maximum value of {self.num_output}.")

        if self.output_mode == self.Mode.OVERLAPPING:
            # Slices all start at 0: 0:x0, 0:x1, ...
            self.feature_ranges = tuple(
                slice(0, dimension) for dimension in dimensions)
        elif self.output_mode == self.Mode.SEPARATE:
            # Consecutive, non-overlapping slices: 0:x0, x0:x1, ...
            from_dimension = (0,) + dimensions
            self.feature_ranges = tuple(
                slice(i, j) for i, j in zip(from_dimension, dimensions))
        else:
            raise ValueError(
                f"Unknown mode: '{self.output_mode}', use one of {self.Mode._ALL}."
            )

    def forward(self, inputs):
        """Compute multiple embeddings for a single signal input.

        Args:
            inputs: The input tensor

        Returns:
            A tuple of tensors which are sliced according to `self.feature_ranges`
            if `renormalize` is set to true, each of the tensors will be normalized
            across the first (feature) dimension.

        TODO:
            - Cover this function with unit tests
        """
        output = self.module(inputs)
        outputs = (
            output[:, slice_features] for slice_features in self.feature_ranges)
        if self.renormalize:
            outputs = (self._norm(output) for output in outputs)
        return tuple(outputs)
| 242 | + |
109 | 243 | class MultiobjectiveModel(nn.Module): |
110 | 244 | """Wrapper around contrastive learning models to allow training with multiple objectives
111 | 245 |
|
|
0 commit comments