src/setfit/modeling.py (10 changes: 6 additions & 4 deletions)
@@ -284,7 +284,7 @@ def fit(
             if not end_to_end:
                 self.freeze("body")
 
-            dataloader = self._prepare_dataloader(x_train, y_train, batch_size, max_length)
+            dataloader = self._prepare_dataloader(list(x_train), list(y_train), batch_size, max_length)
             criterion = self.model_head.get_loss_fn()
             optimizer = self._prepare_optimizer(head_learning_rate, body_learning_rate, l2_weight)
             scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.5)
@@ -314,8 +314,8 @@ def fit(
             if not end_to_end:
                 self.unfreeze("body")
         else:  # train with sklearn
-            embeddings = self.model_body.encode(x_train, normalize_embeddings=self.normalize_embeddings)
-            self.model_head.fit(embeddings, y_train)
+            embeddings = self.model_body.encode(list(x_train), normalize_embeddings=self.normalize_embeddings)
+            self.model_head.fit(embeddings, list(y_train))
             if self.labels is None and self.multi_target_strategy is None:
                 # Try to set the labels based on the head classes, if they exist
                 # This can fail in various ways, so we catch all exceptions
@@ -477,6 +477,7 @@ def _output_type_conversion(
             outputs = torch.from_numpy(outputs)
         return outputs
 
+    @torch.no_grad()
     def predict_proba(
         self,
         inputs: Union[str, List[str]],
Expand Down Expand Up @@ -521,6 +522,7 @@ def predict_proba(
outputs = self._output_type_conversion(probs, as_numpy=as_numpy)
return outputs[0] if is_singular else outputs

@torch.no_grad()
def predict(
self,
inputs: Union[str, List[str]],
@@ -556,7 +558,7 @@ def predict(
         is_singular = isinstance(inputs, str)
         if is_singular:
             inputs = [inputs]
-        embeddings = self.encode(inputs, batch_size=batch_size, show_progress_bar=show_progress_bar)
+        embeddings = self.encode(list(inputs), batch_size=batch_size, show_progress_bar=show_progress_bar)
         preds = self.model_head.predict(embeddings)
         # If labels are defined, we don't have multilabels & the output is not already strings, then we convert to string labels
         if (
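Why the list(...) casts: indexing a datasets.Dataset by column can return a lazy Column-like sequence rather than a plain Python list (depending on the `datasets` version), while downstream consumers such as SentenceTransformer.encode and scikit-learn's fit expect real lists. A minimal sketch of the defensive cast, using hypothetical toy data:

    from datasets import Dataset

    # Hypothetical two-example dataset, just to show the types involved.
    dataset = Dataset.from_dict({"text": ["great movie", "dull plot"], "label": [1, 0]})

    x_train = dataset["text"]   # may be a lazy Column rather than a list,
    y_train = dataset["label"]  # depending on the `datasets` version

    # list() is a cheap no-op for lists and normalizes any other sequence:
    x_train, y_train = list(x_train), list(y_train)
    assert isinstance(x_train, list) and all(isinstance(t, str) for t in x_train)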
src/setfit/trainer.py (1 change: 1 addition & 0 deletions)
@@ -655,6 +655,7 @@ def train_classifier(
             end_to_end=args.end_to_end,
         )
 
+    @torch.no_grad()
     def evaluate(self, dataset: Optional[Dataset] = None, metric_key_prefix: str = "test") -> Dict[str, float]:
         """
         Computes the metrics for a given classifier.
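The @torch.no_grad() decorators are standard PyTorch practice for inference-only methods: no autograd graph is recorded inside the decorated call, so intermediate activations are freed immediately and memory use drops. A small standalone sketch (not SetFit code) of the decorator's effect:

    import torch

    model = torch.nn.Linear(4, 2)

    @torch.no_grad()
    def predict(batch: torch.Tensor) -> torch.Tensor:
        # Runs the forward pass without recording an autograd graph.
        return model(batch).argmax(dim=-1)

    preds = predict(torch.randn(8, 4))
    assert not preds.requires_grad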
src/setfit/trainer_distillation.py (7 changes: 4 additions & 3 deletions)
@@ -85,12 +85,12 @@ def get_dataset(
         max_pairs: int = -1,
     ) -> Tuple[DataLoader, nn.Module, int, int]:
         x_embd_student = self.teacher_model.model_body.encode(
-            x, convert_to_tensor=self.teacher_model.has_differentiable_head
+            list(x), convert_to_tensor=self.teacher_model.has_differentiable_head
         )
         cos_sim_matrix = util.cos_sim(x_embd_student, x_embd_student)
 
         data_sampler = ContrastiveDistillationDataset(
-            x, cos_sim_matrix, args.num_iterations, args.sampling_strategy, max_pairs=max_pairs
+            list(x), cos_sim_matrix, args.num_iterations, args.sampling_strategy, max_pairs=max_pairs
         )
         dataset = Dataset.from_list(list(data_sampler))
         loss = args.loss(self.model.model_body)
@@ -105,7 +105,8 @@ def train_classifier(self, x_train: List[str], args: Optional[TrainingArguments]
             args (`TrainingArguments`, *optional*):
                 Temporarily change the training arguments for this training call.
         """
-        y_train = self.teacher_model.predict(x_train, as_numpy=not self.student_model.has_differentiable_head)
+        with torch.no_grad():
+            y_train = self.teacher_model.predict(x_train, as_numpy=not self.student_model.has_differentiable_head)
         return super().train_classifier(x_train, y_train, args)
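Here a with torch.no_grad(): block is used rather than a decorator because only the single teacher call needs it: the teacher's predictions serve purely as training targets for the student, so no gradients should be tracked through them. The two forms are equivalent in effect; a small standalone sketch with a stand-in model:

    import torch

    teacher = torch.nn.Linear(4, 2)  # stand-in for the teacher model

    # Scoped alternative to @torch.no_grad(): only this call skips autograd.
    with torch.no_grad():
        pseudo_labels = teacher(torch.randn(8, 4)).softmax(dim=-1)

    assert not pseudo_labels.requires_grad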