1 parent b312460 commit 7d91ea1
axelrod/strategies/attention.py
@@ -16,7 +16,7 @@
CLS_TOKEN = 0
PAD_TOKEN = 1

-DEVICES = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+DEVICES = torch.device("cpu")

model_weights = load_attention_model_weights()

@@ -329,7 +329,7 @@ def forward(self, input_ids: torch.Tensor) -> torch.Tensor:
        pooled_output = self.pooler(sequence_output)
        return self.action(pooled_output)

-    def __eq__(self, other: "PlayerModel") -> bool:
+    def __eq__(self, other: object) -> bool:
        return isinstance(other, PlayerModel)
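
Note on the first hunk: pinning DEVICES to torch.device("cpu") means the model no longer auto-selects CUDA, so inference behaves the same whether or not a GPU is present on the host. A minimal sketch of the pattern, using a made-up token tensor rather than the real encoded game history:

import torch

# Pin all tensors to the CPU; the previous line chose CUDA when available.
DEVICES = torch.device("cpu")

# Hypothetical stand-in for the tokenized history the real model consumes.
input_ids = torch.tensor([[0, 2, 3, 1]], device=DEVICES)
print(input_ids.device)  # cpu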
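
Note on the second hunk: widening the __eq__ parameter to object matches the signature of object.__eq__, so static type checkers such as mypy no longer flag the override, while the isinstance check still narrows the type at runtime. A stripped-down illustration (this PlayerModel is a hypothetical stand-in, not the real attention network):

class PlayerModel:
    def __eq__(self, other: object) -> bool:
        # Accept any object and narrow with isinstance, mirroring object.__eq__.
        return isinstance(other, PlayerModel)

print(PlayerModel() == PlayerModel())  # True
print(PlayerModel() == 42)             # False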