We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 611e21c commit 686cc66 · Copy full SHA for 686cc66
server/text_generation/models/causal_lm.py
@@ -65,7 +65,7 @@ def from_pb(
65
)
66
all_logprobs.append(None)
67
68
- pad_to_multiple_of = 8 if "gpu" in str(device) else None
+ pad_to_multiple_of = 8 if device.type == "cuda" else None
69
tokenized_inputs = tokenizer(
70
inputs,
71
return_tensors="pt",
server/text_generation/models/seq2seq_lm.py
@@ -77,7 +77,7 @@ def from_pb(
77
decoder_logprobs.append(None)
78
79
# Tokenize batch
80
81
82
83
0 commit comments