We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent bf0ec25 commit a500911 — Copy full SHA for a500911
ChatTTS/model/gpt.py
@@ -187,7 +187,10 @@ def _prepare_generation_inputs(
187
if cache_position is not None
188
else past_key_values.get_seq_length()
189
)
190
- max_cache_length = past_key_values.get_max_length()
+ try:
191
+ max_cache_length = past_key_values.get_max_cache_shape()
192
+ except:
193
+ max_cache_length = past_key_values.get_max_length() # deprecated in transformers 4.48
194
cache_length = (
195
past_length
196
if max_cache_length is None
0 commit comments