Skip to content

Commit fa7e89c

Browse files
committed
Update example
1 parent 48e6306 commit fa7e89c

File tree

1 file changed

+5
-5
lines changed

1 file changed

+5
-5
lines changed

examples/multimodal_grounding_qwen.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def inference(self, settext_fn, update_fn):
126126

127127
job = ExLlamaV2DynamicJob(
128128
input_ids = input_ids,
129-
max_new_tokens = 500,
129+
max_new_tokens = 1000,
130130
decode_special_tokens = True,
131131
stop_conditions = [self.tokenizer.eos_token_id],
132132
gen_settings = ExLlamaV2Sampler.Settings.greedy(),
@@ -162,18 +162,18 @@ def get_grounding_bb(self, start, end) -> tuple:
162162
enclosed in the special tokens that Qwen would emit when prompted for grounding. Qwen is then strongly biased
163163
towards completing the bounding box.
164164
165-
Since we're using the same description as the model original generated, all keys/values for the system prompt,
166-
image and generated description up to the selection will be reused from the cache.
165+
Since we're using the same description as the model originally generated, all keys/values for the system
166+
prompt, image and generated description up to the selection will be reused from the cache.
167167
"""
168168

169169
if start >= end:
170-
return
170+
return None, None
171171

172172
# Including leading space
173173
if start > 0 and self.current_description[start - 1] == " ":
174174
start -= 1
175175

176-
# Repeat the same
176+
# Repeat the same prompt up to the selection, with grounding tokens added
177177
prompt = self.get_prompt()
178178
prompt += self.current_description[:start]
179179
prompt += "<|object_ref_start|>"

0 commit comments

Comments (0)