Skip to content

Commit 0064f69

Browse files
authored
[CI] Add test case with JSON schema using references + use xgrammar by default with OpenAI parse (#10935)
Signed-off-by: mgoin <[email protected]>
1 parent 35bae11 commit 0064f69

File tree

3 files changed

+68
-1
lines changed

3 files changed

+68
-1
lines changed

tests/entrypoints/conftest.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,45 @@ def sample_complex_json_schema():
100100
}
101101

102102

103+
@pytest.fixture
def sample_definition_json_schema():
    """A JSON schema that relies on ``$defs``/``$ref`` resolution.

    Models a "MathReasoning" object: an array of explanation/output steps
    (each an instance of the shared ``Step`` definition) plus a final answer.
    Key insertion order mirrors the canonical schema exactly.
    """
    # Shared sub-schema referenced from the "steps" array via "#/$defs/Step".
    step = {
        'properties': {
            'explanation': {
                'title': 'Explanation',
                'type': 'string'
            },
            'output': {
                'title': 'Output',
                'type': 'string'
            }
        },
        'required': ['explanation', 'output'],
        'title': 'Step',
        'type': 'object'
    }
    return {
        '$defs': {
            'Step': step
        },
        'properties': {
            'steps': {
                'items': {
                    '$ref': '#/$defs/Step'
                },
                'title': 'Steps',
                'type': 'array'
            },
            'final_answer': {
                'title': 'Final Answer',
                'type': 'string'
            }
        },
        'required': ['steps', 'final_answer'],
        'title': 'MathReasoning',
        'type': 'object'
    }
140+
141+
103142
@pytest.fixture
104143
def sample_guided_choice():
105144
return [

tests/entrypoints/llm/test_guided_generate.py

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,34 @@ def test_guided_complex_json_completion(sample_complex_json_schema, llm):
104104
schema=sample_complex_json_schema)
105105

106106

107+
@pytest.mark.skip_global_cleanup
def test_guided_definition_json_completion(sample_definition_json_schema, llm):
    """Guided decoding must produce JSON that validates against a schema
    containing ``$defs``/``$ref`` references."""
    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=1000,
        guided_decoding=GuidedDecodingParams(
            json=sample_definition_json_schema))
    prompt = (f"Give an example JSON for solving 8x + 7 = -23 "
              f"that fits this schema: {sample_definition_json_schema}")
    outputs = llm.generate(prompts=[prompt] * 2,
                           sampling_params=sampling_params,
                           use_tqdm=True)

    assert outputs is not None

    for request_output in outputs:
        assert request_output is not None
        assert isinstance(request_output, RequestOutput)

        generated_text = request_output.outputs[0].text
        assert generated_text is not None
        print(f"Prompt: {request_output.prompt!r}, "
              f"Generated text: {generated_text!r}")
        # The completion must both parse as JSON and satisfy the $ref schema.
        output_json = json.loads(generated_text)
        jsonschema.validate(instance=output_json,
                            schema=sample_definition_json_schema)
133+
134+
107135
@pytest.mark.skip_global_cleanup
108136
def test_guided_choice_completion(sample_guided_choice, llm):
109137
sampling_params = SamplingParams(

vllm/entrypoints/openai/protocol.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -387,7 +387,7 @@ def to_sampling_params(
             assert json_schema is not None
             self.guided_json = json_schema.json_schema
             if self.guided_decoding_backend is None:
-                self.guided_decoding_backend = "lm-format-enforcer"
+                self.guided_decoding_backend = "xgrammar"

         guided_decoding = GuidedDecodingParams.from_optional(
             json=self._get_guided_json_from_tool() or self.guided_json,

0 commit comments

Comments
 (0)