@@ -60,12 +60,13 @@ class MockServingChat:
     tokenizer: MockTokenizer


-def test_load_chat_template():
+@pytest.mark.asyncio
+async def test_load_chat_template():
     # Testing chatml template
     tokenizer = MockTokenizer()
     mock_serving_chat = MockServingChat(tokenizer)
-    OpenAIServingChat._load_chat_template(mock_serving_chat,
-                                          chat_template=chatml_jinja_path)
+    await OpenAIServingChat._load_chat_template(
+        mock_serving_chat, chat_template=chatml_jinja_path)

     template_content = tokenizer.chat_template

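For context: these tests lean on two lightweight stand-ins defined earlier in the file, outside the hunks shown. A minimal sketch of what they plausibly look like, inferred only from the usage visible in this diff (a MockTokenizer exposing a writable chat_template attribute, and a MockServingChat dataclass that carries it); the real definitions may differ:

from dataclasses import dataclass
from typing import Optional


class MockTokenizer:
    # All _load_chat_template needs is a writable chat_template slot.
    chat_template: Optional[str] = None


@dataclass
class MockServingChat:
    # Stands in for an OpenAIServingChat instance, so the unbound method
    # can be called with an explicit "self" that carries a tokenizer.
    tokenizer: MockTokenizer
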
@@ -76,26 +77,28 @@ def test_load_chat_template():
 {% if add_generation_prompt and messages[-1]['role'] != 'assistant' %}{{ '<|im_start|>assistant\\n' }}{% endif %}"""  # noqa: E501


-def test_no_load_chat_template_filelike():
+@pytest.mark.asyncio
+async def test_no_load_chat_template_filelike():
     # Testing chatml template
     template = "../../examples/does_not_exist"
     tokenizer = MockTokenizer()

     mock_serving_chat = MockServingChat(tokenizer)

     with pytest.raises(ValueError, match="looks like a file path"):
-        OpenAIServingChat._load_chat_template(mock_serving_chat,
-                                              chat_template=template)
+        await OpenAIServingChat._load_chat_template(mock_serving_chat,
+                                                    chat_template=template)


-def test_no_load_chat_template_literallike():
+@pytest.mark.asyncio
+async def test_no_load_chat_template_literallike():
     # Testing chatml template
     template = "{{ messages }}"
     tokenizer = MockTokenizer()

     mock_serving_chat = MockServingChat(tokenizer)
-    OpenAIServingChat._load_chat_template(mock_serving_chat,
-                                          chat_template=template)
+    await OpenAIServingChat._load_chat_template(mock_serving_chat,
+                                                chat_template=template)
     template_content = tokenizer.chat_template

     assert template_content == template
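Note that @pytest.mark.asyncio comes from the pytest-asyncio plugin; plain pytest does not run coroutine test functions on its own, so the marker is what actually gets these awaits executed. A minimal, self-contained sketch of how the marker drives an async test (the test name here is hypothetical, not from this file):

import asyncio

import pytest


@pytest.mark.asyncio
async def test_marker_runs_coroutine():
    # pytest-asyncio supplies an event loop and awaits the coroutine,
    # so awaits in the test body execute as they would in production code.
    await asyncio.sleep(0)
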
@@ -110,8 +113,8 @@ async def test_get_gen_prompt(model, template, add_generation_prompt,
     # Initialize the tokenizer
     tokenizer = get_tokenizer(tokenizer_name=model)
     mock_serving_chat = MockServingChat(tokenizer)
-    OpenAIServingChat._load_chat_template(mock_serving_chat,
-                                          chat_template=template)
+    await OpenAIServingChat._load_chat_template(mock_serving_chat,
+                                                chat_template=template)

     # Create a mock request object using keyword arguments
     mock_request = ChatCompletionRequest(
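Every call site now awaits _load_chat_template, so the method itself must have become a coroutine. A rough, hypothetical reconstruction of the behavior these tests pin down — load a real file path, reject a missing path-like string with the "looks like a file path" error, and store a literal Jinja string as-is — assuming nothing about the real vLLM implementation beyond what the assertions require (the brace-based path heuristic below is invented for illustration):

from pathlib import Path


async def _load_chat_template(self, chat_template: str) -> None:
    # Hypothetical sketch; the real coroutine lives on OpenAIServingChat.
    # The tests call it unbound, passing a MockServingChat as "self".
    path = Path(chat_template)
    if path.is_file():
        # File-backed template: read it into the tokenizer.
        self.tokenizer.chat_template = path.read_text()
    elif "{" not in chat_template:
        # No Jinja braces and no file on disk: treat it as a bad path,
        # matching the error test_no_load_chat_template_filelike expects.
        raise ValueError(
            f"{chat_template!r} looks like a file path, but it does not exist")
    else:
        # Literal Jinja template string, stored unchanged, which is what
        # test_no_load_chat_template_literallike asserts.
        self.tokenizer.chat_template = chat_template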