
Commit 0eb9567

Merge branch 'develop' into add_ut_requirement

2 parents fcb8d4d + 8791ad4

File tree: 5 files changed (+205 / -38 lines)


.github/workflows/_pre_ce_test.yml

Lines changed: 37 additions & 18 deletions
@@ -21,14 +21,19 @@ on:
       required: false
       type: string
       default: ""
+    MODEL_CACHE_DIR:
+      description: "Cache Dir Use"
+      required: false
+      type: string
+      default: ""

 concurrency:
   group: ${{ github.event.pull_request.number }}
   cancel-in-progress: true

 jobs:
   run_ce_cases:
-    runs-on: [self-hosted, GPU-L20-4Card]
+    runs-on: [self-hosted, PRE_CE_RUN_2Card]
     steps:
       - name: Print current runner name
         run: |
@@ -67,37 +72,51 @@ jobs:
       env:
         docker_image: ${{ inputs.DOCKER_IMAGE }}
         fd_wheel_url: ${{ inputs.FASTDEPLOY_WHEEL_URL }}
+        CACHE_DIR: ${{ inputs.CACHE_DIR }}
+        MODEL_CACHE_DIR: ${{ inputs.MODEL_CACHE_DIR }}
       run: |
         runner_name="${{ runner.name }}"
-        last_char="${runner_name: -1}"
+        CARD_ID=$(echo "${runner_name}" | awk -F'-' '{print $NF}')
+        DEVICES=$(echo "$CARD_ID" | fold -w1 | paste -sd,)
+        DEVICE_PORT=$(echo "$DEVICES" | cut -d',' -f1)
+
+        FLASK_PORT=$((42068 + DEVICE_PORT * 100))
+        FD_API_PORT=$((42088 + DEVICE_PORT * 100))
+        FD_ENGINE_QUEUE_PORT=$((42058 + DEVICE_PORT * 100))
+        FD_METRICS_PORT=$((42078 + DEVICE_PORT * 100))
+        echo "Test ENV Parameter:"
+        echo "========================================================="
+        echo "FLASK_PORT=${FLASK_PORT}"
+        echo "FD_API_PORT=${FD_API_PORT}"
+        echo "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}"
+        echo "FD_METRICS_PORT=${FD_METRICS_PORT}"
+        echo "DEVICES=${DEVICES}"
+        echo "========================================================="

-        if [ "${last_char}" = "1" ]; then
-          gpu_id=2
-          DEVICES="2,3"
-        else
-          gpu_id=0
-          DEVICES="0,1"
+        CACHE_DIR="${CACHE_DIR:-$(dirname "$(dirname "${{ github.workspace }}")")}"
+        echo "CACHE_DIR is set to ${CACHE_DIR}"
+        if [ ! -f "${CACHE_DIR}/gitconfig" ]; then
+          touch "${CACHE_DIR}/gitconfig"
         fi
-        FD_API_PORT=$((9180 + gpu_id * 100))
-        FD_ENGINE_QUEUE_PORT=$((9150 + gpu_id * 100))
-        FD_METRICS_PORT=$((9170 + gpu_id * 100))

-        PARENT_DIR=$(dirname "$WORKSPACE")
-        echo "PARENT_DIR:$PARENT_DIR"
         docker run --rm --net=host -v $(pwd):/workspace -w /workspace \
-          -v "/ssd4/GithubActions/gitconfig:/etc/gitconfig:ro" \
-          -v "/ssd4/GithubActions/ModelData:/ModelData:ro" \
-          -v "/ssd4/GithubActions/CacheDir:/root/.cache" \
-          -v "/ssd4/GithubActions/ConfigDir:/root/.config" \
+          -v "${CACHE_DIR}/gitconfig:/etc/gitconfig:ro" \
+          -v "${CACHE_DIR}/.cache:/root/.cache" \
+          -v "${CACHE_DIR}/ConfigDir:/root/.config" \
+          -v "${MODEL_CACHE_DIR}:/ModelData:ro" \
           -e "MODEL_PATH=/ModelData" \
           -e "FD_API_PORT=${FD_API_PORT}" \
           -e "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}" \
           -e "FD_METRICS_PORT=${FD_METRICS_PORT}" \
+          -e "FLASK_PORT=${FLASK_PORT}" \
           -e "fd_wheel_url=${fd_wheel_url}" \
-          --gpus '"device='"${DEVICES}"'"' ${docker_image} /bin/bash -c '
+          --gpus "\"device=${DEVICES}\"" ${docker_image} /bin/bash -c '
           git config --global --add safe.directory /workspace/FastDeploy
           cd FastDeploy
           python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
           python -m pip install ${fd_wheel_url}
+          for port in $FLASK_PORT $FD_API_PORT $FD_ENGINE_QUEUE_PORT $FD_METRICS_PORT; do
+            lsof -t -i :$port | xargs -r kill -9 || true
+          done
           bash scripts/run_pre_ce.sh
           '
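The hunk above replaces the hard-coded two-way card split with values derived from the runner name. A minimal Python mirror of the shell derivation, assuming runner names end in a dash-separated group of card digits (e.g. "...-23" means cards 2 and 3); the function name derive_test_env is illustrative, not part of the repo:

def derive_test_env(runner_name: str) -> dict:
    # Last dash-separated field encodes the GPU card IDs ("...-23" -> "23"),
    # mirroring: awk -F'-' '{print $NF}'
    card_id = runner_name.rsplit("-", 1)[-1]
    # One digit per card, comma-joined ("23" -> "2,3"), mirroring: fold -w1 | paste -sd,
    devices = ",".join(card_id)
    # The first card ID seeds the per-runner port offset, mirroring: cut -d',' -f1
    device_port = int(devices.split(",")[0])
    return {
        "DEVICES": devices,
        "FLASK_PORT": 42068 + device_port * 100,
        "FD_API_PORT": 42088 + device_port * 100,
        "FD_ENGINE_QUEUE_PORT": 42058 + device_port * 100,
        "FD_METRICS_PORT": 42078 + device_port * 100,
    }

print(derive_test_env("self-hosted-PRE_CE_RUN_2Card-23"))
# {'DEVICES': '2,3', 'FLASK_PORT': 42268, 'FD_API_PORT': 42288,
#  'FD_ENGINE_QUEUE_PORT': 42258, 'FD_METRICS_PORT': 42278}

The four base ports sit 10 apart and each card ID shifts them by 100, so concurrent jobs on different cards of the same host get disjoint port sets; the new lsof loop then clears any stale listeners on those ports before the suite starts.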

.github/workflows/_unit_test_coverage.yml

Lines changed: 35 additions & 5 deletions
@@ -22,6 +22,11 @@ on:
       required: false
       type: string
       default: ""
+    MODEL_CACHE_DIR:
+      description: "Cache Dir Use"
+      required: false
+      type: string
+      default: ""

 jobs:
   run_tests_with_coverage:
@@ -67,11 +72,26 @@ jobs:
         fd_wheel_url: ${{ inputs.FASTDEPLOY_WHEEL_URL }}
         CACHE_DIR: ${{ inputs.CACHE_DIR }}
         BASE_REF: ${{ github.event.pull_request.base.ref }}
+        MODEL_CACHE_DIR: ${{ inputs.MODEL_CACHE_DIR }}
       run: |
         set -x
         runner_name="${{ runner.name }}"
         CARD_ID=$(echo "${runner_name}" | awk -F'-' '{print $NF}')
-        gpu_id=$(echo "$CARD_ID" | fold -w1 | paste -sd,)
+        DEVICES=$(echo "$CARD_ID" | fold -w1 | paste -sd,)
+        DEVICE_PORT=$(echo "$DEVICES" | cut -d',' -f1)
+
+        FLASK_PORT=$((42068 + DEVICE_PORT * 100))
+        FD_API_PORT=$((42088 + DEVICE_PORT * 100))
+        FD_ENGINE_QUEUE_PORT=$((42058 + DEVICE_PORT * 100))
+        FD_METRICS_PORT=$((42078 + DEVICE_PORT * 100))
+        echo "Test ENV Parameter:"
+        echo "========================================================="
+        echo "FLASK_PORT=${FLASK_PORT}"
+        echo "FD_API_PORT=${FD_API_PORT}"
+        echo "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}"
+        echo "FD_METRICS_PORT=${FD_METRICS_PORT}"
+        echo "DEVICES=${DEVICES}"
+        echo "========================================================="

         CACHE_DIR="${CACHE_DIR:-$(dirname "$(dirname "${{ github.workspace }}")")}"
         echo "CACHE_DIR is set to ${CACHE_DIR}"
@@ -86,15 +106,21 @@ jobs:
           -v "${CACHE_DIR}/gitconfig:/etc/gitconfig:ro" \
           -v "${CACHE_DIR}/.cache:/root/.cache" \
           -v "${CACHE_DIR}/ConfigDir:/root/.config" \
+          -v "${MODEL_CACHE_DIR}:/ModelData:ro" \
+          -e "MODEL_PATH=/ModelData" \
+          -e "FD_API_PORT=${FD_API_PORT}" \
+          -e "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}" \
+          -e "FD_METRICS_PORT=${FD_METRICS_PORT}" \
+          -e "FLASK_PORT=${FLASK_PORT}" \
           -e TZ="Asia/Shanghai" \
           -e "fd_wheel_url=${fd_wheel_url}" \
           -e "BASE_REF=${BASE_REF}" \
-          --gpus "\"device=${gpu_id}\"" ${docker_image} /bin/bash -c '
+          --gpus "\"device=${DEVICES}\"" ${docker_image} /bin/bash -c '

           git config --global --add safe.directory /workspace/FastDeploy
           cd FastDeploy
           python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

           pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
           python -m pip install -r scripts/unittest_requirement.txt
@@ -157,7 +183,7 @@ jobs:
           echo "unittest_failed_url=${UNIT_TEST_RESULT_URL}" >> $GITHUB_OUTPUT
           echo "unittest_failed_url=${UNIT_TEST_RESULT_URL}" >> $GITHUB_ENV
         fi
-    - name: Determine Unit Succ and whether the coverage rate reaches 80%
+    - name: Check Unit Test Success
       shell: bash
       run: |
         if [ "$TEST_EXIT_CODE" -eq 8 ]; then
@@ -175,7 +201,11 @@ jobs:
           fi
           exit "$TEST_EXIT_CODE"
         fi
+        echo "All tests passed"

+    - name: Verify Code Coverage Threshold (80%)
+      shell: bash
+      run: |
         if [ "$COVERAGE_EXIT_CODE" -eq 9 ]; then
           echo "Coverage generation failed (exit code 9)"
           filename=$(basename "$diff_cov_result_json_url")
@@ -195,7 +225,7 @@ jobs:
           fi
           exit "$COVERAGE_EXIT_CODE"
         fi
-        echo "All tests and coverage passed"
+        echo "coverage passed"
         exit 0

   diff_coverage_report:
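This diff also splits the old combined step ("Determine Unit Succ and whether the coverage rate reaches 80%") into two gates: one failing fast on unit-test errors, one enforcing the diff-coverage threshold. A rough Python restatement of the gating order, assuming only the exit-code conventions visible in the diff (8 = unit-test failure, 9 = coverage failure); the gate function is illustrative, not part of the repo:

import sys

def gate(test_exit_code: int, coverage_exit_code: int) -> None:
    # Gate 1, "Check Unit Test Success": exit code 8 marks failing unit tests;
    # the job aborts here before coverage is ever considered.
    if test_exit_code == 8:
        sys.exit(test_exit_code)
    print("All tests passed")
    # Gate 2, "Verify Code Coverage Threshold (80%)": exit code 9 marks a
    # missing or below-threshold diff-coverage report.
    if coverage_exit_code == 9:
        print("Coverage generation failed (exit code 9)")
        sys.exit(coverage_exit_code)
    print("coverage passed")
    sys.exit(0)

Splitting the steps means a coverage shortfall shows up as its own failed step in the Actions UI instead of being folded into the test result.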

.github/workflows/pr_build_and_test.yml

Lines changed: 5 additions & 3 deletions
@@ -42,6 +42,7 @@ jobs:
       DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-dailyupdate
       FASTDEPLOY_ARCHIVE_URL: ${{ needs.clone.outputs.repo_archive_url }}
       FASTDEPLOY_WHEEL_URL: ${{ needs.build.outputs.wheel_path }}
+      MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelData"

   logprob_test:
     name: Run FastDeploy LogProb Tests
@@ -51,16 +52,17 @@ jobs:
       DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-dailyupdate
       PADDLETEST_ARCHIVE_URL: "https://xly-devops.bj.bcebos.com/PaddleTest/PaddleTest.tar.gz"
       FASTDEPLOY_WHEEL_URL: ${{ needs.build.outputs.wheel_path }}
-      MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelCache"
+      MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelData"

   pre_ce_test:
     name: Extracted partial CE model tasks to run in CI.
     needs: [clone,build]
     uses: ./.github/workflows/_pre_ce_test.yml
     with:
-      DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddle:fastdeploy-ciuse-cuda126
+      DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-dailyupdate
       FASTDEPLOY_ARCHIVE_URL: ${{ needs.clone.outputs.repo_archive_url }}
       FASTDEPLOY_WHEEL_URL: ${{ needs.build.outputs.wheel_path }}
+      MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelData"

   base_test:
     name: Run Base Tests
@@ -70,4 +72,4 @@ jobs:
       DOCKER_IMAGE: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/paddlepaddle/paddleqa:fastdeploy-ciuse-cuda126-dailyupdate
       FASTDEPLOY_ARCHIVE_URL: ${{ needs.clone.outputs.repo_archive_url }}
       FASTDEPLOY_WHEEL_URL: ${{ needs.build.outputs.wheel_path }}
-      MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelCache"
+      MODEL_CACHE_DIR: "/ssd2/actions-runner/ModelData"

fastdeploy/entrypoints/openai/serving_completion.py

Lines changed: 17 additions & 12 deletions
@@ -234,6 +234,15 @@ async def completion_full_generator(
             if dealer is not None:
                 dealer.close()

+    def calc_finish_reason(self, max_tokens, token_num, output):
+        if max_tokens is None or token_num != max_tokens:
+            if self.engine_client.reasoning_parser == "ernie_x1" and output.get("finish_reason", "") == "tool_calls":
+                return "tool_calls"
+            else:
+                return "stop"
+        else:
+            return "length"
+
     async def completion_stream_generator(
         self,
         request: CompletionRequest,
@@ -334,19 +343,13 @@ async def completion_stream_generator(
                         logprobs=logprobs_res,
                     )
                 )
-                if res["finished"]:
-                    if request.max_tokens is None or output_tokens[idx] + 1 != request.max_tokens:
-                        chunk.choices[0].finish_reason = "stop"
-                        if (
-                            self.engine_client.reasoning_parser == "ernie_x1"
-                            and output.get("finish_reason", "") == "tool_calls"
-                        ):
-                            chunk.choices[0].finish_reason = "tool_calls"
-                    else:
-                        chunk.choices[0].finish_reason = "length"
-
                 output_tokens[idx] += 1

+                if res["finished"]:
+                    choices[-1].finish_reason = self.calc_finish_reason(
+                        request.max_tokens, output_tokens[idx], output
+                    )
+
                 if len(choices) == max_streaming_response_tokens or res["finished"]:
                     chunk = CompletionStreamResponse(
                         id=request_id,
@@ -433,6 +436,8 @@ def request_output_to_completion_response(
             token_ids = output["token_ids"]
             output_text = output["text"]

+            finish_reason = self.calc_finish_reason(request.max_tokens, final_res["output_token_ids"], output)
+
             choice_data = CompletionResponseChoice(
                 token_ids=token_ids,
                 index=len(choices),
@@ -442,7 +447,7 @@ def request_output_to_completion_response(
                 reasoning_content=output.get("reasoning_content"),
                 tool_calls=output.get("tool_call_content"),
                 logprobs=aggregated_logprobs,
-                finish_reason=None,
+                finish_reason=finish_reason,
             )
             choices.append(choice_data)
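The streaming and non-streaming completion paths previously duplicated this finish-reason branching inline; both now call the shared calc_finish_reason helper, and the non-streaming response fills finish_reason instead of hard-coding None. A standalone restatement of the helper's decision order (written as a free function here purely for illustration):

def calc_finish_reason(max_tokens, token_num, output, reasoning_parser="ernie_x1"):
    # No token budget set, or generation stopped before exhausting it:
    if max_tokens is None or token_num != max_tokens:
        # The ernie_x1 reasoning parser can report a tool call as the stop cause.
        if reasoning_parser == "ernie_x1" and output.get("finish_reason", "") == "tool_calls":
            return "tool_calls"
        return "stop"
    # Token budget exhausted:
    return "length"

assert calc_finish_reason(None, 100, {"finish_reason": "tool_calls"}) == "tool_calls"
assert calc_finish_reason(None, 100, {"finish_reason": "other_reason"}) == "stop"
assert calc_finish_reason(100, 100, {}) == "length"

The three asserts mirror the unit tests added in the new test file below.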

Lines changed: 111 additions & 0 deletions
@@ -0,0 +1,111 @@
+import unittest
+from typing import List
+from unittest.mock import Mock
+
+from fastdeploy.entrypoints.openai.serving_completion import (
+    CompletionRequest,
+    OpenAIServingCompletion,
+    RequestOutput,
+)
+
+
+class TestOpenAIServingCompletion(unittest.TestCase):
+
+    def test_calc_finish_reason_tool_calls(self):
+        # Create a mock engine_client with reasoning_parser set to "ernie_x1"
+        engine_client = Mock()
+        engine_client.reasoning_parser = "ernie_x1"
+        # Create an OpenAIServingCompletion instance
+        serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        # Create a mock output with finish_reason set to "tool_calls"
+        output = {"finish_reason": "tool_calls"}
+        # Call the calc_finish_reason method
+        result = serving_completion.calc_finish_reason(None, 100, output)
+        # Assert the result is "tool_calls"
+        assert result == "tool_calls"
+
+    def test_calc_finish_reason_stop(self):
+        # Create a mock engine_client with reasoning_parser set to "ernie_x1"
+        engine_client = Mock()
+        engine_client.reasoning_parser = "ernie_x1"
+        # Create an OpenAIServingCompletion instance
+        serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        # Create a mock output with finish_reason set to some other value
+        output = {"finish_reason": "other_reason"}
+        # Call the calc_finish_reason method
+        result = serving_completion.calc_finish_reason(None, 100, output)
+        # Assert the result is "stop"
+        assert result == "stop"
+
+    def test_calc_finish_reason_length(self):
+        # Create a mock engine_client
+        engine_client = Mock()
+        # Create an OpenAIServingCompletion instance
+        serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        # Create a mock output
+        output = {}
+        # Call the calc_finish_reason method
+        result = serving_completion.calc_finish_reason(100, 100, output)
+        # Assert the result is "length"
+        assert result == "length"
+
+    def test_request_output_to_completion_response(self):
+        engine_client = Mock()
+        # Create an OpenAIServingCompletion instance
+        openai_serving_completion = OpenAIServingCompletion(engine_client, "pid", "ips", 360)
+        final_res_batch: List[RequestOutput] = [
+            {
+                "prompt": "Hello, world!",
+                "outputs": {
+                    "token_ids": [1, 2, 3],
+                    "text": " world!",
+                    "top_logprobs": {
+                        "a": 0.1,
+                        "b": 0.2,
+                    },
+                },
+                "output_token_ids": 3,
+            },
+            {
+                "prompt": "Hello, world!",
+                "outputs": {
+                    "token_ids": [4, 5, 6],
+                    "text": " world!",
+                    "top_logprobs": {
+                        "a": 0.3,
+                        "b": 0.4,
+                    },
+                },
+                "output_token_ids": 3,
+            },
+        ]
+
+        request: CompletionRequest = Mock()
+        request_id = "test_request_id"
+        created_time = 1655136000
+        model_name = "test_model"
+        prompt_batched_token_ids = [[1, 2, 3], [4, 5, 6]]
+        completion_batched_token_ids = [[7, 8, 9], [10, 11, 12]]
+
+        completion_response = openai_serving_completion.request_output_to_completion_response(
+            final_res_batch=final_res_batch,
+            request=request,
+            request_id=request_id,
+            created_time=created_time,
+            model_name=model_name,
+            prompt_batched_token_ids=prompt_batched_token_ids,
+            completion_batched_token_ids=completion_batched_token_ids,
+        )
+
+        assert completion_response.id == request_id
+        assert completion_response.created == created_time
+        assert completion_response.model == model_name
+        assert len(completion_response.choices) == 2
+
+        # Verify the text attribute of the choices
+        assert completion_response.choices[0].text == "Hello, world! world!"
+        assert completion_response.choices[1].text == "Hello, world! world!"
+
+
+if __name__ == "__main__":
+    unittest.main()
