Skip to content

Commit 59d2b45

Browse files
authored
[ez] Fix app/vllm response processing broken by change in policy (#123)

* [ez] Fix app/vllm response processing broken by change in policy
* Lint
1 parent 63d1c35 commit 59d2b45

File tree

1 file changed

+3
-4
lines changed

1 file changed

+3
-4
lines changed

apps/vllm/main.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -13,11 +13,10 @@
 import argparse
 import asyncio
 from argparse import Namespace
-from typing import List
 
 from forge.actors.policy import Policy, PolicyConfig, SamplingOverrides, WorkerConfig
 from forge.controller.service import ServiceConfig, shutdown_service, spawn_service
-from vllm.outputs import CompletionOutput
+from vllm.outputs import RequestOutput
 
 
 async def main():
async def main():
@@ -89,11 +88,11 @@ async def run_vllm(service_config: ServiceConfig, config: PolicyConfig, prompt:
 
     async with policy.session():
         print("Requesting generation...")
-        responses: List[CompletionOutput] = await policy.generate.choose(prompt=prompt)
+        response_output: RequestOutput = await policy.generate.choose(prompt=prompt)
 
         print("\nGeneration Results:")
         print("=" * 80)
-        for batch, response in enumerate(responses):
+        for batch, response in enumerate(response_output.outputs):
             print(f"Sample {batch + 1}:")
             print(f"User: {prompt}")
             print(f"Assistant: {response.text}")

0 commit comments

Comments
 (0)