 # Create a sampling params object.
 sampling_params = SamplingParams(temperature=0.0)
 
-# Create an LLM without prefix caching as a baseline.
-regular_llm = LLM(model="facebook/opt-125m", gpu_memory_utilization=0.4)
-
-print("Results without `enable_prefix_caching`")
-
-# Generate texts from the prompts. The output is a list of RequestOutput objects
-# that contain the prompt, generated text, and other information.
-outputs = regular_llm.generate(generating_prompts, sampling_params)
-
-regular_generated_texts = []
-# Print the outputs.
-for output in outputs:
-    prompt = output.prompt
-    generated_text = output.outputs[0].text
-    regular_generated_texts.append(generated_text)
-    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
-
-print("-" * 80)
-
-# Destroy the LLM object and free up the GPU memory.
-del regular_llm
-cleanup_dist_env_and_memory()
-
-# Create an LLM with prefix caching enabled.
-prefix_cached_llm = LLM(model="facebook/opt-125m",
-                        enable_prefix_caching=True,
-                        gpu_memory_utilization=0.4)
-
-# Warmup so that the shared prompt's KV cache is computed.
-prefix_cached_llm.generate(generating_prompts[0], sampling_params)
-
-# Generate with prefix caching.
-outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)
-
-print("Results with `enable_prefix_caching`")
-
-cached_generated_texts = []
-# Print the outputs. You should see the same outputs as before.
-for output in outputs:
-    prompt = output.prompt
-    generated_text = output.outputs[0].text
-    cached_generated_texts.append(generated_text)
-    print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
-
-print("-" * 80)
-
-# Compare the results and display the speedup
-generated_same = all([
-    regular_generated_texts[i] == cached_generated_texts[i]
-    for i in range(len(prompts))
-])
-print(f"Generated answers are the same: {generated_same}")
+
+def main():
+    # Create an LLM without prefix caching as a baseline.
+    regular_llm = LLM(model="facebook/opt-125m", gpu_memory_utilization=0.4)
+
+    print("Results without `enable_prefix_caching`")
+
+    # ruff: noqa: E501
+    # Generate texts from the prompts. The output is a list of RequestOutput objects
+    # that contain the prompt, generated text, and other information.
+    outputs = regular_llm.generate(generating_prompts, sampling_params)
+
+    regular_generated_texts = []
+    # Print the outputs.
+    print("-" * 50)
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        regular_generated_texts.append(generated_text)
+        print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}")
+        print("-" * 50)
+
+    # Destroy the LLM object and free up the GPU memory.
+    del regular_llm
+    cleanup_dist_env_and_memory()
+
+    # Create an LLM with prefix caching enabled.
+    prefix_cached_llm = LLM(model="facebook/opt-125m",
+                            enable_prefix_caching=True,
+                            gpu_memory_utilization=0.4)
+
+    # Warmup so that the shared prompt's KV cache is computed.
+    prefix_cached_llm.generate(generating_prompts[0], sampling_params)
+
+    # Generate with prefix caching.
+    outputs = prefix_cached_llm.generate(generating_prompts, sampling_params)
+
+    print("Results with `enable_prefix_caching`")
+
+    cached_generated_texts = []
+    # Print the outputs. You should see the same outputs as before.
+    print("-" * 50)
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        cached_generated_texts.append(generated_text)
+        print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}")
+        print("-" * 50)
+
+    # Compare the results and display the speedup
+    generated_same = all([
+        regular_generated_texts[i] == cached_generated_texts[i]
+        for i in range(len(prompts))
+    ])
+    print(f"Generated answers are the same: {generated_same}")
+
+
+if __name__ == "__main__":
+    main()
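
The closing comment in the new code says "Compare the results and display the speedup", but the example only checks that the two runs produce identical text. A minimal sketch of how the timing side could be added, assuming the `generating_prompts` and `sampling_params` defined earlier in the file; the `timed_generate` helper is hypothetical, not part of the vLLM API:

import time

def timed_generate(llm, prompts, sampling_params):
    # Hypothetical helper: wrap LLM.generate() with a wall-clock timer.
    start = time.perf_counter()
    outputs = llm.generate(prompts, sampling_params)
    return outputs, time.perf_counter() - start

# Usage with the two LLM objects from the example (timed one after the other,
# since regular_llm is destroyed before prefix_cached_llm is created):
#   _, regular_s = timed_generate(regular_llm, generating_prompts, sampling_params)
#   ...
#   _, cached_s = timed_generate(prefix_cached_llm, generating_prompts, sampling_params)
#   print(f"Prefix caching speedup: {regular_s / cached_s:.2f}x")

Because the warmup call has already computed the shared prefix's KV cache, the second timed run measures the cached path rather than cold-start behavior.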