
Commit f38b175

Add [SOSP 2024] LoongServe: Efficiently Serving Long-Context Large Language Models with Elastic Sequence Parallelism
1 parent 5ecb050

1 file changed (+19, -0)


model-inference-systems.bib

Lines changed: 19 additions & 0 deletions
@@ -151,3 +151,22 @@ @inbook{10.1145/3676536.3676741
 numpages = {9},
 code = {https://github.com/PKU-SEC-Lab/AdapMoE},
 }
+
+@inproceedings{10.1145/3694715.3695948,
+author = {Wu, Bingyang and Liu, Shengyu and Zhong, Yinmin and Sun, Peng and Liu, Xuanzhe and Jin, Xin},
+title = {LoongServe: Efficiently Serving Long-Context Large Language Models with Elastic Sequence Parallelism},
+year = {2024},
+isbn = {9798400712517},
+publisher = {Association for Computing Machinery},
+address = {New York, NY, USA},
+url = {https://doi.org/10.1145/3694715.3695948},
+doi = {10.1145/3694715.3695948},
+abstract = {The context window of large language models (LLMs) is rapidly increasing, leading to a huge variance in resource usage between different requests as well as between different phases of the same request. Restricted by static parallelism strategies, existing LLM serving systems cannot efficiently utilize the underlying resources to serve variable-length requests in different phases. To address this problem, we propose a new parallelism paradigm, elastic sequence parallelism (ESP), to elastically adapt to the variance across different requests and phases. Based on ESP, we design and build LoongServe, an LLM serving system that (1) improves computation efficiency by elastically adjusting the degree of parallelism in real-time, (2) improves communication efficiency by reducing key-value cache migration overhead and overlapping partial decoding communication with computation, and (3) improves GPU memory efficiency by reducing key-value cache fragmentation across instances. Our evaluation under diverse real-world datasets shows that LoongServe improves the throughput by up to 3.85\texttimes{} compared to chunked prefill and 5.81\texttimes{} compared to prefill-decoding disaggregation.},
+booktitle = {Proceedings of the ACM SIGOPS 30th Symposium on Operating Systems Principles},
+pages = {640--654},
+numpages = {15},
+keywords = {inference serving, large language models, elastic sequence parallelism},
+location = {Austin, TX, USA},
+series = {SOSP '24},
+code = {https://github.com/LoongServe/LoongServe},
+}
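
The abstract's core idea is choosing the degree of sequence parallelism per request phase rather than fixing it at startup. The Python below is a minimal sketch of that scheduling idea under invented assumptions; it is not LoongServe's implementation or API (ElasticScheduler, tokens_per_instance, and the shrink-to-one decode policy are all hypothetical). A long prefill fans out across many instances, and at the prefill-to-decode switch the request scales down, returning instances to a shared pool.

from dataclasses import dataclass

@dataclass
class Request:
    req_id: int
    prompt_len: int          # tokens to prefill
    phase: str = "prefill"   # "prefill" or "decode"

class ElasticScheduler:
    """Toy pool of serving instances with per-phase parallelism (hypothetical)."""

    def __init__(self, num_instances: int, tokens_per_instance: int = 4096):
        self.free = set(range(num_instances))           # idle instance ids
        self.tokens_per_instance = tokens_per_instance  # capacity heuristic
        self.assignment: dict[int, list[int]] = {}      # req_id -> instances

    def degree_of_parallelism(self, req: Request) -> int:
        if req.phase == "prefill":
            # Compute-bound phase: fan out proportionally to prompt length,
            # capped by whatever is currently idle.
            need = -(-req.prompt_len // self.tokens_per_instance)  # ceil div
            return max(1, min(need, len(self.free)))
        # Toy decode policy: shrink to a single instance. A real system
        # would size this to KV-cache capacity instead.
        return 1

    def scale(self, req: Request) -> list[int]:
        # Elastic step: release the request's current instances, then grant
        # a set sized for its current phase. Scaling down at the
        # prefill->decode switch frees capacity for other requests' prefills.
        # (Error handling for an exhausted pool is omitted.)
        self.free.update(self.assignment.pop(req.req_id, []))
        grant = [self.free.pop() for _ in range(self.degree_of_parallelism(req))]
        self.assignment[req.req_id] = grant
        return grant

sched = ElasticScheduler(num_instances=8)
req = Request(req_id=0, prompt_len=20_000)
print("prefill instances:", sched.scale(req))  # fans out to 5 of 8 instances
req.phase = "decode"
print("decode instances:", sched.scale(req))   # shrinks to 1, freeing 4

The sketch captures only the elasticity; per the abstract, LoongServe additionally reduces key-value cache migration overhead when the instance set changes and overlaps partial decoding communication with computation, none of which is modeled here.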
