1 file changed: +8 −8 lines

@@ -78,32 +78,32 @@ Install LitServe via pip ([more options](https://lightning.ai/docs/litserve/home
 pip install litserve
 ```
 
-[Example 1](#inference-pipeline-example): Toy inference pipeline with multiple models.
+[Example 1](#inference-engine-example): Toy inference pipeline with multiple models.
 [Example 2](#agent-example): Minimal agent to fetch the news (with OpenAI API).
 ([Advanced examples](#featured-examples)):
 
-### Inference pipeline example
+### Inference engine example
 
 ```python
 import litserve as ls
 
 # define the api to include any number of models, dbs, etc...
-class InferencePipeline(ls.LitAPI):
+class InferenceEngine(ls.LitAPI):
     def setup(self, device):
-        self.model1 = lambda x: x**2
-        self.model2 = lambda x: x**3
+        self.text_model = lambda x: x**2
+        self.vision_model = lambda x: x**3
 
     def predict(self, request):
         x = request["input"]
         # perform calculations using both models
-        a = self.text_model(x)
-        b = self.vision_model(x)
+        a = self.text_model(x)
+        b = self.vision_model(x)
         c = a + b
         return {"output": c}
 
 if __name__ == "__main__":
     # 12+ features like batching, streaming, etc...
-    server = ls.LitServer(InferencePipeline(max_batch_size=1), accelerator="auto")
+    server = ls.LitServer(InferenceEngine(max_batch_size=1), accelerator="auto")
     server.run(port=8000)
 ```
 
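Not part of the diff itself: a minimal client sketch for querying the server once it is running, assuming LitServe's default `/predict` route and default JSON request handling (the route name is an assumption not shown in this commit; the `{"input": ...}` payload shape follows what `predict` reads from the request):

```python
import requests  # assumes the `requests` package is installed

# Query the server started above; /predict is LitServe's default route (assumed here).
response = requests.post(
    "http://127.0.0.1:8000/predict",
    json={"input": 4.0},
)
print(response.json())  # expected: {"output": 80.0}, since 4**2 + 4**3 = 80
```

The JSON key `input` mirrors the renamed `InferenceEngine` API, whose `predict` method reads `request["input"]` and combines both toy models' outputs.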