Commit ef369d8

[FEAT] Support for stream_completion in LLM Wrapper (#86)
* Exception handling for Prompt Service

* Supporting stream completion in LLM wrapper

* Update src/unstract/sdk/__init__.py

Signed-off-by: Chandrasekharan M <[email protected]>

---------

Signed-off-by: Chandrasekharan M <[email protected]>
Co-authored-by: Chandrasekharan M <[email protected]>
1 parent: 8e26f7b · commit: ef369d8

2 files changed: +15, -1 lines

src/unstract/sdk/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -1,4 +1,4 @@
-__version__ = "0.42.0"
+__version__ = "0.43.0"
 
 
 def get_sdk_version():

src/unstract/sdk/llm.py

Lines changed: 14 additions & 0 deletions

@@ -2,6 +2,7 @@
 import re
 from typing import Any, Optional
 
+from llama_index.core.base.llms.types import CompletionResponseGen
 from llama_index.core.llms import LLM as LlamaIndexLLM
 from llama_index.core.llms import CompletionResponse
 from openai import APIError as OpenAIAPIError
@@ -80,6 +81,19 @@ def complete(
         except Exception as e:
             raise parse_llm_err(e) from e
 
+    def stream_complete(
+        self,
+        prompt: str,
+        **kwargs: Any,
+    ) -> CompletionResponseGen:
+        try:
+            response: CompletionResponseGen = self._llm_instance.stream_complete(
+                prompt, **kwargs
+            )
+            return response
+        except Exception as e:
+            raise parse_llm_err(e) from e
+
     def _get_llm(self, adapter_instance_id: str) -> LlamaIndexLLM:
         """Returns the LLM object for the tool.

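The new stream_complete method mirrors complete but yields output incrementally: CompletionResponseGen is llama-index's generator of CompletionResponse chunks, each carrying the newly generated text in its delta field. Below is a minimal consumption sketch; it assumes an already-constructed instance of this SDK's LLM wrapper (named llm here), and the helper name and prompt are illustrative, not part of this commit.

# A minimal sketch of consuming the new stream_complete method.
# Assumes `llm` is an already-constructed instance of this SDK's LLM
# wrapper; construction arguments are outside the scope of this diff.

def print_streamed_completion(llm, prompt: str) -> None:
    # stream_complete returns a CompletionResponseGen: a generator of
    # llama-index CompletionResponse chunks.
    for chunk in llm.stream_complete(prompt):
        # `chunk.delta` carries only the newly generated text for this
        # chunk; `chunk.text` accumulates everything generated so far.
        print(chunk.delta or "", end="", flush=True)
    print()

# Illustrative call:
# print_streamed_completion(llm, "Summarize this document in one line.")

Because errors are normalized through parse_llm_err (the same path complete uses), callers can handle streaming and non-streaming failures uniformly.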