
Commit 54dfaf0

patch : Discard is_conversation flag
1 parent c466333 commit 54dfaf0

2 files changed: +9 -11 lines changed
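For context, a minimal sketch (not code from the repo) of what this patch means for API clients. It mirrors the post-commit `UserPayload` model from the v1.py diff below and assumes Pydantic's default behavior of ignoring unknown fields, so an `is_conversation` key in a request body is now silently dropped rather than rejected:

from pydantic import BaseModel, PositiveInt

# Post-commit shape of the request model (mirrors the v1.py diff below).
class UserPayload(BaseModel):
    prompt: str
    provider: str = "phind"
    # is_conversation: bool = False  # discarded by this commit
    whole: bool = False
    max_tokens: PositiveInt = 600
    timeout: PositiveInt = 30

# A client-supplied `is_conversation` key is ignored, not an error
# (Pydantic drops unknown fields by default), and providers are built
# with the flag hardcoded to False.
payload = UserPayload(**{"prompt": "hi", "is_conversation": True})
print(payload)  # no is_conversation attribute on the model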

src/pytgpt/api/__main__.py

Lines changed: 2 additions & 2 deletions
@@ -1,3 +1,3 @@
-from pytgpt.console import API
+from pytgpt.console import API
 
-API.run()
+API.run()
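
As displayed, the entry point is unchanged in substance: running `python -m pytgpt.api` still imports `API` from `pytgpt.console` and calls `API.run()`.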

src/pytgpt/api/v1.py

Lines changed: 7 additions & 9 deletions
@@ -34,7 +34,7 @@
 class UserPayload(BaseModel):
     prompt: str
     provider: str = "phind"
-    is_conversation: bool = False
+    # is_conversation: bool = False
     whole: bool = False
     max_tokens: PositiveInt = 600
     timeout: PositiveInt = 30
@@ -67,7 +67,7 @@ class ProviderResponse(BaseModel):
 
 def init_provider(payload: UserPayload) -> object:
     return provider_map.get(payload.provider, GPT4FREE)(
-        is_conversation=payload.is_conversation,
+        is_conversation=False,  # payload.is_conversation,
         max_tokens=payload.max_tokens,
         timeout=payload.timeout,
         proxies=(
@@ -86,11 +86,10 @@ async def non_stream(payload: UserPayload) -> ProviderResponse:
 
     - `prompt` : User query.
     - `provider` : LLM provider name.
-    - `is_conversation` : Flag for chatting conversationally.
-    - `whole` : Include whole json formatted response as receiced from LLM provider.
+    - `whole` : Return whole response body instead of text only.
     - `max_tokens` : Maximum number of tokens to be generated upon completion.
     - `timeout` : Http request timeout.
-    - `proxy` : Http request proxies.
+    - `proxy` : Http request proxy.
     """
     try:
         provider_obj: LEO = init_provider(payload)
@@ -133,12 +132,11 @@ async def stream(payload: UserPayload) -> Any:
     """Stream back response as received.
 
     - `prompt` : User query.
-    - `provider` : LLM provider name. from
-    - `is_conversation` : Flag for chatting conversationally.
-    - `whole` : Include whole json formatted response as receiced from LLM provider.
+    - `provider` : LLM provider name.
+    - `whole` : Return whole response body instead of text only.
     - `max_tokens` : Maximum number of tokens to be generated upon completion.
     - `timeout` : Http request timeout.
-    - `proxy` : Http request proxies.
+    - `proxy` : Http request proxy.
     """
     return StreamingResponse(
         generate_streaming_response(payload),
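
Since `init_provider` builds a fresh provider object for every request, server-side conversation state could not survive across calls anyway; hardcoding `is_conversation=False` just makes the API explicitly stateless. A minimal sketch of the resulting behavior, with `provider_map` and `GPT4FREE` replaced by a hypothetical stub (the real imports live in v1.py):

class StubProvider:  # hypothetical stand-in for pytgpt's provider classes
    def __init__(self, is_conversation, max_tokens, timeout):
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
        self.timeout = timeout

provider_map = {"phind": StubProvider}
GPT4FREE = StubProvider  # fallback, as in v1.py

def init_provider(payload) -> object:
    # Matches the patched v1.py: whatever the client sent, every request
    # gets a stateless provider instance.
    return provider_map.get(payload.provider, GPT4FREE)(
        is_conversation=False,  # payload.is_conversation is discarded
        max_tokens=payload.max_tokens,
        timeout=payload.timeout,
    )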
