This repository was archived by the owner on Sep 10, 2025. It is now read-only.

Commit 925b7bd

Make API and server compatible with OpenAI API (#1043)
* Make API and server compatible with OpenAI API

  Modified encode_message and encode_dialog_prompt methods in generate.py to resolve Streamlit errors.

* Make API and server compatible with OpenAI API

  Added changes to generate.py and browser.py.
1 parent 7d98090 commit 925b7bd

File tree

2 files changed: 4 additions, 2 deletions


browser/browser.py

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
+import time
 import streamlit as st
 from openai import OpenAI
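browser.py already imports the OpenAI client alongside Streamlit; the point of the commit is that this front end talks to the local server through an OpenAI-compatible API. Below is a minimal sketch of that interaction, not the actual browser.py code: the base_url, api_key, and model name are illustrative assumptions, and only documented streamlit / openai calls are used.

import streamlit as st
from openai import OpenAI

# Endpoint, key, and model below are assumptions for illustration only.
client = OpenAI(base_url="http://127.0.0.1:5000/v1", api_key="not-needed")

st.title("Chat")
if prompt := st.chat_input("Say something"):
    st.chat_message("user").write(prompt)
    # Messages are plain dicts with "role"/"content" keys, i.e. the OpenAI
    # chat format that the server-side changes in generate.py now accept.
    response = client.chat.completions.create(
        model="llama3",
        messages=[{"role": "user", "content": prompt}],
    )
    st.chat_message("assistant").write(response.choices[0].message.content)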

generate.py

Lines changed: 3 additions & 2 deletions
@@ -56,7 +56,7 @@ def encode_header(self, role) -> List[int]:
         return tokens
 
     def encode_message(self, message) -> List[int]:
-        tokens = self.encode_header(message.role)
+        tokens = self.encode_header(message["role"])
         tokens.extend(
             self.tokenizer.encode(message["content"].strip(), bos=False, eos=False)
         )
@@ -69,7 +69,7 @@ def encode_dialog_prompt(self, dialog) -> List[int]:
         for message in dialog:
             tokens.extend(self.encode_message(message))
         # Add the start of an assistant message for the model to complete.
-        tokens.extend(self.encode_header({"role": "assistant", "content": ""}))
+        tokens.extend(self.encode_header("assistant"))  # Pass role directly as a string
         return tokens
 
 
@@ -905,3 +905,4 @@ def main(args):
     check_args(args, verb)
     args = arg_init(args)
     main(args)
+
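The effect of the generate.py change: encode_message now reads the role with dict access (message["role"]) instead of attribute access, and encode_dialog_prompt passes the role string directly to encode_header, so a dialog can be the plain list of {"role", "content"} dicts used by the OpenAI chat API. A minimal, self-contained sketch of the patched logic follows; the class and tokenizer here are simplified stand-ins, not the actual torchchat implementation.

from typing import List


class ToyTokenizer:
    """Stand-in tokenizer: encodes text as character codes (illustration only)."""

    def encode(self, text: str, bos: bool = False, eos: bool = False) -> List[int]:
        return [ord(ch) for ch in text]


class ChatFormat:
    """Simplified mirror of the patched methods; real header tokens omitted."""

    def __init__(self, tokenizer: ToyTokenizer) -> None:
        self.tokenizer = tokenizer

    def encode_header(self, role: str) -> List[int]:
        # The real method wraps the role in special header tokens; here we
        # just encode the role string itself.
        return self.tokenizer.encode(role)

    def encode_message(self, message: dict) -> List[int]:
        # Dict access ("role"/"content") matches the OpenAI chat message format.
        tokens = self.encode_header(message["role"])
        tokens.extend(
            self.tokenizer.encode(message["content"].strip(), bos=False, eos=False)
        )
        return tokens

    def encode_dialog_prompt(self, dialog: List[dict]) -> List[int]:
        tokens: List[int] = []
        for message in dialog:
            tokens.extend(self.encode_message(message))
        # Start of an assistant message for the model to complete; the role
        # is now passed directly as a string.
        tokens.extend(self.encode_header("assistant"))
        return tokens


# Usage: an OpenAI-style dialog, as browser.py / the server now send it.
dialog = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(ChatFormat(ToyTokenizer()).encode_dialog_prompt(dialog))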
