16 changes: 8 additions & 8 deletions aperag/chat/websocket/base_consumer.py
@@ -37,8 +37,8 @@
welcome_response,
)
from aperag.db.ops import query_bot, query_user_quota
from aperag.pipeline.base_pipeline import KUBE_CHAT_DOC_QA_REFERENCES, KUBE_CHAT_RELATED_QUESTIONS, \
KUBE_CHAT_DOCUMENT_URLS
from aperag.pipeline.base_pipeline import DOC_QA_REFERENCES, RELATED_QUESTIONS, \
DOCUMENT_URLS
from aperag.utils.constant import KEY_BOT_ID, KEY_CHAT_ID, KEY_USER_ID, KEY_WEBSOCKET_PROTOCOL
from aperag.utils.utils import now_unix_milliseconds

@@ -133,14 +133,14 @@ async def receive(self, text_data=None, bytes_data=None):
return

async for tokens in self.predict(data["data"], message_id=message_id):
if tokens.startswith(KUBE_CHAT_DOC_QA_REFERENCES):
references = json.loads(tokens[len(KUBE_CHAT_DOC_QA_REFERENCES):])
if tokens.startswith(DOC_QA_REFERENCES):
references = json.loads(tokens[len(DOC_QA_REFERENCES):])
continue
if tokens.startswith(KUBE_CHAT_RELATED_QUESTIONS):
related_question = ast.literal_eval(tokens[len(KUBE_CHAT_RELATED_QUESTIONS):])
if tokens.startswith(RELATED_QUESTIONS):
related_question = ast.literal_eval(tokens[len(RELATED_QUESTIONS):])
continue
if tokens.startswith(KUBE_CHAT_DOCUMENT_URLS):
urls = ast.literal_eval(tokens[len(KUBE_CHAT_DOCUMENT_URLS):])
if tokens.startswith(DOCUMENT_URLS):
urls = ast.literal_eval(tokens[len(DOCUMENT_URLS):])
continue

# streaming response
6 changes: 3 additions & 3 deletions aperag/chat/websocket/common_consumer.py
@@ -29,7 +29,7 @@
success_response,
)
from aperag.chat.websocket.base_consumer import BaseConsumer
from aperag.pipeline.base_pipeline import KUBE_CHAT_RELATED_QUESTIONS
from aperag.pipeline.base_pipeline import RELATED_QUESTIONS
from aperag.pipeline.common_pipeline import CommonPipeline
from aperag.readers.base_readers import DEFAULT_FILE_READER_CLS
from aperag.source.utils import gen_temporary_file
@@ -97,8 +97,8 @@ async def receive(self, text_data=None, bytes_data=None):
return

async for tokens in self.predict(data["data"], message_id=message_id, file=self.file):
if tokens.startswith(KUBE_CHAT_RELATED_QUESTIONS):
related_question = ast.literal_eval(tokens[len(KUBE_CHAT_RELATED_QUESTIONS):])
if tokens.startswith(RELATED_QUESTIONS):
related_question = ast.literal_eval(tokens[len(RELATED_QUESTIONS):])
continue

# streaming response
6 changes: 3 additions & 3 deletions aperag/pipeline/base_pipeline.py
@@ -45,9 +45,9 @@ class Message(BaseModel):
llm_context_window: Optional[int] = None


KUBE_CHAT_DOC_QA_REFERENCES = "|KUBE_CHAT_DOC_QA_REFERENCES|"
KUBE_CHAT_RELATED_QUESTIONS = "|KUBE_CHAT_RELATED_QUESTIONS|"
KUBE_CHAT_DOCUMENT_URLS = "|KUBE_CHAT_DOCUMENT_URLS|"
DOC_QA_REFERENCES = "|DOC_QA_REFERENCES|"
RELATED_QUESTIONS = "|RELATED_QUESTIONS|"
DOCUMENT_URLS = "|DOCUMENT_URLS|"


class Pipeline(ABC):
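As context for the rename above, here is a minimal, self-contained sketch (not part of this diff) of how the renamed sentinel markers are consumed on the websocket side; parse_stream_token is a hypothetical helper, and the constant values simply mirror the new definitions in aperag/pipeline/base_pipeline.py:

import ast
import json

# Values mirror the renamed constants in aperag/pipeline/base_pipeline.py.
DOC_QA_REFERENCES = "|DOC_QA_REFERENCES|"
RELATED_QUESTIONS = "|RELATED_QUESTIONS|"
DOCUMENT_URLS = "|DOCUMENT_URLS|"

def parse_stream_token(token: str):
    """Classify one streamed chunk as metadata or plain response text."""
    if token.startswith(DOC_QA_REFERENCES):
        # references are serialized with json.dumps on the pipeline side
        return "references", json.loads(token[len(DOC_QA_REFERENCES):])
    if token.startswith(RELATED_QUESTIONS):
        # related questions are yielded as str(list), hence ast.literal_eval
        return "related_questions", ast.literal_eval(token[len(RELATED_QUESTIONS):])
    if token.startswith(DOCUMENT_URLS):
        return "urls", ast.literal_eval(token[len(DOCUMENT_URLS):])
    return "text", token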
4 changes: 2 additions & 2 deletions aperag/pipeline/common_pipeline.py
@@ -20,7 +20,7 @@
from langchain import PromptTemplate

from aperag.llm.prompts import COMMON_FILE_TEMPLATE
from aperag.pipeline.base_pipeline import KUBE_CHAT_RELATED_QUESTIONS, Message, Pipeline
from aperag.pipeline.base_pipeline import RELATED_QUESTIONS, Message, Pipeline
from aperag.utils.utils import now_unix_milliseconds

logger = logging.getLogger(__name__)
@@ -111,4 +111,4 @@ async def run(self, message, gen_references=False, message_id="", file=None):
related_questions.update(related_question_generate)
related_questions = list(related_questions)
random.shuffle(related_questions)
yield KUBE_CHAT_RELATED_QUESTIONS + str(related_questions[:3])
yield RELATED_QUESTIONS + str(related_questions[:3])
4 changes: 2 additions & 2 deletions aperag/pipeline/fake_pipeline.py
@@ -18,7 +18,7 @@

from aperag.pipeline.base_pipeline import Pipeline

KUBE_CHAT_DOC_QA_REFERENCES = "|KUBE_CHAT_DOC_QA_REFERENCES|"
DOC_QA_REFERENCES = "|DOC_QA_REFERENCES|"


class FakePipeline(Pipeline):
@@ -61,4 +61,4 @@ async def run(self, message, gen_references=False, message_id=""):
await self.add_ai_message(message, message_id, response, references, urls=[])

if gen_references:
yield KUBE_CHAT_DOC_QA_REFERENCES + json.dumps(references)
yield DOC_QA_REFERENCES + json.dumps(references)
10 changes: 5 additions & 5 deletions aperag/pipeline/knowledge_pipeline.py
@@ -27,8 +27,8 @@
DEFAULT_CHINESE_PROMPT_TEMPLATE_V3,
DEFAULT_MODEL_MEMOTY_PROMPT_TEMPLATES,
)
from aperag.pipeline.base_pipeline import KUBE_CHAT_DOC_QA_REFERENCES, KUBE_CHAT_RELATED_QUESTIONS, \
Message, Pipeline, KUBE_CHAT_DOCUMENT_URLS
from aperag.pipeline.base_pipeline import DOC_QA_REFERENCES, RELATED_QUESTIONS, \
Message, Pipeline, DOCUMENT_URLS
from aperag.pipeline.keyword_extractor import IKExtractor
from aperag.query.query import DocumentWithScore, get_packed_answer
from aperag.readers.base_embedding import get_embedding_model, rerank
@@ -249,11 +249,11 @@ async def run(self, message, gen_references=False, message_id=""):
related_questions.update(related_question_generate)
related_questions = list(related_questions)
random.shuffle(related_questions)
yield KUBE_CHAT_RELATED_QUESTIONS + str(related_questions[:3])
yield RELATED_QUESTIONS + str(related_questions[:3])

if gen_references:
yield KUBE_CHAT_DOC_QA_REFERENCES + json.dumps(references)
yield DOC_QA_REFERENCES + json.dumps(references)

if document_url_list:
yield KUBE_CHAT_DOCUMENT_URLS + json.dumps(document_url_list)
yield DOCUMENT_URLS + json.dumps(document_url_list)
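For completeness, a hedged sketch (not from this PR) of the producer side of the same protocol: run() is an async generator that streams plain answer text first and then yields the marker-prefixed payloads in the order shown above; answer_chunks and the payload contents here are placeholders:

import json

from aperag.pipeline.base_pipeline import DOC_QA_REFERENCES, DOCUMENT_URLS, RELATED_QUESTIONS

async def run_sketch(gen_references=False):
    answer_chunks = ["Hello", ", ", "world"]  # stand-in for the streamed LLM answer
    for chunk in answer_chunks:
        yield chunk  # plain streaming response text
    yield RELATED_QUESTIONS + str(["q1", "q2", "q3"])  # parsed with ast.literal_eval
    if gen_references:
        references = []  # shape of each reference entry is pipeline-specific
        yield DOC_QA_REFERENCES + json.dumps(references)
    yield DOCUMENT_URLS + json.dumps(["https://example.com/doc"])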
