Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
80 commits
Select commit Hold shift + click to select a range
b36faff
chore: update document deletion logic to handle paragraph IDs and rel…
liuruibin Nov 3, 2025
4340a0a
fix: update WeCom QR code handling to use iframe and include callback…
wxg0103 Nov 4, 2025
ea7ea85
fix: adjust iframe styling for WeCom QR code component
wxg0103 Nov 4, 2025
8d3ee5d
fix: remove WeCom JSSDK dependency from package.json
wxg0103 Nov 4, 2025
7b0df86
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
e84391b
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
5ea3023
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
00acd37
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
5fcf122
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
41b60ed
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
d35cf16
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
70e4f5f
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
3c8ca22
perf: revert preload.
liqiang-fit2cloud Nov 4, 2025
89e7e7b
fix: Revise the copy
wangdan-fit2cloud Nov 4, 2025
e497ce2
feat: add subprocess handling for local model and conditional post ha…
liuruibin Nov 4, 2025
ba4e0b7
feat: Folder move
zhanweizhang7 Nov 4, 2025
2814e48
feat: Folder move order by name
zhanweizhang7 Nov 4, 2025
45f97fc
revert: add subprocess handling for local model and conditional post …
liuruibin Nov 4, 2025
607ed83
perf: Optimize file directory style
wangdan-fit2cloud Nov 5, 2025
568cc74
fix: remove env from sandbox.
liqiang-fit2cloud Nov 5, 2025
63beec4
feat: add Gunicorn preload option and initialize shared resources
liuruibin Nov 5, 2025
7a9c67d
fix: add empty environment variable dictionary for subprocess execution
liuruibin Nov 5, 2025
f417a5a
revert: add Gunicorn preload option and initialize shared resources
liuruibin Nov 5, 2025
1186752
feat: add MAXKB_SANDBOX_PYTHON_BANNED_HOSTS env to ban host for sandb…
liqiang-fit2cloud Nov 5, 2025
e4b90c6
feat: add MAXKB_SANDBOX_PYTHON_BANNED_HOSTS env to ban host for sandb…
liqiang-fit2cloud Nov 5, 2025
cb6835c
feat: add MAXKB_SANDBOX_PYTHON_BANNED_HOSTS env to ban host for sandb…
liqiang-fit2cloud Nov 5, 2025
c34c37f
fix: Fixed the issue where resource authorization modifications would…
wangdan-fit2cloud Nov 5, 2025
aed98a5
feat: add MAXKB_SANDBOX_PYTHON_BANNED_HOSTS env to ban host for sandb…
liqiang-fit2cloud Nov 5, 2025
3445e05
feat: add MAXKB_SANDBOX_PYTHON_BANNED_HOSTS env to ban host for sandb…
liqiang-fit2cloud Nov 5, 2025
fa1aee6
perf: Memory optimization (#4318)
shaohuzhang1 Nov 5, 2025
977019b
fix: Application workflow, after the loop ends, there is a recall nod…
shaohuzhang1 Nov 5, 2025
0c20a7d
fix: set environment variables for banned hosts and preload library i…
liuruibin Nov 5, 2025
de1147f
chore(deps): bump django in the pip group across 1 directory (#4320)
dependabot[bot] Nov 6, 2025
2508e38
fix: Set the startup environment variables (#4321)
shaohuzhang1 Nov 6, 2025
7623597
refactor: rename MAXKB_SANDBOX_PYTHON_BANNED_HOSTS to MAXKB_SANDBOX_P…
liqiang-fit2cloud Nov 6, 2025
983ccdc
fix: set environment variables for LD_PRELOAD and SANDBOX_ALLOW_HOSTS…
liuruibin Nov 6, 2025
88b73d0
refactor: rename MAXKB_SANDBOX_PYTHON_ALLOW_HOSTS_REGEXES to MAXKB_SA…
liqiang-fit2cloud Nov 6, 2025
c08056e
fix: Set the startup environment variables (#4322)
shaohuzhang1 Nov 6, 2025
be43d26
fix: Rearrange model dependency (#4324)
shaohuzhang1 Nov 6, 2025
d3ad608
refactor: rename MAXKB_SANDBOX_PYTHON_ALLOW_HOSTS_REGEXES to MAXKB_SA…
liqiang-fit2cloud Nov 6, 2025
be6679e
refactor: forbidden access by hostname or docker ip.
liqiang-fit2cloud Nov 6, 2025
c32a763
fix: dependency package (#4325)
shaohuzhang1 Nov 6, 2025
bba5f9b
fix: update globals_v initialization to use an empty dictionary in to…
liuruibin Nov 6, 2025
0b330c8
fix: dependency package (#4328)
shaohuzhang1 Nov 6, 2025
2dc6d81
feat: Folder directories support moving or dragging.
wangdan-fit2cloud Nov 6, 2025
ed14074
refactor: read SANDBOX_BANNED_HOSTS from file instead of env.
liqiang-fit2cloud Nov 7, 2025
1600160
refactor: never restart local_model worker if LOCAL_MODEL_HOST_WORKER=1.
liqiang-fit2cloud Nov 7, 2025
a1507c1
refactor: never restart scheduler worker if worker=1.
liqiang-fit2cloud Nov 7, 2025
aa269c1
refactor: set timeout for workers.
liqiang-fit2cloud Nov 7, 2025
4e369da
fix: correct parameter name for graceful timeout in Gunicorn configur…
liuruibin Nov 7, 2025
e0f9ada
fix: add .SANDBOX_BANNED_HOSTS to .gitignore
liuruibin Nov 7, 2025
e30cf91
refactor: ban host.docker.internal access by default.
liqiang-fit2cloud Nov 7, 2025
d26626f
fix: ensure worker count is properly converted to integer for max_req…
liuruibin Nov 7, 2025
28ec833
refactor: set MAXKB_SANDBOX_HOME env.
liqiang-fit2cloud Nov 7, 2025
8f3cd55
fix: fix typo.
liqiang-fit2cloud Nov 7, 2025
df1f1d6
fix: ensure worker count is properly converted to integer for max_req…
liqiang-fit2cloud Nov 7, 2025
bb58bbb
feat: Using the HuggingFace tokenizer (#4329)
shaohuzhang1 Nov 7, 2025
f6ebaa7
feat: Local model validation using local_madel (#4330)
shaohuzhang1 Nov 7, 2025
bb57783
refactor: change model path in MKTokenizer.
liqiang-fit2cloud Nov 7, 2025
474addc
refactor: add cl100k_base.tiktoken tokenizer.
liqiang-fit2cloud Nov 7, 2025
6b3a2d7
refactor: add cl100k_base.tiktoken tokenizer. (#4331)
shaohuzhang1 Nov 7, 2025
9b04eee
fix: add optional chaining to breadcrumbData length check
wxg0103 Nov 7, 2025
45bdaf5
refactor: remove scheduler service and related code
liuruibin Nov 7, 2025
706f387
refactor: enhance init_scheduler to include xpack job execution
liuruibin Nov 7, 2025
c9f330b
refactor: delete .SANDBOX_BANNED_HOSTS file if SANDBOX_PYTHON_BANNED_…
liqiang-fit2cloud Nov 10, 2025
663a6f2
refactor: throw EACCES error for IP, throw EAI_FAIL for domain name.
liqiang-fit2cloud Nov 10, 2025
fe5608d
fix: When having read permission for the application, the application…
zhanweizhang7 Nov 10, 2025
756d73e
fix: Application read permission
zhanweizhang7 Nov 10, 2025
35a246e
chore: update element-plus to 2.11.7 and jspdf to 3.0.3 in package.json
liuruibin Nov 11, 2025
2aa2d71
fix: remove streaming parameter from GeminiImage initialization
wxg0103 Nov 11, 2025
7468bfd
feat: The dividing line in the directory section can be dragged and d…
wangdan-fit2cloud Nov 11, 2025
923f3f1
fix: Fixing the display of conversation time during dialogue
wangdan-fit2cloud Nov 11, 2025
2050adf
refactor: never restart core worker if worker=1.
liqiang-fit2cloud Nov 11, 2025
cd89dd6
refactor: add code comment.
liqiang-fit2cloud Nov 11, 2025
eb0b8d6
chore: adjust default worker count calculation in command.py
liuruibin Nov 11, 2025
7135614
refactor: rename env CORE_WORKER to MAXKB_CORE_WORKER.
liqiang-fit2cloud Nov 12, 2025
d7127b9
fix: After adding form collection at the loop node, the variables fro…
shaohuzhang1 Nov 12, 2025
10dbda2
fix: include accessToken in WeCom QR code iframe URL
wxg0103 Nov 12, 2025
2194b8b
fix: Whisper stt model
zhanweizhang7 Nov 12, 2025
8b9fb1f
perf: Memory optimization
zhanweizhang7 Nov 14, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/build-and-push-vector-model.yml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ on:
inputs:
dockerImageTag:
description: 'Docker Image Tag'
default: 'v2.0.2'
default: 'v2.0.3'
required: true
architecture:
description: 'Architecture'
Expand Down
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -188,4 +188,5 @@ apps/models_provider/impl/*/icon/
apps/models_provider/impl/tencent_model_provider/credential/stt.py
apps/models_provider/impl/tencent_model_provider/model/stt.py
tmp/
config.yml
config.yml
.SANDBOX_BANNED_HOSTS
14 changes: 10 additions & 4 deletions apps/application/flow/step_node/loop_node/impl/base_loop_node.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,11 +224,17 @@ def handler(self, workflow):

class BaseLoopNode(ILoopNode):
def save_context(self, details, workflow_manage):
    """Restore this loop node's runtime context from a persisted ``details`` dict.

    Merges the persisted per-node context (without clobbering values already
    set during the current run), re-applies the loop bookkeeping fields
    (``params``, ``run_time``, current index/item), and finally overlays any
    loop-scoped variables captured in ``loop_context_data``.

    :param details: persisted node details produced by a previous run.
    :param workflow_manage: owning workflow manager (unused here, kept for the
        common ``save_context`` signature).
    """
    # Merge persisted context entries without overwriting values that were
    # already set on this node during the current run.
    for key, value in details['context'].items():
        if key not in self.context:
            self.context[key] = value
    self.context['params'] = details.get('params')
    self.context['run_time'] = details.get('run_time')
    self.context['index'] = details.get('current_index')
    self.context['item'] = details.get('current_item')
    # Loop-scoped variables always win over the merged context above.
    for key, value in (details.get('loop_context_data') or {}).items():
        self.context[key] = value
    # The loop node itself contributes no answer text (its child nodes do);
    # the earlier dead-store of str(details.get('result')) was removed since
    # it was unconditionally overwritten here.
    self.answer_text = ""

def get_answer_list(self) -> List[Answer] | None:
result = []
Expand Down
65 changes: 51 additions & 14 deletions apps/application/flow/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@
"""
import asyncio
import json
import traceback
import queue
import threading
from typing import Iterator

from django.http import StreamingHttpResponse
Expand Down Expand Up @@ -227,6 +228,30 @@ def generate_tool_message_template(name, context):
return tool_message_template % (name, tool_message_json_template % (context))


# Process-wide singleton asyncio event loop, driven by one daemon thread.
_global_loop = None
_loop_thread = None
_loop_lock = threading.Lock()


def get_global_loop():
    """Return the shared global event loop, creating it (and its driver
    thread) on first use.

    The loop runs forever on a daemon thread named ``GlobalAsyncLoop`` so
    synchronous callers can schedule coroutines onto it with
    ``asyncio.run_coroutine_threadsafe``.

    NOTE(review): the singleton is never recreated — if this module is
    imported before a worker fork (e.g. gunicorn preload), the child would
    inherit ``_global_loop`` without a live driver thread; confirm workers
    import this lazily post-fork.
    """
    global _global_loop, _loop_thread

    # Double-checked creation guarded by a lock: only the first caller pays
    # the cost of creating the loop and starting the driver thread.
    with _loop_lock:
        if _global_loop is None:
            _global_loop = asyncio.new_event_loop()

            def run_forever():
                asyncio.set_event_loop(_global_loop)
                _global_loop.run_forever()

            # Daemon thread: must not block interpreter shutdown.
            _loop_thread = threading.Thread(target=run_forever, daemon=True, name="GlobalAsyncLoop")
            _loop_thread.start()

    return _global_loop


async def _yield_mcp_response(chat_model, message_list, mcp_servers, mcp_output_enable=True):
client = MultiServerMCPClient(json.loads(mcp_servers))
tools = await client.get_tools()
Expand All @@ -242,19 +267,31 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers, mcp_output_


def mcp_response_generator(chat_model, message_list, mcp_servers, mcp_output_enable=True):
    """Bridge the async MCP chat stream into a synchronous generator.

    Schedules ``_yield_mcp_response`` on the shared global event loop (no
    per-call loop is created) and relays its chunks to the calling thread
    through a thread-safe queue.

    :param chat_model: chat model used to answer.
    :param message_list: conversation messages to send.
    :param mcp_servers: JSON string describing the MCP servers.
    :param mcp_output_enable: whether intermediate tool output is emitted.
    :raises: re-raises any exception produced inside the async generator.
    """
    result_queue = queue.Queue()
    loop = get_global_loop()  # shared loop — never create one per request

    async def _run():
        try:
            async_gen = _yield_mcp_response(chat_model, message_list, mcp_servers, mcp_output_enable)
            async for chunk in async_gen:
                result_queue.put(('data', chunk))
        except Exception as e:
            maxkb_logger.error(f'Exception: {e}', exc_info=True)
            result_queue.put(('error', e))
        finally:
            # Always signal completion so the consumer loop can terminate.
            result_queue.put(('done', None))

    # Keep the future so the coroutine can be cancelled if the consumer
    # abandons this generator early; previously it was discarded, leaving
    # the coroutine running and filling the queue forever.
    future = asyncio.run_coroutine_threadsafe(_run(), loop)
    try:
        while True:
            msg_type, data = result_queue.get()
            if msg_type == 'done':
                break
            if msg_type == 'error':
                raise data
            yield data
    finally:
        # Runs on normal completion, on error, and on GeneratorExit (caller
        # stopped iterating): stop the scheduled coroutine if still active.
        if not future.done():
            future.cancel()


async def anext_async(agen):
Expand Down
76 changes: 57 additions & 19 deletions apps/application/flow/workflow_manage.py
Original file line number Diff line number Diff line change
def run_block(self, language='zh'):
    """Run the whole workflow to completion and return one aggregated
    (non-streaming) response.

    :param language: language used when running the chain.
    :return: block response containing the joined answer text, token counts
             and the flat answer list.
    """
    try:
        self.run_chain_async(None, None, language)
        # NOTE(review): busy-wait — assumes is_run() blocks/polls with its own
        # timeout; confirm it does not spin a CPU core.
        while self.is_run():
            pass
        details = self.get_runtime_details()
        # Token totals across all nodes; tolerate nodes without token fields.
        message_tokens = sum([row.get('message_tokens') for row in details.values() if
                              'message_tokens' in row and row.get('message_tokens') is not None])
        answer_tokens = sum([row.get('answer_tokens') for row in details.values() if
                             'answer_tokens' in row and row.get('answer_tokens') is not None])
        answer_text_list = self.get_answer_text_list()
        answer_text = '\n\n'.join(
            '\n\n'.join([a.get('content') for a in answer]) for answer in
            answer_text_list)
        # Flatten the list-of-lists of answers into a single list.
        answer_list = reduce(lambda pre, _n: [*pre, *_n], answer_text_list, [])
        self.work_flow_post_handler.handler(self)

        res = self.base_to_response.to_block_response(self.params['chat_id'],
                                                      self.params['chat_record_id'], answer_text, True,
                                                      message_tokens, answer_tokens,
                                                      _status=status.HTTP_200_OK if self.status == 200 else status.HTTP_500_INTERNAL_SERVER_ERROR,
                                                      other_params={'answer_list': answer_list})
    finally:
        # Always drop per-run object references so a long-lived manage object
        # does not pin large graphs in memory (memory optimization).
        self._cleanup()
    return res

def _cleanup(self):
    """Release all per-run object references so they can be garbage collected."""
    # Clear list members.
    self.future_list.clear()
    self.field_list.clear()
    self.global_field_list.clear()
    self.chat_field_list.clear()
    self.image_list.clear()
    self.video_list.clear()
    self.document_list.clear()
    self.audio_list.clear()
    self.other_list.clear()
    # node_context may not exist on every instance — clear only if present.
    if hasattr(self, 'node_context'):
        self.node_context.clear()

    # Clear dict members.
    self.context.clear()
    self.chat_context.clear()
    self.form_data.clear()

    # Drop object references.
    # NOTE(review): after this runs, any further method call that reads these
    # attributes (e.g. self.params, self.lock) will fail — confirm the manage
    # object is never reused after cleanup.
    self.node_chunk_manage = None
    self.work_flow_post_handler = None
    self.flow = None
    self.start_node = None
    self.current_node = None
    self.current_result = None
    self.chat_record = None
    self.base_to_response = None
    self.params = None
    self.lock = None

def run_stream(self, current_node, node_result_future, language='zh'):
"""
Expand Down Expand Up @@ -307,6 +344,7 @@ def await_result(self):
'',
[],
'', True, message_tokens, answer_tokens, {})
self._cleanup()

def run_chain_async(self, current_node, node_result_future, language='zh'):
future = executor.submit(self.run_chain_manage, current_node, node_result_future, language)
Expand Down
61 changes: 58 additions & 3 deletions apps/application/serializers/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
@date:2025/6/9 13:42
@desc:
"""
from datetime import datetime
from typing import List

from django.core.cache import cache
Expand Down Expand Up @@ -226,10 +225,66 @@ def append_chat_record(self, chat_record: ChatRecord):
chat_record.save()
ChatCountSerializer(data={'chat_id': self.chat_id}).update_chat()

def to_dict(self):
    """Serialize this ChatInfo into a plain, cache-friendly dict.

    Chat records are serialized via ``chat_record_to_map`` so the result
    contains only builtin types; ``map_to_chat_info`` performs the inverse.
    """
    serialized_records = [self.chat_record_to_map(record) for record in self.chat_record_list]
    return {
        'chat_id': self.chat_id,
        'chat_user_id': self.chat_user_id,
        'chat_user_type': self.chat_user_type,
        'knowledge_id_list': self.knowledge_id_list,
        'exclude_document_id_list': self.exclude_document_id_list,
        'application_id': self.application_id,
        'chat_record_list': serialized_records,
        'debug': self.debug,
    }

def chat_record_to_map(self, chat_record):
    """Serialize a ChatRecord into a plain dict of builtin types for caching.

    Inverse of ``map_to_chat_record``; copies each attribute verbatim.
    """
    field_names = ('id', 'chat_id', 'vote_status', 'problem_text', 'answer_text',
                   'answer_text_list', 'message_tokens', 'answer_tokens', 'const',
                   'details', 'improve_paragraph_id_list', 'run_time', 'index')
    return {name: getattr(chat_record, name) for name in field_names}

@staticmethod
def map_to_chat_record(chat_record_dict):
    """Rebuild a ChatRecord model instance from its cached dict form.

    Inverse of ``chat_record_to_map``.

    :param chat_record_dict: dict produced by ``chat_record_to_map``.
    :return: a (non-persisted) ChatRecord instance.
    """
    # BUG FIX: the constructed ChatRecord was previously discarded (no
    # `return`), so callers such as map_to_chat_info got a list of None.
    return ChatRecord(id=chat_record_dict.get('id'),
                      chat_id=chat_record_dict.get('chat_id'),
                      vote_status=chat_record_dict.get('vote_status'),
                      problem_text=chat_record_dict.get('problem_text'),
                      answer_text=chat_record_dict.get('answer_text'),
                      answer_text_list=chat_record_dict.get('answer_text_list'),
                      message_tokens=chat_record_dict.get('message_tokens'),
                      answer_tokens=chat_record_dict.get('answer_tokens'),
                      const=chat_record_dict.get('const'),
                      details=chat_record_dict.get('details'),
                      improve_paragraph_id_list=chat_record_dict.get('improve_paragraph_id_list'),
                      run_time=chat_record_dict.get('run_time'),
                      index=chat_record_dict.get('index'))

def set_cache(self):
    """Cache the serialized form of this chat for 30 minutes.

    Stores the ``to_dict()`` output (plain builtin types, cheap to pickle)
    under the CHAT_INFO cache version, rather than the live ChatInfo object.
    """
    cache.set(Cache_Version.CHAT.get_key(key=self.chat_id), self.to_dict(),
              version=Cache_Version.CHAT_INFO.get_version(),
              timeout=60 * 30)

@staticmethod
def map_to_chat_info(chat_info_dict):
    """Rebuild a ChatInfo instance from its cached dict form (see ``to_dict``).

    :param chat_info_dict: dict previously produced by ``to_dict``.
    :return: a ChatInfo with its chat records rebuilt via ``map_to_chat_record``.
    """
    # Robustness: tolerate a cached entry whose record list is missing/None.
    record_dicts = chat_info_dict.get('chat_record_list') or []
    # NOTE(review): 'debug' is serialized by to_dict but not restored here —
    # confirm the ChatInfo constructor default is acceptable for cached chats.
    return ChatInfo(chat_info_dict.get('chat_id'), chat_info_dict.get('chat_user_id'),
                    chat_info_dict.get('chat_user_type'), chat_info_dict.get('knowledge_id_list'),
                    chat_info_dict.get('exclude_document_id_list'),
                    chat_info_dict.get('application_id'),
                    [ChatInfo.map_to_chat_record(c_r) for c_r in record_dicts])

@staticmethod
def get_cache(chat_id):
    """Load a chat from cache.

    :param chat_id: id of the chat to look up.
    :return: a ChatInfo rebuilt from the cached dict, or None on cache miss.
    """
    chat_info_dict = cache.get(Cache_Version.CHAT.get_key(key=chat_id),
                               version=Cache_Version.CHAT_INFO.get_version())
    if chat_info_dict:
        return ChatInfo.map_to_chat_info(chat_info_dict)
    return None
8 changes: 4 additions & 4 deletions apps/application/views/application_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,8 +125,8 @@ class OpenView(APIView):
responses=None,
tags=[_('Application')] # type: ignore
)
@has_permissions(PermissionConstants.APPLICATION_EDIT.get_workspace_application_permission(),
PermissionConstants.APPLICATION_EDIT.get_workspace_permission_workspace_manage_role(),
@has_permissions(PermissionConstants.APPLICATION_READ.get_workspace_application_permission(),
PermissionConstants.APPLICATION_READ.get_workspace_permission_workspace_manage_role(),
ViewPermission([RoleConstants.USER.get_workspace_role()],
[PermissionConstants.APPLICATION.get_workspace_application_permission()],
CompareConstants.AND),
Expand Down Expand Up @@ -167,8 +167,8 @@ class PromptGenerateView(APIView):
responses=None,
tags=[_('Application')] # type: ignore
)
@has_permissions(PermissionConstants.APPLICATION_EDIT.get_workspace_application_permission(),
PermissionConstants.APPLICATION_EDIT.get_workspace_permission_workspace_manage_role(),
@has_permissions(PermissionConstants.APPLICATION_READ.get_workspace_application_permission(),
PermissionConstants.APPLICATION_READ.get_workspace_permission_workspace_manage_role(),
ViewPermission([RoleConstants.USER.get_workspace_role()],
[PermissionConstants.APPLICATION.get_workspace_application_permission()],
CompareConstants.AND),
Expand Down
4 changes: 2 additions & 2 deletions apps/application/views/application_chat_record.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,8 @@ class ApplicationChatRecordOperateAPI(APIView):
)
@has_permissions(PermissionConstants.APPLICATION_CHAT_LOG_READ.get_workspace_application_permission(),
PermissionConstants.APPLICATION_CHAT_LOG_READ.get_workspace_permission_workspace_manage_role(),
PermissionConstants.APPLICATION_EDIT.get_workspace_application_permission(),
PermissionConstants.APPLICATION_EDIT.get_workspace_permission_workspace_manage_role(),
PermissionConstants.APPLICATION_READ.get_workspace_application_permission(),
PermissionConstants.APPLICATION_READ.get_workspace_permission_workspace_manage_role(),
ViewPermission([RoleConstants.USER.get_workspace_role()],
[PermissionConstants.APPLICATION.get_workspace_application_permission()],
CompareConstants.AND),
Expand Down
2 changes: 1 addition & 1 deletion apps/common/auth/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def to_string(self):
value = json.dumps(self.to_dict())
authentication = encrypt(value)
cache_key = hashlib.sha256(authentication.encode()).hexdigest()
authentication_cache.set(cache_key, value, version=Cache_Version.CHAT.value, timeout=60 * 60 * 2)
authentication_cache.set(cache_key, value, version=Cache_Version.CHAT.get_version(), timeout=60 * 60 * 2)
return authentication

@staticmethod
Expand Down
24 changes: 15 additions & 9 deletions apps/common/config/tokenizer_manage_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,18 +7,24 @@
@desc:
"""

import os

class MKTokenizer:
    """Thin adapter exposing a HuggingFace ``tokenizers.Tokenizer`` through
    the token-id ``encode`` interface the rest of the codebase expects."""

    def __init__(self, tokenizer):
        # Wrapped tokenizer; must expose encode(text) -> object with `.ids`.
        self.tokenizer = tokenizer

    def encode(self, text):
        """Return the list of token ids for *text*."""
        encoding = self.tokenizer.encode(text)
        return encoding.ids


class TokenizerManage:
    """Process-wide holder for the fallback BERT tokenizer."""

    # Lazily-loaded singleton Tokenizer instance.
    tokenizer = None

    @staticmethod
    def get_tokenizer():
        """Return the shared tokenizer wrapped in MKTokenizer, loading it from
        the local HuggingFace cache on first use.

        FIX: the class-level ``tokenizer`` attribute was assigned but never
        checked, so the tokenizer JSON was re-read from disk on every call;
        restore the cache guard.
        """
        from tokenizers import Tokenizer
        if TokenizerManage.tokenizer is None:
            # HuggingFace cache layout: refs/main holds the snapshot hash and
            # tokenizer.json lives under snapshots/<hash>/.
            model_path = os.path.join("/opt/maxkb-app", "model", "tokenizer", "models--bert-base-cased")
            with open(f"{model_path}/refs/main", encoding="utf-8") as f:
                # strip(): the ref file may carry a trailing newline, which
                # would break the snapshot directory path below.
                snapshot = f.read().strip()
            TokenizerManage.tokenizer = Tokenizer.from_file(f"{model_path}/snapshots/{snapshot}/tokenizer.json")
        return MKTokenizer(TokenizerManage.tokenizer)
2 changes: 2 additions & 0 deletions apps/common/constants/cache_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ class Cache_Version(Enum):
# 对话
CHAT = "CHAT", lambda key: key

CHAT_INFO = "CHAT_INFO", lambda key: key

CHAT_VARIABLE = "CHAT_VARIABLE", lambda key: key

# 应用API KEY
Expand Down
3 changes: 2 additions & 1 deletion apps/common/event/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,10 @@
@desc:
"""
from django.core.cache import cache
from django.db.models import QuerySet
from django.utils.translation import gettext as _

from .listener_manage import *

from ..constants.cache_version import Cache_Version
from ..db.sql_execute import update_execute
from ..utils.lock import RedisLock
Expand Down
Loading
Loading