-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathschedule.py
More file actions
528 lines (436 loc) · 24.9 KB
/
schedule.py
File metadata and controls
528 lines (436 loc) · 24.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
import json
import os.path
from env_loader import ROBOT_ID, NONE_RESP_NICK_NAME, WELCOME, SILICON_FLOW_API_KEY
import os
import ast
from memucrud import MemuCrud
from get_chatcontent import get_chat_content
from typing import List, Dict
import asyncio
# 全局文件锁,用于保护record.json的并发访问
record_lock = asyncio.Lock()
async def read_chat_record() -> Dict[str, List[Dict[str, str]]]:
    """Load the chat-record mapping from record/record.json.

    Access is serialized through the module-level ``record_lock``. When the
    file does not exist yet, an empty record file is created and an empty
    dict is returned.

    :return: mapping of chat id -> list of {"user": id, "assistant": id} entries
    """
    async with record_lock:
        record_path = 'record/record.json'
        if not os.path.exists(record_path):
            # First run: make sure the directory exists, then seed an empty file.
            os.makedirs('record', exist_ok=True)
            with open(record_path, 'w', encoding='utf8') as fp:
                json.dump({}, fp)
            return {}
        with open(record_path, 'r', encoding='utf8') as fp:
            return json.load(fp)
async def save_chat_record(chat_record: Dict[str, List[Dict[str, str]]]):
    """Persist the chat-record mapping to record/record.json.

    Access is serialized through the module-level ``record_lock``; the target
    directory is created on demand before writing.

    :param chat_record: mapping of chat id -> list of record entries
    """
    async with record_lock:
        # Create the directory if it is missing, then overwrite the file.
        os.makedirs('record', exist_ok=True)
        with open('record/record.json', 'w', encoding='utf8') as fp:
            json.dump(chat_record, fp, ensure_ascii=False, indent=2)
async def schedule(message, sing_nal):
    """
    Dispatch an incoming WeChat callback event to the matching handler.

    Handles: knowledge-base initialisation, auto-accepting friend requests,
    and auto-replying (via the LLM, with memu-backed memory) to private and
    group text/voice messages. Image and video messages are ignored.

    :param message: raw JSON string of the incoming message event
    :param sing_nal: reply-mode flag — 1 = manual reply (do nothing here),
                     0 = automatic reply
    :return: None
    """
    # Parse the raw message that is waiting to be handled.
    message_data = json.loads(message)
    # Validate the payload shape; bail out early when required fields are missing.
    if 'Data' not in message_data:
        print(f"消息缺少 Data 字段,跳过处理: {message_data}")
        return
    if 'FromUserName' not in message_data['Data']:
        print(f"消息缺少 FromUserName 字段,跳过处理: {message_data}")
        return
    # FromUserName must be a dict carrying its value under the 'string' key.
    from_user = message_data['Data']['FromUserName']
    if not isinstance(from_user, dict) or 'string' not in from_user:
        print(f"FromUserName 格式不正确,跳过处理: {from_user}")
        return
    from_user_name = from_user['string']
    # Object-storage bucket bootstrap (currently disabled).
    # if not os.path.exists('data/bucket.json'):
    #     from action.procedure.create_bucket import create_cb
    #     # create the bucket and set its lifecycle
    #     create_cb(message_data['RobotId'], "private")
    #     with open('data/bucket.json', 'w') as f:
    #         f.write(json.dumps({message_data['RobotId']: "private"}))
    # Manual-reply mode: a human answers, so do nothing here.
    if sing_nal == 1:
        return
    # Automatic-reply mode.
    if sing_nal == 0:
        # Knowledge-base initialisation: triggered by sending "初始化" to filehelper.
        if message_data["Data"]["Content"]["string"] == '初始化' and message_data["Data"]["ToUserName"]["string"] == "filehelper":
            # Vectorise knowledge documents into the RAG store.
            from RAG.emb_save_db import process_and_save
            # List every file under RAG/knowledge.
            filelist = os.listdir('RAG/knowledge')
            # Embed and persist each document in turn, reporting per-file status.
            for item in filelist:
                result = await process_and_save("RAG/knowledge/%s" % item)
                if result == "success":
                    from action.solo.sendtextmessage import send_text_message
                    send_text_message(
                        "filehelper",
                        "%s 文档向量化入库成功!" % item
                    )
                else:
                    from action.solo.sendtextmessage import send_text_message
                    send_text_message(
                        "filehelper",
                        "%s 文档向量化入库失败!" % item
                    )
        # Friend-request event (MsgType 37 from the system account "fmessage").
        if from_user_name == "fmessage" and message_data['Data']['MsgType'] == 37:
            from action.solo.agreeaddfriends import agree_friends
            from action.solo.sendtextmessage import send_text_message
            from action.solo.sendimagemessage import send_image_message
            # Automatically accept the friend request.
            result = agree_friends(message_data, '你好!')
            # Greet the new friend with each configured welcome message.
            for i in range(len(ast.literal_eval(WELCOME))):
                send_text_message(result.get("data", {}).get("wxid", ""), ast.literal_eval(WELCOME)[i])
        # Only react to messages that were not sent by the bot itself.
        if from_user_name != ROBOT_ID:
            # Private chat >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> logic
            if '@chatroom' not in from_user_name:
                # Private text message.
                if message_data["Data"]["MsgType"] == 1:
                    # Instantiate the memu memory client.
                    memory = MemuCrud(SILICON_FLOW_API_KEY)
                    reade_memory = await memory.read()
                    # Look up this sender's prior conversation ids in record.json.
                    chat_record = await read_chat_record()
                    record_list = chat_record.get(message_data["Data"]["FromUserName"]["string"], [])
                    # NOTE(review): record_list defaults to [] above, so this None
                    # check can never take the else branch — confirm intent.
                    if record_list is not None:
                        # Rebuild the chat history into the format the model expects.
                        memory_list: List[str] = []
                        for item in record_list:
                            if item.get('user', '') in str(reade_memory):
                                chat = get_chat_content(reade_memory, item.get('user', ''))
                                memory_list.append('user\n%s' % chat)
                            if item.get('assistant', '') in str(reade_memory):
                                chat = get_chat_content(reade_memory, item.get('assistant', ''))
                                memory_list.append('assistant\n%s' % chat)
                        memory_str = '\n'.join(memory_list)
                    else:
                        memory_str = '\n'
                    # Ask the LLM for a reply.
                    from modelapi.llm_handler import llm_client
                    response = await llm_client.generate_response(message_data['Data']['Content']['string'], memory_str)
                    # Model refused to answer: record the user message but send nothing.
                    if '拒绝' in response and len(response) <= 5:
                        # Persist the user message to the memory store.
                        user_created_id = await memory.create(message_data['Data']['Content']['string'])
                        assistant_created_id = ''
                        # Append to the sender's record list, creating it on first contact.
                        if message_data["Data"]["FromUserName"]["string"] in chat_record:
                            # Store the new ids in record.json.
                            chat_record[message_data["Data"]["FromUserName"]["string"]].append({
                                "user": user_created_id,
                                "assistant": assistant_created_id
                            })
                        else:
                            chat_record[message_data["Data"]["FromUserName"]["string"]] = [
                                {
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                }
                            ]
                        # Save the updated record back to record.json.
                        await save_chat_record(chat_record)
                        return
                    else:
                        # Persist both the user message and the model reply.
                        user_created_id = await memory.create(message_data['Data']['Content']['string'])
                        assistant_created_id = await memory.create(response)
                        # Append to the sender's record list, creating it on first contact.
                        if message_data["Data"]["FromUserName"]["string"] in chat_record:
                            # Store the new ids in record.json.
                            chat_record[message_data["Data"]["FromUserName"]["string"]].append({
                                "user": user_created_id,
                                "assistant": assistant_created_id
                            })
                        else:
                            chat_record[message_data["Data"]["FromUserName"]["string"]] = [
                                {
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                }
                            ]
                        # Save the updated record back to record.json.
                        await save_chat_record(chat_record)
                        # Send the model reply back to the user.
                        from action.solo.sendtextmessage import send_text_message
                        send_text_message(
                            from_user_name,
                            response
                        )
                # Private image message: ignored.
                if message_data["Data"]["MsgType"] == 3:
                    return
                # Private voice message.
                if message_data["Data"]["MsgType"] == 34:
                    from action.solo.downloadvoicemessage import get_voice_silk
                    from tencentapi.tencentvoicetext import voice_to_word
                    # Download the silk audio file
                    # (presumably 1 selects the private-chat variant — confirm).
                    get_voice_silk(message_data, 1)
                    # One-shot speech-to-text recognition on the downloaded file.
                    voice_result = json.loads(voice_to_word('voice/auto.silk'))
                    voice_text = voice_result.get("Result", "")
                    # Instantiate the memu memory client.
                    memory = MemuCrud(SILICON_FLOW_API_KEY)
                    reade_memory = await memory.read()
                    # Look up this sender's prior conversation ids in record.json.
                    chat_record = await read_chat_record()
                    record_list = chat_record.get(message_data["Data"]["FromUserName"]["string"], [])
                    # NOTE(review): record_list defaults to [], None branch unreachable.
                    if record_list is not None:
                        # Rebuild the chat history into the format the model expects.
                        memory_list: List[str] = []
                        for item in record_list:
                            if item.get('user', '') in str(reade_memory):
                                chat = get_chat_content(reade_memory, item.get('user', ''))
                                memory_list.append('user\n%s' % chat)
                            if item.get('assistant', '') in str(reade_memory):
                                chat = get_chat_content(reade_memory, item.get('assistant', ''))
                                memory_list.append('assistant\n%s' % chat)
                        memory_str = '\n'.join(memory_list)
                    else:
                        memory_str = '\n'
                    # Ask the LLM for a reply to the transcribed text.
                    from modelapi.llm_handler import llm_client
                    response = await llm_client.generate_response(voice_text, memory_str)
                    # Model refused to answer: record the user message but send nothing.
                    if '拒绝' in response and len(response) <= 5:
                        # Persist the transcribed user message to the memory store.
                        user_created_id = await memory.create(voice_text)
                        assistant_created_id = ''
                        # Append to the sender's record list, creating it on first contact.
                        if message_data["Data"]["FromUserName"]["string"] in chat_record:
                            # Store the new ids in record.json.
                            chat_record[message_data["Data"]["FromUserName"]["string"]].append({
                                "user": user_created_id,
                                "assistant": assistant_created_id
                            })
                        else:
                            chat_record[message_data["Data"]["FromUserName"]["string"]] = [
                                {
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                }
                            ]
                        # Save the updated record back to record.json.
                        await save_chat_record(chat_record)
                        return
                    else:
                        # Persist both the transcribed message and the model reply.
                        user_created_id = await memory.create(voice_text)
                        assistant_created_id = await memory.create(response)
                        # Append to the sender's record list, creating it on first contact.
                        if message_data["Data"]["FromUserName"]["string"] in chat_record:
                            # Store the new ids in record.json.
                            chat_record[message_data["Data"]["FromUserName"]["string"]].append({
                                "user": user_created_id,
                                "assistant": assistant_created_id
                            })
                        else:
                            chat_record[message_data["Data"]["FromUserName"]["string"]] = [
                                {
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                }
                            ]
                        # Save the updated record back to record.json.
                        await save_chat_record(chat_record)
                        # Send the model reply back to the user.
                        from action.solo.sendtextmessage import send_text_message
                        send_text_message(
                            from_user_name,
                            response
                        )
                # Private video message: ignored.
                if message_data["Data"]["MsgType"] == 43:
                    return
            # Group chat >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> logic
            if '@chatroom' in from_user_name:
                # Group text message.
                if message_data["Data"]["MsgType"] == 1:
                    # Skip senders whose nickname is on the no-reply list.
                    nick_name = message_data.get('Data', {}).get("PushContent", "").split(':')[0].strip()
                    if nick_name not in ast.literal_eval(NONE_RESP_NICK_NAME):
                        # Extract the group message body (format: "wxid:\n<message text>").
                        content_string = message_data["Data"]["Content"]["string"]
                        if ":\n" in content_string:
                            group_message_text = content_string.split(":\n", 1)[1]
                        else:
                            group_message_text = content_string
                        # Instantiate the memu memory client.
                        memory = MemuCrud(SILICON_FLOW_API_KEY)
                        reade_memory = await memory.read()
                        # Look up prior conversation ids in record.json
                        # (keyed here by the chatroom id).
                        chat_record = await read_chat_record()
                        record_list = chat_record.get(message_data["Data"]["FromUserName"]["string"], [])
                        # NOTE(review): record_list defaults to [], None branch unreachable.
                        if record_list is not None:
                            # Rebuild the chat history into the format the model expects.
                            memory_list: List[str] = []
                            for item in record_list:
                                if item.get('user', '') in str(reade_memory):
                                    chat = get_chat_content(reade_memory, item.get('user', ''))
                                    memory_list.append('user\n%s' % chat)
                                if item.get('assistant', '') in str(reade_memory):
                                    chat = get_chat_content(reade_memory, item.get('assistant', ''))
                                    memory_list.append('assistant\n%s' % chat)
                            memory_str = '\n'.join(memory_list)
                        else:
                            memory_str = '\n'
                        # Ask the LLM for a reply.
                        from modelapi.llm_handler import llm_client
                        response = await llm_client.generate_response(group_message_text, memory_str)
                        # Model refused to answer: record the user message but send nothing.
                        if '拒绝' in response and len(response) <= 5:
                            # Persist the user message to the memory store.
                            user_created_id = await memory.create(group_message_text)
                            assistant_created_id = ''
                            # Append to the record, creating it on first contact.
                            if message_data["Data"]["FromUserName"]["string"] in chat_record:
                                # Store the new ids in record.json.
                                chat_record[message_data["Data"]["FromUserName"]["string"]].append({
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                })
                            else:
                                chat_record[message_data["Data"]["FromUserName"]["string"]] = [
                                    {
                                        "user": user_created_id,
                                        "assistant": assistant_created_id
                                    }
                                ]
                            # Save the updated record back to record.json.
                            await save_chat_record(chat_record)
                            return
                        else:
                            # Persist both the user message and the model reply.
                            user_created_id = await memory.create(group_message_text)
                            assistant_created_id = await memory.create(response)
                            # NOTE(review): the refusal branch above keys the record by the
                            # chatroom id, while this branch keys by the in-group sender
                            # wxid — confirm which keying is intended.
                            group_who_wx_id = message_data["Data"]["Content"]["string"].split(':')[0].strip()
                            if group_who_wx_id in chat_record:
                                # Store the new ids in record.json.
                                chat_record[group_who_wx_id].append({
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                })
                            else:
                                chat_record[group_who_wx_id] = [
                                    {
                                        "user": user_created_id,
                                        "assistant": assistant_created_id
                                    }
                                ]
                            # Save the updated record back to record.json.
                            await save_chat_record(chat_record)
                            # Send the model reply into the chatroom.
                            from action.solo.sendtextmessage import send_text_message
                            send_text_message(
                                from_user_name,
                                response
                            )
                # Group image message: ignored.
                if message_data["Data"]["MsgType"] == 3:
                    return
                # Group voice message.
                if message_data["Data"]["MsgType"] == 34:
                    # Skip senders whose nickname appears in the no-reply list.
                    nickname = message_data.get('Data', {}).get("PushContent", "")
                    if all(item not in nickname for item in ast.literal_eval(NONE_RESP_NICK_NAME)):
                        from action.solo.downloadvoicemessage import get_voice_silk
                        from tencentapi.tencentvoicetext import voice_to_word
                        # Download the silk audio file
                        # (presumably 0 selects the group-chat variant — confirm).
                        get_voice_silk(message_data, 0)
                        # One-shot speech-to-text recognition on the downloaded file.
                        voice_result = json.loads(voice_to_word('voice/auto.silk'))
                        voice_text = voice_result.get("Result", "")
                        # Instantiate the memu memory client.
                        memory = MemuCrud(SILICON_FLOW_API_KEY)
                        reade_memory = await memory.read()
                        # Look up prior conversation ids in record.json
                        # (keyed here by the chatroom id).
                        chat_record = await read_chat_record()
                        record_list = chat_record.get(message_data["Data"]["FromUserName"]["string"], [])
                        # NOTE(review): record_list defaults to [], None branch unreachable.
                        if record_list is not None:
                            # Rebuild the chat history into the format the model expects.
                            memory_list: List[str] = []
                            for item in record_list:
                                if item.get('user', '') in str(reade_memory):
                                    chat = get_chat_content(reade_memory, item.get('user', ''))
                                    memory_list.append('user\n%s' % chat)
                                if item.get('assistant', '') in str(reade_memory):
                                    chat = get_chat_content(reade_memory, item.get('assistant', ''))
                                    memory_list.append('assistant\n%s' % chat)
                            memory_str = '\n'.join(memory_list)
                        else:
                            memory_str = '\n'
                        # Ask the LLM for a reply to the transcribed text.
                        from modelapi.llm_handler import llm_client
                        response = await llm_client.generate_response(voice_text, memory_str)
                        # Model refused to answer: record the user message but send nothing.
                        if '拒绝' in response and len(response) <= 5:
                            # Persist the transcribed user message to the memory store.
                            user_created_id = await memory.create(voice_text)
                            assistant_created_id = ''
                            # Append to the record, creating it on first contact.
                            if message_data["Data"]["FromUserName"]["string"] in chat_record:
                                # Store the new ids in record.json.
                                chat_record[message_data["Data"]["FromUserName"]["string"]].append({
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                })
                            else:
                                chat_record[message_data["Data"]["FromUserName"]["string"]] = [
                                    {
                                        "user": user_created_id,
                                        "assistant": assistant_created_id
                                    }
                                ]
                            # Save the updated record back to record.json.
                            await save_chat_record(chat_record)
                            return
                        else:
                            # Persist both the transcribed message and the model reply.
                            user_created_id = await memory.create(voice_text)
                            assistant_created_id = await memory.create(response)
                            # NOTE(review): keyed by in-group sender wxid here but by
                            # chatroom id in the refusal branch — confirm intent.
                            group_who_wx_id = message_data["Data"]["Content"]["string"].split(':')[0].strip()
                            if group_who_wx_id in chat_record:
                                # Store the new ids in record.json.
                                chat_record[group_who_wx_id].append({
                                    "user": user_created_id,
                                    "assistant": assistant_created_id
                                })
                            else:
                                chat_record[group_who_wx_id] = [
                                    {
                                        "user": user_created_id,
                                        "assistant": assistant_created_id
                                    }
                                ]
                            # Save the updated record back to record.json.
                            await save_chat_record(chat_record)
                            # Send the model reply into the chatroom.
                            from action.solo.sendtextmessage import send_text_message
                            send_text_message(
                                from_user_name,
                                response
                            )
                # Group video message: ignored.
                if message_data["Data"]["MsgType"] == 43:
                    return