3535 update_character_state_prompt ,
3636 first_chapter_draft_prompt ,
3737 next_chapter_draft_prompt ,
38- summarize_recent_chapters_prompt
38+ summarize_recent_chapters_prompt ,
39+ create_character_state_prompt
3940)
4041
4142# 章节目录解析
@@ -146,20 +147,20 @@ def init_vector_store(
146147
147148 try :
148149 class LCEmbeddingWrapper (LCEmbeddings ):
149- def embed_documents (self , doc_texts : List [str ]) -> List [List [float ]]:
150+ def embed_documents (self , texts : List [str ]) -> List [List [float ]]:
150151 return call_with_retry (
151152 func = embedding_adapter .embed_documents ,
152153 max_retries = 3 ,
153154 fallback_return = [],
154- doc_texts = doc_texts
155+ texts = texts
155156 )
156157
157- def embed_query (self , query_text : str ) -> List [float ]:
158+ def embed_query (self , query : str ) -> List [float ]:
158159 res = call_with_retry (
159160 func = embedding_adapter .embed_query ,
160161 max_retries = 3 ,
161162 fallback_return = [],
162- query_text = query_text
163+ query = query
163164 )
164165 return res
165166
@@ -195,20 +196,20 @@ def load_vector_store(
195196
196197 try :
197198 class LCEmbeddingWrapper (LCEmbeddings ):
198- def embed_documents (self , doc_texts : List [str ]) -> List [List [float ]]:
199+ def embed_documents (self , texts : List [str ]) -> List [List [float ]]:
199200 return call_with_retry (
200201 func = embedding_adapter .embed_documents ,
201202 max_retries = 3 ,
202203 fallback_return = [],
203- doc_texts = doc_texts
204+ texts = texts
204205 )
205206
206- def embed_query (self , query_text : str ) -> List [float ]:
207+ def embed_query (self , query : str ) -> List [float ]:
207208 res = call_with_retry (
208209 func = embedding_adapter .embed_query ,
209210 max_retries = 3 ,
210211 fallback_return = [],
211- query_text = query_text
212+ query = query
212213 )
213214 return res
214215
@@ -247,8 +248,9 @@ def split_text_for_vectorstore(chapter_text: str,
247248 """
248249 if not chapter_text .strip ():
249250 return []
250-
251+
251252 nltk .download ('punkt' , quiet = True )
253+ nltk .download ('punkt_tab' , quiet = True )
252254 sentences = nltk .sent_tokenize (chapter_text )
253255 if not sentences :
254256 return []
@@ -331,6 +333,7 @@ def get_relevant_context_from_vector_store(
331333 """
332334 从向量库中检索与 query 最相关的 k 条文本,拼接后返回。
333335 如果向量库加载/检索失败,则返回空字符串。
336+ 最终只返回最多2000字符的检索片段。
334337 """
335338 store = load_vector_store (embedding_adapter , filepath )
336339 if not store :
@@ -343,6 +346,9 @@ def get_relevant_context_from_vector_store(
343346 logging .info (f"No relevant documents found for query '{ query } '. Returning empty context." )
344347 return ""
345348 combined = "\n " .join ([d .page_content for d in docs ])
349+ # 限制长度最多2000字符
350+ if len (combined ) > 2000 :
351+ combined = combined [:2000 ]
346352 return combined
347353 except Exception as e :
348354 logging .warning (f"Similarity search failed: { e } " )
@@ -470,6 +476,10 @@ def Novel_architecture_generate(
470476 若在中间任何一步报错且重试多次失败,则将已经生成的内容写入 partial_architecture.json 并退出;
471477 下次调用时可从该步骤继续。
472478 最终输出 Novel_architecture.txt
479+
480+ 新增:
481+ - 在完成角色动力学设定后,依据该角色体系,使用 create_character_state_prompt 生成初始角色状态表,
482+ 并存储到 character_state.txt,后续维护更新。
473483 """
474484 os .makedirs (filepath , exist_ok = True )
475485
@@ -521,6 +531,28 @@ def Novel_architecture_generate(
521531 else :
522532 logging .info ("Step2 already done. Skipping..." )
523533
534+ # 在完成角色动力学设定后,生成初始角色状态表
535+ if "character_dynamics_result" in partial_data and "character_state_result" not in partial_data :
536+ logging .info ("Generating initial character state from character dynamics ..." )
537+ prompt_char_state_init = create_character_state_prompt .format (
538+ character_dynamics = partial_data ["character_dynamics_result" ].strip ()
539+ )
540+ character_state_init = invoke_with_cleaning (llm_adapter , prompt_char_state_init )
541+ if not character_state_init .strip ():
542+ logging .warning ("create_character_state_prompt generation failed." )
543+ # 写入目前已有结果,然后退出
544+ save_partial_architecture_data (filepath , partial_data )
545+ return
546+
547+ partial_data ["character_state_result" ] = character_state_init
548+ # 保存到文件
549+ character_state_file = os .path .join (filepath , "character_state.txt" )
550+ clear_file_content (character_state_file )
551+ save_string_to_txt (character_state_init , character_state_file )
552+
553+ save_partial_architecture_data (filepath , partial_data )
554+ logging .info ("Initial character state created and saved." )
555+
524556 # Step3: 世界观
525557 if "world_building_result" not in partial_data :
526558 logging .info ("Step3: Generating world_building_prompt ..." )
@@ -595,16 +627,32 @@ def compute_chunk_size(number_of_chapters: int, max_tokens: int) -> int:
595627 并确保 chunk_size 不会小于1或大于实际章节数。
596628 """
597629 tokens_per_chapter = 100.0
598- ratio = max_tokens / tokens_per_chapter # 例如:8192 / 100 = 81.92
599- ratio_rounded_to_10 = int (ratio // 10 ) * 10 # => 80
600- chunk_size = ratio_rounded_to_10 - 10 # => 70
630+ ratio = max_tokens / tokens_per_chapter
631+ ratio_rounded_to_10 = int (ratio // 10 ) * 10
632+ chunk_size = ratio_rounded_to_10 - 10
601633 if chunk_size < 1 :
602634 chunk_size = 1
603635 if chunk_size > number_of_chapters :
604636 chunk_size = number_of_chapters
605637 return chunk_size
606638
607639
def limit_chapter_blueprint(blueprint_text: str, limit_chapters: int = 100) -> str:
    """Trim an existing chapter-directory text to its most recent entries.

    Splits *blueprint_text* on headings of the form ``第N章`` and keeps only
    the last *limit_chapters* entries, so the text stays short enough to be
    embedded in a prompt. If no headings are found, or there are no more than
    *limit_chapters* of them, the input is returned unchanged.
    """
    # Each match runs from one "第N章" heading up to (but not including) the
    # next heading, or to the end of the text. DOTALL lets '.' span newlines.
    chapter_re = re.compile(r"(第\s*\d+\s*章.*?)(?=第\s*\d+\s*章|$)", re.DOTALL)
    found = chapter_re.findall(blueprint_text)

    # Nothing parseable, or already within the limit: pass through untouched.
    if not found or len(found) <= limit_chapters:
        return blueprint_text

    recent = found[-limit_chapters:]
    return "\n\n".join(recent).strip()
608656# ============ 2) 生成章节蓝图(新增分块逻辑 + 断点续跑) ============
609657
610658def Chapter_blueprint_generate (
@@ -621,6 +669,7 @@ def Chapter_blueprint_generate(
621669 """
622670 若 Novel_directory.txt 已存在且内容非空,则表示可能是之前的部分生成结果;
623671 解析其中已有的章节数,从下一个章节继续分块生成;
672+ 对于已有章节目录,传入时仅保留最近100章目录,避免prompt过长。
624673 否则:
625674 - 若章节数 <= chunk_size,直接一次性生成
626675 - 若章节数 > chunk_size,进行分块生成
@@ -674,10 +723,11 @@ def Chapter_blueprint_generate(
674723 current_start = max_existing_chap + 1
675724 while current_start <= number_of_chapters :
676725 current_end = min (current_start + chunk_size - 1 , number_of_chapters )
726+ limited_blueprint = limit_chapter_blueprint (final_blueprint , 100 )
677727
678728 chunk_prompt = chunked_chapter_blueprint_prompt .format (
679729 novel_architecture = architecture_text ,
680- chapter_list = final_blueprint , # 已有的章节列表文本
730+ chapter_list = limited_blueprint , # 只保留最近100章
681731 number_of_chapters = number_of_chapters ,
682732 n = current_start ,
683733 m = current_end
@@ -694,7 +744,7 @@ def Chapter_blueprint_generate(
694744
695745 final_blueprint += "\n \n " + chunk_result .strip ()
696746
697- # 实时写入,以免中途崩溃造成丢失
747+ # 实时写入
698748 clear_file_content (filename_dir )
699749 save_string_to_txt (final_blueprint .strip (), filename_dir )
700750
@@ -726,10 +776,11 @@ def Chapter_blueprint_generate(
726776 current_start = 1
727777 while current_start <= number_of_chapters :
728778 current_end = min (current_start + chunk_size - 1 , number_of_chapters )
779+ limited_blueprint = limit_chapter_blueprint (final_blueprint , 100 )
729780
730781 chunk_prompt = chunked_chapter_blueprint_prompt .format (
731782 novel_architecture = architecture_text ,
732- chapter_list = final_blueprint , # 已有的章节列表文本
783+ chapter_list = limited_blueprint , # 只保留最近100章
733784 number_of_chapters = number_of_chapters ,
734785 n = current_start ,
735786 m = current_end
@@ -969,6 +1020,7 @@ def finalize_chapter(
9691020 timeout = timeout
9701021 )
9711022
1023+ # 更新全局摘要
9721024 prompt_summary = summary_prompt .format (
9731025 chapter_text = chapter_text ,
9741026 global_summary = old_global_summary
@@ -977,6 +1029,7 @@ def finalize_chapter(
9771029 if not new_global_summary .strip ():
9781030 new_global_summary = old_global_summary
9791031
1032+ # 更新角色状态
9801033 prompt_char_state = update_character_state_prompt .format (
9811034 chapter_text = chapter_text ,
9821035 old_state = old_character_state
@@ -1040,6 +1093,7 @@ def advanced_split_content(content: str,
10401093 similarity_threshold : float = 0.7 ,
10411094 max_length : int = 500 ) -> List [str ]:
10421095 nltk .download ('punkt' , quiet = True )
1096+ nltk .download ('punkt_tab' , quiet = True )
10431097 sentences = nltk .sent_tokenize (content )
10441098 if not sentences :
10451099 return []
0 commit comments