@@ -85,10 +85,10 @@ console.log('new')
8585 // 测试用例3:混合问题(指令缩进 + 代码块未闭合)
8686 test ( 'Case3: Mixed issues' , ( ) => {
8787 const input = `
88- ## FILE:test.txt
89- ## OPERATION:CREATE
90- ## NEW_CONTENT
91- function test() {`;
88+ ## FILE:test.txt
89+ ## OPERATION:CREATE
90+ ## NEW_CONTENT
91+ function test() {` ;
9292
9393 const expected = `
9494## FILE:test.txt
@@ -138,4 +138,230 @@ console.log('test')
138138
139139 assert . strictEqual ( TCVB . autoFixTCVBContent ( input ) . trim ( ) , expected ) ;
140140 } ) ;
141+ } ) ;
142+
// Test case 6: OLD_CONTENT / NEW_CONTENT bodies are missing (some of) their
// markdown code fences; autoFixTCVBContent should insert the ``` fences so
// every content block is properly wrapped, and the result must be parseable.
test('Case6: miss markdown', () => {
  const input = `
## BEGIN_TCVB
## FILE:d:/lab/GPT-SoVITS_yefanFork/tools/subfix_webui.py
## OPERATION:GLOBAL-REPLACE
## OLD_CONTENT
        with gr.Row():
            batchsize_slider = gr.Slider(
                minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False
            )
            interval_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3)
            btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1)
            btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1)

        demo.load(
            b_change_index,
            inputs=[
                index_slider,
                batchsize_slider,
            ],
            outputs=[*g_text_list, *g_audio_list, *g_checkbox_list],
        )
\`\`\`
## NEW_CONTENT
        with gr.Row():
            batchsize_slider = gr.Slider(
                minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False
            )
            interval_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3)
            btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1)
            btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1)
            btn_send_to_infer = gr.Button("Send to Inference", variant="primary", scale=1)

        def b_send_to_inference(*checkbox_list):
            selected_data = []
            for i, checkbox in enumerate(checkbox_list):
                if checkbox and g_index + i < len(g_data_json):
                    selected_data.append({
                        "audio_path": g_data_json[g_index + i][g_json_key_path],
                        "text": g_data_json[g_index + i][g_json_key_text].strip()
                    })
            if selected_data:
                try:
                    with open("shared_audio_ref.txt", "w", encoding="utf-8") as f:
                        json.dump(selected_data[0], f)
                    return gr.Info("Sent to inference page successfully!")
                except Exception as e:
                    return gr.Warning(f"Failed to send: {str(e)}")
            return gr.Warning("No audio selected!")

        btn_send_to_infer.click(
            b_send_to_inference,
            inputs=[*g_checkbox_list],
            outputs=[]
        )

        demo.load(
            b_change_index,
            inputs=[
                index_slider,
                batchsize_slider,
            ],
            outputs=[*g_text_list, *g_audio_list, *g_checkbox_list],
        )

## FILE:d:/lab/GPT-SoVITS_yefanFork/GPT_SoVITS/inference_webui.py
## OPERATION:GLOBAL-REPLACE
## OLD_CONTENT
                gr.Markdown(
                    html_left(
                        i18n("使用无参考文本模式时建议使用微调的GPT")
                        + "<br>"
                        + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")
                    )
                )
                prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1)
\`\`\`
## NEW_CONTENT
                gr.Markdown(
                    html_left(
                        i18n("使用无参考文本模式时建议使用微调的GPT")
                        + "<br>"
                        + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")
                    )
                )
                btn_load_ref = gr.Button(i18n("Load from Annotation"), variant="secondary", scale=1)
                prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1)

                def load_annotation_data():
                    try:
                        if os.path.exists("shared_audio_ref.txt"):
                            with open("shared_audio_ref.txt", "r", encoding="utf-8") as f:
                                data = json.load(f)
                            return data["audio_path"], data["text"], gr.Info("Loaded from annotation!")
                        return None, None, gr.Warning("No shared data found!")
                    except Exception as e:
                        return None, None, gr.Warning(f"Load failed: {str(e)}")

                btn_load_ref.click(
                    load_annotation_data,
                    outputs=[inp_ref, prompt_text]
                )

## END_TCVB
`;

  const expected = `
## BEGIN_TCVB
## FILE:d:/lab/GPT-SoVITS_yefanFork/tools/subfix_webui.py
## OPERATION:GLOBAL-REPLACE
## OLD_CONTENT
\`\`\`
        with gr.Row():
            batchsize_slider = gr.Slider(
                minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False
            )
            interval_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3)
            btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1)
            btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1)

        demo.load(
            b_change_index,
            inputs=[
                index_slider,
                batchsize_slider,
            ],
            outputs=[*g_text_list, *g_audio_list, *g_checkbox_list],
        )
\`\`\`
## NEW_CONTENT
\`\`\`
        with gr.Row():
            batchsize_slider = gr.Slider(
                minimum=1, maximum=g_batch, value=g_batch, step=1, label="Batch Size", scale=3, interactive=False
            )
            interval_slider = gr.Slider(minimum=0, maximum=2, value=0, step=0.01, label="Interval", scale=3)
            btn_theme_dark = gr.Button("Light Theme", link="?__theme=light", scale=1)
            btn_theme_light = gr.Button("Dark Theme", link="?__theme=dark", scale=1)
            btn_send_to_infer = gr.Button("Send to Inference", variant="primary", scale=1)

        def b_send_to_inference(*checkbox_list):
            selected_data = []
            for i, checkbox in enumerate(checkbox_list):
                if checkbox and g_index + i < len(g_data_json):
                    selected_data.append({
                        "audio_path": g_data_json[g_index + i][g_json_key_path],
                        "text": g_data_json[g_index + i][g_json_key_text].strip()
                    })
            if selected_data:
                try:
                    with open("shared_audio_ref.txt", "w", encoding="utf-8") as f:
                        json.dump(selected_data[0], f)
                    return gr.Info("Sent to inference page successfully!")
                except Exception as e:
                    return gr.Warning(f"Failed to send: {str(e)}")
            return gr.Warning("No audio selected!")

        btn_send_to_infer.click(
            b_send_to_inference,
            inputs=[*g_checkbox_list],
            outputs=[]
        )

        demo.load(
            b_change_index,
            inputs=[
                index_slider,
                batchsize_slider,
            ],
            outputs=[*g_text_list, *g_audio_list, *g_checkbox_list],
        )
\`\`\`
## FILE:d:/lab/GPT-SoVITS_yefanFork/GPT_SoVITS/inference_webui.py
## OPERATION:GLOBAL-REPLACE
## OLD_CONTENT
\`\`\`
                gr.Markdown(
                    html_left(
                        i18n("使用无参考文本模式时建议使用微调的GPT")
                        + "<br>"
                        + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")
                    )
                )
                prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1)
\`\`\`
## NEW_CONTENT
\`\`\`
                gr.Markdown(
                    html_left(
                        i18n("使用无参考文本模式时建议使用微调的GPT")
                        + "<br>"
                        + i18n("听不清参考音频说的啥(不晓得写啥)可以开。开启后无视填写的参考文本。")
                    )
                )
                btn_load_ref = gr.Button(i18n("Load from Annotation"), variant="secondary", scale=1)
                prompt_text = gr.Textbox(label=i18n("参考音频的文本"), value="", lines=5, max_lines=5, scale=1)

                def load_annotation_data():
                    try:
                        if os.path.exists("shared_audio_ref.txt"):
                            with open("shared_audio_ref.txt", "r", encoding="utf-8") as f:
                                data = json.load(f)
                            return data["audio_path"], data["text"], gr.Info("Loaded from annotation!")
                        return None, None, gr.Warning("No shared data found!")
                    except Exception as e:
                        return None, None, gr.Warning(f"Load failed: {str(e)}")

                btn_load_ref.click(
                    load_annotation_data,
                    outputs=[inp_ref, prompt_text]
                )
\`\`\`
## END_TCVB
`.trim();

  // The auto-fixed content must match the expected, fully fenced document.
  const fixedContent = TCVB.autoFixTCVBContent(input).trim();
  assert.strictEqual(fixedContent, expected);

  // The fixed content must also be parseable by the TCVB class without throwing.
  assert.doesNotThrow(() => {
    new TCVB(fixedContent);
  }, "修复后的内容应该能被TCVB类正确解析");
});
0 commit comments