
Commit 704cd7b

Fix editorial workflow adding code fences to files (#676)
LLMs often wrap their markdown output in ```mdx or ```markdown code fences, even when instructed to return only the content. This caused the editorial review workflow to commit files with spurious code fence markers at the beginning and end. Added stripCodeFences() helper that removes these markers from only the start and end of responses, preserving any legitimate code blocks within the content. Fixes issue seen in PR #673.

Co-authored-by: Rachel Lee Nabors <[email protected]>
Co-authored-by: Claude Opus 4.5 <[email protected]>
1 parent bdb8de6 commit 704cd7b
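
As a rough illustration of the fix (not part of the commit), the sketch below copies the fence regexes and the stripCodeFences() helper from the diff further down and runs them on two hypothetical responses; the wrapped and withInnerBlock strings and the console.log calls are illustrative only.

// Copied from the commit: fence-stripping regexes and helper.
const CODE_FENCE_OPEN_REGEX = /^```(?:mdx?|markdown)?\n/;
const CODE_FENCE_CLOSE_REGEX = /\n```$/;

function stripCodeFences(content: string): string {
  let result = content.trim();
  result = result.replace(CODE_FENCE_OPEN_REGEX, "");
  result = result.replace(CODE_FENCE_CLOSE_REGEX, "");
  return result;
}

// Hypothetical LLM response wrapped in a spurious ```mdx fence:
// only the outer fence markers are removed.
const wrapped = "```mdx\n# Title\n\nSome prose.\n```";
console.log(stripCodeFences(wrapped)); // "# Title\n\nSome prose."

// A legitimate code block inside the content is preserved, because the
// regexes are anchored to the very start and very end of the response.
const withInnerBlock = "# Title\n\n```ts\nconst x = 1;\n```\n\nMore prose.";
console.log(stripCodeFences(withInnerBlock) === withInnerBlock); // true

Because neither regex uses the m flag, ^ and $ anchor to the start and end of the whole trimmed response, so fences that belong to code blocks inside the document are left alone.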

1 file changed: +23, -5 lines

1 file changed

+23
-5
lines changed

scripts/vale-editorial.ts

Lines changed: 23 additions & 5 deletions
@@ -46,6 +46,8 @@ const MAX_AI_TOKENS = 8192;
 const OWNER = "ArcadeAI";
 const REPO = "docs";
 const EDITORIAL_COMMENT_REGEX = /<!-- Editorial: (.+?) -->/;
+const CODE_FENCE_OPEN_REGEX = /^```(?:mdx?|markdown)?\n/;
+const CODE_FENCE_CLOSE_REGEX = /\n```$/;
 const HTTP_UNPROCESSABLE_ENTITY = 422;
 const MEGABYTE = KILOBYTE * KILOBYTE;
 const MAX_BUFFER_MB = 10;
@@ -84,6 +86,15 @@ type ValeIssue = {
 
 type ValeOutput = Record<string, ValeIssue[]>;
 
+// Strip markdown code fences from the beginning and end of LLM responses
+// LLMs often wrap their output in ```mdx or ```markdown blocks
+function stripCodeFences(content: string): string {
+  let result = content.trim();
+  result = result.replace(CODE_FENCE_OPEN_REGEX, "");
+  result = result.replace(CODE_FENCE_CLOSE_REGEX, "");
+  return result;
+}
+
 // Run Vale on content and return issues
 function runValeOnContent(filename: string, content: string): ValeIssue[] {
   // Create temp directory and file with correct extension
@@ -155,15 +166,18 @@ async function fixValeIssues(
       messages: [{ role: "user", content: prompt }],
     });
     const textBlock = response.content.find((b) => b.type === "text");
-    return textBlock?.type === "text" ? textBlock.text : content;
+    return textBlock?.type === "text"
+      ? stripCodeFences(textBlock.text)
+      : content;
   }
 
   const response = await ai.client.chat.completions.create({
     model: "gpt-4-turbo",
     max_tokens: MAX_AI_TOKENS,
     messages: [{ role: "user", content: prompt }],
   });
-  return response.choices[0]?.message?.content ?? content;
+  const result = response.choices[0]?.message?.content;
+  return result ? stripCodeFences(result) : content;
 } catch (error) {
   console.error("Error fixing Vale issues:", error);
   return content;
@@ -266,7 +280,7 @@ async function getEditorialFromAnthropic(
     return null;
   }
 
-  const revisedContent = textBlock.text.trim();
+  const revisedContent = stripCodeFences(textBlock.text);
 
   if (revisedContent === "NO_CHANGES_NEEDED") {
     return null;
@@ -298,8 +312,12 @@ async function getEditorialFromOpenAI(
     messages: [{ role: "user", content: prompt }],
   });
 
-  const revisedContent = response.choices[0]?.message?.content?.trim();
-  if (!revisedContent || revisedContent === "NO_CHANGES_NEEDED") {
+  const rawContent = response.choices[0]?.message?.content;
+  if (!rawContent) {
+    return null;
+  }
+  const revisedContent = stripCodeFences(rawContent);
+  if (revisedContent === "NO_CHANGES_NEEDED") {
     return null;
   }
 
0 commit comments
