Skip to content

Commit 95bf085

Browse files
authored
Merge pull request #20 from Code-the-Dream-School/SCRUM-29-backend-generate-summary-for-uploaded-resource
Scrum 29 backend generate summary for uploaded resource
2 parents cfd1d11 + a4beb09 commit 95bf085

File tree

4 files changed

+244
-16
lines changed

4 files changed

+244
-16
lines changed

backend/src/ai/llm.ts

Lines changed: 82 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,51 @@
1-
// backend/src/ai/llm.ts
1+
import type { GenerateContentResult } from "@google/generative-ai";
2+
23
export type GenerateTextParams = {
34
prompt: string;
45
};
56

6-
export const generateText = async ({ prompt }: GenerateTextParams): Promise<string> => {
7+
const MAX_RETRIES = 3;
8+
const INITIAL_RETRY_DELAY_MS = 1000;
9+
const TIMEOUT_MS = 30000;
10+
11+
/** Resolves after `ms` milliseconds; used to back off between retry attempts. */
const sleep = (ms: number): Promise<void> => {
  return new Promise<void>((resolve) => {
    setTimeout(() => resolve(), ms);
  });
};
13+
14+
const isRetryableError = (error: any): boolean => {
15+
const message = error?.message?.toLowerCase() || "";
16+
const status = error?.status || error?.statusCode || 0;
17+
18+
return (
19+
status === 429 ||
20+
status === 500 ||
21+
status === 502 ||
22+
status === 503 ||
23+
status === 504 ||
24+
message.includes("rate limit") ||
25+
message.includes("timeout") ||
26+
message.includes("network") ||
27+
message.includes("econnreset") ||
28+
message.includes("econnrefused")
29+
);
30+
};
31+
32+
const generateWithTimeout = async (
33+
gemini: any,
34+
prompt: string
35+
): Promise<GenerateContentResult> => {
36+
return Promise.race([
37+
gemini.generateContent({
38+
contents: [{ role: "user", parts: [{ text: prompt }] }],
39+
}),
40+
new Promise<never>((_, reject) =>
41+
setTimeout(() => reject(new Error("Request timeout")), TIMEOUT_MS)
42+
),
43+
]);
44+
};
45+
46+
export const generateText = async ({
47+
prompt,
48+
}: GenerateTextParams): Promise<string> => {
749
const mode = (process.env.LLM_MODE || "stub").toLowerCase();
850
console.log(`[llm] mode=${mode}`);
951

@@ -21,9 +63,6 @@ export const generateText = async ({ prompt }: GenerateTextParams): Promise<stri
2163
const { GoogleGenerativeAI } = await import("@google/generative-ai");
2264
const genAI = new GoogleGenerativeAI(apiKey);
2365

24-
// IMPORTANT:
25-
// Do NOT force responseMimeType=application/json — it often truncates/cuts off output.
26-
// We will enforce JSON via the prompt + backend parsing.
2766
const generationConfig: any = {
2867
temperature: 0,
2968
maxOutputTokens: 4096,
@@ -34,12 +73,43 @@ export const generateText = async ({ prompt }: GenerateTextParams): Promise<stri
3473
generationConfig,
3574
});
3675

37-
const result = await gemini.generateContent({
38-
contents: [{ role: "user", parts: [{ text: prompt }] }],
39-
});
76+
let lastError: any;
77+
78+
for (let attempt = 0; attempt < MAX_RETRIES; attempt++) {
79+
try {
80+
const result = await generateWithTimeout(gemini, prompt);
81+
82+
const text = result.response.text() || "";
83+
console.log(
84+
`[llm] total attempts=${attempt + 1}, success=true, response chars=${
85+
text.length
86+
}`
87+
);
88+
console.log(
89+
`[llm] response preview=${JSON.stringify(text.slice(0, 300))}`
90+
);
91+
return text;
92+
} catch (error: any) {
93+
lastError = error;
94+
console.error(
95+
`[llm] attempt ${attempt + 1}/${MAX_RETRIES} failed:`,
96+
error.message
97+
);
98+
99+
if (attempt < MAX_RETRIES - 1 && isRetryableError(error)) {
100+
const delayMs = INITIAL_RETRY_DELAY_MS * Math.pow(2, attempt);
101+
console.log(`[llm] retrying in ${delayMs}ms...`);
102+
await sleep(delayMs);
103+
} else {
104+
break;
105+
}
106+
}
107+
}
40108

41-
const text = result.response.text() || "";
42-
console.log(`[llm] response chars=${text.length}`);
43-
console.log(`[llm] response preview=${JSON.stringify(text.slice(0, 300))}`);
44-
return text;
109+
console.error(`[llm] total attempts=${MAX_RETRIES}, success=false`);
110+
throw new Error(
111+
`Failed to generate text after ${MAX_RETRIES} attempts: ${
112+
lastError?.message || "Unknown error"
113+
}`
114+
);
45115
};

backend/src/controllers/resourceController.ts

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import {
99
parseTags,
1010
normalizeText,
1111
} from "../utils/validation";
12+
import { generateSummaryFromText } from "../services/summaryGenerator";
1213

1314
const requireUserObjectId = (
1415
req: Request,
@@ -265,3 +266,77 @@ export const deleteResource = async (
265266
next(error);
266267
}
267268
};
269+
270+
export const generateSummary = async (
271+
req: Request,
272+
res: Response,
273+
next: NextFunction
274+
) => {
275+
try {
276+
const ownerId = requireUserObjectId(req, res);
277+
if (!ownerId) return;
278+
279+
const { id } = req.params;
280+
281+
const resource = await Resource.findOne({ _id: id, ownerId });
282+
283+
if (!resource) {
284+
return res.status(404).json({
285+
success: false,
286+
error: "Resource not found",
287+
});
288+
}
289+
290+
try {
291+
const summaryContent = await generateSummaryFromText(
292+
resource.textContent
293+
);
294+
295+
resource.summary = {
296+
content: summaryContent,
297+
createdAt: new Date(),
298+
};
299+
300+
await resource.save();
301+
302+
return res.status(200).json({
303+
success: true,
304+
summary: resource.summary,
305+
});
306+
} catch (summaryError: any) {
307+
if (summaryError instanceof Error) {
308+
if (
309+
summaryError.message.includes("too short") ||
310+
summaryError.message.includes("too long") ||
311+
summaryError.message.includes("required")
312+
) {
313+
return res.status(400).json({
314+
success: false,
315+
error: summaryError.message,
316+
});
317+
}
318+
319+
if (summaryError.message.includes("GEMINI_API_KEY")) {
320+
console.error("[resource] Gemini API key missing");
321+
return res.status(500).json({
322+
success: false,
323+
error: "Summary service is not configured. Please contact support.",
324+
});
325+
}
326+
327+
return res.status(503).json({
328+
success: false,
329+
error: summaryError.message,
330+
});
331+
}
332+
333+
return res.status(500).json({
334+
success: false,
335+
error: "An unexpected error occurred while generating summary",
336+
});
337+
}
338+
} catch (error: any) {
339+
if (handleCastError(error, res)) return;
340+
next(error);
341+
}
342+
};

backend/src/routes/resourceRoutes.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,19 +5,19 @@ import {
55
getResourceById,
66
updateResource,
77
deleteResource,
8+
generateSummary,
89
} from "../controllers/resourceController";
910

1011
const router = express.Router();
1112

12-
router
13-
.route("/")
14-
.get(getUserResources)
15-
.post(createResource);
13+
router.route("/").get(getUserResources).post(createResource);
1614

1715
router
1816
.route("/:id")
1917
.get(getResourceById)
2018
.patch(updateResource)
2119
.delete(deleteResource);
2220

21+
router.post("/:id/summary", generateSummary);
22+
2323
export default router;
Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,83 @@
1+
import { generateText } from "../ai/llm";
2+
import { LIMITS } from "../config/constants";
3+
4+
const MAX_SUMMARY_CHARS = 5000;
5+
const MAX_INPUT_CHARS = LIMITS.TEXT_CONTENT_MAX_LENGTH;
6+
const MIN_INPUT_CHARS = 100;
7+
8+
const SUMMARY_PROMPT = `
9+
You are a study assistant that creates concise summaries of educational content.
10+
11+
Task: Create a clear, structured summary of the provided text.
12+
13+
Formatting rules:
14+
- Plain text only (no Markdown, no code fences, no headings).
15+
- Use short paragraphs separated by line breaks.
16+
- Keep the summary under ${MAX_SUMMARY_CHARS} characters.
17+
- Focus on key concepts, main ideas, and important details.
18+
19+
Quality rules:
20+
- Be accurate and comprehensive.
21+
- Maintain the original meaning and context.
22+
- Use clear, simple language suitable for studying.
23+
`.trim();
24+
25+
export const generateSummaryFromText = async (
26+
text: string
27+
): Promise<string> => {
28+
const cleanedText = (text || "").trim();
29+
30+
if (!cleanedText) {
31+
throw new Error("Text content is required for summary generation.");
32+
}
33+
34+
if (cleanedText.length < MIN_INPUT_CHARS) {
35+
throw new Error(
36+
`Text is too short to generate a meaningful summary (minimum ${MIN_INPUT_CHARS} characters).`
37+
);
38+
}
39+
40+
if (cleanedText.length > MAX_INPUT_CHARS) {
41+
throw new Error(
42+
`Text is too long (maximum ${MAX_INPUT_CHARS} characters).`
43+
);
44+
}
45+
46+
const prompt = `
47+
${SUMMARY_PROMPT}
48+
49+
Text to summarize:
50+
"""
51+
${cleanedText}
52+
"""
53+
54+
Summary:
55+
`.trim();
56+
57+
try {
58+
const raw = await generateText({ prompt });
59+
const summary = raw?.trim() || "";
60+
61+
if (!summary) {
62+
throw new Error("Failed to generate summary. Please try again.");
63+
}
64+
65+
if (summary.length > MAX_SUMMARY_CHARS) {
66+
const truncated = summary.slice(0, MAX_SUMMARY_CHARS);
67+
const lastSpace = truncated.lastIndexOf(" ");
68+
return lastSpace > 0 ? truncated.slice(0, lastSpace) : truncated;
69+
}
70+
71+
return summary;
72+
} catch (error) {
73+
console.error("[summary] LLM generation failed:", error);
74+
75+
if (error instanceof Error && error.message.includes("GEMINI_API_KEY")) {
76+
throw error;
77+
}
78+
79+
throw new Error(
80+
"Unable to generate summary at this time. Please try again later."
81+
);
82+
}
83+
};

0 commit comments

Comments
 (0)