11import { NextRequest , NextResponse } from 'next/server' ;
2- import { getGitHubModelsService } from '../../../lib/services/github-models.services'
2+ //import { getGitHubModelsService } from '../../../lib/services/github-models.services'
3+ import { getLangChainMicroblogService } from '@/lib/services/langchain.factory' ;
34import type { GenerateApiRequest , GenerateApiResponse } from '@/types' ;
45
56// Rate limiting configuration
@@ -10,114 +11,123 @@ const requestCounts = new Map<string, { count: number; resetTime: number }>();
1011// Input validation
1112function validateRequest ( body : any ) : GenerateApiRequest | null {
1213 const { topic, tone, keywords } = body ;
13-
14+
1415 if ( ! topic || typeof topic !== 'string' ) {
1516 return null ;
1617 }
17-
18+
1819 if ( ! tone || typeof tone !== 'string' ) {
1920 return null ;
2021 }
21-
22+
2223 if ( keywords !== undefined && typeof keywords !== 'string' ) {
2324 return null ;
2425 }
25-
26+
2627 return { topic : topic . trim ( ) , tone : tone . toLowerCase ( ) , keywords : keywords ?. trim ( ) } ;
2728}
2829
2930// Rate limiting implementation
3031function checkRateLimit ( clientId : string ) : boolean {
3132 const now = Date . now ( ) ;
3233 const clientData = requestCounts . get ( clientId ) ;
33-
34+
3435 if ( ! clientData || now > clientData . resetTime ) {
3536 requestCounts . set ( clientId , {
3637 count : 1 ,
3738 resetTime : now + RATE_LIMIT_WINDOW ,
3839 } ) ;
3940 return true ;
4041 }
41-
42+
4243 if ( clientData . count >= MAX_REQUESTS_PER_WINDOW ) {
4344 return false ;
4445 }
45-
46+
4647 clientData . count += 1 ;
4748 return true ;
4849}
4950
5051export async function POST ( request : NextRequest ) {
5152 try {
5253 // Extract client identifier (IP or session)
53- const clientId = request . headers . get ( 'x-forwarded-for' ) ||
54- request . headers . get ( 'x-real-ip' ) ||
55- 'anonymous' ;
56-
54+ const clientId = request . headers . get ( 'x-forwarded-for' ) ||
55+ request . headers . get ( 'x-real-ip' ) ||
56+ 'anonymous' ;
57+
5758 // Check rate limit
5859 if ( ! checkRateLimit ( clientId ) ) {
5960 return NextResponse . json (
6061 { success : false , error : 'Rate limit exceeded. Please try again later.' } ,
6162 { status : 429 }
6263 ) ;
6364 }
64-
65+
6566 // Parse and validate request body
6667 const body = await request . json ( ) ;
6768 const validatedData = validateRequest ( body ) ;
68-
69+
6970 if ( ! validatedData ) {
7071 return NextResponse . json (
7172 { success : false , error : 'Invalid request data' } ,
7273 { status : 400 }
7374 ) ;
7475 }
75-
76+
7677 // Additional validation
7778 if ( validatedData . topic . length < 10 || validatedData . topic . length > 280 ) {
7879 return NextResponse . json (
7980 { success : false , error : 'Topic must be between 10 and 280 characters' } ,
8081 { status : 400 }
8182 ) ;
8283 }
83-
84+
8485 const validTones = [ 'technical' , 'casual' , 'motivational' ] ;
8586 if ( ! validTones . includes ( validatedData . tone ) ) {
8687 return NextResponse . json (
8788 { success : false , error : 'Invalid tone of voice' } ,
8889 { status : 400 }
8990 ) ;
9091 }
91-
92+
9293 // Generate content using the service
93- const service = getGitHubModelsService ( ) ;
94+ const service = getLangChainMicroblogService ( {
95+ enableLogging : process . env . NODE_ENV === 'development' ,
96+ enableRetry : true ,
97+ customRetryConfig : {
98+ maxAttempts : 3 ,
99+ baseDelayMs : 1000
100+ }
101+ } ) ;
102+
94103 const generatedContent = await service . generateMicroblogContent (
95104 validatedData . topic ,
96105 validatedData . tone ,
97106 validatedData . keywords
98107 ) ;
99-
108+
100109 // Return successful response
101110 const response : GenerateApiResponse = {
102111 success : true ,
103112 content : generatedContent ,
104113 } ;
105-
114+
106115 return NextResponse . json ( response , {
107116 status : 200 ,
108117 headers : {
109118 'Cache-Control' : 'no-store, max-age=0' ,
110119 'Content-Type' : 'application/json' ,
120+ 'X-Service-Used' : 'langchain'
111121 } ,
112122 } ) ;
113-
123+
114124 } catch ( error ) {
115- console . error ( 'Generation API error:' , error ) ;
116-
125+ console . error ( 'LangChain Generation API error:' , error ) ;
126+
117127 // Determine error type and status code
118128 let statusCode = 500 ;
119129 let errorMessage = 'An unexpected error occurred' ;
120-
130+
121131 if ( error instanceof Error ) {
122132 if ( error . message . includes ( 'environment variables' ) ) {
123133 statusCode = 500 ;
@@ -128,26 +138,45 @@ export async function POST(request: NextRequest) {
128138 } else if ( error . message . includes ( 'Invalid' ) ) {
129139 statusCode = 400 ;
130140 errorMessage = error . message ;
141+ } else if ( error . message . includes ( 'LangChain' ) ) {
142+ statusCode = 500 ;
143+ errorMessage = 'AI service temporarily unavailable'
131144 } else {
132145 errorMessage = 'Failed to generate content' ;
133146 }
134147 }
135-
148+
136149 return NextResponse . json (
137- { success : false , error : errorMessage } ,
150+ { success : false , error : errorMessage , service : 'langchain' } ,
138151 { status : statusCode }
139152 ) ;
140153 }
141154}
142155
143156// Health check endpoint
144157export async function GET ( ) {
145- return NextResponse . json (
146- {
158+
159+ try {
160+ const service = getLangChainMicroblogService ( ) ;
161+ const testResult = await service . testConnection ( ) ;
162+
163+ return NextResponse . json (
164+ {
147165 status : 'healthy' ,
148- service : 'generate-api' ,
166+ service : 'langchain-generate-api' ,
167+ timestamp : new Date ( ) . toISOString ( ) ,
168+ connectivity : {
169+ success : testResult . success ,
170+ latency : testResult . latency ,
171+ model : testResult . model
172+ }
173+ } ) ;
174+ } catch ( error ) {
175+ return NextResponse . json ( {
176+ status : 'unhealthy' ,
177+ service : 'langchain-generate-api' ,
149178 timestamp : new Date ( ) . toISOString ( ) ,
150- } ,
151- { status : 200 }
152- ) ;
179+ error : error instanceof Error ? error . message : 'Unknown error'
180+ } , { status : 503 } )
181+ }
153182}