/**
 * Configurable fetch utility with caching, rate limiting, and retry logic.
 * This is shared across all MCP servers for consistent HTTP handling.
 */

import NodeFetchCache, { MemoryCache } from 'node-fetch-cache';

export interface FetchConfig {
  /** Cache TTL in milliseconds (default: 5 minutes) */
  cacheTTL?: number;
  /** Enable/disable caching (default: true) */
  enableCache?: boolean;
  /** Request timeout in milliseconds (default: 30 seconds) */
  timeout?: number;
  /** Number of retry attempts (default: 3) */
  retries?: number;
  /** Base delay for exponential backoff in ms (default: 1000) */
  retryDelay?: number;
  /** Custom headers to include with all requests */
  defaultHeaders?: Record<string, string>;
  /** Rate limiting: max requests per minute (default: 60) */
  rateLimit?: number;
}
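
/**
 * Fetch wrapper that layers an in-memory response cache, a per-minute rate
 * limiter, request timeouts, and exponential-backoff retries on top of the
 * global fetch. Illustrative example (the URL is a placeholder):
 *
 * @example
 * const api = new ConfigurableFetch({ retries: 5, rateLimit: 30 });
 * const data = await api.json('https://api.example.com/items');
 */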
export class ConfigurableFetch {
  private fetch: typeof fetch;
  private config: Required<FetchConfig>;
  private requestTimes: number[] = [];

  constructor(config: FetchConfig = {}) {
    this.config = {
      cacheTTL: config.cacheTTL ?? 5 * 60 * 1000, // 5 minutes
      enableCache: config.enableCache ?? true,
      timeout: config.timeout ?? 30 * 1000, // 30 seconds
      retries: config.retries ?? 3,
      retryDelay: config.retryDelay ?? 1000,
      defaultHeaders: config.defaultHeaders ?? {},
      rateLimit: config.rateLimit ?? 60, // 60 requests per minute
    };

    // Initialize fetch with or without cache
    if (this.config.enableCache) {
      this.fetch = NodeFetchCache.create({
        cache: new MemoryCache({ ttl: this.config.cacheTTL }),
      });
    } else {
      this.fetch = fetch;
    }
  }

  /**
   * Make a fetch request with built-in retry, timeout, and rate limiting
   */
  async request(url: string, options: RequestInit = {}): Promise<Response> {
    await this.checkRateLimit();

    const requestOptions: RequestInit = {
      ...options,
      headers: {
        ...this.config.defaultHeaders,
        ...options.headers,
      },
      signal: this.createTimeoutSignal(),
    };

    return this.executeWithRetry(url, requestOptions);
  }

  /**
   * Convenience method for GET requests
   */
  async get(url: string, options: RequestInit = {}): Promise<Response> {
    return this.request(url, { ...options, method: 'GET' });
  }

  /**
   * Convenience method for POST requests
   */
  async post(url: string, data?: any, options: RequestInit = {}): Promise<Response> {
    const requestOptions: RequestInit = {
      ...options,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        ...options.headers,
      },
    };

    // Serialize any defined body, including falsy values such as 0 or false
    if (data !== undefined) {
      requestOptions.body = JSON.stringify(data);
    }

    return this.request(url, requestOptions);
  }

  /**
   * Fetch and parse JSON response
   */
  async json<T = any>(url: string, options: RequestInit = {}): Promise<T> {
    const response = await this.get(url, options);

    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }

    return response.json();
  }

  /**
   * Create a timeout signal for the request
   */
  private createTimeoutSignal(): AbortSignal {
    const controller = new AbortController();
    setTimeout(() => controller.abort(), this.config.timeout);
    return controller.signal;
  }

  /**
   * Execute request with exponential backoff retry
   */
  private async executeWithRetry(url: string, options: RequestInit): Promise<Response> {
    let lastError: Error | undefined;

    for (let attempt = 0; attempt <= this.config.retries; attempt++) {
      try {
        const response = await this.fetch(url, options);

        // Don't retry on client errors (4xx), only server errors (5xx) and network issues
        if (response.ok || (response.status >= 400 && response.status < 500)) {
          return response;
        }

        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
      } catch (error) {
        lastError = error as Error;

        // Don't retry on the last attempt
        if (attempt === this.config.retries) {
          break;
        }

        // Calculate exponential backoff delay (retryDelay, 2x, 4x, ...)
        const delay = this.config.retryDelay * Math.pow(2, attempt);
        await new Promise(resolve => setTimeout(resolve, delay));
      }
    }

    throw new Error(
      `Request failed after ${this.config.retries + 1} attempts: ${lastError?.message ?? 'unknown error'}`
    );
  }

  /**
   * Check and enforce rate limiting
   */
  private async checkRateLimit(): Promise<void> {
    const now = Date.now();
    const oneMinuteAgo = now - 60 * 1000;

    // Remove requests older than the one-minute window
    this.requestTimes = this.requestTimes.filter(time => time > oneMinuteAgo);

    // Check if we're at the rate limit
    if (this.requestTimes.length >= this.config.rateLimit) {
      const oldestRequest = this.requestTimes[0];
      const waitTime = (oldestRequest + 60 * 1000) - now;

      if (waitTime > 0) {
        await new Promise(resolve => setTimeout(resolve, waitTime));
      }
    }

    // Record this request (re-read the clock in case we waited above)
    this.requestTimes.push(Date.now());
  }

  /**
   * Clear the cache (if caching is enabled)
   */
  clearCache(): void {
    const cache = (this.fetch as any).cache;
    if (this.config.enableCache && cache) {
      cache.clear();
    }
  }

  /**
   * Get cache statistics (if caching is enabled)
   */
  getCacheStats(): { size: number; hits: number; misses: number } | null {
    const cache = (this.fetch as any).cache;
    if (this.config.enableCache && cache) {
      return {
        size: cache.size || 0,
        hits: cache.hits || 0,
        misses: cache.misses || 0,
      };
    }
    return null;
  }
}

/**
 * Create a configured fetch instance with sensible defaults for MCP servers
 */
export function createMCPFetch(config: FetchConfig = {}): ConfigurableFetch {
  return new ConfigurableFetch({
    cacheTTL: 5 * 60 * 1000, // 5 minutes
    enableCache: true,
    timeout: 30 * 1000, // 30 seconds
    retries: 3,
    retryDelay: 1000,
    rateLimit: 60, // 60 requests per minute
    defaultHeaders: {
      'User-Agent': 'MCP-Server/1.0',
    },
    ...config,
  });
}

/**
 * Default fetch instance for MCP servers
 */
export const mcpFetch = createMCPFetch();
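
/*
 * Example usage (illustrative only; the import path, endpoint URLs, and
 * response shape below are placeholders, not part of this module):
 *
 *   import { mcpFetch, createMCPFetch } from './fetch';
 *
 *   // Shared instance with the MCP defaults
 *   const items = await mcpFetch.json<{ id: string }[]>('https://api.example.com/items');
 *
 *   // Custom instance for an API with stricter limits
 *   const slowApi = createMCPFetch({ rateLimit: 10, retries: 5 });
 *   const res = await slowApi.post('https://api.example.com/jobs', { name: 'sync' });
 */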