@@ -13,7 +13,7 @@ export const JSONRPC_VERSION = "2.0";
 /**
  * A progress token, used to associate progress notifications with the original request.
  */
-export const ProgressTokenSchema = z.union([z.string(), z.number()]);
+export const ProgressTokenSchema = z.union([z.string(), z.number().int()]);
 
 export const RequestSchema = z.object({
   method: z.string(),
@@ -47,7 +47,7 @@ export const ResultSchema = z.object({
 /**
  * A uniquely identifying ID for a request in JSON-RPC.
  */
-export const RequestIdSchema = z.union([z.string(), z.number()]);
+export const RequestIdSchema = z.union([z.string(), z.number().int()]);
 
 /**
  * A request that expects a response.
@@ -90,7 +90,7 @@ export const JSONRPCErrorSchema = z.object({
     /**
      * The error type that occurred.
      */
-    code: z.number(),
+    code: z.number().int(),
     /**
      * A short description of the error. The message SHOULD be limited to a concise single sentence.
      */
@@ -218,14 +218,10 @@ export const ProgressNotificationSchema = NotificationSchema.extend({
     progressToken: ProgressTokenSchema,
     /**
      * The progress thus far. This should increase every time progress is made, even if the total is unknown.
-     *
-     * @TJS-type number
      */
     progress: z.number(),
     /**
      * Total number of items to process (or total progress required), if known.
-     *
-     * @TJS-type number
      */
     total: z.optional(z.number())
   })
@@ -599,14 +595,11 @@ export const CreateMessageRequestSchema = RequestSchema.extend({
      * A request to include context from one or more MCP servers (including the caller), to be attached to the prompt. The client MAY ignore this request.
      */
     includeContext: z.optional(z.enum(["none", "thisServer", "allServers"])),
-    /**
-     * @TJS-type number
-     */
     temperature: z.optional(z.number()),
     /**
      * The maximum number of tokens to sample, as requested by the server. The client MAY choose to sample fewer tokens than requested.
      */
-    maxTokens: z.number(),
+    maxTokens: z.number().int(),
     stopSequences: z.optional(z.array(z.string())),
     /**
      * Optional metadata to pass through to the LLM provider. The format of this metadata is provider-specific.
@@ -703,7 +696,7 @@ export const CompleteResultSchema = ResultSchema.extend({
     /**
      * The total number of completion options available. This can exceed the number of values actually sent in the response.
      */
-    total: z.optional(z.number()),
+    total: z.optional(z.number().int()),
     /**
      * Indicates whether there are additional completion options beyond those provided in the current response, even if the exact total is unknown.
      */