
Commit 1634343

Update types for spec changes
1 parent 60e9c7a commit 1634343


mcp_python/types.py

Lines changed: 151 additions & 12 deletions
@@ -1,12 +1,12 @@
 from typing import Any, Generic, Literal, TypeVar

-from pydantic import BaseModel, ConfigDict, RootModel
+from pydantic import BaseModel, ConfigDict, FileUrl, RootModel
 from pydantic.networks import AnyUrl

 """
 Model Context Protocol bindings for Python

-These bindings were generated from https://github.com/anthropic-experimental/mcp-spec,
+These bindings were generated from https://github.com/modelcontextprotocol/specification,
 using Claude, with a prompt something like the following:

 Generate idiomatic Python bindings for this schema for MCP, or the "Model Context
@@ -21,7 +21,7 @@
 not separate types in the schema.
 """

-LATEST_PROTOCOL_VERSION = "2024-10-07"
+LATEST_PROTOCOL_VERSION = "2024-11-05"

 ProgressToken = str | int
 Cursor = str
@@ -191,6 +191,8 @@ class ClientCapabilities(BaseModel):
     """Experimental, non-standard capabilities that the client supports."""
     sampling: dict[str, Any] | None = None
     """Present if the client supports sampling from an LLM."""
+    roots: dict[str, Any] | None = None
+    """Present if the client supports listing roots."""
     model_config = ConfigDict(extra="allow")

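A minimal usage sketch: a client advertising the new roots capability during initialization. The nested listChanged flag is an assumption based on the MCP spec; this binding types roots only as dict[str, Any].

from mcp_python.types import ClientCapabilities

# Advertise sampling plus the new roots capability.
# "listChanged" is an assumed sub-flag, not enforced by the binding.
caps = ClientCapabilities(
    sampling={},
    roots={"listChanged": True},
)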
@@ -556,12 +558,33 @@ class SamplingMessage(BaseModel):
     model_config = ConfigDict(extra="allow")


+class EmbeddedResource(BaseModel):
+    """
+    The contents of a resource, embedded into a prompt or tool call result.
+
+    It is up to the client how best to render embedded resources for the benefit
+    of the LLM and/or the user.
+    """
+
+    type: Literal["resource"]
+    resource: TextResourceContents | BlobResourceContents
+    model_config = ConfigDict(extra="allow")
+
+
+class PromptMessage(BaseModel):
+    """Describes a message returned as part of a prompt."""
+
+    role: Role
+    content: TextContent | ImageContent | EmbeddedResource
+    model_config = ConfigDict(extra="allow")
+
+
 class GetPromptResult(Result):
     """The server's response to a prompts/get request from the client."""

     description: str | None = None
     """An optional description for the prompt."""
-    messages: list[SamplingMessage]
+    messages: list[PromptMessage]


 class PromptListChangedNotification(Notification):
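A minimal sketch of a prompts/get result built from the new message types. The TextResourceContents fields (uri, mimeType, text) and the "user" role value are assumed from the MCP schema; only EmbeddedResource and PromptMessage appear in this hunk.

from mcp_python.types import (
    EmbeddedResource,
    GetPromptResult,
    PromptMessage,
    TextResourceContents,
)

# A single prompt message that embeds a text resource instead of plain text.
result = GetPromptResult(
    description="Summarize the attached log file",
    messages=[
        PromptMessage(
            role="user",
            content=EmbeddedResource(
                type="resource",
                resource=TextResourceContents(
                    uri="file:///var/log/app.log",
                    mimeType="text/plain",
                    text="2024-11-05T12:00:00Z INFO server started",
                ),
            ),
        )
    ],
)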
@@ -617,7 +640,8 @@ class CallToolRequest(Request):
 class CallToolResult(Result):
     """The server's response to a tool call."""

-    toolResult: Any
+    content: list[TextContent | ImageContent | EmbeddedResource]
+    isError: bool


 class ToolListChangedNotification(Notification):
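A sketch of the new tool-call result shape, assuming TextContent keeps the type/text fields defined earlier in this module.

from mcp_python.types import CallToolResult, TextContent

# Tool output is now a list of content blocks plus an explicit error flag,
# replacing the old untyped toolResult field.
result = CallToolResult(
    content=[TextContent(type="text", text="42")],
    isError=False,
)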
@@ -630,7 +654,7 @@ class ToolListChangedNotification(Notification):
     params: NotificationParams | None = None


-LoggingLevel = Literal["debug", "info", "warning", "error"]
+LoggingLevel = Literal["debug", "info", "notice", "warning", "error", "critical", "alert", "emergency"]


 class SetLevelRequestParams(RequestParams):
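For clients that forward server log messages to Python's logging module, a hypothetical mapping over the expanded level set might look like this; notice, alert, and emergency have no direct stdlib equivalent and are approximated.

import logging

from mcp_python.types import LoggingLevel

# Assumed client-side mapping from MCP logging levels (RFC 5424 severity
# names) onto Python's logging constants.
LEVEL_MAP: dict[LoggingLevel, int] = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "notice": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
    "alert": logging.CRITICAL,
    "emergency": logging.CRITICAL,
}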
@@ -673,10 +697,71 @@ class LoggingMessageNotification(Notification):
 IncludeContext = Literal["none", "thisServer", "allServers"]


+class ModelHint(BaseModel):
+    """Hints to use for model selection."""
+
+    name: str | None = None
+    """A hint for a model name."""
+
+    model_config = ConfigDict(extra="allow")
+
+
+class ModelPreferences(BaseModel):
+    """
+    The server's preferences for model selection, requested of the client during sampling.
+
+    Because LLMs can vary along multiple dimensions, choosing the "best" model is
+    rarely straightforward. Different models excel in different areas—some are
+    faster but less capable, others are more capable but more expensive, and so
+    on. This interface allows servers to express their priorities across multiple
+    dimensions to help clients make an appropriate selection for their use case.
+
+    These preferences are always advisory. The client MAY ignore them. It is also
+    up to the client to decide how to interpret these preferences and how to
+    balance them against other considerations.
+    """
+
+    hints: list[ModelHint] | None = None
+    """
+    Optional hints to use for model selection.
+
+    If multiple hints are specified, the client MUST evaluate them in order
+    (such that the first match is taken).
+
+    The client SHOULD prioritize these hints over the numeric priorities, but
+    MAY still use the priorities to select from ambiguous matches.
+    """
+
+    costPriority: float | None = None
+    """
+    How much to prioritize cost when selecting a model. A value of 0 means cost
+    is not important, while a value of 1 means cost is the most important
+    factor.
+    """
+
+    speedPriority: float | None = None
+    """
+    How much to prioritize sampling speed (latency) when selecting a model. A
+    value of 0 means speed is not important, while a value of 1 means speed is
+    the most important factor.
+    """
+
+    intelligencePriority: float | None = None
+    """
+    How much to prioritize intelligence and capabilities when selecting a
+    model. A value of 0 means intelligence is not important, while a value of 1
+    means intelligence is the most important factor.
+    """
+
+    model_config = ConfigDict(extra="allow")
+
+
 class CreateMessageRequestParams(RequestParams):
     """Parameters for creating a message."""

     messages: list[SamplingMessage]
+    modelPreferences: ModelPreferences | None = None
+    """The server's preferences for which model to select. The client MAY ignore these preferences."""
     systemPrompt: str | None = None
     """An optional system prompt the server wants to use for sampling."""
     includeContext: IncludeContext | None = None
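A sketch of a server expressing sampling preferences with the new types; the model name hint is illustrative only, and all priorities are advisory.

from mcp_python.types import ModelHint, ModelPreferences

# Ask for a fast, inexpensive model; the client may ignore all of this.
prefs = ModelPreferences(
    hints=[ModelHint(name="claude-3-haiku")],
    costPriority=0.8,
    speedPriority=0.9,
    intelligencePriority=0.3,
)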
@@ -700,7 +785,7 @@ class CreateMessageRequest(Request):
     params: CreateMessageRequestParams


-StopReason = Literal["endTurn", "stopSequence", "maxTokens"]
+StopReason = Literal["endTurn", "stopSequence", "maxTokens"] | str


 class CreateMessageResult(Result):
@@ -710,8 +795,8 @@ class CreateMessageResult(Result):
     content: TextContent | ImageContent
     model: str
     """The name of the model that generated the message."""
-    stopReason: StopReason
-    """The reason why sampling stopped."""
+    stopReason: StopReason | None = None
+    """The reason why sampling stopped, if known."""


 class ResourceReference(BaseModel):
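A sketch of a sampling result under the relaxed stopReason typing; the role field is assumed from the MCP sampling schema and is not visible in this hunk.

from mcp_python.types import CreateMessageResult, TextContent

# stopReason may now be omitted entirely, and non-standard reasons are
# accepted because StopReason is an open string union.
result = CreateMessageResult(
    role="assistant",  # assumed field, not shown in this diff
    content=TextContent(type="text", text="Hello!"),
    model="example-model",
    stopReason="endTurn",
)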
@@ -781,6 +866,60 @@ class CompleteResult(Result):
     completion: Completion


+class ListRootsRequest(Request):
+    """
+    Sent from the server to request a list of root URIs from the client. Roots allow
+    servers to ask for specific directories or files to operate on. A common example
+    for roots is providing a set of repositories or directories a server should operate
+    on.
+
+    This request is typically used when the server needs to understand the file system
+    structure or access specific locations that the client has permission to read from.
+    """
+
+    method: Literal["roots/list"]
+    params: RequestParams | None = None
+
+
+class Root(BaseModel):
+    """Represents a root directory or file that the server can operate on."""
+
+    uri: FileUrl
+    """
+    The URI identifying the root. This *must* start with file:// for now.
+    This restriction may be relaxed in future versions of the protocol to allow
+    other URI schemes.
+    """
+    name: str | None = None
+    """
+    An optional name for the root. This can be used to provide a human-readable
+    identifier for the root, which may be useful for display purposes or for
+    referencing the root in other parts of the application.
+    """
+    model_config = ConfigDict(extra="allow")
+
+
+class ListRootsResult(Result):
+    """
+    The client's response to a roots/list request from the server.
+    This result contains an array of Root objects, each representing a root directory
+    or file that the server can operate on.
+    """
+
+    roots: list[Root]
+
+
+class RootsListChangedNotification(Notification):
+    """
+    A notification from the client to the server, informing it that the list of roots has changed.
+    This notification should be sent whenever the client adds, removes, or modifies any root.
+    The server should then request an updated list of roots using the ListRootsRequest.
+    """
+
+    method: Literal["notifications/roots/list_changed"]
+    params: NotificationParams | None = None
+
+
 class ClientRequest(
     RootModel[
         PingRequest
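A sketch of a client answering roots/list; the uri must be a file:// URL because Root.uri is typed as pydantic's FileUrl.

from mcp_python.types import ListRootsResult, Root

# A single repository root exposed to the server.
result = ListRootsResult(
    roots=[
        Root(
            uri="file:///home/user/projects/my-repo",
            name="my-repo",
        )
    ]
)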
@@ -801,15 +940,15 @@ class ClientRequest(
     pass


-class ClientNotification(RootModel[ProgressNotification | InitializedNotification]):
+class ClientNotification(RootModel[ProgressNotification | InitializedNotification | RootsListChangedNotification]):
     pass


-class ClientResult(RootModel[EmptyResult | CreateMessageResult]):
+class ClientResult(RootModel[EmptyResult | CreateMessageResult | ListRootsResult]):
     pass


-class ServerRequest(RootModel[PingRequest | CreateMessageRequest]):
+class ServerRequest(RootModel[PingRequest | CreateMessageRequest | ListRootsRequest]):
     pass


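A sketch showing that the widened ClientNotification union now round-trips the roots/list_changed notification; pydantic picks the union member whose method literal matches.

from mcp_python.types import ClientNotification, RootsListChangedNotification

# Validate a raw JSON-RPC notification payload against the client union.
note = ClientNotification.model_validate(
    {"method": "notifications/roots/list_changed"}
)
assert isinstance(note.root, RootsListChangedNotification)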