|
| 1 | +from .cdn import CDNService |
| 2 | +from ...consts import consts |
| 3 | +from ...tools import tools |
| 4 | +import logging |
| 5 | +from mcp import types |
| 6 | +from typing import Optional, List |
| 7 | + |
# Module-level logger; uses the project-wide logger name so all MCP tool
# modules emit under a single, consistently configured logger.
logger = logging.getLogger(consts.LOGGER_NAME)
| 9 | + |
| 10 | + |
| 11 | +def _build_base_list( |
| 12 | + code: Optional[int], |
| 13 | + error: Optional[str], |
| 14 | + request_id: Optional[str], |
| 15 | +) -> List[str]: |
| 16 | + rets = [] |
| 17 | + if code: |
| 18 | + rets.append(f"Status Code: {code}") |
| 19 | + if error: |
| 20 | + rets.append(f"Message: {error}") |
| 21 | + if request_id: |
| 22 | + rets.append(f"RequestID: {request_id}") |
| 23 | + return rets |
| 24 | + |
| 25 | + |
class _ToolImpl:
    """MCP tool implementations backed by a :class:`CDNService`.

    Each public method is registered as an MCP tool via ``tools.tool_meta``
    and renders the CDN API reply as a single newline-joined TextContent.
    """

    def __init__(self, cdn: CDNService):
        # Service client that performs the actual CDN API calls.
        self._cdn = cdn

    @tools.tool_meta(
        types.Tool(
            name="CDNPrefetchUrls",
            description="Newly added resources are proactively retrieved by the CDN and stored on its cache nodes in advance. Users simply submit the resource URLs, and the CDN automatically triggers the prefetch process.",
            inputSchema={
                "type": "object",
                "additionalProperties": False,
                "properties": {
                    "urls": {
                        "type": "array",
                        "description": "List of individual URLs to prefetch (max 60 items). Must be full URLs with protocol, e.g. 'http://example.com/file.zip'",
                        "items": {
                            "type": "string",
                            "format": "uri",
                            "pattern": "^https?://",
                            "examples": [
                                "https://cdn.example.com/images/photo.jpg",
                                "http://static.example.com/downloads/app.exe",
                            ],
                        },
                        "maxItems": 60,
                        "minItems": 1,
                    }
                },
                "required": ["urls"],
            },
        )
    )
    def prefetch_urls(self, **kwargs) -> list[types.TextContent]:
        """Submit URLs for CDN prefetch and render the API reply as text.

        Returns:
            A single TextContent whose lines report status code, message,
            request id, any invalid URLs, and (on success) quota details.
        """
        ret = self._cdn.prefetch_urls(**kwargs)

        rets = _build_base_list(ret.code, ret.error, ret.requestId)
        if ret.invalidUrls:
            rets.append(f"Invalid URLs: {ret.invalidUrls}")
        # Quota fields are only meaningful on a 2xx reply. The explicit
        # None-check also prevents `None // 100` from raising TypeError
        # when the API returned no status code at all.
        if ret.code is not None and ret.code // 100 == 2:
            if ret.quotaDay is not None:
                rets.append(f"Today's prefetch quota: {ret.quotaDay}")
            if ret.surplusDay is not None:
                rets.append(f"Today's remaining quota: {ret.surplusDay}")

        return [
            types.TextContent(
                type="text",
                text="\n".join(rets),
            )
        ]

    @tools.tool_meta(
        types.Tool(
            name="CDNRefresh",
            description="This function marks resources cached on CDN nodes as expired. When users access these resources again, the CDN nodes will fetch the latest version from the origin server and store them anew.",
            inputSchema={
                "type": "object",
                "additionalProperties": False,  # reject properties not declared below
                "properties": {
                    "urls": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "format": "uri",
                            "pattern": "^https?://",  # must start with http:// or https://
                            "examples": ["http://bar.foo.com/index.html"],
                        },
                        "maxItems": 60,
                        "description": "List of exact URLs to refresh (max 60 items). Must be full URLs with protocol, e.g. 'http://example.com/path/page.html'",
                    },
                    "dirs": {
                        "type": "array",
                        "items": {
                            "type": "string",
                            "pattern": "^https?://.*/(\\*|$)",  # http(s):// URL ending with '/' or '/*'
                            "examples": [
                                "http://bar.foo.com/dir/",
                                "http://bar.foo.com/images/*",
                            ],
                        },
                        "maxItems": 10,
                        "description": "List of directory patterns to refresh (max 10 items). Must end with '/' or '/*' to indicate directory scope",
                    },
                },
                "anyOf": [  # at least one of urls/dirs must be present and non-empty
                    {
                        "required": ["urls"],
                        "properties": {"urls": {"not": {"maxItems": 0}}},
                    },
                    {
                        "required": ["dirs"],
                        "properties": {"dirs": {"not": {"maxItems": 0}}},
                    },
                ],
            },
        )
    )
    def refresh(self, **kwargs) -> list[types.TextContent]:
        """Invalidate cached URLs/directories on CDN nodes and render the reply.

        Returns:
            A single TextContent with status lines, any invalid inputs, and
            (on a 2xx reply) the per-day refresh quotas for URLs and dirs.
        """
        ret = self._cdn.refresh(**kwargs)
        rets = _build_base_list(ret.code, ret.error, ret.requestId)
        # NOTE(review): ret.taskIds is intentionally not surfaced yet; the
        # original code held a dead `if ret.taskIds is not None: pass` here.
        if ret.invalidUrls:
            rets.append(f"Invalid URLs list: {ret.invalidUrls}")
        if ret.invalidDirs:
            rets.append(f"Invalid dirs: {ret.invalidDirs}")

        # Guard against a missing status code before integer division
        # (mirrors the same fix in prefetch_urls).
        if ret.code is not None and ret.code // 100 == 2:
            if ret.urlQuotaDay is not None:
                rets.append(f"Today's URL refresh quota: {ret.urlQuotaDay}")
            if ret.urlSurplusDay is not None:
                rets.append(f"Today's remaining URL refresh quota: {ret.urlSurplusDay}")
            if ret.dirQuotaDay is not None:
                rets.append(f"Today's directory refresh quota: {ret.dirQuotaDay}")
            if ret.dirSurplusDay is not None:
                rets.append(
                    f"Today's remaining directory refresh quota: {ret.dirSurplusDay}"
                )
        return [
            types.TextContent(
                type="text",
                text="\n".join(rets),
            )
        ]
| 151 | + |
| 152 | + |
def register_tools(cdn: CDNService):
    """Create the CDN tool implementations and register them with the MCP registry."""
    impl = _ToolImpl(cdn)
    tools.auto_register_tools([impl.refresh, impl.prefetch_urls])
0 commit comments