|
| 1 | +""" |
| 2 | +Contains the main functionality of the JSONSchemaLexer. |
| 3 | +""" |
| 4 | + |
| 5 | +from typing import ClassVar |
| 6 | + |
| 7 | +from pygments.lexers.data import ( # type: ignore[reportMissingTypeStubs] |
| 8 | + JsonLexer, |
| 9 | +) |
| 10 | +from pygments.token import Token |
| 11 | + |
| 12 | + |
class JSONSchemaLexer(JsonLexer):
    """
    A Pygments lexer for JSON Schema documents.

    Delegates tokenization to the stock ``JsonLexer`` and then re-tags
    tokens that are JSON Schema keywords (object keys) or primitive type
    names (string values) so they can be highlighted distinctly from
    ordinary JSON keys and strings.
    """

    name = "JSON Schema Lexer"

    # The primitive type names that may appear as the value of "type".
    data_types: ClassVar[list[str]] = [
        "object",
        "integer",
        "string",
        "number",
        "array",
        "boolean",
        "null",
    ]
    # Core vocabulary keywords ($-prefixed identifiers).
    core_keywords: ClassVar[list[str]] = [
        "$schema",
        "$id",
        "$ref",
        "$defs",
        "$comment",
        "$dynamicAnchor",
        "$dynamicRef",
        "$anchor",
        "$vocabulary",
    ]
    # Applicator vocabulary keywords (combine or apply subschemas).
    applicator_keywords: ClassVar[list[str]] = [
        "oneOf",
        "allOf",
        "anyOf",
        "if",
        "then",
        "else",
        "not",
        "properties",
        "patternProperties",
        "additionalProperties",
        "dependentSchemas",
        "propertyNames",
        "prefixItems",
        "contains",
        "items",
    ]
    # Meta-data vocabulary keywords (annotations, not validation).
    meta_data_keywords: ClassVar[list[str]] = [
        "title",
        "description",
        "default",
        "deprecated",
        "examples",
        "readOnly",
        "writeOnly",
    ]
    # Validation vocabulary keywords.
    validation_keywords: ClassVar[list[str]] = [
        "type",
        "enum",
        "const",
        "minLength",
        "maxLength",
        "pattern",
        "maximum",
        "exclusiveMinimum",
        "multipleOf",
        "exclusiveMaximum",
        "minimum",
        "dependentRequired",
        "minProperties",
        "maxProperties",
        "required",
        "minItems",
        "maxItems",
        "minContains",
        "maxContains",
        "uniqueItems",
    ]
    # Remaining keywords (format, unevaluated-*, content-* vocabularies).
    other_keywords: ClassVar[list[str]] = [
        "format",
        "unevaluatedItems",
        "unevaluatedProperties",
        "contentEncoding",
        "contentMediaType",
        "contentSchema",
        "format_assertion",
    ]

    # Quoted forms precomputed once at class-creation time.  Stored as
    # frozensets so the per-token membership tests below are O(1)
    # instead of a linear scan over ~60 strings for every token.
    parsed_keywords: ClassVar[frozenset[str]] = frozenset(
        f'"{keyword}"'
        for keyword in (
            core_keywords
            + applicator_keywords
            + meta_data_keywords
            + validation_keywords
            + other_keywords
        )
    )

    parsed_data_types: ClassVar[frozenset[str]] = frozenset(
        f'"{data_type}"' for data_type in data_types
    )

    def get_tokens_unprocessed(self, text: str):  # type: ignore[reportUnknownParameterType]
        """
        Yield ``(index, token_type, value)`` triples for *text*.

        Runs the base JSON lexer, then re-tags recognized tokens:
        quoted schema keywords used as object keys become
        ``Token.Keyword``; quoted primitive type names appearing as
        string values become ``Token.Name.Decorator``.  All other
        tokens pass through unchanged.
        """
        for start, token, value in super().get_tokens_unprocessed(text):  # type: ignore[reportUnknownVariableType]
            if token is Token.Name.Tag and value in self.parsed_keywords:
                # An object key that is a JSON Schema keyword.
                yield start, Token.Keyword, value
            elif (
                token is Token.String.Double
                and value in self.parsed_data_types
            ):
                # A string value naming a primitive type (e.g. "type": "array").
                yield start, Token.Name.Decorator, value
            else:
                yield start, token, value
0 commit comments