61 | 61 | from .protocol_part import RobotLanguageServerProtocolPart
62 | 62 |
63 | 63 | ROBOT_KEYWORD_INNER = "KEYWORD_INNER"
   | 64 | +ROBOT_NAMED_ARGUMENT = "NAMED_ARGUMENT"
   | 65 | +ROBOT_OPERATOR = "OPERATOR"
64 | 66 |
65 | 67 |
66 | 68 | class RobotSemTokenTypes(Enum):

@@ -175,6 +177,8 @@ def generate_mapping(cls) -> Dict[str, Tuple[Enum, Optional[Set[Enum]]]]:
175 | 177 |             frozenset({RobotToken.VARIABLE, RobotToken.ASSIGN}): (RobotSemTokenTypes.VARIABLE, None),
176 | 178 |             frozenset({RobotToken.KEYWORD}): (RobotSemTokenTypes.KEYWORD, None),
177 | 179 |             frozenset({ROBOT_KEYWORD_INNER}): (RobotSemTokenTypes.KEYWORD_INNER, None),
    | 180 | +            frozenset({ROBOT_NAMED_ARGUMENT}): (RobotSemTokenTypes.VARIABLE, None),
    | 181 | +            frozenset({ROBOT_OPERATOR}): (SemanticTokenTypes.OPERATOR, None),
178 | 182 |             frozenset({RobotToken.NAME}): (RobotSemTokenTypes.NAME, None),
179 | 183 |             frozenset({RobotToken.CONTINUATION}): (RobotSemTokenTypes.CONTINUATION, None),
180 | 184 |             frozenset({RobotToken.SEPARATOR}): (RobotSemTokenTypes.SEPARATOR, None),
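
Review note: with these two mapping entries the name part of a named argument is highlighted like a variable and the `=` sign as the LSP-standard `operator` token. A minimal sketch of the intended classification; the Robot snippet and the literal strings below are illustrative, not taken from this change:

```python
# Hypothetical illustration: classification of the cell "message=Hello"
# in a keyword call such as
#
#     Log    message=Hello
#
# lexer token type emitted by this change -> semantic token type on the wire
expected = {
    "NAMED_ARGUMENT": "variable",  # "message", via RobotSemTokenTypes.VARIABLE
    "OPERATOR": "operator",        # "=", via SemanticTokenTypes.OPERATOR
}
```
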
@@ -370,26 +374,6 @@ async def generate_sem_tokens(
370 | 374 |         resources_matchers: Container[KeywordMatcher],
371 | 375 |     ) -> AsyncGenerator[SemTokenInfo, None]:
372 | 376 |         from robot.parsing.lexer.tokens import Token as RobotToken
373 |     | -        from robot.parsing.model.statements import Fixture, KeywordCall
374 |     | -        from robot.utils.escaping import split_from_equals
375 |     | -
376 |     | -        if token.type in {RobotToken.ARGUMENT} and isinstance(node, (KeywordCall, Fixture)):
377 |     | -            name, value = split_from_equals(token.value)
378 |     | -            if value is not None:
379 |     | -                if isinstance(node, KeywordCall):
380 |     | -                    doc = await namespace.find_keyword(node.keyword)
381 |     | -                elif isinstance(node, Fixture):
382 |     | -                    doc = await namespace.find_keyword(node.name)
383 |     | -                else:
384 |     | -                    doc = None
385 |     | -
386 |     | -                if doc and any(v for v in doc.args if v.name == name):
387 |     | -                    length = len(name)
388 |     | -                    yield SemTokenInfo.from_token(token, RobotSemTokenTypes.VARIABLE, length=length)
389 |     | -                    yield SemTokenInfo.from_token(
390 |     | -                        token, SemanticTokenTypes.OPERATOR, col_offset=token.col_offset + length, length=1
391 |     | -                    )
392 |     | -                    token = RobotToken(token.type, value, token.lineno, token.col_offset + length + 1, token.error)
393 | 377 |
394 | 378 |         if token.type in {*RobotToken.ALLOW_VARIABLES, RobotToken.KEYWORD, ROBOT_KEYWORD_INNER}:
395 | 379 |
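
Review note: the inline `name=value` handling is removed here because `generate_sem_tokens` sees one token at a time and had to re-resolve the keyword for every single argument; the logic reappears below as `generate_keyword_tokens`, which resolves the keyword at most once per call. Both versions rely on `split_from_equals` from `robot.utils.escaping`, which splits only on the first unescaped `=` and returns `None` as the second element when there is nothing to split. A quick behavioral sketch (the escaped case is my reading of Robot Framework's escaping rules, worth double-checking):

```python
from robot.utils.escaping import split_from_equals

print(split_from_equals("message=Hello"))  # ('message', 'Hello')
print(split_from_equals("just a value"))   # ('just a value', None)
print(split_from_equals(r"esc\=aped"))     # ('esc\\=aped', None) -- escaped '=' does not split
```
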
@@ -431,9 +415,9 @@ async def skip_non_data_tokens() -> AsyncGenerator[Tuple[Token, ast.AST], None]:
431 | 415 |                 yield arguments[0], node,
432 | 416 |                 arguments = arguments[1:]
433 | 417 |
434 |     | -        yield kw_token, node
435 |     | -
436 | 418 |         if kw_doc is not None and kw_doc.is_any_run_keyword():
    | 419 | +            yield kw_token, node
    | 420 | +
437 | 421 |             async for b in skip_non_data_tokens():
438 | 422 |                 yield b
439 | 423 |
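
Review note: moving `yield kw_token, node` into the run-keyword branch avoids emitting the keyword token twice, because in the non-run-keyword path the new `generate_keyword_tokens` (added below) yields `kw_token` itself before processing the arguments.
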
@@ -592,9 +576,41 @@ async def generate_run_kw_if() -> AsyncGenerator[Tuple[Token, ast.AST], None]:
592 | 576 |             async for e in generate_run_kw_if():
593 | 577 |                 yield e
594 | 578 |         else:
    | 579 | +            async for a in self.generate_keyword_tokens(namespace, kw_token, arguments, node):
    | 580 | +                yield a
    | 581 | +
    | 582 | +    async def generate_keyword_tokens(
    | 583 | +        self,
    | 584 | +        namespace: Namespace,
    | 585 | +        kw_token: Token,
    | 586 | +        arguments: List[Token],
    | 587 | +        node: ast.AST,
    | 588 | +    ) -> AsyncGenerator[Tuple[Token, ast.AST], None]:
    | 589 | +        from robot.parsing.lexer import Token as RobotToken
    | 590 | +        from robot.utils.escaping import split_from_equals
    | 591 | +
    | 592 | +        yield kw_token, node
    | 593 | +
    | 594 | +        doc: Optional[KeywordDoc] = None
    | 595 | +        for token in arguments:
    | 596 | +            if token.type in [RobotToken.ARGUMENT]:
    | 597 | +                name, value = split_from_equals(token.value)
    | 598 | +                if value is not None:
    | 599 | +                    if doc is None:
    | 600 | +                        doc = await namespace.find_keyword(kw_token.value)
    | 601 | +
    | 602 | +                    if doc and any(v for v in doc.args if v.name == name):
    | 603 | +                        length = len(name)
    | 604 | +                        yield RobotToken(ROBOT_NAMED_ARGUMENT, name, token.lineno, token.col_offset), node
    | 605 | +
    | 606 | +                        yield RobotToken(ROBOT_OPERATOR, "=", token.lineno, token.col_offset + length), node
    | 607 | +                        yield RobotToken(
    | 608 | +                            token.type, value, token.lineno, token.col_offset + length + 1, token.error
    | 609 | +                        ), node
    | 610 | +
    | 611 | +                        continue
595 | 612 |
596 |     | -        for a in arguments:
597 |     | -            yield a, node
    | 613 | +            yield token, node
598 | 614 |
599 | 615 |     @_logger.call
600 | 616 |     async def collect(
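
Review note: `doc` is resolved lazily, only once the first `name=value` argument appears, and is then cached for the remaining arguments; anything that is not a matching named argument falls through to the plain `yield token, node`. The offset arithmetic re-slices one ARGUMENT token into three adjacent tokens on the same line. A worked example with invented coordinates, using plain tuples in place of `RobotToken`:

```python
# An ARGUMENT token "message=Hello" at col_offset=10 is re-emitted as three
# adjacent tokens, shown here as (type, value, col_offset):
name, value, col = "message", "Hello", 10
pieces = [
    ("NAMED_ARGUMENT", name, col),             # "message" at columns 10..16
    ("OPERATOR", "=", col + len(name)),        # "=" at column 17
    ("ARGUMENT", value, col + len(name) + 1),  # "Hello" at columns 18..22
]
# The three pieces tile the original cell exactly: no gaps, no overlap.
assert col + len(name) + 1 + len(value) == col + len(name + "=" + value)
```
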
@@ -634,19 +650,30 @@ async def get_tokens() -> AsyncGenerator[Tuple[Token, ast.AST], None]:
634 | 650 |                                 kw = name
635 | 651 |                         if kw:
636 | 652 |                             kw_doc = await namespace.find_keyword(kw_token.value)
637 |     | -                            if kw_doc is not None and kw_doc.is_any_run_keyword():
638 |     | -                                async for t in self.generate_run_kw_tokens(
639 |     | -                                    namespace,
640 |     | -                                    builtin_library_doc,
641 |     | -                                    libraries_matchers,
642 |     | -                                    resources_matchers,
643 |     | -                                    kw_doc,
644 |     | -                                    kw_token,
645 |     | -                                    node.tokens[node.tokens.index(kw_token) + 1 :],
646 |     | -                                    node,
647 |     | -                                ):
648 |     | -                                    yield t
649 |     | -                                continue
    | 653 | +                            if kw_doc is not None:
    | 654 | +                                if kw_doc.is_any_run_keyword():
    | 655 | +                                    async for t in self.generate_run_kw_tokens(
    | 656 | +                                        namespace,
    | 657 | +                                        builtin_library_doc,
    | 658 | +                                        libraries_matchers,
    | 659 | +                                        resources_matchers,
    | 660 | +                                        kw_doc,
    | 661 | +                                        kw_token,
    | 662 | +                                        node.tokens[node.tokens.index(kw_token) + 1 :],
    | 663 | +                                        node,
    | 664 | +                                    ):
    | 665 | +                                        yield t
    | 666 | +                                    continue
    | 667 | +                                else:
    | 668 | +                                    async for t in self.generate_keyword_tokens(
    | 669 | +                                        namespace,
    | 670 | +                                        kw_token,
    | 671 | +                                        node.tokens[node.tokens.index(kw_token) + 1 :],
    | 672 | +                                        node,
    | 673 | +                                    ):
    | 674 | +                                        yield t
    | 675 | +
    | 676 | +                                    continue
650 | 677 |
651 | 678 |                 for token in node.tokens:
652 | 679 |                     yield token, node
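
Review note: with this dispatch every resolved keyword call goes through one of the two generators: run-keyword wrappers keep the recursive treatment, plain calls get the new named-argument-aware tokenization, and only unresolved calls fall back to yielding the raw node tokens below. One small observation: `kw_doc` is resolved here and `generate_keyword_tokens` may resolve it a second time via `namespace.find_keyword(kw_token.value)`; threading the already-resolved doc through as a parameter would save that lookup, though the second one only fires when a named argument is actually present.
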