@@ -23,6 +23,7 @@
 from robot.parsing.lexer.tokens import Token
 from robot.parsing.model.statements import (
     Arguments,
+    Documentation,
     Fixture,
     KeywordCall,
     LibraryImport,
62
63
from robotcode .robot .diagnostics .namespace import DEFAULT_BDD_PREFIXES , Namespace
63
64
from robotcode .robot .utils import get_robot_version
64
65
from robotcode .robot .utils .ast import (
66
+ cached_isinstance ,
65
67
iter_nodes ,
66
68
iter_over_keyword_names_and_owners ,
67
69
token_in_range ,
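
For context, `cached_isinstance` replaces the plain `isinstance` checks throughout this file. A minimal sketch of how such a helper can work — this illustrates the memoization idea only, not the actual robotcode implementation, and it assumes ordinary classes without custom `__instancecheck__` hooks:

    from functools import lru_cache
    from typing import Any, Tuple

    @lru_cache(maxsize=None)
    def _issubclass_cached(cls: type, classes: Tuple[type, ...]) -> bool:
        # The MRO walk runs once per (concrete type, classes) pair;
        # every later call is a single dictionary lookup.
        return issubclass(cls, classes)

    def cached_isinstance(obj: Any, *classes: type) -> bool:
        # Behaves like isinstance(obj, classes) for plain classes,
        # which is why call sites pass the classes variadically.
        return _issubclass_cached(type(obj), classes)

This is also why the diff flattens tuples such as `isinstance(node, (KeywordCall, Fixture))` into variadic calls like `cached_isinstance(node, KeywordCall, Fixture)`.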
@@ -120,6 +122,7 @@ class RobotSemTokenTypes(Enum):
 
 class RobotSemTokenModifiers(Enum):
     BUILTIN = "builtin"
+    EMBEDDED = "embedded"
 
 
 @dataclass
@@ -340,6 +343,7 @@ def generate_sem_sub_tokens(
         node: ast.AST,
         col_offset: Optional[int] = None,
         length: Optional[int] = None,
+        yield_arguments: bool = False,
     ) -> Iterator[SemTokenInfo]:
         sem_info = cls.mapping().get(token.type, None) if token.type is not None else None
         if sem_info is not None:
@@ -391,7 +395,7 @@ def generate_sem_sub_tokens(
                 yield SemTokenInfo.from_token(token, sem_type, sem_mod)
 
             elif token.type in [Token.KEYWORD, ROBOT_KEYWORD_INNER] or (
-                token.type == Token.NAME and isinstance(node, (Fixture, Template, TestTemplate))
+                token.type == Token.NAME and cached_isinstance(node, Fixture, Template, TestTemplate)
             ):
                 if (
                     namespace.find_keyword(
@@ -461,6 +465,9 @@ def generate_sem_sub_tokens(
 
                 kw_index = len(kw_namespace) + 1 if kw_namespace else 0
 
+                if token.type == Token.NAME and kw_doc is not None:
+                    sem_type = RobotSemTokenTypes.KEYWORD
+
                 if kw_namespace:
                     kw = token.value[kw_index:]
 
@@ -501,13 +508,25 @@ def generate_sem_sub_tokens(
                                     col_offset + kw_index + start,
                                     arg_start - start,
                                 )
-                                yield SemTokenInfo.from_token(
-                                    token,
-                                    RobotSemTokenTypes.EMBEDDED_ARGUMENT,
-                                    sem_mod,
-                                    col_offset + kw_index + arg_start,
-                                    arg_end - arg_start,
+
+                                embedded_token = Token(
+                                    Token.ARGUMENT,
+                                    token.value[arg_start:arg_end],
+                                    token.lineno,
+                                    token.col_offset + arg_start,
                                 )
+
+                                for sub_token in ModelHelper.tokenize_variables(
+                                    embedded_token,
+                                    ignore_errors=True,
+                                    identifiers="$@&%",
+                                ):
+                                    for e in cls.generate_sem_sub_tokens(
+                                        namespace, builtin_library_doc, sub_token, node, yield_arguments=True
+                                    ):
+                                        e.sem_modifiers = {RobotSemTokenModifiers.EMBEDDED}
+                                        yield e
+
                                 start = arg_end + 1
 
                                 if start < end:
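
The block above changes how embedded arguments are highlighted: instead of emitting one flat EMBEDDED_ARGUMENT span, the matched slice is wrapped in a fresh ARGUMENT token, split into its variable and literal parts, and each part is recursively re-emitted with the new EMBEDDED modifier. Robot Framework's own Token type exposes the underlying splitting; a minimal standalone illustration (assuming a recent robotframework install — `ModelHelper.tokenize_variables` used above is robotcode's wrapper with extra options):

    from robot.parsing.lexer.tokens import Token

    # An embedded-argument keyword name like: user ${name} logs in
    embedded = Token(Token.ARGUMENT, "user ${name} logs in", 1, 0)

    for sub in embedded.tokenize_variables():
        # ${name} comes back as a VARIABLE token; the literal pieces stay
        # ARGUMENT tokens, each with its own col_offset, so every piece
        # can carry its own semantic token type plus the EMBEDDED modifier.
        print(sub.type, repr(sub.value), sub.col_offset)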
@@ -521,7 +540,7 @@ def generate_sem_sub_tokens(
 
                 else:
                     yield SemTokenInfo.from_token(token, sem_type, sem_mod, col_offset + kw_index, len(kw))
-            elif token.type == Token.NAME and isinstance(node, (LibraryImport, ResourceImport, VariablesImport)):
+            elif token.type == Token.NAME and cached_isinstance(node, LibraryImport, ResourceImport, VariablesImport):
                 if "\\" in token.value:
                     if col_offset is None:
                         col_offset = token.col_offset
@@ -543,7 +562,9 @@ def generate_sem_sub_tokens(
                         length,
                     )
             elif get_robot_version() >= (5, 0) and token.type == Token.OPTION:
-                if (isinstance(node, ExceptHeader) or isinstance(node, WhileHeader)) and "=" in token.value:
+                if (
+                    cached_isinstance(node, ExceptHeader) or cached_isinstance(node, WhileHeader)
+                ) and "=" in token.value:
                     if col_offset is None:
                         col_offset = token.col_offset
 
@@ -589,7 +610,12 @@ def generate_sem_sub_tokens(
                         1,
                     )
         else:
-            if token.type != Token.ARGUMENT or token.type != Token.NAME and isinstance(node, Metadata):
+            if (
+                yield_arguments
+                or token.type != Token.ARGUMENT
+                or token.type != Token.NAME
+                and cached_isinstance(node, Metadata)
+            ):
                 yield SemTokenInfo.from_token(token, sem_type, sem_mod, col_offset, length)
 
     def generate_sem_tokens(
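
A note on the rewritten guard: `and` binds tighter than `or` in Python, so the condition parses as `yield_arguments or (token.type != Token.ARGUMENT) or ((token.type != Token.NAME) and cached_isinstance(node, Metadata))` — passing `yield_arguments=True` short-circuits the whole test, which is how the recursive embedded-argument calls force their sub-tokens through. A quick self-check of that grouping with plain booleans:

    for a in (False, True):              # yield_arguments
        for b in (False, True):          # token.type != Token.ARGUMENT
            for c in (False, True):      # token.type != Token.NAME
                for d in (False, True):  # cached_isinstance(node, Metadata)
                    assert (a or b or c and d) == (a or b or (c and d))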
@@ -602,25 +628,25 @@ def generate_sem_tokens(
         if (
             token.type in {Token.ARGUMENT, Token.TESTCASE_NAME, Token.KEYWORD_NAME}
             or token.type == Token.NAME
-            and isinstance(node, (VariablesImport, LibraryImport, ResourceImport))
+            and cached_isinstance(node, VariablesImport, LibraryImport, ResourceImport)
         ):
-            if (isinstance(node, Variable) and token.type == Token.ARGUMENT and node.name and node.name[0] == "&") or (
-                isinstance(node, Arguments)
-            ):
+            if (
+                cached_isinstance(node, Variable) and token.type == Token.ARGUMENT and node.name and node.name[0] == "&"
+            ) or (cached_isinstance(node, Arguments)):
                 name, value = split_from_equals(token.value)
                 if value is not None:
                     length = len(name)
 
                     yield SemTokenInfo.from_token(
                         Token(
-                            ROBOT_NAMED_ARGUMENT if isinstance(node, Variable) else SemanticTokenTypes.PARAMETER,
+                            ROBOT_NAMED_ARGUMENT if cached_isinstance(node, Variable) else SemanticTokenTypes.PARAMETER,
                             name,
                             token.lineno,
                             token.col_offset,
                         ),
                         (
                             RobotSemTokenTypes.NAMED_ARGUMENT
-                            if isinstance(node, Variable)
+                            if cached_isinstance(node, Variable)
                             else SemanticTokenTypes.PARAMETER
                         ),
                     )
@@ -640,7 +666,7 @@ def generate_sem_tokens(
                             token.col_offset + length + 1,
                             token.error,
                         )
-                elif isinstance(node, Arguments) and name:
+                elif cached_isinstance(node, Arguments) and name:
                     yield SemTokenInfo.from_token(
                         Token(
                             ROBOT_NAMED_ARGUMENT,
@@ -663,11 +689,13 @@ def generate_sem_tokens(
                 ignore_errors=True,
                 identifiers="$" if token.type == Token.KEYWORD_NAME else "$@&%",
             ):
-                for e in self.generate_sem_sub_tokens(namespace, builtin_library_doc, sub_token, node):
+                for e in self.generate_sem_sub_tokens(
+                    namespace, builtin_library_doc, sub_token, node, yield_arguments=True
+                ):
                     yield e
 
         else:
-            for e in self.generate_sem_sub_tokens(namespace, builtin_library_doc, token, node):
+            for e in self.generate_sem_sub_tokens(namespace, builtin_library_doc, token, node, yield_arguments=True):
                 yield e
 
     def generate_run_kw_tokens(
@@ -956,8 +984,8 @@ def get_tokens() -> Iterator[Tuple[Token, ast.AST]]:
             for node in iter_nodes(model):
                 check_current_task_canceled()
 
-                if isinstance(node, Statement):
-                    if isinstance(node, LibraryImport) and node.name:
+                if cached_isinstance(node, Statement):
+                    if cached_isinstance(node, LibraryImport) and node.name:
                         lib_doc = namespace.get_imported_library_libdoc(node.name, node.args, node.alias)
                         kw_doc = lib_doc.inits.keywords[0] if lib_doc and lib_doc.inits else None
                         if lib_doc is not None:
@@ -1009,7 +1037,7 @@ def get_tokens() -> Iterator[Tuple[Token, ast.AST]]:
 
                         yield token, node
                         continue
-                    if isinstance(node, VariablesImport) and node.name:
+                    if cached_isinstance(node, VariablesImport) and node.name:
                         lib_doc = namespace.get_imported_variables_libdoc(node.name, node.args)
                         kw_doc = lib_doc.inits.keywords[0] if lib_doc and lib_doc.inits else None
                         if lib_doc is not None:
@@ -1061,12 +1089,12 @@ def get_tokens() -> Iterator[Tuple[Token, ast.AST]]:
 
                         yield token, node
                         continue
-                    if isinstance(node, (KeywordCall, Fixture)):
+                    if cached_isinstance(node, KeywordCall, Fixture):
                         kw_token = cast(
                             Token,
                             (
                                 node.get_token(Token.KEYWORD)
-                                if isinstance(node, KeywordCall)
+                                if cached_isinstance(node, KeywordCall)
                                 else node.get_token(Token.NAME)
                             ),
                         )
@@ -1109,8 +1137,16 @@ def get_tokens() -> Iterator[Tuple[Token, ast.AST]]:
                                 yield kw_res
 
                         continue
+                    if cached_isinstance(node, Documentation):
+                        for token in node.tokens:
+                            if token.type == Token.ARGUMENT:
+                                continue
+                            yield token, node
+                        continue
 
                     for token in node.tokens:
+                        if token.type == Token.COMMENT:
+                            continue
                         yield token, node
 
             lines = document.get_lines()
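
Both additions above filter the token stream before any semantic token is produced: documentation bodies contribute only their structural tokens (the free-form doc text stays ARGUMENT and is skipped), and COMMENT tokens are dropped from every other statement. A condensed, hypothetical sketch of the same shape — the real logic lives inline in get_tokens above:

    from robot.parsing.lexer.tokens import Token
    from robot.parsing.model.statements import Documentation

    def iter_highlightable_tokens(node):
        # Documentation: keep markers, skip the documentation text itself.
        if isinstance(node, Documentation):
            for token in node.tokens:
                if token.type != Token.ARGUMENT:
                    yield token, node
            return
        # Any other statement: everything except comments.
        for token in node.tokens:
            if token.type != Token.COMMENT:
                yield token, node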
@@ -1136,6 +1172,7 @@ def get_tokens() -> Iterator[Tuple[Token, ast.AST]]:
                         ),
                     ),
                 )
+
                 token_col_offset = token_range.start.character
                 token_length = token_range.end.character - token_range.start.character
 