@@ -325,8 +325,8 @@ async def generate_sem_sub_tokens(
                for g in cls.ESCAPE_REGEX.finditer(token.value):
                    yield SemTokenInfo.from_token(
                        token,
-                        sem_info[0] if g.group("x") is None or g.end() - g.start() == 1 else RobotSemTokenTypes.ESCAPE,
-                        sem_info[1],
+                        sem_type if g.group("x") is None or g.end() - g.start() == 1 else RobotSemTokenTypes.ESCAPE,
+                        sem_mod,
                        col_offset + g.start(),
                        g.end() - g.start(),
                    )
@@ -491,9 +491,26 @@ async def generate_sem_tokens(
        resources_matchers: Dict[KeywordMatcher, ResourceEntry],
    ) -> AsyncGenerator[SemTokenInfo, None]:
        from robot.parsing.lexer.tokens import Token as RobotToken
+        from robot.parsing.model.statements import Variable
+        from robot.utils.escaping import split_from_equals

        if token.type in {RobotToken.ARGUMENT, RobotToken.TESTCASE_NAME, RobotToken.KEYWORD_NAME}:

+            if isinstance(node, Variable) and token.type == RobotToken.ARGUMENT and node.name and node.name[0] == "&":
+                name, value = split_from_equals(token.value)
+                if value is not None:
+                    length = len(name)
+
+                    yield SemTokenInfo.from_token(
+                        RobotToken(ROBOT_NAMED_ARGUMENT, name, token.lineno, token.col_offset),
+                        RobotSemTokenTypes.NAMED_ARGUMENT,
+                    )
+                    yield SemTokenInfo.from_token(
+                        RobotToken(ROBOT_OPERATOR, "=", token.lineno, token.col_offset + length),
+                        SemanticTokenTypes.OPERATOR,
+                    )
+                    token = RobotToken(token.type, value, token.lineno, token.col_offset + length + 1, token.error)
+
            for sub_token in self._tokenize_variables(
                token,
                ignore_errors=True,
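
A note on the new branch in generate_sem_tokens: for a dict variable definition (a name starting with "&", e.g. "&{PERSON}    name=John"), each ARGUMENT token holds one "key=value" entry. robot.utils.escaping.split_from_equals returns (name, value) with value set to None when the string contains no unescaped "=", so plain items pass through to the regular variable tokenization unchanged. The sketch below illustrates the offset arithmetic the diff relies on; the token value and start column are made-up inputs, and only the real split_from_equals helper from Robot Framework is assumed.

    # Minimal sketch of the splitting step, assuming Robot Framework is
    # installed. The column math mirrors the diff: the key starts at the
    # token's col_offset, "=" sits at col_offset + len(name), and the value
    # begins one column after the "=".
    from robot.utils.escaping import split_from_equals

    token_value = "name=John"  # one argument of:  &{PERSON}    name=John
    col_offset = 13            # hypothetical start column of the argument

    name, value = split_from_equals(token_value)
    if value is not None:  # None means no unescaped "=" was found
        length = len(name)
        print(f"named argument {name!r} at column {col_offset}")
        print(f"operator '=' at column {col_offset + length}")
        print(f"value {value!r} at column {col_offset + length + 1}")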