@@ -249,7 +249,7 @@ exports.Lexer = class Lexer
         colonToken = @token ':', ':', offset: colonOffset
         colonToken.jsxColon = yes if inJSXTag # used by rewriter
       if inJSXTag and tag is 'IDENTIFIER' and prev[0] isnt ':'
-        @token ',', ',', length: 0, origin: tagToken
+        @token ',', ',', length: 0, origin: tagToken, generated: yes

     input.length

@@ -317,7 +317,7 @@ exports.Lexer = class Lexer
       @validateUnicodeCodePointEscapes value, delimiter: quote

     if @atJSXTag()
-      @token ',', ',', length: 0, origin: @prev
+      @token ',', ',', length: 0, origin: @prev, generated: yes

     end

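Both hunks above concern the zero-length commas the lexer fabricates to separate JSX attributes; they have no corresponding source text, so they are now flagged `generated: yes`. For orientation, a hedged example of input that exercises these paths, assuming CoffeeScript 2's built-in JSX support (element and attribute names are illustrative, not from the diff):

# The commas separating the attributes `a="x"` and `b="y"` never appear in
# the source; the lexer inserts zero-length comma tokens itself.
element = <div a="x" b="y">text</div>
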
@@ -666,8 +666,15 @@ exports.Lexer = class Lexer
         return 2
       else if firstChar is '{'
         if prevChar is ':'
-          token = @token '(', '('
+          # This token represents the start of a JSX attribute value
+          # that’s an expression (e.g. the `{b}` in `<div a={b} />`).
+          # Our grammar represents the beginnings of expressions as `(`
+          # tokens, so make this into a `(` token that displays as `{`.
+          token = @token '(', '{'
           @jsxObjAttribute[@jsxDepth] = no
+          # tag attribute name as JSX
+          addTokenData @tokens[@tokens.length - 3],
+            jsx: yes
         else
           token = @token '{', '{'
           @jsxObjAttribute[@jsxDepth] = yes
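The new comment in this hunk already names the motivating case; as a sketch (again assuming CoffeeScript 2 JSX, names illustrative), the `prevChar is ':'` branch fires for a braced attribute value:

# `{b}` is an expression-valued attribute: its `{` is emitted as a `(`
# token whose display text is `{`, and the attribute name token (`a`,
# three tokens back in the stream) is tagged `jsx: yes` via addTokenData.
element = <div a={b} />
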
@@ -683,7 +690,7 @@ exports.Lexer = class Lexer
         @token ',', 'JSX_COMMA', generated: yes
         {tokens, index: end} =
           @matchWithInterpolations INSIDE_JSX, '>', '</', JSX_INTERPOLATION
-        @mergeInterpolationTokens tokens, {endOffset: end}, (value) =>
+        @mergeInterpolationTokens tokens, {endOffset: end, jsx: yes}, (value) =>
           @validateUnicodeCodePointEscapes value, delimiter: '>'
         match = JSX_IDENTIFIER.exec(@chunk[end...]) or JSX_FRAGMENT_IDENTIFIER.exec(@chunk[end...])
         if not match or match[1] isnt "#{jsxTag.name}#{(".#{property}" for property in jsxTag.properties).join ''}"
@@ -717,8 +724,8 @@ exports.Lexer = class Lexer
           @token '}', '}'
           @jsxObjAttribute[@jsxDepth] = no
         else
-          @token ')', ')'
-          @token ',', ','
+          @token ')', '}'
+          @token ',', ',', generated: yes
         return 1
       else
         return 0
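This is the closing half of the expression-valued attribute handled in the earlier hunk: the grammar still pairs the token as `)`, but its display text is now `}` so locations and error messages line up with the source brace, and the attribute separator is marked as generated. A rough sketch of the resulting tokens for the `={b}` part of `<div a={b} />` (tags on the left, display text on the right; locations omitted and the stream simplified):

#   '('  ->  '{'              # opens the attribute-value expression
#   ...                       # tokens for `b`
#   ')'  ->  '}'              # now displays as the closing brace
#   ','  ->  ',' (generated)  # fabricated attribute separator
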
@@ -916,8 +923,8 @@ exports.Lexer = class Lexer

       unless braceInterpolator
         # We are not using `{` and `}`, so wrap the interpolated tokens instead.
-        open = @makeToken 'INTERPOLATION_START', '(', offset: offsetInChunk, length: 0
-        close = @makeToken 'INTERPOLATION_END', ')', offset: offsetInChunk + index, length: 0
+        open = @makeToken 'INTERPOLATION_START', '(', offset: offsetInChunk, length: 0, generated: yes
+        close = @makeToken 'INTERPOLATION_END', ')', offset: offsetInChunk + index, length: 0, generated: yes
         nested = [open, nested..., close]

       # Push a fake `'TOKENS'` token, which will get turned into real tokens later.
@@ -936,10 +943,10 @@ exports.Lexer = class Lexer
   # of `'NEOSTRING'`s are converted using `fn` and turned into strings using
   # `options` first.
   mergeInterpolationTokens: (tokens, options, fn) ->
-    {quote, indent, double, heregex, endOffset} = options
+    {quote, indent, double, heregex, endOffset, jsx} = options

     if tokens.length > 1
-      lparen = @token 'STRING_START', '(', length: quote?.length ? 0, data: {quote}
+      lparen = @token 'STRING_START', '(', length: quote?.length ? 0, data: {quote}, generated: not quote?.length

     firstIndex = @tokens.length
     $ = tokens.length - 1
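A note on the new `generated: not quote?.length` flag: the wrapping `STRING_START` token corresponds to source text only when a real quote delimiter exists; for quote-less merges (such as the JSX call earlier in this diff, which passes `{endOffset: end, jsx: yes}` and no `quote`), the wrapper is fabricated. A minimal sketch of that condition, with `isGenerated` as an illustrative helper rather than anything from the diff:

isGenerated = (quote) -> not quote?.length
isGenerated '"'        # false: a real delimiter exists in the source
isGenerated undefined  # true:  no delimiter, so the wrapper token is fabricated
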
@@ -949,8 +956,7 @@ exports.Lexer = class Lexer
         when 'TOKENS'
           # There are comments (and nothing else) in this interpolation.
           if value.length is 2 and (value[0].comments or value[1].comments)
-            placeholderToken = @makeToken 'JS', ''
-            placeholderToken.generated = yes
+            placeholderToken = @makeToken 'JS', '', generated: yes
             # Use the same location data as the first parenthesis.
             placeholderToken[2] = value[0][2]
             for val in value when val.comments
@@ -968,12 +974,13 @@ exports.Lexer = class Lexer
           addTokenData token, finalChunk: yes if i is $
           addTokenData token, {indent, quote, double}
           addTokenData token, {heregex} if heregex
+          addTokenData token, {jsx} if jsx
           token[0] = 'STRING'
           token[1] = '"' + converted + '"'
           if tokens.length is 1 and quote?
             token[2].first_column -= quote.length
             if token[1].substr(-2, 1) is '\n'
-              token[2].last_line += 1
+              token[2].last_line += 1
               token[2].last_column = quote.length - 1
             else
               token[2].last_column += quote.length
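Together with the `jsx: yes` option passed by `jsxToken` earlier in this diff, this is how the flag reaches the merged tokens: `mergeInterpolationTokens` destructures `jsx` from its options and, for each resulting `'STRING'` token, records it via `addTokenData`, so later stages can presumably tell JSX text apart from ordinary string content. A sketch of the flow, using only names that appear in the diff:

# jsxToken (earlier hunk):
#   @mergeInterpolationTokens tokens, {endOffset: end, jsx: yes}, (value) => ...
# mergeInterpolationTokens (the two hunks above):
#   {quote, indent, double, heregex, endOffset, jsx} = options
#   ...
#   addTokenData token, {jsx} if jsx   # once per merged 'STRING' token
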
@@ -1002,7 +1009,7 @@ exports.Lexer = class Lexer
         ]
       ]
       lparen[2] = lparen.origin[2]
-      rparen = @token 'STRING_END', ')', offset: endOffset - (quote ? '').length, length: quote?.length ? 0
+      rparen = @token 'STRING_END', ')', offset: endOffset - (quote ? '').length, length: quote?.length ? 0, generated: not quote?.length

   # Pairs up a closing token, ensuring that all listed pairs of tokens are
   # correctly balanced throughout the course of the token stream.