Skip to content

Commit c7e538a

Browse files
helixbass authored and GeoffreyBooth committed
AST: token cleanups (#5256)
* astNode() * no override ast() * add suggested comments * astInitialize() * astAddReturns() * separate step * recognize quoted constructor * add params to scope * object colon location data * mark generated JSX tokens * more generated tokens * Add explanation for token seeming mismatch
1 parent 44be72a commit c7e538a

File tree

2 files changed

+56
-28
lines changed

2 files changed

+56
-28
lines changed

lib/coffeescript/lexer.js

Lines changed: 35 additions & 14 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/lexer.coffee

Lines changed: 21 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,7 @@ exports.Lexer = class Lexer
249249
colonToken = @token ':', ':', offset: colonOffset
250250
colonToken.jsxColon = yes if inJSXTag # used by rewriter
251251
if inJSXTag and tag is 'IDENTIFIER' and prev[0] isnt ':'
252-
@token ',', ',', length: 0, origin: tagToken
252+
@token ',', ',', length: 0, origin: tagToken, generated: yes
253253

254254
input.length
255255

@@ -317,7 +317,7 @@ exports.Lexer = class Lexer
317317
@validateUnicodeCodePointEscapes value, delimiter: quote
318318

319319
if @atJSXTag()
320-
@token ',', ',', length: 0, origin: @prev
320+
@token ',', ',', length: 0, origin: @prev, generated: yes
321321

322322
end
323323

@@ -666,8 +666,15 @@ exports.Lexer = class Lexer
666666
return 2
667667
else if firstChar is '{'
668668
if prevChar is ':'
669-
token = @token '(', '('
669+
# This token represents the start of a JSX attribute value
670+
# that’s an expression (e.g. the `{b}` in `<div a={b} />`).
671+
# Our grammar represents the beginnings of expressions as `(`
672+
# tokens, so make this into a `(` token that displays as `{`.
673+
token = @token '(', '{'
670674
@jsxObjAttribute[@jsxDepth] = no
675+
# tag attribute name as JSX
676+
addTokenData @tokens[@tokens.length - 3],
677+
jsx: yes
671678
else
672679
token = @token '{', '{'
673680
@jsxObjAttribute[@jsxDepth] = yes
@@ -683,7 +690,7 @@ exports.Lexer = class Lexer
683690
@token ',', 'JSX_COMMA', generated: yes
684691
{tokens, index: end} =
685692
@matchWithInterpolations INSIDE_JSX, '>', '</', JSX_INTERPOLATION
686-
@mergeInterpolationTokens tokens, {endOffset: end}, (value) =>
693+
@mergeInterpolationTokens tokens, {endOffset: end, jsx: yes}, (value) =>
687694
@validateUnicodeCodePointEscapes value, delimiter: '>'
688695
match = JSX_IDENTIFIER.exec(@chunk[end...]) or JSX_FRAGMENT_IDENTIFIER.exec(@chunk[end...])
689696
if not match or match[1] isnt "#{jsxTag.name}#{(".#{property}" for property in jsxTag.properties).join ''}"
@@ -717,8 +724,8 @@ exports.Lexer = class Lexer
717724
@token '}', '}'
718725
@jsxObjAttribute[@jsxDepth] = no
719726
else
720-
@token ')', ')'
721-
@token ',', ','
727+
@token ')', '}'
728+
@token ',', ',', generated: yes
722729
return 1
723730
else
724731
return 0
@@ -916,8 +923,8 @@ exports.Lexer = class Lexer
916923

917924
unless braceInterpolator
918925
# We are not using `{` and `}`, so wrap the interpolated tokens instead.
919-
open = @makeToken 'INTERPOLATION_START', '(', offset: offsetInChunk, length: 0
920-
close = @makeToken 'INTERPOLATION_END', ')', offset: offsetInChunk + index, length: 0
926+
open = @makeToken 'INTERPOLATION_START', '(', offset: offsetInChunk, length: 0, generated: yes
927+
close = @makeToken 'INTERPOLATION_END', ')', offset: offsetInChunk + index, length: 0, generated: yes
921928
nested = [open, nested..., close]
922929

923930
# Push a fake `'TOKENS'` token, which will get turned into real tokens later.
@@ -936,10 +943,10 @@ exports.Lexer = class Lexer
936943
# of `'NEOSTRING'`s are converted using `fn` and turned into strings using
937944
# `options` first.
938945
mergeInterpolationTokens: (tokens, options, fn) ->
939-
{quote, indent, double, heregex, endOffset} = options
946+
{quote, indent, double, heregex, endOffset, jsx} = options
940947

941948
if tokens.length > 1
942-
lparen = @token 'STRING_START', '(', length: quote?.length ? 0, data: {quote}
949+
lparen = @token 'STRING_START', '(', length: quote?.length ? 0, data: {quote}, generated: not quote?.length
943950

944951
firstIndex = @tokens.length
945952
$ = tokens.length - 1
@@ -949,8 +956,7 @@ exports.Lexer = class Lexer
949956
when 'TOKENS'
950957
# There are comments (and nothing else) in this interpolation.
951958
if value.length is 2 and (value[0].comments or value[1].comments)
952-
placeholderToken = @makeToken 'JS', ''
953-
placeholderToken.generated = yes
959+
placeholderToken = @makeToken 'JS', '', generated: yes
954960
# Use the same location data as the first parenthesis.
955961
placeholderToken[2] = value[0][2]
956962
for val in value when val.comments
@@ -968,12 +974,13 @@ exports.Lexer = class Lexer
968974
addTokenData token, finalChunk: yes if i is $
969975
addTokenData token, {indent, quote, double}
970976
addTokenData token, {heregex} if heregex
977+
addTokenData token, {jsx} if jsx
971978
token[0] = 'STRING'
972979
token[1] = '"' + converted + '"'
973980
if tokens.length is 1 and quote?
974981
token[2].first_column -= quote.length
975982
if token[1].substr(-2, 1) is '\n'
976-
token[2].last_line +=1
983+
token[2].last_line += 1
977984
token[2].last_column = quote.length - 1
978985
else
979986
token[2].last_column += quote.length
@@ -1002,7 +1009,7 @@ exports.Lexer = class Lexer
10021009
]
10031010
]
10041011
lparen[2] = lparen.origin[2]
1005-
rparen = @token 'STRING_END', ')', offset: endOffset - (quote ? '').length, length: quote?.length ? 0
1012+
rparen = @token 'STRING_END', ')', offset: endOffset - (quote ? '').length, length: quote?.length ? 0, generated: not quote?.length
10061013

10071014
# Pairs up a closing token, ensuring that all listed pairs of tokens are
10081015
# correctly balanced throughout the course of the token stream.

0 commit comments

Comments (0)