diff --git a/.travis.yml b/.travis.yml index 080f28c..a948ac8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,15 +1,22 @@ language: node_js node_js: - - "6" - - "7" - "8" - "9" - "10" - "11" + - "12" + - "13" + - "14" + - "15" + - "16" + - "17" + - "18" + - "19" + - "20" jobs: include: - stage: test & cover - node_js: "12" + node_js: "21" script: - npm run cover - npm run coverage \ No newline at end of file diff --git a/src/lib/Lexer.ts b/src/lib/Lexer.ts index 56f5a98..591dc26 100644 --- a/src/lib/Lexer.ts +++ b/src/lib/Lexer.ts @@ -1,6 +1,6 @@ import {Token} from "./Token"; -import {TokenType} from "./TokenType"; import {SyntaxError} from "./SyntaxError"; +import {TokenType} from "./TokenType"; enum LexerState { COMMENT = 'COMMENT', @@ -328,7 +328,7 @@ export class Lexer { let match = this.statementStartRegExp.exec(data); // push the text - this.pushToken(TokenType.TEXT, match ? data.substr(0, match.index) : data); + this.pushToken("TEXT", match ? data.substr(0, match.index) : data); if (match) { let tag: string = match[1]; @@ -339,7 +339,7 @@ export class Lexer { this.currentVarBlockLine = this.line; this.currentVarBlockColumn = this.column; - this.pushToken(TokenType.COMMENT_START, tag); + this.pushToken("COMMENT_START", tag); this.pushModifier(modifier); this.pushState(LexerState.COMMENT); break; @@ -348,30 +348,30 @@ export class Lexer { this.currentVarBlockLine = this.line; this.currentVarBlockColumn = this.column; - this.pushToken(TokenType.TAG_START, match[1]); + this.pushToken("TAG_START", match[1]); this.pushModifier(match[2]); - this.pushToken(TokenType.WHITESPACE, match[3]); - this.pushToken(TokenType.NAME, match[4]); // verbatim itself - this.pushToken(TokenType.WHITESPACE, match[5]); + this.pushToken("WHITESPACE", match[3]); + this.pushToken("NAME", match[4]); // verbatim itself + this.pushToken("WHITESPACE", match[5]); this.pushModifier(match[6]); - this.pushToken(TokenType.TAG_END, match[7]); + this.pushToken("TAG_END", match[7]); this.pushState(LexerState.VERBATIM); } else if ((match = this.lineTagRegExp.exec(this.source.substring(this.cursor))) !== null) { - this.pushToken(TokenType.TAG_START, match[1]); + this.pushToken("TAG_START", match[1]); if (match[2].length > 0) { - this.pushToken(TokenType.WHITESPACE, match[2]); + this.pushToken("WHITESPACE", match[2]); } - this.pushToken(TokenType.NAME, match[3]); // line itself - this.pushToken(TokenType.WHITESPACE, match[4]); - this.pushToken(TokenType.NUMBER, match[5]); + this.pushToken("NAME", match[3]); // line itself + this.pushToken("WHITESPACE", match[4]); + this.pushToken("NUMBER", match[5]); if (match[6].length > 0) { - this.pushToken(TokenType.WHITESPACE, match[6]); + this.pushToken("WHITESPACE", match[6]); } - this.pushToken(TokenType.TAG_END, match[7]); + this.pushToken("TAG_END", match[7]); this.line = Number(match[5]); this.column = 0; @@ -379,7 +379,7 @@ export class Lexer { this.currentVarBlockLine = this.line; this.currentVarBlockColumn = this.column; - this.pushToken(TokenType.TAG_START, tag); + this.pushToken("TAG_START", tag); this.pushModifier(modifier); this.pushState(LexerState.TAG); } @@ -388,7 +388,7 @@ export class Lexer { this.currentVarBlockLine = this.line; this.currentVarBlockColumn = this.column; - this.pushToken(TokenType.VARIABLE_START, tag); + this.pushToken("VARIABLE_START", tag); this.pushModifier(modifier); this.pushState(LexerState.VARIABLE); @@ -398,7 +398,7 @@ export class Lexer { } } - this.pushToken(TokenType.EOF, null); + this.pushToken("EOF", null); if (this.state == 
LexerState.VARIABLE) { throw new SyntaxError(`Unclosed variable opened at {${this.currentVarBlockLine}:${this.currentVarBlockColumn}}.`, this.line, this.column); @@ -420,28 +420,28 @@ export class Lexer { // test operator if ((match = this.testOperatorRegExp.exec(candidate)) !== null) { - this.pushToken(TokenType.TEST_OPERATOR, match[0]); + this.pushToken("TEST_OPERATOR", match[0]); } // arrow else if ((match = this.arrowOperatorRegExp.exec(candidate)) !== null) { - this.pushToken(TokenType.ARROW, match[0]); + this.pushToken("ARROW", match[0]); } // operator else if ((match = this.operatorRegExp.exec(candidate)) !== null) { - this.pushToken(TokenType.OPERATOR, match[0]); + this.pushToken("OPERATOR", match[0]); } // name else if ((match = this.nameRegExp.exec(candidate)) !== null) { - this.pushToken(TokenType.NAME, match[0]); + this.pushToken("NAME", match[0]); } // number else if ((match = this.numberRegExp.exec(candidate)) !== null) { - this.pushToken(TokenType.NUMBER, match[0]); + this.pushToken("NUMBER", match[0]); } // opening bracket else if (this.openingBracketRegExp.test(singleCharacterCandidate)) { this.pushScope(singleCharacterCandidate); - this.pushToken(TokenType.PUNCTUATION, singleCharacterCandidate); + this.pushToken("PUNCTUATION", singleCharacterCandidate); } // closing bracket else if (this.closingBracketRegExp.test(singleCharacterCandidate)) { @@ -453,12 +453,12 @@ export class Lexer { throw new SyntaxError(`Unclosed bracket "${this.scope.value}" opened at {${this.scope.line}:${this.scope.column}}.`, this.line, this.column); } - this.pushToken(TokenType.PUNCTUATION, singleCharacterCandidate); + this.pushToken("PUNCTUATION", singleCharacterCandidate); this.popScope(); } // punctuation else if (this.punctuationRegExp.test(singleCharacterCandidate)) { - this.pushToken(TokenType.PUNCTUATION, singleCharacterCandidate); + this.pushToken("PUNCTUATION", singleCharacterCandidate); } // string else if ((match = this.stringRegExp.exec(candidate)) !== null) { @@ -466,20 +466,20 @@ export class Lexer { let value = match[2] || match[5]; let closingBracket = match[3] || match[6]; - this.pushToken(TokenType.OPENING_QUOTE, openingBracket); + this.pushToken("OPENING_QUOTE", openingBracket); if (value !== undefined) { - this.pushToken(TokenType.STRING, value); + this.pushToken("STRING", value); } - this.pushToken(TokenType.CLOSING_QUOTE, closingBracket); + this.pushToken("CLOSING_QUOTE", closingBracket); } // double quoted string else if ((match = this.doubleQuotedStringDelimiterRegExp.exec(candidate)) !== null) { let value = match[0]; this.pushScope(value, value); - this.pushToken(TokenType.OPENING_QUOTE, value); + this.pushToken("OPENING_QUOTE", value); this.pushState(LexerState.DOUBLE_QUOTED_STRING); } // unlexable @@ -499,7 +499,7 @@ export class Lexer { let modifier = match[1] || match[3]; this.pushModifier(modifier); - this.pushToken(TokenType.TAG_END, tag); + this.pushToken("TAG_END", tag); this.popState(); } else { this.lexExpression(); @@ -513,7 +513,7 @@ export class Lexer { if (!this.scope && ((match = this.variableEndRegExp.exec(this.source.substring(this.cursor))) !== null)) { this.pushModifier(match[1]); - this.pushToken(TokenType.VARIABLE_END, match[2]); + this.pushToken("VARIABLE_END", match[2]); this.popState(); } else { this.lexExpression(); @@ -533,15 +533,15 @@ export class Lexer { let text = this.source.substr(this.cursor, match.index); - this.pushToken(TokenType.TEXT, text); + this.pushToken("TEXT", text); - this.pushToken(TokenType.TAG_START, match[1]); + 
this.pushToken("TAG_START", match[1]); this.pushModifier(match[2]); - this.pushToken(TokenType.WHITESPACE, match[3]); - this.pushToken(TokenType.NAME, match[4]); // endverbatim itself - this.pushToken(TokenType.WHITESPACE, match[5]); + this.pushToken("WHITESPACE", match[3]); + this.pushToken("NAME", match[4]); // endverbatim itself + this.pushToken("WHITESPACE", match[5]); this.pushModifier(match[6]); - this.pushToken(TokenType.TAG_END, match[7]); + this.pushToken("TAG_END", match[7]); this.popState(); } @@ -550,7 +550,7 @@ export class Lexer { let candidate: string = this.source.substring(this.cursor); if ((match = this.whitespaceRegExp.exec(candidate)) !== null) { - this.pushToken(TokenType.WHITESPACE, match[0]); + this.pushToken("WHITESPACE", match[0]); } } @@ -570,10 +570,10 @@ export class Lexer { let modifier = match[2] || match[5]; let value = match[3] || match[6]; - this.pushToken(TokenType.TEXT, text); + this.pushToken("TEXT", text); this.lexWhitespace(); this.pushModifier(modifier); - this.pushToken(TokenType.COMMENT_END, value); + this.pushToken("COMMENT_END", value); this.popState(); } @@ -583,14 +583,14 @@ export class Lexer { if ((match = this.interpolationStartRegExp.exec(this.source.substring(this.cursor))) !== null) { let tag = match[1]; - this.pushToken(TokenType.INTERPOLATION_START, tag); - this.pushToken(TokenType.WHITESPACE, match[2]); + this.pushToken("INTERPOLATION_START", tag); + this.pushToken("WHITESPACE", match[2]); this.pushScope(tag, this.interpolationPair[1]); this.pushState(LexerState.INTERPOLATION); } else if (((match = this.doubleQuotedStringContentRegExp.exec(this.source.substring(this.cursor))) !== null) && (match[0].length > 0)) { - this.pushToken(TokenType.STRING, match[0]); + this.pushToken("STRING", match[0]); } else { - this.pushToken(TokenType.CLOSING_QUOTE, this.scope.value); + this.pushToken("CLOSING_QUOTE", this.scope.value); this.popScope(); this.popState(); } @@ -603,8 +603,8 @@ export class Lexer { let tag = match[2]; let whitespace = match[1] || ''; - this.pushToken(TokenType.WHITESPACE, whitespace); - this.pushToken(TokenType.INTERPOLATION_END, tag); + this.pushToken("WHITESPACE", whitespace); + this.pushToken("INTERPOLATION_END", tag); this.popScope(); this.popState(); } else { @@ -627,7 +627,7 @@ export class Lexer { } private pushToken(type: TokenType, value: any) { - if ((type === TokenType.TEXT || type === TokenType.WHITESPACE) && (value.length < 1)) { + if ((type === "TEXT" || type === "WHITESPACE") && (value.length < 1)) { return; } @@ -642,7 +642,7 @@ export class Lexer { private pushModifier(modifier: string) { if (modifier) { - this.pushToken(modifier === this.trimmingModifier ? TokenType.TRIMMING_MODIFIER : TokenType.LINE_TRIMMING_MODIFIER, modifier); + this.pushToken(modifier === this.trimmingModifier ? "TRIMMING_MODIFIER" : "LINE_TRIMMING_MODIFIER", modifier); } } diff --git a/src/lib/Token.ts b/src/lib/Token.ts index 7ee2a90..a068668 100644 --- a/src/lib/Token.ts +++ b/src/lib/Token.ts @@ -1,4 +1,4 @@ -import {TokenType, typeToString} from "./TokenType"; +import {TokenType} from "./TokenType"; export class Token { private readonly _type: TokenType; @@ -73,7 +73,7 @@ export class Token { * @return {string} */ public toString(): string { - return `${typeToString(this.type, true)}(${this.value ? this.value : ''})`; + return `${this.type}(${this.value ? 
this.value : ''})`; } /** diff --git a/src/lib/TokenStream.ts b/src/lib/TokenStream.ts index 7b17715..796bdbb 100644 --- a/src/lib/TokenStream.ts +++ b/src/lib/TokenStream.ts @@ -23,48 +23,48 @@ type TokenVisitor = (token: Token, stream: TokenStream) => Token; * @return {Token} */ export const astVisitor: TokenVisitor = (token: Token, stream: TokenStream): Token => { - if (!token.test(TokenType.WHITESPACE) && - !token.test(TokenType.TRIMMING_MODIFIER) && - !token.test(TokenType.LINE_TRIMMING_MODIFIER)) { + if (!token.test("WHITESPACE") && + !token.test("TRIMMING_MODIFIER") && + !token.test("LINE_TRIMMING_MODIFIER")) { let tokenValue: string = token.value; let tokenLine: number = token.line; let tokenColumn: number = token.column; - if (token.test(TokenType.EOF)) { + if (token.test("EOF")) { return token; } - if (token.test(TokenType.NUMBER)) { + if (token.test("NUMBER")) { return new Token(token.type, Number(token.value), token.line, token.column); } - if (token.test(TokenType.OPENING_QUOTE)) { + if (token.test("OPENING_QUOTE")) { let candidate = stream.look(1); - if (candidate.test(TokenType.CLOSING_QUOTE)) { - return new Token(TokenType.STRING, '', token.line, token.column); + if (candidate.test("CLOSING_QUOTE")) { + return new Token("STRING", '', token.line, token.column); } } - if (token.test(TokenType.STRING)) { + if (token.test("STRING")) { let candidate = stream.look(-1); - if (candidate && candidate.test(TokenType.OPENING_QUOTE)) { + if (candidate && candidate.test("OPENING_QUOTE")) { tokenLine = candidate.line; tokenColumn = candidate.column; } } - if (!token.test(TokenType.OPENING_QUOTE) && !token.test(TokenType.CLOSING_QUOTE)) { - if (token.test(TokenType.TEXT) || token.test(TokenType.STRING)) { + if (!token.test("OPENING_QUOTE") && !token.test("CLOSING_QUOTE")) { + if (token.test("TEXT") || token.test("STRING")) { // streamline line separators tokenValue = tokenValue.replace(/\r\n|\r/g, '\n'); - } else if (token.test(TokenType.OPERATOR)) { + } else if (token.test("OPERATOR")) { // remove unnecessary operator spaces tokenValue = tokenValue.replace(/\s+/, ' '); } - if (token.test(TokenType.STRING)) { + if (token.test("STRING")) { // strip C slashes tokenValue = stripcslashes(tokenValue); } @@ -75,11 +75,11 @@ export const astVisitor: TokenVisitor = (token: Token, stream: TokenStream): Tok wstCandidate = stream.look(2); if (wstCandidate) { - if (wstCandidate.type === TokenType.TRIMMING_MODIFIER) { + if (wstCandidate.type === "TRIMMING_MODIFIER") { tokenValue = tokenValue.replace(/\s*$/, ''); } - if (wstCandidate.type === TokenType.LINE_TRIMMING_MODIFIER) { + if (wstCandidate.type === "LINE_TRIMMING_MODIFIER") { tokenValue = tokenValue.replace(/[ \t\0\x0B]*$/, ''); } } @@ -87,17 +87,17 @@ export const astVisitor: TokenVisitor = (token: Token, stream: TokenStream): Tok wstCandidate = stream.look(-2); if (wstCandidate) { - if (wstCandidate.type === TokenType.TRIMMING_MODIFIER) { + if (wstCandidate.type === "TRIMMING_MODIFIER") { tokenValue = tokenValue.replace(/^\s*/, ''); } - if (wstCandidate.type === TokenType.LINE_TRIMMING_MODIFIER) { + if (wstCandidate.type === "LINE_TRIMMING_MODIFIER") { tokenValue = tokenValue.replace(/^[ \t\0\x0B]*/, ''); } } - // don't push empty TEXT tokens - if (!token.test(TokenType.TEXT) || (tokenValue.length > 0)) { + // don't push empty "TEXT" tokens + if (!token.test("TEXT") || (tokenValue.length > 0)) { return new Token(token.type, tokenValue, tokenLine, tokenColumn); } } diff --git a/src/lib/TokenType.ts b/src/lib/TokenType.ts index 235a880..f1e8be8 
100644 --- a/src/lib/TokenType.ts +++ b/src/lib/TokenType.ts @@ -1,40 +1,24 @@ - export enum TokenType { - CLOSING_QUOTE = 'CLOSING_QUOTE', - COMMENT_END = 'COMMENT_END', - COMMENT_START = 'COMMENT_START', - EOF = 'EOF', - INTERPOLATION_START = 'INTERPOLATION_START', - INTERPOLATION_END = 'INTERPOLATION_END', - LINE_TRIMMING_MODIFIER = 'LINE_TRIMMING_MODIFIER', - NAME = 'NAME', - NUMBER = 'NUMBER', - OPENING_QUOTE = 'OPENING_QUOTE', - OPERATOR = 'OPERATOR', - PUNCTUATION = 'PUNCTUATION', - STRING = 'STRING', - TAG_END = 'TAG_END', - TAG_START = 'TAG_START', - TEST_OPERATOR = 'TEST_OPERATOR', - TEXT = 'TEXT', - TRIMMING_MODIFIER = 'TRIMMING_MODIFIER', - VARIABLE_END = 'VARIABLE_END', - VARIABLE_START = 'VARIABLE_START', - WHITESPACE = 'WHITESPACE', - ARROW = 'ARROW' - } - - /** - * Returns the human representation of a token type. - * - * @param {TokenType} type The token type - * @param {boolean} short Whether to return a short representation or not - * - * @returns {string} The string representation - */ - export function typeToString(type: TokenType, short: boolean = false): string { - if (type in TokenType) { - return short ? type : 'TokenType.' + type; - } else { - throw new Error(`Token type "${type}" does not exist.`); - } - } \ No newline at end of file +export type TokenType = + "CLOSING_QUOTE" | + "COMMENT_END" | + "COMMENT_START" | + "EOF" | + "INTERPOLATION_START" | + "INTERPOLATION_END" | + "LINE_TRIMMING_MODIFIER" | + "NAME" | + "NUMBER" | + "OPENING_QUOTE" | + "OPERATOR" | + "PUNCTUATION" | + "STRING" | + "TAG_END" | + "TAG_START" | + "TEST_OPERATOR" | + "TEXT" | + "TRIMMING_MODIFIER" | + "VARIABLE_END" | + "VARIABLE_START" | + "WHITESPACE" | + "ARROW" + ; \ No newline at end of file diff --git a/test/unit/lib/Lexer/test.ts b/test/unit/lib/Lexer/test.ts index 382c598..a5ddb07 100644 --- a/test/unit/lib/Lexer/test.ts +++ b/test/unit/lib/Lexer/test.ts @@ -1,8 +1,8 @@ import * as tape from 'tape'; import {Lexer} from '../../../../src/lib/Lexer'; import {Token} from "../../../../src/lib/Token"; -import {TokenType, typeToString} from "../../../../src/lib/TokenType"; import {SyntaxError} from "../../../../src/lib/SyntaxError"; +import {TokenType} from "../../../../src/index"; class CustomLexer extends Lexer { constructor() { @@ -29,7 +29,7 @@ let testTokens = (test: tape.Test, tokens: Token[], data: [TokenType, any, numbe let line = data[index][2]; let column = data[index][3]; - test.same(token.type, type, 'type should be "' + typeToString(type) + '"'); + test.same(token.type, type, 'type should be "' + type + '"'); test.looseEqual(token.value, value, token.type + ' value should be "' + ((value && value.length > 80) ? value.substr(0, 77) + '...' 
: value) + '"'); test.same(token.line, line, 'line should be ' + line); test.same(token.column, column, 'column should be ' + column); @@ -45,12 +45,12 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize('{{foo.foo}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.NAME, 'foo', 1, 3], - [TokenType.PUNCTUATION, '.', 1, 6], - [TokenType.NAME, 'foo', 1, 7], - [TokenType.VARIABLE_END, '}}', 1, 10], - [TokenType.EOF, null, 1, 12] + ["VARIABLE_START", '{{', 1, 1], + ["NAME", 'foo', 1, 3], + ["PUNCTUATION", '.', 1, 6], + ["NAME", 'foo', 1, 7], + ["VARIABLE_END", '}}', 1, 10], + ["EOF", null, 1, 12] ]); test.end(); @@ -61,8 +61,8 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize('{{foo[foo]}}'); testTokens(test, [tokens[1], tokens[3]], [ - [TokenType.NAME, 'foo', 1, 3], - [TokenType.NAME, 'foo', 1, 7], + ["NAME", 'foo', 1, 3], + ["NAME", 'foo', 1, 7], ]); test.end(); @@ -73,21 +73,21 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize('{{foo[foo.5[foo][foo]]}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.NAME, 'foo', 1, 3], - [TokenType.PUNCTUATION, '[', 1, 6], - [TokenType.NAME, 'foo', 1, 7], - [TokenType.PUNCTUATION, '.', 1, 10], - [TokenType.NUMBER, 5, 1, 11], - [TokenType.PUNCTUATION, '[', 1, 12], - [TokenType.NAME, 'foo', 1, 13], - [TokenType.PUNCTUATION, ']', 1, 16], - [TokenType.PUNCTUATION, '[', 1, 17], - [TokenType.NAME, 'foo', 1, 18], - [TokenType.PUNCTUATION, ']', 1, 21], - [TokenType.PUNCTUATION, ']', 1, 22], - [TokenType.VARIABLE_END, '}}', 1, 23], - [TokenType.EOF, null, 1, 25] + ["VARIABLE_START", '{{', 1, 1], + ["NAME", 'foo', 1, 3], + ["PUNCTUATION", '[', 1, 6], + ["NAME", 'foo', 1, 7], + ["PUNCTUATION", '.', 1, 10], + ["NUMBER", 5, 1, 11], + ["PUNCTUATION", '[', 1, 12], + ["NAME", 'foo', 1, 13], + ["PUNCTUATION", ']', 1, 16], + ["PUNCTUATION", '[', 1, 17], + ["NAME", 'foo', 1, 18], + ["PUNCTUATION", ']', 1, 21], + ["PUNCTUATION", ']', 1, 22], + ["VARIABLE_END", '}}', 1, 23], + ["EOF", null, 1, 25] ]); test.end(); @@ -99,15 +99,15 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize('{{foo["bar"]}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.NAME, 'foo', 1, 3], - [TokenType.PUNCTUATION, '[', 1, 6], - [TokenType.OPENING_QUOTE, '"', 1, 7], - [TokenType.STRING, 'bar', 1, 8], - [TokenType.CLOSING_QUOTE, '"', 1, 11], - [TokenType.PUNCTUATION, ']', 1, 12], - [TokenType.VARIABLE_END, '}}', 1, 13], - [TokenType.EOF, null, 1, 15] + ["VARIABLE_START", '{{', 1, 1], + ["NAME", 'foo', 1, 3], + ["PUNCTUATION", '[', 1, 6], + ["OPENING_QUOTE", '"', 1, 7], + ["STRING", 'bar', 1, 8], + ["CLOSING_QUOTE", '"', 1, 11], + ["PUNCTUATION", ']', 1, 12], + ["VARIABLE_END", '}}', 1, 13], + ["EOF", null, 1, 15] ]); test.end(); @@ -118,17 +118,17 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize('{{foo["#{bar}"]}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.NAME, 'foo', 1, 3], - [TokenType.PUNCTUATION, '[', 1, 6], - [TokenType.OPENING_QUOTE, '"', 1, 7], - [TokenType.INTERPOLATION_START, '#{', 1, 8], - [TokenType.NAME, 'bar', 1, 10], - [TokenType.INTERPOLATION_END, '}', 1, 13], - [TokenType.CLOSING_QUOTE, '"', 1, 14], - [TokenType.PUNCTUATION, ']', 1, 15], - [TokenType.VARIABLE_END, '}}', 1, 16], - [TokenType.EOF, null, 1, 18] + ["VARIABLE_START", '{{', 1, 1], + ["NAME", 'foo', 1, 3], + ["PUNCTUATION", '[', 1, 6], + ["OPENING_QUOTE", '"', 1, 7], + ["INTERPOLATION_START", '#{', 1, 8], + ["NAME", 'bar', 1, 10], + 
["INTERPOLATION_END", '}', 1, 13], + ["CLOSING_QUOTE", '"', 1, 14], + ["PUNCTUATION", ']', 1, 15], + ["VARIABLE_END", '}}', 1, 16], + ["EOF", null, 1, 18] ]); test.end(); @@ -145,26 +145,26 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize('{{ {"a":{"b":"c"}} }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.PUNCTUATION, '{', 1, 4], - [TokenType.OPENING_QUOTE, '"', 1, 5], - [TokenType.STRING, 'a', 1, 6], - [TokenType.CLOSING_QUOTE, '"', 1, 7], - [TokenType.PUNCTUATION, ':', 1, 8], - [TokenType.PUNCTUATION, '{', 1, 9], - [TokenType.OPENING_QUOTE, '"', 1, 10], - [TokenType.STRING, 'b', 1, 11], - [TokenType.CLOSING_QUOTE, '"', 1, 12], - [TokenType.PUNCTUATION, ':', 1, 13], - [TokenType.OPENING_QUOTE, '"', 1, 14], - [TokenType.STRING, 'c', 1, 15], - [TokenType.CLOSING_QUOTE, '"', 1, 16], - [TokenType.PUNCTUATION, '}', 1, 17], - [TokenType.PUNCTUATION, '}', 1, 18], - [TokenType.WHITESPACE, ' ', 1, 19], - [TokenType.VARIABLE_END, '}}', 1, 20], - [TokenType.EOF, null, 1, 22] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["PUNCTUATION", '{', 1, 4], + ["OPENING_QUOTE", '"', 1, 5], + ["STRING", 'a', 1, 6], + ["CLOSING_QUOTE", '"', 1, 7], + ["PUNCTUATION", ':', 1, 8], + ["PUNCTUATION", '{', 1, 9], + ["OPENING_QUOTE", '"', 1, 10], + ["STRING", 'b', 1, 11], + ["CLOSING_QUOTE", '"', 1, 12], + ["PUNCTUATION", ':', 1, 13], + ["OPENING_QUOTE", '"', 1, 14], + ["STRING", 'c', 1, 15], + ["CLOSING_QUOTE", '"', 1, 16], + ["PUNCTUATION", '}', 1, 17], + ["PUNCTUATION", '}', 1, 18], + ["WHITESPACE", ' ', 1, 19], + ["VARIABLE_END", '}}', 1, 20], + ["EOF", null, 1, 22] ]); test.test('with non-opening bracket', (test) => { @@ -195,18 +195,18 @@ tape('Lexer', (test) => { {% endverbatim %}`); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'verbatim', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 12], - [TokenType.TAG_END, '%}', 1, 13], - [TokenType.TEXT, '\n {{ "bla" }}\n', 1, 15], - [TokenType.TAG_START, '{%', 3, 1], - [TokenType.WHITESPACE, ' ', 3, 3], - [TokenType.NAME, 'endverbatim', 3, 4], - [TokenType.WHITESPACE, ' ', 3, 15], - [TokenType.TAG_END, '%}', 3, 16], - [TokenType.EOF, null, 3, 18] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'verbatim', 1, 4], + ["WHITESPACE", ' ', 1, 12], + ["TAG_END", '%}', 1, 13], + ["TEXT", '\n {{ "bla" }}\n', 1, 15], + ["TAG_START", '{%', 3, 1], + ["WHITESPACE", ' ', 3, 3], + ["NAME", 'endverbatim', 3, 4], + ["WHITESPACE", ' ', 3, 15], + ["TAG_END", '%}', 3, 16], + ["EOF", null, 3, 18] ]); test.end(); @@ -217,18 +217,18 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize(`{% verbatim %}${'*'.repeat(100000)}{% endverbatim %}`); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'verbatim', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 12], - [TokenType.TAG_END, '%}', 1, 13], - [TokenType.TEXT, '*'.repeat(100000), 1, 15], - [TokenType.TAG_START, '{%', 1, 100015], - [TokenType.WHITESPACE, ' ', 1, 100017], - [TokenType.NAME, 'endverbatim', 1, 100018], - [TokenType.WHITESPACE, ' ', 1, 100029], - [TokenType.TAG_END, '%}', 1, 100030], - [TokenType.EOF, null, 1, 100032] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'verbatim', 1, 4], + ["WHITESPACE", ' ', 1, 12], + ["TAG_END", '%}', 1, 13], + ["TEXT", '*'.repeat(100000), 1, 15], + ["TAG_START", '{%', 1, 100015], + ["WHITESPACE", ' ', 1, 100017], + ["NAME", 
'endverbatim', 1, 100018], + ["WHITESPACE", ' ', 1, 100029], + ["TAG_END", '%}', 1, 100030], + ["EOF", null, 1, 100032] ]); test.end(); @@ -239,20 +239,20 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize(`foo{% verbatim %}{{bla}}{% endverbatim %}foo`); testTokens(test, tokens, [ - [TokenType.TEXT, 'foo', 1, 1], - [TokenType.TAG_START, '{%', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 6], - [TokenType.NAME, 'verbatim', 1, 7], - [TokenType.WHITESPACE, ' ', 1, 15], - [TokenType.TAG_END, '%}', 1, 16], - [TokenType.TEXT, '{{bla}}', 1, 18], - [TokenType.TAG_START, '{%', 1, 25], - [TokenType.WHITESPACE, ' ', 1, 27], - [TokenType.NAME, 'endverbatim', 1, 28], - [TokenType.WHITESPACE, ' ', 1, 39], - [TokenType.TAG_END, '%}', 1, 40], - [TokenType.TEXT, 'foo', 1, 42], - [TokenType.EOF, null, 1, 45] + ["TEXT", 'foo', 1, 1], + ["TAG_START", '{%', 1, 4], + ["WHITESPACE", ' ', 1, 6], + ["NAME", 'verbatim', 1, 7], + ["WHITESPACE", ' ', 1, 15], + ["TAG_END", '%}', 1, 16], + ["TEXT", '{{bla}}', 1, 18], + ["TAG_START", '{%', 1, 25], + ["WHITESPACE", ' ', 1, 27], + ["NAME", 'endverbatim', 1, 28], + ["WHITESPACE", ' ', 1, 39], + ["TAG_END", '%}', 1, 40], + ["TEXT", 'foo', 1, 42], + ["EOF", null, 1, 45] ]); test.end(); @@ -263,30 +263,30 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize(`{% if true %}{% verbatim %}foo{% endverbatim %}{% endif %}`); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'if', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 6], - [TokenType.NAME, 'true', 1, 7], - [TokenType.WHITESPACE, ' ', 1, 11], - [TokenType.TAG_END, '%}', 1, 12], - [TokenType.TAG_START, '{%', 1, 14], - [TokenType.WHITESPACE, ' ', 1, 16], - [TokenType.NAME, 'verbatim', 1, 17], - [TokenType.WHITESPACE, ' ', 1, 25], - [TokenType.TAG_END, '%}', 1, 26], - [TokenType.TEXT, 'foo', 1, 28], - [TokenType.TAG_START, '{%', 1, 31], - [TokenType.WHITESPACE, ' ', 1, 33], - [TokenType.NAME, 'endverbatim', 1, 34], - [TokenType.WHITESPACE, ' ', 1, 45], - [TokenType.TAG_END, '%}', 1, 46], - [TokenType.TAG_START, '{%', 1, 48], - [TokenType.WHITESPACE, ' ', 1, 50], - [TokenType.NAME, 'endif', 1, 51], - [TokenType.WHITESPACE, ' ', 1, 56], - [TokenType.TAG_END, '%}', 1, 57], - [TokenType.EOF, null, 1, 59] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'if', 1, 4], + ["WHITESPACE", ' ', 1, 6], + ["NAME", 'true', 1, 7], + ["WHITESPACE", ' ', 1, 11], + ["TAG_END", '%}', 1, 12], + ["TAG_START", '{%', 1, 14], + ["WHITESPACE", ' ', 1, 16], + ["NAME", 'verbatim', 1, 17], + ["WHITESPACE", ' ', 1, 25], + ["TAG_END", '%}', 1, 26], + ["TEXT", 'foo', 1, 28], + ["TAG_START", '{%', 1, 31], + ["WHITESPACE", ' ', 1, 33], + ["NAME", 'endverbatim', 1, 34], + ["WHITESPACE", ' ', 1, 45], + ["TAG_END", '%}', 1, 46], + ["TAG_START", '{%', 1, 48], + ["WHITESPACE", ' ', 1, 50], + ["NAME", 'endif', 1, 51], + ["WHITESPACE", ' ', 1, 56], + ["TAG_END", '%}', 1, 57], + ["EOF", null, 1, 59] ]); test.end(); @@ -321,10 +321,10 @@ tape('Lexer', (test) => { let tokens = lexer.tokenize(source); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.NAME, 'bla', 1, 3], - [TokenType.VARIABLE_END, '}}', 1, 6], - [TokenType.EOF, null, 1, 8] + ["VARIABLE_START", '{{', 1, 1], + ["NAME", 'bla', 1, 3], + ["VARIABLE_END", '}}', 1, 6], + ["EOF", null, 1, 8] ]); test.end(); @@ -338,12 +338,12 @@ bla }}`; let tokens = lexer.tokenize(source); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, '\n', 1, 3], - 
[TokenType.NAME, 'bla', 2, 1], - [TokenType.WHITESPACE, ' ', 2, 4], - [TokenType.VARIABLE_END, '}}', 2, 5], - [TokenType.EOF, null, 2, 7] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", '\n', 1, 3], + ["NAME", 'bla', 2, 1], + ["WHITESPACE", ' ', 2, 4], + ["VARIABLE_END", '}}', 2, 5], + ["EOF", null, 2, 7] ]); test.end(); @@ -356,12 +356,12 @@ bla }}`; let tokens = lexer.tokenize(source); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'x'.repeat(100000), 1, 4], - [TokenType.WHITESPACE, ' ', 1, 100004], - [TokenType.VARIABLE_END, '}}', 1, 100005], - [TokenType.EOF, null, 1, 100007] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'x'.repeat(100000), 1, 4], + ["WHITESPACE", ' ', 1, 100004], + ["VARIABLE_END", '}}', 1, 100005], + ["EOF", null, 1, 100007] ]); test.end(); @@ -377,7 +377,7 @@ bla }}`; test.comment(`${code}: {{ ${char} }}`); testTokens(test, [tokens[2]], [ - [TokenType.NAME, char, 1, 4] + ["NAME", char, 1, 4] ]); } @@ -389,14 +389,14 @@ bla }}`; let tokens = lexer.tokenize('{{ f() }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'f', 1, 4], - [TokenType.PUNCTUATION, '(', 1, 5], - [TokenType.PUNCTUATION, ')', 1, 6], - [TokenType.WHITESPACE, ' ', 1, 7], - [TokenType.VARIABLE_END, '}}', 1, 8], - [TokenType.EOF, null, 1, 10] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'f', 1, 4], + ["PUNCTUATION", '(', 1, 5], + ["PUNCTUATION", ')', 1, 6], + ["WHITESPACE", ' ', 1, 7], + ["VARIABLE_END", '}}', 1, 8], + ["EOF", null, 1, 10] ]); test.end(); @@ -407,17 +407,17 @@ bla }}`; let tokens = lexer.tokenize('{{ f("foo {{bar}}") }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'f', 1, 4], - [TokenType.PUNCTUATION, '(', 1, 5], - [TokenType.OPENING_QUOTE, '"', 1, 6], - [TokenType.STRING, 'foo {{bar}}', 1, 7], - [TokenType.CLOSING_QUOTE, '"', 1, 18], - [TokenType.PUNCTUATION, ')', 1, 19], - [TokenType.WHITESPACE, ' ', 1, 20], - [TokenType.VARIABLE_END, '}}', 1, 21], - [TokenType.EOF, null, 1, 23] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'f', 1, 4], + ["PUNCTUATION", '(', 1, 5], + ["OPENING_QUOTE", '"', 1, 6], + ["STRING", 'foo {{bar}}', 1, 7], + ["CLOSING_QUOTE", '"', 1, 18], + ["PUNCTUATION", ')', 1, 19], + ["WHITESPACE", ' ', 1, 20], + ["VARIABLE_END", '}}', 1, 21], + ["EOF", null, 1, 23] ]); test.end(); @@ -445,7 +445,7 @@ bla }}`; let tokens = lexer.tokenize('{{in}}'); testTokens(test, [tokens[1]], [ - [TokenType.NAME, 'in', 1, 3] + ["NAME", 'in', 1, 3] ]); test.end(); @@ -462,12 +462,12 @@ bla %}`); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, '\n', 1, 3], - [TokenType.NAME, 'bla', 2, 1], - [TokenType.WHITESPACE, '\n', 2, 4], - [TokenType.TAG_END, '%}', 3, 1], - [TokenType.EOF, null, 3, 3] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", '\n', 1, 3], + ["NAME", 'bla', 2, 1], + ["WHITESPACE", '\n', 2, 4], + ["TAG_END", '%}', 3, 1], + ["EOF", null, 3, 3] ]); test.end(); @@ -478,12 +478,12 @@ bla let tokens = lexer.tokenize(`{% ${'x'.repeat(100000)} %}`); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'x'.repeat(100000), 1, 4], - [TokenType.WHITESPACE, ' ', 1, 100004], - [TokenType.TAG_END, '%}', 1, 100005], - [TokenType.EOF, null, 1, 100007] + ["TAG_START", 
'{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'x'.repeat(100000), 1, 4], + ["WHITESPACE", ' ', 1, 100004], + ["TAG_END", '%}', 1, 100005], + ["EOF", null, 1, 100007] ]); test.end(); @@ -494,12 +494,12 @@ bla let tokens = lexer.tokenize('{% § %}'); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, '§', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 5], - [TokenType.TAG_END, '%}', 1, 6], - [TokenType.EOF, null, 1, 8] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", '§', 1, 4], + ["WHITESPACE", ' ', 1, 5], + ["TAG_END", '%}', 1, 6], + ["EOF", null, 1, 8] ]); test.end(); @@ -510,14 +510,14 @@ bla let tokens = lexer.tokenize(`{% foo bar %}`); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'foo', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 7], - [TokenType.NAME, 'bar', 1, 8], - [TokenType.WHITESPACE, ' ', 1, 11], - [TokenType.TAG_END, '%}', 1, 12], - [TokenType.EOF, null, 1, 14] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'foo', 1, 4], + ["WHITESPACE", ' ', 1, 7], + ["NAME", 'bar', 1, 8], + ["WHITESPACE", ' ', 1, 11], + ["TAG_END", '%}', 1, 12], + ["EOF", null, 1, 14] ]); test.end(); @@ -549,12 +549,12 @@ bla let tokens = lexer.tokenize('{{ 922337203685477580700 }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NUMBER, '922337203685477580700', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 25], - [TokenType.VARIABLE_END, '}}', 1, 26], - [TokenType.EOF, null, 1, 28] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NUMBER", '922337203685477580700', 1, 4], + ["WHITESPACE", ' ', 1, 25], + ["VARIABLE_END", '}}', 1, 26], + ["EOF", null, 1, 28] ]); test.end(); @@ -565,12 +565,12 @@ bla let tokens = lexer.tokenize('{{ 92233720368547.7580700 }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NUMBER, '92233720368547.7580700', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 26], - [TokenType.VARIABLE_END, '}}', 1, 27], - [TokenType.EOF, null, 1, 29] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NUMBER", '92233720368547.7580700', 1, 4], + ["WHITESPACE", ' ', 1, 26], + ["VARIABLE_END", '}}', 1, 27], + ["EOF", null, 1, 29] ]); test.end(); @@ -591,14 +591,14 @@ bla let tokens = lexer.tokenize(fixture.template); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.OPENING_QUOTE, fixture.quote, 1, 4], - [TokenType.STRING, fixture.expected, 1, 5], - [TokenType.CLOSING_QUOTE, fixture.quote, 1, 15], - [TokenType.WHITESPACE, ' ', 1, 16], - [TokenType.VARIABLE_END, '}}', 1, 17], - [TokenType.EOF, null, 1, 19] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["OPENING_QUOTE", fixture.quote, 1, 4], + ["STRING", fixture.expected, 1, 5], + ["CLOSING_QUOTE", fixture.quote, 1, 15], + ["WHITESPACE", ' ', 1, 16], + ["VARIABLE_END", '}}', 1, 17], + ["EOF", null, 1, 19] ]); }); @@ -610,24 +610,24 @@ bla let tokens = lexer.tokenize('foo {{ "bar #{ baz + 1 }" }}'); testTokens(test, tokens, [ - [TokenType.TEXT, 'foo ', 1, 1], - [TokenType.VARIABLE_START, '{{', 1, 5], - [TokenType.WHITESPACE, ' ', 1, 7], - [TokenType.OPENING_QUOTE, '"', 1, 8], - [TokenType.STRING, 'bar ', 1, 9], - [TokenType.INTERPOLATION_START, '#{', 1, 13], - [TokenType.WHITESPACE, ' ', 1, 15], - [TokenType.NAME, 'baz', 1, 16], - 
[TokenType.WHITESPACE, ' ', 1, 19], - [TokenType.OPERATOR, '+', 1, 20], - [TokenType.WHITESPACE, ' ', 1, 21], - [TokenType.NUMBER, '1', 1, 22], - [TokenType.WHITESPACE, ' ', 1, 23], - [TokenType.INTERPOLATION_END, '}', 1, 24], - [TokenType.CLOSING_QUOTE, '"', 1, 25], - [TokenType.WHITESPACE, ' ', 1, 26], - [TokenType.VARIABLE_END, '}}', 1, 27], - [TokenType.EOF, null, 1, 29] + ["TEXT", 'foo ', 1, 1], + ["VARIABLE_START", '{{', 1, 5], + ["WHITESPACE", ' ', 1, 7], + ["OPENING_QUOTE", '"', 1, 8], + ["STRING", 'bar ', 1, 9], + ["INTERPOLATION_START", '#{', 1, 13], + ["WHITESPACE", ' ', 1, 15], + ["NAME", 'baz', 1, 16], + ["WHITESPACE", ' ', 1, 19], + ["OPERATOR", '+', 1, 20], + ["WHITESPACE", ' ', 1, 21], + ["NUMBER", '1', 1, 22], + ["WHITESPACE", ' ', 1, 23], + ["INTERPOLATION_END", '}', 1, 24], + ["CLOSING_QUOTE", '"', 1, 25], + ["WHITESPACE", ' ', 1, 26], + ["VARIABLE_END", '}}', 1, 27], + ["EOF", null, 1, 29] ]); test.end(); @@ -638,14 +638,14 @@ bla let tokens = lexer.tokenize('{{ "bar \\#{baz+1}" }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.OPENING_QUOTE, '"', 1, 4], - [TokenType.STRING, 'bar \\#{baz+1}', 1, 5], - [TokenType.CLOSING_QUOTE, '"', 1, 18], - [TokenType.WHITESPACE, ' ', 1, 19], - [TokenType.VARIABLE_END, '}}', 1, 20], - [TokenType.EOF, null, 1, 22] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["OPENING_QUOTE", '"', 1, 4], + ["STRING", 'bar \\#{baz+1}', 1, 5], + ["CLOSING_QUOTE", '"', 1, 18], + ["WHITESPACE", ' ', 1, 19], + ["VARIABLE_END", '}}', 1, 20], + ["EOF", null, 1, 22] ]); test.end(); @@ -656,14 +656,14 @@ bla let tokens = lexer.tokenize('{{ "bar # baz" }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.OPENING_QUOTE, '"', 1, 4], - [TokenType.STRING, 'bar # baz', 1, 5], - [TokenType.CLOSING_QUOTE, '"', 1, 14], - [TokenType.WHITESPACE, ' ', 1, 15], - [TokenType.VARIABLE_END, '}}', 1, 16], - [TokenType.EOF, null, 1, 18] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["OPENING_QUOTE", '"', 1, 4], + ["STRING", 'bar # baz', 1, 5], + ["CLOSING_QUOTE", '"', 1, 14], + ["WHITESPACE", ' ', 1, 15], + ["VARIABLE_END", '}}', 1, 16], + ["EOF", null, 1, 18] ]); test.end(); @@ -692,24 +692,24 @@ bla let tokens = lexer.tokenize('{{ "bar #{ "foo#{bar}" }" }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.OPENING_QUOTE, '"', 1, 4], - [TokenType.STRING, 'bar ', 1, 5], - [TokenType.INTERPOLATION_START, '#{', 1, 9], - [TokenType.WHITESPACE, ' ', 1, 11], - [TokenType.OPENING_QUOTE, '"', 1, 12], - [TokenType.STRING, 'foo', 1, 13], - [TokenType.INTERPOLATION_START, '#{', 1, 16], - [TokenType.NAME, 'bar', 1, 18], - [TokenType.INTERPOLATION_END, '}', 1, 21], - [TokenType.CLOSING_QUOTE, '"', 1, 22], - [TokenType.WHITESPACE, ' ', 1, 23], - [TokenType.INTERPOLATION_END, '}', 1, 24], - [TokenType.CLOSING_QUOTE, '"', 1, 25], - [TokenType.WHITESPACE, ' ', 1, 26], - [TokenType.VARIABLE_END, '}}', 1, 27], - [TokenType.EOF, null, 1, 29] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["OPENING_QUOTE", '"', 1, 4], + ["STRING", 'bar ', 1, 5], + ["INTERPOLATION_START", '#{', 1, 9], + ["WHITESPACE", ' ', 1, 11], + ["OPENING_QUOTE", '"', 1, 12], + ["STRING", 'foo', 1, 13], + ["INTERPOLATION_START", '#{', 1, 16], + ["NAME", 'bar', 1, 18], + ["INTERPOLATION_END", '}', 1, 21], + ["CLOSING_QUOTE", '"', 1, 22], + ["WHITESPACE", ' ', 1, 
23], + ["INTERPOLATION_END", '}', 1, 24], + ["CLOSING_QUOTE", '"', 1, 25], + ["WHITESPACE", ' ', 1, 26], + ["VARIABLE_END", '}}', 1, 27], + ["EOF", null, 1, 29] ]); test.end(); @@ -720,26 +720,26 @@ bla let tokens = lexer.tokenize('{% foo "bar #{ "foo#{bar}" }" %}'); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'foo', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 7], - [TokenType.OPENING_QUOTE, '"', 1, 8], - [TokenType.STRING, 'bar ', 1, 9], - [TokenType.INTERPOLATION_START, '#{', 1, 13], - [TokenType.WHITESPACE, ' ', 1, 15], - [TokenType.OPENING_QUOTE, '"', 1, 16], - [TokenType.STRING, 'foo', 1, 17], - [TokenType.INTERPOLATION_START, '#{', 1, 20], - [TokenType.NAME, 'bar', 1, 22], - [TokenType.INTERPOLATION_END, '}', 1, 25], - [TokenType.CLOSING_QUOTE, '"', 1, 26], - [TokenType.WHITESPACE, ' ', 1, 27], - [TokenType.INTERPOLATION_END, '}', 1, 28], - [TokenType.CLOSING_QUOTE, '"', 1, 29], - [TokenType.WHITESPACE, ' ', 1, 30], - [TokenType.TAG_END, '%}', 1, 31], - [TokenType.EOF, null, 1, 33] + ["TAG_START", '{%', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'foo', 1, 4], + ["WHITESPACE", ' ', 1, 7], + ["OPENING_QUOTE", '"', 1, 8], + ["STRING", 'bar ', 1, 9], + ["INTERPOLATION_START", '#{', 1, 13], + ["WHITESPACE", ' ', 1, 15], + ["OPENING_QUOTE", '"', 1, 16], + ["STRING", 'foo', 1, 17], + ["INTERPOLATION_START", '#{', 1, 20], + ["NAME", 'bar', 1, 22], + ["INTERPOLATION_END", '}', 1, 25], + ["CLOSING_QUOTE", '"', 1, 26], + ["WHITESPACE", ' ', 1, 27], + ["INTERPOLATION_END", '}', 1, 28], + ["CLOSING_QUOTE", '"', 1, 29], + ["WHITESPACE", ' ', 1, 30], + ["TAG_END", '%}', 1, 31], + ["EOF", null, 1, 33] ]); test.end(); @@ -750,11 +750,11 @@ bla let tokens = lexer.tokenize('{{""}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.OPENING_QUOTE, '"', 1, 3], - [TokenType.CLOSING_QUOTE, '"', 1, 4], - [TokenType.VARIABLE_END, '}}', 1, 5], - [TokenType.EOF, null, 1, 7] + ["VARIABLE_START", '{{', 1, 1], + ["OPENING_QUOTE", '"', 1, 3], + ["CLOSING_QUOTE", '"', 1, 4], + ["VARIABLE_END", '}}', 1, 5], + ["EOF", null, 1, 7] ]); test.end(); @@ -765,12 +765,12 @@ bla let tokens = lexer.tokenize('{{\'foo\'}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.OPENING_QUOTE, '\'', 1, 3], - [TokenType.STRING, 'foo', 1, 4], - [TokenType.CLOSING_QUOTE, '\'', 1, 7], - [TokenType.VARIABLE_END, '}}', 1, 8], - [TokenType.EOF, null, 1, 10] + ["VARIABLE_START", '{{', 1, 1], + ["OPENING_QUOTE", '\'', 1, 3], + ["STRING", 'foo', 1, 4], + ["CLOSING_QUOTE", '\'', 1, 7], + ["VARIABLE_END", '}}', 1, 8], + ["EOF", null, 1, 10] ]); test.end(); @@ -781,12 +781,12 @@ bla let tokens = lexer.tokenize('{{\'foo#{bar}\'}}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.OPENING_QUOTE, '\'', 1, 3], - [TokenType.STRING, 'foo#{bar}', 1, 4], - [TokenType.CLOSING_QUOTE, '\'', 1, 13], - [TokenType.VARIABLE_END, '}}', 1, 14], - [TokenType.EOF, null, 1, 16] + ["VARIABLE_START", '{{', 1, 1], + ["OPENING_QUOTE", '\'', 1, 3], + ["STRING", 'foo#{bar}', 1, 4], + ["CLOSING_QUOTE", '\'', 1, 13], + ["VARIABLE_END", '}}', 1, 14], + ["EOF", null, 1, 16] ]); test.end(); @@ -797,16 +797,16 @@ bla let tokens = lexer.tokenize(`{{"string \\"interpolation\\": '#{var}'"}}`); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.OPENING_QUOTE, '"', 1, 3], - [TokenType.STRING, 'string \\"interpolation\\": \'', 1, 4], - [TokenType.INTERPOLATION_START, '#{', 1, 31], 
- [TokenType.NAME, 'var', 1, 33], - [TokenType.INTERPOLATION_END, '}', 1, 36], - [TokenType.STRING, "'", 1, 37], - [TokenType.CLOSING_QUOTE, '"', 1, 38], - [TokenType.VARIABLE_END, '}}', 1, 39], - [TokenType.EOF, null, 1, 41] + ["VARIABLE_START", '{{', 1, 1], + ["OPENING_QUOTE", '"', 1, 3], + ["STRING", 'string \\"interpolation\\": \'', 1, 4], + ["INTERPOLATION_START", '#{', 1, 31], + ["NAME", 'var', 1, 33], + ["INTERPOLATION_END", '}', 1, 36], + ["STRING", "'", 1, 37], + ["CLOSING_QUOTE", '"', 1, 38], + ["VARIABLE_END", '}}', 1, 39], + ["EOF", null, 1, 41] ]); test.end(); @@ -822,8 +822,8 @@ bla tokens = lexer.tokenize('{{ is not foo }}'); testTokens(test, [tokens[2], tokens[4]], [ - [TokenType.TEST_OPERATOR, 'is not', 1, 4], - [TokenType.NAME, 'foo', 1, 11] + ["TEST_OPERATOR", 'is not', 1, 4], + ["NAME", 'foo', 1, 11] ]); test.comment('space within a test operator can be any amount of whitespaces'); @@ -831,29 +831,29 @@ bla tokens = lexer.tokenize('{{ is not foo }}'); testTokens(test, [tokens[2], tokens[4]], [ - [TokenType.TEST_OPERATOR, 'is not', 1, 4], - [TokenType.NAME, 'foo', 1, 22] + ["TEST_OPERATOR", 'is not', 1, 4], + ["NAME", 'foo', 1, 22] ]); tokens = lexer.tokenize('{{ is foo }}'); testTokens(test, [tokens[2], tokens[4]], [ - [TokenType.TEST_OPERATOR, 'is', 1, 4], - [TokenType.NAME, 'foo', 1, 7] + ["TEST_OPERATOR", 'is', 1, 4], + ["NAME", 'foo', 1, 7] ]); tokens = lexer.tokenize('{{ is is not }}'); testTokens(test, [tokens[2], tokens[4]], [ - [TokenType.TEST_OPERATOR, 'is', 1, 4], - [TokenType.TEST_OPERATOR, 'is not', 1, 7] + ["TEST_OPERATOR", 'is', 1, 4], + ["TEST_OPERATOR", 'is not', 1, 7] ]); tokens = lexer.tokenize('{{ is not is }}'); testTokens(test, [tokens[2], tokens[4]], [ - [TokenType.TEST_OPERATOR, 'is not', 1, 4], - [TokenType.TEST_OPERATOR, 'is', 1, 11] + ["TEST_OPERATOR", 'is not', 1, 4], + ["TEST_OPERATOR", 'is', 1, 11] ]); test.end(); @@ -869,7 +869,7 @@ bla let tokens = lexer.tokenize(`{{ ${operator} }}`); testTokens(test, [tokens[2]], [ - [TokenType.OPERATOR, operator, 1, 4] + ["OPERATOR", operator, 1, 4] ]); } @@ -878,7 +878,7 @@ bla tokens = lexer.tokenize('{{custom operator }}'); testTokens(test, [tokens[1]], [ - [TokenType.OPERATOR, 'custom operator', 1, 3] + ["OPERATOR", 'custom operator', 1, 3] ]); test.comment('not ending with a letter and not followed by either a space or an opening parenthesis'); @@ -886,7 +886,7 @@ bla tokens = lexer.tokenize('{{+}}'); testTokens(test, [tokens[1]], [ - [TokenType.OPERATOR, '+', 1, 3] + ["OPERATOR", '+', 1, 3] ]); test.test('ending with a letter and followed by a space or an opening parenthesis', (test) => { @@ -896,19 +896,19 @@ bla tokens = lexer.tokenize('{{in(foo)}}'); testTokens(test, [tokens[1]], [ - [TokenType.OPERATOR, 'in', 1, 3] + ["OPERATOR", 'in', 1, 3] ]); tokens = lexer.tokenize('{{in foo}}'); testTokens(test, [tokens[1]], [ - [TokenType.OPERATOR, 'in', 1, 3] + ["OPERATOR", 'in', 1, 3] ]); tokens = lexer.tokenize('{{in\nfoo}}'); testTokens(test, [tokens[1]], [ - [TokenType.OPERATOR, 'in', 1, 3] + ["OPERATOR", 'in', 1, 3] ]); test.end(); @@ -924,25 +924,25 @@ bla tokens = lexer.tokenize('{{ foo|filter(v => v > 1) }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.NAME, 'foo', 1, 4], - [TokenType.PUNCTUATION, '|', 1, 7], - [TokenType.NAME, 'filter', 1, 8], - [TokenType.PUNCTUATION, '(', 1, 14], - [TokenType.NAME, 'v', 1, 15], - [TokenType.WHITESPACE, ' ', 1, 16], - [TokenType.ARROW, '=>', 1, 17], - [TokenType.WHITESPACE, ' ', 1, 19], 
- [TokenType.NAME, 'v', 1, 20], - [TokenType.WHITESPACE, ' ', 1, 21], - [TokenType.OPERATOR, '>', 1, 22], - [TokenType.WHITESPACE, ' ', 1, 23], - [TokenType.NUMBER, '1', 1, 24], - [TokenType.PUNCTUATION, ')', 1, 25], - [TokenType.WHITESPACE, ' ', 1, 26], - [TokenType.VARIABLE_END, '}}', 1, 27], - [TokenType.EOF, null, 1, 29] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["NAME", 'foo', 1, 4], + ["PUNCTUATION", '|', 1, 7], + ["NAME", 'filter', 1, 8], + ["PUNCTUATION", '(', 1, 14], + ["NAME", 'v', 1, 15], + ["WHITESPACE", ' ', 1, 16], + ["ARROW", '=>', 1, 17], + ["WHITESPACE", ' ', 1, 19], + ["NAME", 'v', 1, 20], + ["WHITESPACE", ' ', 1, 21], + ["OPERATOR", '>', 1, 22], + ["WHITESPACE", ' ', 1, 23], + ["NUMBER", '1', 1, 24], + ["PUNCTUATION", ')', 1, 25], + ["WHITESPACE", ' ', 1, 26], + ["VARIABLE_END", '}}', 1, 27], + ["EOF", null, 1, 29] ]); test.end(); @@ -953,8 +953,8 @@ bla let tokens = lexer.tokenize('foo '); testTokens(test, tokens, [ - [TokenType.TEXT, 'foo ', 1, 1], - [TokenType.EOF, null, 1, 5] + ["TEXT", 'foo ', 1, 1], + ["EOF", null, 1, 5] ]); test.test('containing line feeds', (test) => { @@ -962,8 +962,8 @@ bla let tokens = lexer.tokenize('\r\rfoo\r\nbar\roof\n\r'); testTokens(test, tokens, [ - [TokenType.TEXT, '\r\rfoo\r\nbar\roof\n\r', 1, 1], - [TokenType.EOF, null, 7, 1] + ["TEXT", '\r\rfoo\r\nbar\roof\n\r', 1, 1], + ["EOF", null, 7, 1] ]); test.end(); @@ -974,12 +974,12 @@ bla let tokens = lexer.tokenize('foo {{bar}} bar'); testTokens(test, tokens, [ - [TokenType.TEXT, 'foo ', 1, 1], - [TokenType.VARIABLE_START, '{{', 1, 5], - [TokenType.NAME, 'bar', 1, 7], - [TokenType.VARIABLE_END, '}}', 1, 10], - [TokenType.TEXT, ' bar', 1, 12], - [TokenType.EOF, null, 1, 16] + ["TEXT", 'foo ', 1, 1], + ["VARIABLE_START", '{{', 1, 5], + ["NAME", 'bar', 1, 7], + ["VARIABLE_END", '}}', 1, 10], + ["TEXT", ' bar', 1, 12], + ["EOF", null, 1, 16] ]); test.end(); @@ -994,14 +994,14 @@ bla let tokens = lexer.tokenize('{%- foo -%}'); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.TRIMMING_MODIFIER, '-', 1, 3], - [TokenType.WHITESPACE, ' ', 1, 4], - [TokenType.NAME, 'foo', 1, 5], - [TokenType.WHITESPACE, ' ', 1, 8], - [TokenType.TRIMMING_MODIFIER, '-', 1, 9], - [TokenType.TAG_END, '%}', 1, 10], - [TokenType.EOF, null, 1, 12] + ["TAG_START", '{%', 1, 1], + ["TRIMMING_MODIFIER", '-', 1, 3], + ["WHITESPACE", ' ', 1, 4], + ["NAME", 'foo', 1, 5], + ["WHITESPACE", ' ', 1, 8], + ["TRIMMING_MODIFIER", '-', 1, 9], + ["TAG_END", '%}', 1, 10], + ["EOF", null, 1, 12] ]); test.end(); @@ -1012,14 +1012,14 @@ bla let tokens = lexer.tokenize('{%~ foo ~%}'); testTokens(test, tokens, [ - [TokenType.TAG_START, '{%', 1, 1], - [TokenType.LINE_TRIMMING_MODIFIER, '~', 1, 3], - [TokenType.WHITESPACE, ' ', 1, 4], - [TokenType.NAME, 'foo', 1, 5], - [TokenType.WHITESPACE, ' ', 1, 8], - [TokenType.LINE_TRIMMING_MODIFIER, '~', 1, 9], - [TokenType.TAG_END, '%}', 1, 10], - [TokenType.EOF, null, 1, 12] + ["TAG_START", '{%', 1, 1], + ["LINE_TRIMMING_MODIFIER", '~', 1, 3], + ["WHITESPACE", ' ', 1, 4], + ["NAME", 'foo', 1, 5], + ["WHITESPACE", ' ', 1, 8], + ["LINE_TRIMMING_MODIFIER", '~', 1, 9], + ["TAG_END", '%}', 1, 10], + ["EOF", null, 1, 12] ]); test.end(); @@ -1031,12 +1031,12 @@ bla let tokens = lexer.tokenize('{# foo bar #}'); testTokens(test, tokens, [ - [TokenType.COMMENT_START, '{#', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.TEXT, 'foo bar', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 11], - [TokenType.COMMENT_END, '#}', 1, 12], - [TokenType.EOF, null, 1, 14] + 
["COMMENT_START", '{#', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["TEXT", 'foo bar', 1, 4], + ["WHITESPACE", ' ', 1, 11], + ["COMMENT_END", '#}', 1, 12], + ["EOF", null, 1, 14] ]); test.test('long comments', (test) => { @@ -1046,10 +1046,10 @@ bla let tokens = lexer.tokenize('{#' + value + '#}'); testTokens(test, tokens, [ - [TokenType.COMMENT_START, '{#', 1, 1], - [TokenType.TEXT, value, 1, 3], - [TokenType.COMMENT_END, '#}', 1, 100003], - [TokenType.EOF, null, 1, 100005] + ["COMMENT_START", '{#', 1, 1], + ["TEXT", value, 1, 3], + ["COMMENT_END", '#}', 1, 100003], + ["EOF", null, 1, 100005] ]); test.end(); @@ -1076,16 +1076,16 @@ bla let tokens = lexer.tokenize('{#rn#}\r\n{#r#}\r{#n#}\n'); testTokens(test, tokens, [ - [TokenType.COMMENT_START, '{#', 1, 1], - [TokenType.TEXT, 'rn', 1, 3], - [TokenType.COMMENT_END, '#}\r\n', 1, 5], - [TokenType.COMMENT_START, '{#', 2, 1], - [TokenType.TEXT, 'r', 2, 3], - [TokenType.COMMENT_END, '#}\r', 2, 4], - [TokenType.COMMENT_START, '{#', 3, 1], - [TokenType.TEXT, 'n', 3, 3], - [TokenType.COMMENT_END, '#}\n', 3, 4], - [TokenType.EOF, null, 4, 1] + ["COMMENT_START", '{#', 1, 1], + ["TEXT", 'rn', 1, 3], + ["COMMENT_END", '#}\r\n', 1, 5], + ["COMMENT_START", '{#', 2, 1], + ["TEXT", 'r', 2, 3], + ["COMMENT_END", '#}\r', 2, 4], + ["COMMENT_START", '{#', 3, 1], + ["TEXT", 'n', 3, 3], + ["COMMENT_END", '#}\n', 3, 4], + ["EOF", null, 4, 1] ]); test.test('except when using line whitespace trimming on the right', (test) => { @@ -1093,8 +1093,8 @@ bla bar`); testTokens(test, [tokens[3], tokens[4]], [ - [TokenType.COMMENT_END, '#}', 1, 7], - [TokenType.TEXT, '\nbar', 1, 9] + ["COMMENT_END", '#}', 1, 7], + ["TEXT", '\nbar', 1, 9] ]); test.end(); @@ -1108,15 +1108,15 @@ bar`); let tokens = lexer.tokenize('{# a #}{{foo}}'); testTokens(test, tokens, [ - [TokenType.COMMENT_START, '{#', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.TEXT, 'a', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 5], - [TokenType.COMMENT_END, '#}', 1, 6], - [TokenType.VARIABLE_START, '{{', 1, 8], - [TokenType.NAME, 'foo', 1, 10], - [TokenType.VARIABLE_END, '}}', 1, 13], - [TokenType.EOF, null, 1, 15] + ["COMMENT_START", '{#', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["TEXT", 'a', 1, 4], + ["WHITESPACE", ' ', 1, 5], + ["COMMENT_END", '#}', 1, 6], + ["VARIABLE_START", '{{', 1, 8], + ["NAME", 'foo', 1, 10], + ["VARIABLE_END", '}}', 1, 13], + ["EOF", null, 1, 15] ]); test.end(); @@ -1127,12 +1127,12 @@ bar`); let tokens = lexer.tokenize('{# {{a}} #}'); testTokens(test, tokens, [ - [TokenType.COMMENT_START, '{#', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.TEXT, '{{a}}', 1, 4], - [TokenType.WHITESPACE, ' ', 1, 9], - [TokenType.COMMENT_END, '#}', 1, 10], - [TokenType.EOF, null, 1, 12] + ["COMMENT_START", '{#', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["TEXT", '{{a}}', 1, 4], + ["WHITESPACE", ' ', 1, 9], + ["COMMENT_END", '#}', 1, 10], + ["EOF", null, 1, 12] ]); test.end(); @@ -1146,17 +1146,17 @@ bar`); let tokens = lexer.tokenize('{{ [1, 2] }}'); testTokens(test, tokens, [ - [TokenType.VARIABLE_START, '{{', 1, 1], - [TokenType.WHITESPACE, ' ', 1, 3], - [TokenType.PUNCTUATION, '[', 1, 4], - [TokenType.NUMBER, '1', 1, 5], - [TokenType.PUNCTUATION, ',', 1, 6], - [TokenType.WHITESPACE, ' ', 1, 7], - [TokenType.NUMBER, '2', 1, 8], - [TokenType.PUNCTUATION, ']', 1, 9], - [TokenType.WHITESPACE, ' ', 1, 10], - [TokenType.VARIABLE_END, '}}', 1, 11], - [TokenType.EOF, null, 1, 13] + ["VARIABLE_START", '{{', 1, 1], + ["WHITESPACE", ' ', 1, 3], + ["PUNCTUATION", '[', 1, 4], + ["NUMBER", '1', 1, 5], + 
["PUNCTUATION", ',', 1, 6], + ["WHITESPACE", ' ', 1, 7], + ["NUMBER", '2', 1, 8], + ["PUNCTUATION", ']', 1, 9], + ["WHITESPACE", ' ', 1, 10], + ["VARIABLE_END", '}}', 1, 11], + ["EOF", null, 1, 13] ]); test.test('unclosed bracket', (test) => { @@ -1200,8 +1200,8 @@ bar`); bar`); testTokens(test, [tokens[2], tokens[3]], [ - [TokenType.TAG_END, '%}\n', 1, 6], - [TokenType.TEXT, 'bar', 2, 1] + ["TAG_END", '%}\n', 1, 6], + ["TEXT", 'bar', 2, 1] ]); test.test('except when using line whitespace trimming on the right', (test) => { @@ -1209,8 +1209,8 @@ bar`); bar`); testTokens(test, [tokens[3], tokens[4]], [ - [TokenType.TAG_END, '%}', 1, 7], - [TokenType.TEXT, '\nbar', 1, 9] + ["TAG_END", '%}', 1, 7], + ["TEXT", '\nbar', 1, 9] ]); test.end(); @@ -1221,10 +1221,10 @@ bar`); bar{%endverbatim%} foo`); testTokens(test, [tokens[2], tokens[3], tokens[6], tokens[7]], [ - [TokenType.TAG_END, '%}', 1, 11], - [TokenType.TEXT, '\nbar', 1, 13], - [TokenType.TAG_END, '%}', 2, 17], - [TokenType.TEXT, '\nfoo', 2, 19], + ["TAG_END", '%}', 1, 11], + ["TEXT", '\nbar', 1, 13], + ["TAG_END", '%}', 2, 17], + ["TEXT", '\nfoo', 2, 19], ]); test.end(); @@ -1240,16 +1240,16 @@ foo`); bar`); testTokens(test, tokens, [ - [TokenType.TEXT, 'foo\n', 1, 1], - [TokenType.TAG_START, '{%', 2, 1], - [TokenType.WHITESPACE, ' ', 2, 3], - [TokenType.NAME, 'line', 2, 4], - [TokenType.WHITESPACE, ' ', 2, 8], - [TokenType.NUMBER, '5', 2, 9], - [TokenType.WHITESPACE, ' ', 2, 10], - [TokenType.TAG_END, '%}', 2, 11], - [TokenType.TEXT, '\nbar', 5, 0], - [TokenType.EOF, null, 6, 4] + ["TEXT", 'foo\n', 1, 1], + ["TAG_START", '{%', 2, 1], + ["WHITESPACE", ' ', 2, 3], + ["NAME", 'line', 2, 4], + ["WHITESPACE", ' ', 2, 8], + ["NUMBER", '5', 2, 9], + ["WHITESPACE", ' ', 2, 10], + ["TAG_END", '%}', 2, 11], + ["TEXT", '\nbar', 5, 0], + ["EOF", null, 6, 4] ]); tokens = lexer.tokenize(`foo @@ -1257,14 +1257,14 @@ bar`); bar`); testTokens(test, tokens, [ - [TokenType.TEXT, 'foo\n', 1, 1], - [TokenType.TAG_START, '{%', 2, 1], - [TokenType.NAME, 'line', 2, 3], - [TokenType.WHITESPACE, ' ', 2, 7], - [TokenType.NUMBER, '5', 2, 8], - [TokenType.TAG_END, '%}', 2, 9], - [TokenType.TEXT, '\nbar', 5, 0], - [TokenType.EOF, null, 6, 4] + ["TEXT", 'foo\n', 1, 1], + ["TAG_START", '{%', 2, 1], + ["NAME", 'line', 2, 3], + ["WHITESPACE", ' ', 2, 7], + ["NUMBER", '5', 2, 8], + ["TAG_END", '%}', 2, 9], + ["TEXT", '\nbar', 5, 0], + ["EOF", null, 6, 4] ]); test.end(); diff --git a/test/unit/lib/Token/test.ts b/test/unit/lib/Token/test.ts index 28786ef..271bc8f 100644 --- a/test/unit/lib/Token/test.ts +++ b/test/unit/lib/Token/test.ts @@ -1,42 +1,41 @@ import * as tape from 'tape'; import {Token} from '../../../../src/lib/Token'; -import {TokenType} from '../../../../src/lib/TokenType'; tape('Token', (test) => { test.test('test', (test) => { test.test('accept a single parameter', (test) => { - let token = new Token(TokenType.TEXT, 'foo', 1, 1); + let token = new Token("TEXT", 'foo', 1, 1); - test.true(token.test(TokenType.TEXT)); - test.false(token.test(TokenType.STRING)); + test.true(token.test("TEXT")); + test.false(token.test("STRING")); test.end(); }); test.test('accept two parameters', (test) => { test.test('with string as second parameter', (test) => { - let token = new Token(TokenType.TEXT, 'foo', 1, 1); + let token = new Token("TEXT", 'foo', 1, 1); - test.true(token.test(TokenType.TEXT, 'foo')); - test.false(token.test(TokenType.TEXT, 'bar')); + test.true(token.test("TEXT", 'foo')); + test.false(token.test("TEXT", 'bar')); test.end(); }); test.test('with number 
as second parameter', (test) => {
-            let token = new Token(TokenType.TEXT, '5', 1, 1);
+            let token = new Token("TEXT", '5', 1, 1);
-            test.true(token.test(TokenType.TEXT, 5));
-            test.false(token.test(TokenType.TEXT, 6));
+            test.true(token.test("TEXT", 5));
+            test.false(token.test("TEXT", 6));
             test.end();
         });
         test.test('with array of strings as second parameter', (test) => {
-            let token = new Token(TokenType.TEXT, 'foo', 1, 1);
+            let token = new Token("TEXT", 'foo', 1, 1);
-            test.true(token.test(TokenType.TEXT, ['foo', 'bar']));
-            test.false(token.test(TokenType.TEXT, ['fooo', 'bar']));
+            test.true(token.test("TEXT", ['foo', 'bar']));
+            test.false(token.test("TEXT", ['fooo', 'bar']));
             test.end();
         });
@@ -46,7 +45,7 @@ tape('Token', (test) => {
     });
     test.test('serialize', (test) => {
-        let token = new Token(TokenType.TEXT, '\nfoo\nbar\n', 1, 1);
+        let token = new Token("TEXT", '\nfoo\nbar\n', 1, 1);
         let expected = `
 foo
@@ -59,16 +58,14 @@ bar
     });
     test.test('toString', (test) => {
-        let token = new Token(TokenType.TEXT, '\nfoo\nbar\n', 1, 1);
+        let token = new Token("TEXT", '\nfoo\nbar\n', 1, 1);
         let expected = `TEXT(\nfoo\nbar\n)`;
         test.same(token.toString(), expected);
-
-        test.end();
-
+
         test.test('on token with null content', (test) => {
-            let token = new Token(TokenType.TEXT, null, 1, 1);
+            let token = new Token("TEXT", null, 1, 1);
             let expected = `TEXT()`;
diff --git a/test/unit/lib/TokenStream/test.ts b/test/unit/lib/TokenStream/test.ts
index bf19221..756e6ac 100644
--- a/test/unit/lib/TokenStream/test.ts
+++ b/test/unit/lib/TokenStream/test.ts
@@ -1,101 +1,100 @@
 import * as tape from 'tape';
 import {astVisitor, TokenStream} from '../../../../src/lib/TokenStream';
 import {Token} from "../../../../src/lib/Token";
-import {TokenType} from "../../../../src/lib/TokenType";
 const sinon = require('sinon');
 tape('TokenStream', (test) => {
     test.test('traversal', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
-        test.true(stream.current.test(TokenType.NAME, 'foo'));
-        test.true(stream.next().test(TokenType.NAME, 'foo'), 'next returns the current token');
-        test.true(stream.current.test(TokenType.TEXT, 'foo'), 'next increments the pointer');
+        test.true(stream.current.test("NAME", 'foo'));
+        test.true(stream.next().test("NAME", 'foo'), 'next returns the current token');
+        test.true(stream.current.test("TEXT", 'foo'), 'next increments the pointer');
         stream.next();
-        test.true(stream.current.test(TokenType.STRING, 'foo'));
+        test.true(stream.current.test("STRING", 'foo'));
         stream.next();
         stream.rewind();
-        test.true(stream.current.test(TokenType.NAME, 'foo'), 'rewind actually rewinds the stream');
+        test.true(stream.current.test("NAME", 'foo'), 'rewind actually rewinds the stream');
-        test.true(stream.nextIf(TokenType.NAME, 'foo').test(TokenType.NAME, 'foo'), 'nextIf returns the tested token when the test is successful');
-        test.true(stream.current.test(TokenType.TEXT, 'foo'), 'nextIf increments the pointer when the test is successful');
-        test.false(stream.nextIf(TokenType.NAME, 'foo'));
-        test.true(stream.nextIf(TokenType.TEXT), 'nextIf support a single parameter');
+        test.true(stream.nextIf("NAME", 'foo').test("NAME", 'foo'), 'nextIf returns the tested token when the test is successful');
+        test.true(stream.current.test("TEXT", 'foo'), 'nextIf increments the pointer when the test is successful');
+        test.false(stream.nextIf("NAME", 'foo'));
+        test.true(stream.nextIf("TEXT"), 'nextIf support a single parameter');
         test.end();
     });
     test.test('lookup', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
         test.same(stream.look(-1), null);
-        test.true(stream.look(0).test(TokenType.NAME, 'foo'));
-        test.true(stream.look().test(TokenType.TEXT, 'foo'));
-        test.true(stream.look(1).test(TokenType.TEXT, 'foo'));
-        test.true(stream.look(2).test(TokenType.STRING, 'foo'));
+        test.true(stream.look(0).test("NAME", 'foo'));
+        test.true(stream.look().test("TEXT", 'foo'));
+        test.true(stream.look(1).test("TEXT", 'foo'));
+        test.true(stream.look(2).test("STRING", 'foo'));
         test.same(stream.look(3), null);
         stream.next();
-        test.true(stream.look(-1).test(TokenType.NAME, 'foo'));
+        test.true(stream.look(-1).test("NAME", 'foo'));
         test.end();
     });
     test.test('test', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
-        test.true(stream.test(TokenType.NAME, 'foo'));
-        test.false(stream.test(TokenType.TEXT, 'foo'));
-        test.true(stream.test(TokenType.NAME));
+        test.true(stream.test("NAME", 'foo'));
+        test.false(stream.test("TEXT", 'foo'));
+        test.true(stream.test("NAME"));
         stream.next();
-        test.true(stream.test(TokenType.TEXT, 'foo'));
+        test.true(stream.test("TEXT", 'foo'));
         test.end();
     });
     test.test('injection', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
         stream.injectTokens([
-            new Token(TokenType.NAME, 'bar', 1, 1)
+            new Token("NAME", 'bar', 1, 1)
         ]);
-        test.true(stream.test(TokenType.NAME, 'bar'));
+        test.true(stream.test("NAME", 'bar'));
         stream.injectTokens([
-            new Token(TokenType.TEXT, 'bar', 1, 1),
-            new Token(TokenType.STRING, 'bar', 1, 1)
+            new Token("TEXT", 'bar', 1, 1),
+            new Token("STRING", 'bar', 1, 1)
         ]);
-        test.true(stream.test(TokenType.TEXT, 'bar'));
-        test.true(stream.look().test(TokenType.STRING, 'bar'));
-        test.true(stream.look(2).test(TokenType.NAME, 'bar'));
+        test.true(stream.test("TEXT", 'bar'));
+        test.true(stream.look().test("STRING", 'bar'));
+        test.true(stream.look(2).test("NAME", 'bar'));
         test.end();
     });
     test.test('toString', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
        ]);
         test.same(stream.toString(), `NAME(foo)
@@ -107,9 +106,9 @@ STRING(foo)`);
     test.test('serialize', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
         test.same(stream.serialize(), `foofoofoo`);
@@ -119,27 +118,27 @@ STRING(foo)`);
     test.test('traverse', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
         let tokens = stream.traverse((token: Token, stream: TokenStream): Token => {
-            if (token.test(TokenType.TEXT)) {
+            if (token.test("TEXT")) {
                 return token;
             }
         });
-        test.true(tokens[0].test(TokenType.TEXT));
+        test.true(tokens[0].test("TEXT"));
         test.end();
     });
     test.test('toAst', (test) => {
         let stream = new TokenStream([
-            new Token(TokenType.NAME, 'foo', 1, 1),
-            new Token(TokenType.TEXT, 'foo', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 1)
+            new Token("NAME", 'foo', 1, 1),
+            new Token("TEXT", 'foo', 1, 1),
+            new Token("STRING", 'foo', 1, 1)
         ]);
         let traverseSpy = sinon.spy(stream, 'traverse');
@@ -157,121 +156,121 @@ STRING(foo)`);
         let stream: TokenStream;
         stream = new TokenStream([
-            new Token(TokenType.EOF, null, 10, 5)
+            new Token("EOF", null, 10, 5)
         ]);
-        test.same(astVisitor(stream.current, stream), stream.current, 'keeps EOF untouched');
+        test.same(astVisitor(stream.current, stream), stream.current, 'keeps "EOF" untouched');
         stream = new TokenStream([
-            new Token(TokenType.NUMBER, '5.78', 10, 5)
+            new Token("NUMBER", '5.78', 10, 5)
         ]);
-        test.true(astVisitor(stream.current, stream).test(TokenType.NUMBER, 5.78), 'sanitizes NUMBER value');
+        test.true(astVisitor(stream.current, stream).test("NUMBER", 5.78), 'sanitizes "NUMBER" value');
         stream = new TokenStream([
-            new Token(TokenType.INTERPOLATION_START, '#{', 10, 5)
+            new Token("INTERPOLATION_START", '#{', 10, 5)
         ]);
         test.same(astVisitor(stream.current, stream), stream.current, 'keeps relevant token untouched');
         stream = new TokenStream([
-            new Token(TokenType.OPENING_QUOTE, '"', 1, 4),
-            new Token(TokenType.TRIMMING_MODIFIER, '-', 1, 1),
-            new Token(TokenType.WHITESPACE, ' ', 1, 2),
-            new Token(TokenType.LINE_TRIMMING_MODIFIER, '~', 1, 3),
-            new Token(TokenType.CLOSING_QUOTE, '"', 1, 5)
+            new Token("OPENING_QUOTE", '"', 1, 4),
+            new Token("TRIMMING_MODIFIER", '-', 1, 1),
+            new Token("WHITESPACE", ' ', 1, 2),
+            new Token("LINE_TRIMMING_MODIFIER", '~', 1, 3),
+            new Token("CLOSING_QUOTE", '"', 1, 5)
         ]);
-        test.false(astVisitor(stream.current, stream), 'filters OPENING_QUOTE tokens');
+        test.false(astVisitor(stream.current, stream), 'filters "OPENING_QUOTE" tokens');
         stream.next();
-        test.false(astVisitor(stream.current, stream), 'filters TRIMMING_MODIFIER tokens');
+        test.false(astVisitor(stream.current, stream), 'filters "TRIMMING_MODIFIER" tokens');
         stream.next();
-        test.false(astVisitor(stream.current, stream), 'filters WHITESPACE tokens');
+        test.false(astVisitor(stream.current, stream), 'filters "WHITESPACE" tokens');
         stream.next();
-        test.false(astVisitor(stream.current, stream), 'filters LINE_TRIMMING_MODIFIER tokens');
+        test.false(astVisitor(stream.current, stream), 'filters "LINE_TRIMMING_MODIFIER" tokens');
         stream.next();
-        test.false(astVisitor(stream.current, stream), 'filters CLOSING_QUOTE tokens');
+        test.false(astVisitor(stream.current, stream), 'filters "CLOSING_QUOTE" tokens');
         stream = new TokenStream([
-            new Token(TokenType.OPENING_QUOTE, '"', 1, 1),
-            new Token(TokenType.STRING, 'foo', 1, 2)
+            new Token("OPENING_QUOTE", '"', 1, 1),
+            new Token("STRING", 'foo', 1, 2)
         ]);
         stream.next();
-        test.same(astVisitor(stream.current, stream).column, 1, 'maps STRING tokens column to their corresponding OPENING_QUOTE');
+        test.same(astVisitor(stream.current, stream).column, 1, 'maps "STRING" tokens column to their corresponding "OPENING_QUOTE"');
         stream = new TokenStream([
-            new Token(TokenType.TEXT, 'foo\n ', 1, 1),
-            new Token(TokenType.TAG_START, '{%', 2, 1),
-            new Token(TokenType.TRIMMING_MODIFIER, '-', 2, 3)
+            new Token("TEXT", 'foo\n ', 1, 1),
+            new Token("TAG_START", '{%', 2, 1),
+            new Token("TRIMMING_MODIFIER", '-', 2, 3)
         ]);
-        test.true(astVisitor(stream.current, stream).test(TokenType.TEXT, 'foo'), 'handles trimming modifier on left side');
+        test.true(astVisitor(stream.current, stream).test("TEXT", 'foo'), 'handles trimming modifier on left side');
         stream = new TokenStream([
-            new Token(TokenType.TRIMMING_MODIFIER, '-', 1, 1),
-            new Token(TokenType.TAG_END, '%}', 1, 2),
-            new Token(TokenType.TEXT, ' \nfoo', 1, 4)
+            new Token("TRIMMING_MODIFIER", '-', 1, 1),
+            new Token("TAG_END", '%}', 1, 2),
+            new Token("TEXT", ' \nfoo', 1, 4)
         ]);
         stream.next();
         stream.next();
-        test.true(astVisitor(stream.current, stream).test(TokenType.TEXT, 'foo'), 'handles trimming modifier on right side');
+        test.true(astVisitor(stream.current, stream).test("TEXT", 'foo'), 'handles trimming modifier on right side');
         stream = new TokenStream([
-            new Token(TokenType.TEXT, 'foo\n ', 1, 1),
-            new Token(TokenType.TAG_START, '{%', 2, 1),
-            new Token(TokenType.LINE_TRIMMING_MODIFIER, '~', 2, 3)
+            new Token("TEXT", 'foo\n ', 1, 1),
+            new Token("TAG_START", '{%', 2, 1),
+            new Token("LINE_TRIMMING_MODIFIER", '~', 2, 3)
         ]);
-        test.true(astVisitor(stream.current, stream).test(TokenType.TEXT, 'foo\n'), 'handles line trimming modifier on left side');
+        test.true(astVisitor(stream.current, stream).test("TEXT", 'foo\n'), 'handles line trimming modifier on left side');
         stream = new TokenStream([
-            new Token(TokenType.LINE_TRIMMING_MODIFIER, '~', 1, 1),
-            new Token(TokenType.TAG_END, '%}', 1, 2),
-            new Token(TokenType.TEXT, ' \nfoo', 1, 4)
+            new Token("LINE_TRIMMING_MODIFIER", '~', 1, 1),
+            new Token("TAG_END", '%}', 1, 2),
+            new Token("TEXT", ' \nfoo', 1, 4)
         ]);
         stream.next();
         stream.next();
-        test.true(astVisitor(stream.current, stream).test(TokenType.TEXT, '\nfoo'), 'handles line trimming modifier on right side');
+        test.true(astVisitor(stream.current, stream).test("TEXT", '\nfoo'), 'handles line trimming modifier on right side');
         stream = new TokenStream([
-            new Token(TokenType.OPERATOR, 'foo bar', 1, 1)
+            new Token("OPERATOR", 'foo bar', 1, 1)
         ]);
-        test.true(astVisitor(stream.current, stream).test(TokenType.OPERATOR, 'foo bar'), 'removes unnecessary operator spaces');
+        test.true(astVisitor(stream.current, stream).test("OPERATOR", 'foo bar'), 'removes unnecessary operator spaces');
         stream = new TokenStream([
-            new Token(TokenType.TEXT, '', 1, 1),
+            new Token("TEXT", '', 1, 1),
         ]);
-        test.false(astVisitor(stream.current, stream), 'filters empty TEXT tokens out');
+        test.false(astVisitor(stream.current, stream), 'filters empty "TEXT" tokens out');
         stream = new TokenStream([
-            new Token(TokenType.STRING, '\\z\\t', 1, 1)
+            new Token("STRING", '\\z\\t', 1, 1)
         ]);
-        test.true(astVisitor(stream.current, stream).test(TokenType.STRING, 'z\t'), 'converts C-style escape sequences');
+        test.true(astVisitor(stream.current, stream).test("STRING", 'z\t'), 'converts C-style escape sequences');
         stream = new TokenStream([
-            new Token(TokenType.TEXT, 'a\\nb', 1, 1)
+            new Token("TEXT", 'a\\nb', 1, 1)
         ]);
-        test.true(astVisitor(stream.current, stream).test(TokenType.TEXT, 'a\\nb'), 'doesn\'t strip C slashes on TEXT tokens');
+        test.true(astVisitor(stream.current, stream).test("TEXT", 'a\\nb'), 'doesn\'t strip C slashes on "TEXT" tokens');
-        test.test('replaces OPENING_QUOTE tokens immediately followed by a CLOSING_QUOTE token with empty string tokens', (test) => {
+        test.test('replaces "OPENING_QUOTE" tokens immediately followed by a "CLOSING_QUOTE" token with empty string tokens', (test) => {
             let stream = new TokenStream([
-                new Token(TokenType.OPENING_QUOTE, '"', 1, 5),
-                new Token(TokenType.CLOSING_QUOTE, '"', 1, 6)
+                new Token("OPENING_QUOTE", '"', 1, 5),
+                new Token("CLOSING_QUOTE", '"', 1, 6)
             ]);
             let token = astVisitor(stream.current, stream);
-            test.true(token.test(TokenType.STRING, ''));
+            test.true(token.test("STRING", ''));
             test.same(token.line, 1);
             test.same(token.column, 5);
diff --git a/test/unit/lib/TokenType/test.ts b/test/unit/lib/TokenType/test.ts
deleted file mode 100644
index b8c252e..0000000
--- a/test/unit/lib/TokenType/test.ts
+++ /dev/null
@@ -1,53 +0,0 @@
-import * as tape from 'tape';
-import {TokenType, typeToString} from '../../../../src/lib/TokenType';
-
-tape('TokenType', (test) => {
-    test.test('typeToString', (test) => {
-        test.same(typeToString(TokenType.TAG_END), 'TokenType.TAG_END');
-        test.same(typeToString(TokenType.TAG_START), 'TokenType.TAG_START');
-        test.same(typeToString(TokenType.EOF), 'TokenType.EOF');
-        test.same(typeToString(TokenType.INTERPOLATION_END), 'TokenType.INTERPOLATION_END');
-        test.same(typeToString(TokenType.INTERPOLATION_START), 'TokenType.INTERPOLATION_START');
-        test.same(typeToString(TokenType.NAME), 'TokenType.NAME');
-        test.same(typeToString(TokenType.NUMBER), 'TokenType.NUMBER');
-        test.same(typeToString(TokenType.OPERATOR), 'TokenType.OPERATOR');
-        test.same(typeToString(TokenType.PUNCTUATION), 'TokenType.PUNCTUATION');
-        test.same(typeToString(TokenType.STRING), 'TokenType.STRING');
-        test.same(typeToString(TokenType.TEXT), 'TokenType.TEXT');
-        test.same(typeToString(TokenType.VARIABLE_END), 'TokenType.VARIABLE_END');
-        test.same(typeToString(TokenType.VARIABLE_START), 'TokenType.VARIABLE_START');
-        test.same(typeToString(TokenType.WHITESPACE), 'TokenType.WHITESPACE');
-        test.same(typeToString(TokenType.CLOSING_QUOTE), 'TokenType.CLOSING_QUOTE');
-        test.same(typeToString(TokenType.OPENING_QUOTE), 'TokenType.OPENING_QUOTE');
-        test.same(typeToString(TokenType.TRIMMING_MODIFIER), 'TokenType.TRIMMING_MODIFIER');
-        test.same(typeToString(TokenType.LINE_TRIMMING_MODIFIER), 'TokenType.LINE_TRIMMING_MODIFIER');
-        test.same(typeToString(TokenType.TAG_END, true), 'TAG_END');
-        test.same(typeToString(TokenType.TAG_START, true), 'TAG_START');
-        test.same(typeToString(TokenType.EOF, true), 'EOF');
-        test.same(typeToString(TokenType.INTERPOLATION_END, true), 'INTERPOLATION_END');
-        test.same(typeToString(TokenType.INTERPOLATION_START, true), 'INTERPOLATION_START');
-        test.same(typeToString(TokenType.NAME, true), 'NAME');
-        test.same(typeToString(TokenType.NUMBER, true), 'NUMBER');
-        test.same(typeToString(TokenType.OPERATOR, true), 'OPERATOR');
-        test.same(typeToString(TokenType.PUNCTUATION, true), 'PUNCTUATION');
-        test.same(typeToString(TokenType.STRING, true), 'STRING');
-        test.same(typeToString(TokenType.TEXT, true), 'TEXT');
-        test.same(typeToString(TokenType.VARIABLE_END, true), 'VARIABLE_END');
-        test.same(typeToString(TokenType.VARIABLE_START, true), 'VARIABLE_START');
-        test.same(typeToString(TokenType.COMMENT_START, true), 'COMMENT_START');
-        test.same(typeToString(TokenType.COMMENT_END, true), 'COMMENT_END');
-        test.same(typeToString(TokenType.WHITESPACE, true), 'WHITESPACE');
-        test.same(typeToString(TokenType.CLOSING_QUOTE, true), 'CLOSING_QUOTE');
-        test.same(typeToString(TokenType.OPENING_QUOTE, true), 'OPENING_QUOTE');
-        test.same(typeToString(TokenType.TRIMMING_MODIFIER, true), 'TRIMMING_MODIFIER');
-        test.same(typeToString(TokenType.LINE_TRIMMING_MODIFIER, true), 'LINE_TRIMMING_MODIFIER');
-
-        test.throws(function () {
-            typeToString(-999 as any);
-        }, 'Token type "-999" does not exist.');
-
-        test.end();
-    });
-
-    test.end();
-});
\ No newline at end of file