diff --git a/.gitignore b/.gitignore index 00aab34cd..9852a9075 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ t.* coverage node_modules +*.swp +*.vim +src/*.js diff --git a/lib/lexer.js b/lib/lexer.js index 602e868cd..f7a3ae8c6 100644 --- a/lib/lexer.js +++ b/lib/lexer.js @@ -117,7 +117,7 @@ exports.checkConsistency = function(camel, id){ } }; exports.doID = function(code, index){ - var regexMatch, input, id, e, last, ref$, tag, ref1$, that; + var regexMatch, input, id, e, last, ref$, tag, ref1$, that, a; input = (regexMatch = (ID.lastIndex = index, ID).exec(code))[0]; if (!input) { return 0; @@ -339,10 +339,13 @@ exports.doID = function(code, index){ break; case 'to': case 'til': - this.forange() && this.tokens.push(['FROM', '', this.line, this.column], ['STRNUM', '0', this.line, this.column]); - if (this.fget('from')) { - this.fset('from', false); - this.fset('by', true); + if (this.forange()) { + this.tokens.push(['FROM', '', this.line, this.column]); + this.tokens.push(['STRNUM', '0', this.line, this.column]); + } + if ((a = this.flags[this.closes.length]) && a.from) { + a.from = false; + a.by = true; tag = 'TO'; } else if (!last.callable && last[0] === 'STRNUM' && (ref$ = this.tokens)[ref$.length - 2][0] === '[') { last[0] = 'RANGE'; @@ -1678,8 +1681,10 @@ function addImplicitBraces(tokens){ } } function expandLiterals(tokens){ - var i, fromNum, token, sig, ref$, ref1$, lno, cno, ref2$, ref3$, ref4$, char, toNum, tochar, byNum, byp, ref5$, ts, enc, add, i$, n, ref6$, ref7$, len$, word, that; + var i, nFrom, nTo, token, sig, ref$, ref1$, lno, cno, ref2$, char, tochar, nBy, byp, ref3$, ts, enc, i$, nI, ref4$, ref5$, len$, word, that; i = 0; + nFrom = null; + nTo = null; while (token = tokens[++i]) { switch (token[0]) { case 'STRNUM': @@ -1700,57 +1705,66 @@ function expandLiterals(tokens){ tokens[i + 2][0] = 'RANGE_BY'; } token.op = token[1]; - fromNum = 0; + nFrom = 0; // fallthrough case 'RANGE': lno = token[2]; cno = token[3]; - if (fromNum != null 
|| (tokens[i - 1][0] === '[' && tokens[i + 1][0] === 'STRNUM' && ((tokens[i + 2][0] === ']' && (((ref2$ = tokens[i + 1][1].charAt(0)) === '\'' || ref2$ === '"') || +tokens[i + 1][1] >= 0)) || (tokens[i + 2][0] === 'RANGE_BY' && ((ref2$ = tokens[i + 3]) != null ? ref2$[0] : void 8) === 'STRNUM' && ((ref3$ = tokens[i + 4]) != null ? ref3$[0] : void 8) === ']')))) { - if (fromNum == null) { - ref4$ = decode(token[1], lno), fromNum = ref4$[0], char = ref4$[1]; + if (tokens[i - 2][0] !== 'DOT' && (nFrom !== null || (tokens[i - 1][0] === '[' && tokens[i + 1][0] === 'STRNUM' && ((tokens[i + 2][0] === ']' && (tokens[i + 1][1][0] === '"' || tokens[i + 1][1][0] === "'" || +tokens[i + 1][1] >= 0)) || (tokens[i + 2][0] === 'RANGE_BY' && tokens[i + 3] && tokens[i + 4] && tokens[i + 3][0] === 'STRNUM' && tokens[i + 4][0] === ']'))))) { + if (nFrom === null) { + ref2$ = decode(token[1], lno), nFrom = ref2$[0], char = ref2$[1]; } - ref4$ = decode(tokens[i + 1][1], lno), toNum = ref4$[0], tochar = ref4$[1]; - if (toNum == null || char ^ tochar) { + ref2$ = decode(tokens[i + 1][1], lno), nTo = ref2$[0], tochar = ref2$[1]; + if (nTo == null || char ^ tochar) { carp('bad "to" in range', lno); } - byNum = 1; - if (byp = ((ref4$ = tokens[i + 2]) != null ? ref4$[0] : void 8) === 'RANGE_BY') { - if (!(byNum = +((ref5$ = tokens[i + 3]) != null ? ref5$[1] : void 8))) { + nBy = 1; + if (byp = ((ref2$ = tokens[i + 2]) != null ? ref2$[0] : void 8) === 'RANGE_BY') { + if (!(nBy = +((ref3$ = tokens[i + 3]) != null ? ref3$[1] : void 8))) { carp('bad "by" in range', tokens[i + 2][2]); } - } else if (fromNum > toNum) { - byNum = -1; + } else if (nFrom > nTo) { + nBy = -1; } ts = []; enc = char ? character : String; - add = fn$; if (token.op === 'to') { - for (i$ = fromNum; byNum < 0 ? i$ >= toNum : i$ <= toNum; i$ += byNum) { - n = i$; - add(); + for (i$ = nFrom; nBy < 0 ? 
i$ >= nTo : i$ <= nTo; i$ += nBy) { + nI = i$; + ts.push(['STRNUM', enc(nI), lno, cno]); + ts.push([',', ',', lno, cno]); + if (ts.length > 0x10000) { + carp('range limit exceeded', lno); + } } } else { - for (i$ = fromNum; byNum < 0 ? i$ > toNum : i$ < toNum; i$ += byNum) { - n = i$; - add(); + for (i$ = nFrom; nBy < 0 ? i$ > nTo : i$ < nTo; i$ += nBy) { + nI = i$; + ts.push(['STRNUM', enc(nI), lno, cno]); + ts.push([',', ',', lno, cno]); + if (ts.length > 0x10000) { + carp('range limit exceeded', lno); + } } } - ts.pop() || carp('empty range', lno); + if (!ts.pop()) { + carp('empty range', lno); + } tokens.splice.apply(tokens, [i, 2 + 2 * byp].concat(arrayFrom$(ts))); i += ts.length - 1; } else { token[0] = 'STRNUM'; - if (((ref6$ = tokens[i + 2]) != null ? ref6$[0] : void 8) === 'RANGE_BY') { + if (((ref4$ = tokens[i + 2]) != null ? ref4$[0] : void 8) === 'RANGE_BY') { tokens.splice(i + 2, 1, ['BY', 'by', lno, cno]); } tokens.splice(i + 1, 0, ['TO', token.op, lno, cno]); } - fromNum = null; + nFrom = null; break; case 'WORDS': ts = [['[', '[', lno = token[2], cno = token[3]]]; - for (i$ = 0, len$ = (ref7$ = token[1].match(/\S+/g) || '').length; i$ < len$; ++i$) { - word = ref7$[i$]; + for (i$ = 0, len$ = (ref5$ = token[1].match(/\S+/g) || '').length; i$ < len$; ++i$) { + word = ref5$[i$]; ts.push(['STRNUM', string('\'', word, lno), lno, cno], [',', ',', lno, cno]); } tokens.splice.apply(tokens, [i, 1].concat(arrayFrom$(ts), [[']', ']', lno, cno]])); @@ -1760,7 +1774,7 @@ function expandLiterals(tokens){ if (that = tokens[i - 1]) { if (that[1] === 'new') { tokens.splice(i++, 0, ['PARAM(', '', token[2], token[3]], [')PARAM', '', token[2], token[3]], ['->', '', token[2], token[3]]); - } else if ((ref7$ = that[0]) === 'FUNCTION' || ref7$ === 'GENERATOR' || ref7$ === 'LET') { + } else if ((ref5$ = that[0]) === 'FUNCTION' || ref5$ === 'GENERATOR' || ref5$ === 'LET') { tokens.splice(i, 0, ['CALL(', '', token[2], token[3]], [')CALL', '', token[2], token[3]]); i += 2; } 
@@ -1786,7 +1800,7 @@ function expandLiterals(tokens){ } break; case 'BIOP': - if (!token.spaced && ((ref7$ = token[1]) === '+' || ref7$ === '-') && tokens[i + 1][0] !== ')') { + if (!token.spaced && ((ref5$ = token[1]) === '+' || ref5$ === '-') && tokens[i + 1][0] !== ')') { tokens[i][0] = '+-'; } continue; @@ -1797,11 +1811,6 @@ function expandLiterals(tokens){ tokens.splice(++i, 0, [',', ',', token[2], token[3]]); } } - function fn$(){ - if (0x10000 < ts.push(['STRNUM', enc(n), lno, cno], [',', ',', lno, cno])) { - carp('range limit exceeded', lno); - } - } } function detectEnd(tokens, i, ok, go){ var levels, token, tag; diff --git a/src/ast.ls b/src/ast.ls index ae8bcd41d..38b4d698b 100644 --- a/src/ast.ls +++ b/src/ast.ls @@ -3145,8 +3145,11 @@ UTILS = # Copies properties from right to left. import: '''function(obj, src){ - var own = {}.hasOwnProperty; - for (var key in src) if (own.call(src, key)) obj[key] = src[key]; + for (var key in src) { + if (Object.hasOwnProperty.call(src, key)) { + obj[key] = src[key]; + } + } return obj; }''' import-all: '''function(obj, src){ diff --git a/src/lexer.ls b/src/lexer.ls index 87fe3deb9..c785322dd 100644 --- a/src/lexer.ls +++ b/src/lexer.ls @@ -250,14 +250,21 @@ exports <<< return 4 @forange! and tag = 'FROM' case 'to' 'til' - @forange! and @tokens.push do - ['FROM' '' @line, @column] ['STRNUM' '0' @line, @column] - if @fget 'from' - @fset 'from' false - @fset 'by' true - tag = 'TO' - else if not last.callable and last.0 is 'STRNUM' and @tokens[*-2].0 is '[' - last <<< 0:'RANGE' op: id + # check `to` without an explicit `from` + if @forange! 
+ @tokens.push ['FROM' '' @line, @column] + @tokens.push ['STRNUM' '0' @line, @column] + # check the `from` flag + if (a = @flags[@closes.length]) and a.from + # `from` is done, current token is `to`, next is `by` + a.from = false + a.by = true + tag = 'TO' + # check at the array start + else if not last.callable and last.0 == 'STRNUM' and @tokens[*-2].0 == '[' + # STRNUM is converted to RANGE + last.0 = 'RANGE' + last.op = id return id.length else if ']' in @closes @token 'TO' id @@ -1218,7 +1225,8 @@ character = if not JSON? then uxxxx else -> # - Insert `, ` after each non-callable token facing an argument token. !function expand-literals tokens i = 0 - var from-num + nFrom = null + nTo = null while token = tokens[++i] switch token.0 case 'STRNUM' @@ -1239,46 +1247,89 @@ if tokens[i + 2].0 is 'BY' tokens[i + 2].0 = 'RANGE_BY' token.op = token.1 - from-num = 0 + nFrom = 0 fallthrough case 'RANGE' + # {{{ + # prepare lno = token.2 cno = token.3 - if from-num? or (tokens[i-1].0 is '[' - and tokens[i + 1].0 is 'STRNUM' - and ((tokens[i + 2].0 is ']' - and (tokens[i + 1].1.char-at(0) in ['\'' '"'] - or +tokens[i + 1].1 >= 0)) - or (tokens[i + 2].0 is 'RANGE_BY' - and tokens[i + 3]?.0 is 'STRNUM' - and tokens[i + 4]?.0 is ']'))) - unless from-num? - [from-num, char] = decode token.1, lno - [to-num, tochar] = decode tokens[i + 1].1, lno - carp 'bad "to" in range' lno if not to-num? or char .^. tochar - by-num = 1 - if byp = tokens[i + 2]?.0 is 'RANGE_BY' - carp 'bad "by" in range' tokens[i + 2].2 unless by-num = +tokens[i + 3]?.1 - else if from-num > to-num - by-num = -1 - ts = [] - enc = if char then character else String - add = !-> - if 0x10000 < ts.push ['STRNUM' enc n; lno, cno] [',' ',' lno, cno] - carp 'range limit exceeded' lno - if token.op is 'to' - for n from from-num to to-num by by-num then add! - else - for n from from-num til to-num by by-num then add! - ts.pop! 
or carp 'empty range' lno - tokens.splice i, 2 + 2 * byp, ...ts - i += ts.length - 1 + # check token sequence, + # range with accessor should not be expanded + if tokens[i - 2].0 != 'DOT' and + # implicit range (when flag-number is set) or, + (nFrom != null or + # there is a "simple" numeric range starting with `[`.. + (tokens[i - 1].0 == '[' and + tokens[i + 1].0 == 'STRNUM' and + # either end of the range.. + ((tokens[i + 2].0 == ']' and + # with a string literal(?) or a positive number.. + # Q: why this exact check order should matter? + (tokens[i + 1].1.0 == '"' or + tokens[i + 1].1.0 == "'" or + +tokens[i + 1].1 >= 0)) or + # ..or, a full `by` form: `RANGE_BY STRNUM ]` + (tokens[i + 2].0 == 'RANGE_BY' and + tokens[i + 3] and tokens[i + 4] and + tokens[i + 3].0 == 'STRNUM' and + tokens[i + 4].0 == ']')))) + # do RANGE expansion + # determine `from` number + if nFrom == null + # Q: the purpose of char? + [nFrom, char] = decode token.1, lno + # determine `to` number + [nTo, tochar] = decode tokens[i + 1].1, lno + carp 'bad "to" in range' lno if not nTo? or char .^. tochar + # determine `by` number + nBy = 1 + if byp = tokens[i + 2]?.0 is 'RANGE_BY' + carp 'bad "by" in range' tokens[i + 2].2 unless nBy = +tokens[i + 3]?.1 + else if nFrom > nTo + nBy = -1 + # prepare tokens stack + ts = [] + # prepare encoder function + enc = if char + then character + else String + # fill the stack + if token.op == 'to' + for nI from nFrom to nTo by nBy + ts.push ['STRNUM', (enc nI), lno, cno] + ts.push [',', ',', lno, cno] + if ts.length > 0x10000 + carp 'range limit exceeded' lno + else + for nI from nFrom til nTo by nBy + ts.push ['STRNUM', (enc nI), lno, cno] + ts.push [',', ',', lno, cno] + if ts.length > 0x10000 + carp 'range limit exceeded' lno + # remove last (garbage) token from the stack + # and make a short check + if not ts.pop! 
+ carp 'empty range' lno + # replace this with stacked tokens + # the tokens array should be modified *in place* + #tokens = (tokens.slice 0, i) ++ ts ++ (tokens.slice i + 2 + 2*byp) + tokens.splice i, 2 + 2 * byp, ...ts + # advance index + i += ts.length - 1 + # done else - token.0 = 'STRNUM' - if tokens[i + 2]?.0 is 'RANGE_BY' - tokens.splice i + 2, 1, ['BY' 'by' lno, cno] - tokens.splice i + 1, 0, ['TO', token.op, lno, cno] - from-num = null + token.0 = 'STRNUM' + if nFrom != null + # set implicit `from` number (NOTE(review): the lib/lexer.js hunk in this patch lacks this assignment — regenerate or confirm) + token.1 = '0' + else if tokens[i + 2]?.0 is 'RANGE_BY' + tokens.splice i + 2, 1, ['BY' 'by' lno, cno] + # insert `TO` token + tokens.splice i + 1, 0, ['TO', token.op, lno, cno] + # clear flag-number + nFrom = null + # }}} case 'WORDS' ts = [['[' '[' lno = token.2, cno = token.3]] for word in token.1.match /\S+/g or '' @@ -1302,7 +1353,8 @@ character = if not JSON? then uxxxx else -> if not token.spaced and token.1 in <[ + - ]> and tokens[i + 1].0 isnt ')' tokens[i].0 = '+-' continue - default continue + default + continue if token.spaced and tokens[i + 1].0 in ARG tokens.splice ++i, 0 [',' ',' token.2, token.3]