|
71 | 71 | - getFileContents, fileExists |
72 | 72 | - printf |
73 | 73 | - run |
74 | | - - tokenize, newToken, concatTokens |
| 74 | + - tokenize, newToken, concatTokens, removeUselessTokens, eachToken, isToken |
75 | 75 | - toLua, serialize |
76 | 76 | Only in metaprogram: |
77 | 77 | - outputValue, outputLua |
@@ -119,6 +119,8 @@ local ESCAPE_SEQUENCES = { |
119 | 119 | ["\'"] = [[\']], |
120 | 120 | } |
121 | 121 |
|
-- Token types that carry no code meaning (skipped/stripped by the token
-- utilities below).
local USELESS_TOKENS = {
	["whitespace"] = true,
	["comment"]    = true,
}
122 | 124 | local ERROR_UNFINISHED_VALUE = 1 |
123 | 125 |
|
124 | 126 | local major, minor = _VERSION:match"Lua (%d+)%.(%d+)" |
@@ -175,7 +177,7 @@ function printf(s, ...) |
175 | 177 | end |
176 | 178 | function printTokens(tokens, filter) |
177 | 179 | for i, tok in ipairs(tokens) do |
178 | | - if not (filter and (tok.type == "whitespace" or tok.type == "comment")) then |
| 180 | + if not (filter and USELESS_TOKENS[tok.type]) then |
179 | 181 | printf("%d %-12s '%s'", i, tok.type, (F("%q", tostring(tok.value)):sub(2, -2):gsub("\\\n", "\\n"))) |
180 | 182 | end |
181 | 183 | end |
@@ -642,7 +644,7 @@ function escapePattern(s) |
642 | 644 | end |
643 | 645 |
|
644 | 646 | function maybeOutputLineNumber(parts, tok, lastLn, fromMetaToOutput) |
645 | | - if tok.line == lastLn or tok.type == "whitespace" or tok.type == "comment" then return lastLn end |
| 647 | + if tok.line == lastLn or USELESS_TOKENS[tok.type] then return lastLn end |
646 | 648 |
|
647 | 649 | -- if fromMetaToOutput then |
648 | 650 | -- table.insert(parts, '__LUA"--[[@'..tok.line..']]"\n') |
|
744 | 746 | -- token, index = getNextUsableToken( tokens, startIndex [, maxIndex=#tokens ] ) |
745 | 747 | function getNextUsableToken(tokens, i, iEnd) |
746 | 748 | for i = i, math.min((iEnd or math.huge), #tokens) do |
747 | | - if not isAny(tokens[i].type, "whitespace","comment") then |
| 749 | + if not USELESS_TOKENS[tokens[i].type] then |
748 | 750 | return tokens[i], i |
749 | 751 | end |
750 | 752 | end |
@@ -861,6 +863,52 @@ function metaFuncs.tokenize(lua, allowMetaTokens) |
861 | 863 | return tokens, err |
862 | 864 | end |
863 | 865 |
|
-- removeUselessTokens()
-- Remove whitespace and comment tokens, in-place.
-- removeUselessTokens( tokens )
function metaFuncs.removeUselessTokens(tokens)
	local originalLength = #tokens
	local keptCount      = 0

	-- Compact the array: move every useful token down to the next free slot.
	for i = 1, originalLength do
		local tok = tokens[i]
		if not USELESS_TOKENS[tok.type] then
			keptCount         = keptCount + 1
			tokens[keptCount] = tok
		end
	end

	-- Clear the now-unused tail so the array keeps a valid sequence length.
	for i = originalLength, keptCount + 1, -1 do
		tokens[i] = nil
	end
end
| 885 | + |
-- eachToken()
-- Loop through tokens.
-- for index, token in eachToken( tokens [, ignoreUselessTokens=false ] ) do

-- Stateless iterator step: yields the next non-whitespace/non-comment token
-- after index i, or nothing once the tokens are exhausted.
local function getNextUsefulToken(tokens, i)
	repeat
		i = i + 1
		local tok = tokens[i]
		if not tok then return end
		if not USELESS_TOKENS[tok.type] then
			return i, tok
		end
	until false
end

function metaFuncs.eachToken(tokens, ignoreUselessTokens)
	if not ignoreUselessTokens then
		return ipairs(tokens)
	end
	return getNextUsefulToken, tokens, 0
end
| 904 | + |
-- isToken()
-- Check if a token is of a specific type, optionally also check its value.
-- bool = isToken( token, tokenType [, tokenValue=any ] )
function metaFuncs.isToken(tok, tokType, v)
	if tok.type ~= tokType then return false end
	if v ~= nil and tok.value ~= v then return false end
	return true
end
| 911 | + |
864 | 912 | -- newToken() |
865 | 913 | -- Create a new token. Different token types take different arguments. |
866 | 914 | -- token = newToken( tokenType, ... ) |
@@ -1025,7 +1073,7 @@ local function getLineCountWithCode(tokens) |
1025 | 1073 | local lastLine = 0 |
1026 | 1074 |
|
1027 | 1075 | for _, tok in ipairs(tokens) do |
1028 | | - if not (tok.type == "comment" or tok.type == "whitespace") and tok.lineEnd > lastLine then |
| 1076 | + if not USELESS_TOKENS[tok.type] and tok.lineEnd > lastLine then |
1029 | 1077 | lineCount = lineCount+(tok.lineEnd-tok.line+1) |
1030 | 1078 | lastLine = tok.lineEnd |
1031 | 1079 | end |
|
0 commit comments