@@ -36,63 +36,42 @@
 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.
-
+import argparse
 import os
 import sys
 import tokenize
 
-from os import path
-
 from pegen.build import generate_token_definitions
-from pegen.grammar import Grammar
 from pegen.grammar_parser import GeneratedParser as GrammarParser
 from pegen.tokenizer import Tokenizer
 from pegjava.java_generator import JavaParserGenerator
 
-verbose_tokenizer = False
-verbose_parser = False
 
 def main():
-    __dir__ = path.dirname(__file__)
-    grammar_file = path.relpath(path.join(__dir__, "pegjava", "python.gram"), os.getcwd())
-    print("Reading", grammar_file)
-    with open(grammar_file) as file:
-        tokenizer = Tokenizer(tokenize.generate_tokens(file.readline), verbose=verbose_tokenizer)
-        parser = GrammarParser(tokenizer, verbose=verbose_parser)
+    parser = argparse.ArgumentParser()
+    parser.add_argument("grammar_file")
+    parser.add_argument("tokens_file")
+    parser.add_argument("output_file")
+    parser.add_argument("--verbose", action="store_true")
+    parser.add_argument("--debug", action="store_true")
+
+    args = parser.parse_args()
+
+    with open(args.grammar_file) as file:
+        tokenizer = Tokenizer(tokenize.generate_tokens(file.readline), verbose=args.verbose)
+        parser = GrammarParser(tokenizer, verbose=args.verbose)
         grammar = parser.start()
 
         if not grammar:
-            sys.exit("Fail")
+            sys.exit("Failed to generate grammar")
 
-    tokens_file = path.join(__dir__, "pegjava", "Tokens")
-    with open(tokens_file, "r") as tok_file:
+    with open(args.tokens_file, "r") as tok_file:
         all_tokens, exact_tokens, non_exact_tokens = generate_token_definitions(tok_file)
-#    print("all_tokens")
-#    print(all_tokens)
 
-#    print("exact_tokens")
-#    print(exact_tok)
+    with open(args.output_file, "w") as file:
+        gen = JavaParserGenerator(grammar, all_tokens, exact_tokens, non_exact_tokens, file, debug=args.debug)
+        gen.generate(os.path.relpath(args.grammar_file, os.path.dirname(args.output_file)))
 
-#    print("non_exact_tokens")
-#    print(non_exact_tok)
-    output_file = path.join(__dir__, "..", "java", "com", "oracle", "graal", "python", "pegparser", "Parser.java")
-    with open(output_file, "w") as file:
-        gen: ParserGenerator = JavaParserGenerator(grammar, all_tokens, exact_tokens, non_exact_tokens, file, debug=False)
-        gen.generate(grammar_file)
-#    print("[")
-#    for rule in rules:
-#        print(f" {rule},")
-#    print("]")
-#    for rule in rules:
-#        print(rule.name, end=": ", file=sys.stderr)
-#        print(*(" ".join(alt) for alt in rule.alts), sep=" | ", file=sys.stderr)
-#
-#
-#
-#    outfile = "../src/genPythonParser/GenParser.java"
-#    print("Updating", outfile, file=sys.stderr)
-#    with open(outfile, "w") as stream:
-#        generate(rules, stream)
 
 if __name__ == '__main__':
     main()
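Net effect of the change: the script no longer hard-codes repository-relative paths and module-level verbose flags, but takes the grammar file, token definitions, and output location on the command line. Below is a minimal sketch of the resulting CLI surface; the script name in the comment is hypothetical, while the argument values reuse the paths the commit removed.

import argparse

# Mirror of the argument surface this commit introduces: three
# positional paths plus two boolean flags.
parser = argparse.ArgumentParser()
parser.add_argument("grammar_file")
parser.add_argument("tokens_file")
parser.add_argument("output_file")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--debug", action="store_true")

# Simulates an invocation such as (script name is hypothetical):
#   python main_java.py pegjava/python.gram pegjava/Tokens Parser.java --debug
args = parser.parse_args(["pegjava/python.gram", "pegjava/Tokens", "Parser.java", "--debug"])
assert args.debug and not args.verbose
print(args.grammar_file, "->", args.output_file)  # pegjava/python.gram -> Parser.java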