-
-
Notifications
You must be signed in to change notification settings - Fork 473
Expand file tree
/
Copy pathproject.yml
More file actions
139 lines (126 loc) · 7.55 KB
/
project.yml
File metadata and controls
139 lines (126 loc) · 7.55 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
title: "Universal Dependencies v2.5 Benchmarks"
# Folded block scalar (>-): lines are joined with single spaces and the
# trailing newline is stripped, so the parsed value is identical to the
# original single-line string.
description: >-
  This project template lets you train a spaCy pipeline on any
  [Universal Dependencies](https://universaldependencies.org/) corpus (v2.5)
  for benchmarking purposes. The pipeline includes an experimental trainable
  tokenizer, an experimental edit tree lemmatizer, and the standard spaCy
  tagger, morphologizer and dependency parser components. The CoNLL 2018
  evaluation script is used to evaluate the pipeline. The template uses the
  [`UD_English-EWT`](https://github.com/UniversalDependencies/UD_English-EWT)
  treebank by default, but you can swap it out for any other available
  treebank. Just make sure to adjust the `ud_treebank` and `spacy_lang`
  settings in the config. Use `xx` (multi-language) for `spacy_lang` if a
  particular language is not supported by spaCy. The tokenizer in particular
  is only intended for use in this generic benchmarking setup. It is not
  optimized for speed and it does not perform particularly well for languages
  without space-separated tokens. In production, custom rules for spaCy's
  rule-based tokenizer or a language-specific word segmenter such as jieba
  for Chinese or sudachipy for Japanese would be recommended instead.
# Variables can be referenced across the project.yml using ${vars.var_name}
vars:
  ud_treebank: "UD_English-EWT"
  # use "xx" for a language not currently supported by spaCy
  spacy_lang: "en"
  package_name: "udv25_englishewt_trf"
  transformer_model: "xlm-roberta-base"
  # GPU device ID used by the training commands; the evaluate commands run
  # on CPU (--gpu-id -1) regardless of this setting.
  gpu: 0
  mixed_precision: true
  package_version: "0.0.1"

# These are the directories that the project needs. The project CLI will make
# sure that they always exist.
# NOTE(review): "corpus" added — the convert command writes to
# corpus/${vars.ud_treebank}/ and the clean command removes corpus/*, but the
# directory was not declared here.
directories: ["assets", "corpus", "training", "metrics", "packages"]
# Downloadable assets fetched via `spacy project assets` and verified
# against the checksum below.
assets:
  - dest: "assets/ud-treebanks-v2.5.tgz"
    # Official UD v2.5 treebank release hosted by the LINDAT/CLARIAH-CZ
    # repository.
    url: "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-3105/ud-treebanks-v2.5.tgz?sequence=1&isAllowed=y"
    # 32 hex chars — presumably MD5; confirm against the repository's
    # published checksum.
    checksum: "388456892760ada0db8e20ce58501025"
# Workflows group commands to run in order, e.g. `spacy project run all`.
workflows:
  all:
    - extract
    - convert
    - train-tokenizer
    - train-transformer
    - assemble
    - evaluate
    - evaluate-with-senter
    - package
# Commands are run via `spacy project run [name]`; deps/outputs let the CLI
# skip commands whose outputs are already up to date.
commands:
  - name: extract
    help: "Extract the data"
    script:
      # Unpack the downloaded treebank archive into assets/
      - "tar xf assets/ud-treebanks-v2.5.tgz -C assets/"
    deps:
      - "assets/ud-treebanks-v2.5.tgz"
    outputs:
      - "assets/ud-treebanks-v2.5/"
- name: convert
help: "Convert the data to spaCy's format"
script:
- "python scripts/copy_files.py train conllu assets/ud-treebanks-v2.5/${vars.ud_treebank}/ corpus/${vars.ud_treebank}/train/"
- "python scripts/copy_files.py dev conllu assets/ud-treebanks-v2.5/${vars.ud_treebank}/ corpus/${vars.ud_treebank}/dev/"
- "python -m spacy convert corpus/${vars.ud_treebank}/train/ corpus/${vars.ud_treebank}/ --converter conllu -n 10 -T -C"
- "python -m spacy convert corpus/${vars.ud_treebank}/dev/ corpus/${vars.ud_treebank}/ --converter conllu -n 10 -T -C"
deps:
- "assets/ud-treebanks-v2.5/"
outputs:
- "corpus/${vars.ud_treebank}/train.spacy"
- "corpus/${vars.ud_treebank}/dev.spacy"
- name: train-tokenizer
help: "Train tokenizer"
script:
- "python -m spacy train configs/tokenizer.cfg -o training/${vars.ud_treebank}/tokenizer --gpu-id ${vars.gpu} --nlp.lang ${vars.spacy_lang} --paths.train corpus/${vars.ud_treebank}/train.spacy --paths.dev corpus/${vars.ud_treebank}/dev.spacy"
deps:
- "corpus/${vars.ud_treebank}/train.spacy"
- "corpus/${vars.ud_treebank}/dev.spacy"
- "configs/tokenizer.cfg"
outputs:
- "training/${vars.ud_treebank}/tokenizer/model-best"
- name: train-transformer
help: "Train transformer"
script:
- "python -m spacy train configs/transformer.cfg -o training/${vars.ud_treebank}/transformer --gpu-id ${vars.gpu} --nlp.lang ${vars.spacy_lang} --paths.train corpus/${vars.ud_treebank}/train.spacy --paths.dev corpus/${vars.ud_treebank}/dev.spacy --components.experimental_char_ner_tokenizer.source training/${vars.ud_treebank}/tokenizer/model-best --components.transformer.model.name ${vars.transformer_model} --components.transformer.model.mixed_precision ${vars.mixed_precision}"
deps:
- "corpus/${vars.ud_treebank}/train.spacy"
- "corpus/${vars.ud_treebank}/dev.spacy"
- "configs/transformer.cfg"
- "training/${vars.ud_treebank}/tokenizer/model-best"
outputs:
- "training/${vars.ud_treebank}/transformer/model-best"
- name: assemble
help: "Assemble full pipeline"
script:
- "python -m spacy assemble configs/assemble.cfg training/${vars.ud_treebank}/final --nlp.lang ${vars.spacy_lang} --paths.train corpus/${vars.ud_treebank}/train.spacy --paths.dev corpus/${vars.ud_treebank}/dev.spacy --paths.tokenizer_source training/${vars.ud_treebank}/tokenizer/model-best --paths.transformer_source training/${vars.ud_treebank}/transformer/model-best"
deps:
- "corpus/${vars.ud_treebank}/train.spacy"
- "corpus/${vars.ud_treebank}/dev.spacy"
- "configs/assemble.cfg"
- "training/${vars.ud_treebank}/tokenizer/model-best"
- "training/${vars.ud_treebank}/transformer/model-best"
outputs:
- "training/${vars.ud_treebank}/final"
- name: evaluate
help: "Evaluate on the test data and save the metrics"
script:
# not on GPU to avoid RAM issues on long test texts
- "python scripts/evaluate.py training/${vars.ud_treebank}/final assets/ud-treebanks-v2.5/${vars.ud_treebank}/ --output metrics/${vars.ud_treebank}.default --gpu-id -1"
# for very large corpora like UD_German-HDT, you can split the test texts
# with the rule-based sentencizer into smaller texts for evaluation
#- "python scripts/evaluate.py training/${vars.ud_treebank}/final assets/ud-treebanks-v2.5/${vars.ud_treebank}/ --output metrics/${vars.ud_treebank}.default --gpu-id -1 --sents-per-text 500"
deps:
- "training/${vars.ud_treebank}/final"
- "assets/ud-treebanks-v2.5/${vars.ud_treebank}/"
outputs:
- "metrics/${vars.ud_treebank}.default.txt"
- "metrics/${vars.ud_treebank}.default.json"
- name: evaluate-with-senter
help: "Evaluate on the test data and save the metrics"
script:
# not on GPU to avoid RAM issues on long test texts
- "python scripts/evaluate.py training/${vars.ud_treebank}/final assets/ud-treebanks-v2.5/${vars.ud_treebank}/ --output metrics/${vars.ud_treebank}.senter --gpu-id -1 --enable-senter"
# for very large corpora like UD_German-HDT, you can split the test texts
# with the rule-based sentencizer into smaller texts for evaluation
#- "python scripts/evaluate.py training/${vars.ud_treebank}/final assets/ud-treebanks-v2.5/${vars.ud_treebank}/ --output metrics/${vars.ud_treebank}.senter --gpu-id -1 --enable-senter --sents-per-text 500"
deps:
- "training/${vars.ud_treebank}/final"
- "assets/ud-treebanks-v2.5/${vars.ud_treebank}/"
outputs:
- "metrics/${vars.ud_treebank}.senter.txt"
- "metrics/${vars.ud_treebank}.senter.json"
- name: package
help: "Package the trained model so it can be installed"
script:
- "cp assets/ud-treebanks-v2.5/${vars.ud_treebank}/LICENSE.txt training/${vars.ud_treebank}/final"
- "python -m spacy package training/${vars.ud_treebank}/final packages --name ${vars.package_name} --version ${vars.package_version} --force"
deps:
- "training/${vars.ud_treebank}/final"
outputs_no_cache:
- "packages/${vars.package_name}-${vars.package_version}/dist/${vars.package_name}-${vars.package_version}.tar.gz"
- name: clean
help: "Remove intermediate files"
script:
- "rm -rf training/*"
- "rm -rf metrics/*"
- "rm -rf corpus/*"