My config file is below, in case it's of any use:

[paths]
train = null
dev = null
raw_text = null
vectors = "en_core_web_lg"
init_tok2vec = null

[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null

[system]
gpu_allocator = null
seed = 0

[nlp]
lang = "en"
pipeline = ["tok2vec","ner","textcat"]
batch_size = 1000
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}

[components]

[components.ner]
factory = "ner"
incorrect_spans_key = null
moves = null
scorer = {"@scorers":"spacy.ner_scorer.v1"}
update_with_oracle_c…
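
A minimal sketch for sanity-checking a config like this, assuming spaCy v3 is installed and the full (untruncated) config is saved as config.cfg — the filename here is just an example:

from spacy import util

# Parse the .cfg file, then build the pipeline from it.
# auto_fill=True fills in any settings not spelled out above with spaCy's defaults.
config = util.load_config("config.cfg")
nlp = util.load_model_from_config(config, auto_fill=True)

# Should print ['tok2vec', 'ner', 'textcat'] for the pipeline defined above.
print(nlp.pipe_names)

Since paths.train and paths.dev are null in this config, the training data would still need to be supplied as overrides when training, for example via --paths.train and --paths.dev on the spacy train command line.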
