forked from EuroEval/EuroEval
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcreate_norne.py
More file actions
157 lines (134 loc) · 5.9 KB
/
create_norne.py
File metadata and controls
157 lines (134 loc) · 5.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
# /// script
# requires-python = ">=3.10,<4.0"
# dependencies = [
# "datasets==3.5.0",
# "huggingface-hub==0.24.0",
# "pandas==2.2.0",
# "requests==2.32.3",
# "tqdm==4.67.1",
# ]
# ///
"""Create the NorNE-mini NER datasets and upload them to the HF Hub."""
import re
from collections import defaultdict
from typing import Dict, List, Union
import pandas as pd
import requests
from datasets import Split
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict
from huggingface_hub.hf_api import HfApi
from tqdm.auto import tqdm
def main() -> None:
    """Create the NorNE-mini NER datasets and upload them to the HF Hub.

    For each language variant (Bokmål ``nb`` and Nynorsk ``nn``) the UD
    CoNLL-U files are downloaded, parsed into token/NER-tag records, merged,
    deduplicated and re-split into train/val/test/full_train splits, then
    pushed to the Hugging Face Hub as ``EuroEval/norne-{lang}-mini``.
    """
    # Templates for the raw CoNLL-U files on GitHub; "{}" is the split name.
    base_urls = dict(
        nb="https://github.com/UniversalDependencies/UD_Norwegian-Bokmaal/raw/master/"
        "no_bokmaal-ud-{}.conllu",
        nn="https://github.com/UniversalDependencies/UD_Norwegian-Nynorsk/raw/master/"
        "no_nynorsk-ud-{}.conllu",
    )

    # Map the fine-grained NorNE tags onto the coarse LOC/PER/ORG/MISC scheme.
    ner_conversion_dict = {
        "O": "O",
        "B-LOC": "B-LOC",
        "I-LOC": "I-LOC",
        "B-PER": "B-PER",
        "I-PER": "I-PER",
        "B-ORG": "B-ORG",
        "I-ORG": "I-ORG",
        "B-MISC": "B-MISC",
        "I-MISC": "I-MISC",
        "B-GPE_LOC": "B-LOC",
        "I-GPE_LOC": "I-LOC",
        "B-GPE_ORG": "B-ORG",
        "I-GPE_ORG": "I-ORG",
        "B-PROD": "B-MISC",
        "I-PROD": "I-MISC",
        "B-DRV": "B-MISC",
        "I-DRV": "I-MISC",
        "B-EVT": "B-MISC",
        "I-EVT": "I-MISC",
    }

    # Set up the progress bar and iterate over the languages
    with tqdm(["nb", "nn"], desc="Creating NorNE-mini datasets") as pbar:
        for lang in pbar:
            # Download the three raw splits; the UD repos call "val" "dev"
            data = {
                split: _download_lines(base_urls[lang].format(remote_split))
                for split, remote_split in [
                    ("train", "train"),
                    ("val", "dev"),
                    ("test", "test"),
                ]
            }

            # Update the progress bar description
            pbar.set_description(f"Creating NorNE datasets - {lang}")

            # Parse each split into a dataframe of records
            dfs = {
                split: pd.DataFrame.from_records(
                    _parse_conllu(lines, ner_conversion_dict)
                )
                for split, lines in data.items()
            }

            # Merge the original splits and drop duplicate entries before
            # re-splitting, so no entry appears in two of the new splits
            df = pd.concat(dfs.values()).reset_index(drop=True)
            df = df.drop_duplicates().reset_index(drop=True)

            # Create the new fixed-size splits
            train_df, val_df, test_df, full_train_df = _make_splits(df)

            # Collect datasets in a dataset dictionary
            dataset = DatasetDict(
                {
                    "train": Dataset.from_pandas(train_df, split=Split.TRAIN),
                    "val": Dataset.from_pandas(val_df, split=Split.VALIDATION),
                    "test": Dataset.from_pandas(test_df, split=Split.TEST),
                    "full_train": Dataset.from_pandas(
                        full_train_df,
                        split="full_train",  # type: ignore[bad-argument-type]
                    ),
                }
            )

            # Recreate the Hub repo from scratch so stale files are removed
            dataset_id = f"EuroEval/norne-{lang}-mini"
            HfApi().delete_repo(dataset_id, repo_type="dataset", missing_ok=True)
            dataset.push_to_hub(dataset_id, private=True)


def _download_lines(url: str) -> list[str]:
    """Download a text file and return its content split into lines.

    Args:
        url: The URL to download.

    Returns:
        The lines of the downloaded file.

    Raises:
        requests.HTTPError: If the server responds with an error status.
        requests.Timeout: If the download does not complete in time.
    """
    # Timeout and status check so a failed download cannot silently turn an
    # error page (or empty body) into dataset content.
    response = requests.get(url, timeout=60)
    response.raise_for_status()
    return response.text.split("\n")


def _parse_conllu(
    lines: list[str], ner_conversion_dict: dict[str, str]
) -> list[dict[str, str | list[str]]]:
    """Parse CoNLL-U lines into records of ids, tokens, NER tags and doc text.

    Args:
        lines: The lines of a CoNLL-U file.
        ner_conversion_dict: Mapping from raw NorNE tags to coarse NER tags.

    Returns:
        One record per sentence, with `ids`, `tokens`, `ner_tags` and `doc`.
    """
    records: list[dict[str, str | list[str]]] = list()
    data_dict: dict[str, list[str]] = defaultdict(list)
    doc = ""
    for line in lines:
        if line.startswith("# text = "):
            # First line of an entry; holds the raw document text
            doc = re.sub("# text = ", "", line)
        elif line.startswith("#"):
            # All other comment lines are ignored
            continue
        elif line == "":
            # A blank line ends an entry: store it (if non-empty) and reset
            # the data dictionary and document
            if len(data_dict["tokens"]) > 0:
                records.append({**data_dict, "doc": doc})
            data_dict = defaultdict(list)
            doc = ""
        else:
            # Token line: column 0 is the id, column 1 the form, and column 9
            # (MISC) carries the `name=` NER annotation.
            data_tup = line.split("\t")
            # NOTE(review): assumes the name= entry is the last |-separated
            # field of the MISC column, as in the upstream data — confirm
            # before reusing this parser on other treebanks.
            ner_tag = data_tup[9].replace("name=", "").split("|")[-1]
            data_dict["ids"].append(data_tup[0])
            data_dict["tokens"].append(data_tup[1])
            data_dict["ner_tags"].append(ner_conversion_dict[ner_tag])
    return records


def _make_splits(
    df: pd.DataFrame,
) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Split a dataframe into (train, val, test, full_train) subsets.

    The sizes (1024/256/2048) and the random seed (4242) are fixed so the
    splits are reproducible; val/test rows are excluded from the train splits.

    Args:
        df: The merged, deduplicated dataframe.

    Returns:
        The (train, val, test, full_train) dataframes.
    """
    val_df = df.sample(n=256, random_state=4242)
    remaining = df[~df.index.isin(val_df.index)]
    test_df = remaining.sample(n=2048, random_state=4242)
    full_train_df = remaining[~remaining.index.isin(test_df.index)]
    assert isinstance(full_train_df, pd.DataFrame)
    train_df = full_train_df.sample(n=1024, random_state=4242)
    assert isinstance(train_df, pd.DataFrame)
    assert isinstance(val_df, pd.DataFrame)
    assert isinstance(test_df, pd.DataFrame)
    return train_df, val_df, test_df, full_train_df
# Allow the module to be imported without side effects; run only as a script.
if __name__ == "__main__":
    main()