forked from EuroEval/EuroEval
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcreate_dane.py
More file actions
115 lines (94 loc) · 3.98 KB
/
create_dane.py
File metadata and controls
115 lines (94 loc) · 3.98 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
# /// script
# requires-python = ">=3.10,<4.0"
# dependencies = [
# "datasets==3.5.0",
# "huggingface-hub==0.24.0",
# "pandas==2.2.0",
# "requests==2.32.3",
# ]
# ///
"""Create the DaNE-mini NER dataset and upload it to the HF Hub."""
import re
from collections import defaultdict
import pandas as pd
import requests
from datasets import Split
from datasets.arrow_dataset import Dataset
from datasets.dataset_dict import DatasetDict
from huggingface_hub.hf_api import HfApi
def main() -> None:
    """Create the DaNE-mini NER dataset and upload it to the HF Hub."""
    # Define download URLs
    # TODO: This is the wrong URL; use the alexandra URL instead
    base_url = (
        "https://github.com/UniversalDependencies/UD_Danish-DDT/raw/master/"
        "da_ddt-ud-{}.conllu"
    )

    # Download and parse each original split; the upstream file for the
    # validation split is named "dev"
    split_to_conllu_name = dict(train="train", val="dev", test="test")
    dfs = {
        split: pd.DataFrame.from_records(
            _parse_conllu(lines=_download_lines(url=base_url.format(conllu_name)))
        )
        for split, conllu_name in split_to_conllu_name.items()
    }

    # Merge the dataframes and remove duplicates, so that the new splits drawn
    # below are guaranteed to be disjoint
    df = pd.concat(dfs.values()).reset_index(drop=True)
    df = df.drop_duplicates().reset_index(drop=True)

    # Create new splits: 256 val, 2,048 test, and 1,024 train sampled from the
    # remaining examples (the full remainder is kept as `full_train`)
    val_df = df.sample(n=256, random_state=4242)
    df_filtered = df.loc[~df.index.isin(val_df.index)]
    test_df = df_filtered.sample(n=2048, random_state=4242)
    full_train_df = df_filtered.loc[~df_filtered.index.isin(test_df.index)]
    train_df = full_train_df.sample(n=1024, random_state=4242)

    # Collect datasets in a dataset dictionary
    dataset = DatasetDict(
        train=Dataset.from_pandas(train_df, split=Split.TRAIN),
        val=Dataset.from_pandas(val_df, split=Split.VALIDATION),
        test=Dataset.from_pandas(test_df, split=Split.TEST),
        full_train=Dataset.from_pandas(full_train_df, split="full_train"),  # type: ignore[bad-argument-type]
    )

    # Create dataset ID
    dataset_id = "EuroEval/dane-mini"

    # Remove the dataset from Hugging Face Hub if it already exists
    HfApi().delete_repo(dataset_id, repo_type="dataset", missing_ok=True)

    # Push the dataset to the Hugging Face Hub
    dataset.push_to_hub(dataset_id, private=True)


def _download_lines(url: str) -> list[str]:
    """Download a text file and return its lines.

    Args:
        url: The URL to download.

    Returns:
        The lines of the downloaded file.

    Raises:
        requests.HTTPError: If the download failed.
    """
    response = requests.get(url, timeout=60)
    # Fail loudly on e.g. a 404 instead of silently parsing an error page
    response.raise_for_status()
    return response.text.split("\n")


def _parse_conllu(lines: list[str]) -> list[dict[str, str | list[int | str]]]:
    """Parse CoNLL-U lines into NER records.

    Args:
        lines: The lines of a CoNLL-U file.

    Returns:
        A list of records, each having `ids`, `tokens`, `ner_tags` and `doc`
        keys.
    """
    # Initialise the records, data dictionary and document
    records: list[dict[str, str | list[int | str]]] = list()
    data_dict: dict[str, list[int | str]] = defaultdict(list)
    doc = ""

    for line in lines:
        # If we are at the first line of an entry then extract the document
        if line.startswith("# text = "):
            doc = line.removeprefix("# text = ")

        # Otherwise, if the line is a comment then ignore it
        elif line.startswith("#"):
            continue

        # Otherwise, if we have reached the end of an entry then store it to
        # the list of records and reset the data dictionary and document
        elif line == "":
            if len(data_dict["tokens"]) > 0:
                merged_data_dict: dict[str, str | list[int | str]]
                merged_data_dict = {**data_dict, "doc": doc}
                records.append(merged_data_dict)
                data_dict = defaultdict(list)
                doc = ""

        # Otherwise we are in the middle of an entry which is not a comment,
        # so we extract the data from the line and store it in the data
        # dictionary. The NER tag lives in the MISC column (index 9) as
        # `name=<tag>|...`
        else:
            data_tup = line.split("\t")
            ner_tag = data_tup[9].replace("name=", "").split("|")[0]
            data_dict["ids"].append(data_tup[0])
            data_dict["tokens"].append(data_tup[1])
            data_dict["ner_tags"].append(ner_tag)

    return records
# Run the script only when executed directly, not when imported as a module
if __name__ == "__main__":
    main()