forked from EuroEval/EuroEval
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcreate_kpwr_ner.py
More file actions
146 lines (118 loc) · 4.81 KB
/
create_kpwr_ner.py
File metadata and controls
146 lines (118 loc) · 4.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
# /// script
# requires-python = ">=3.10,<4.0"
# dependencies = [
# "datasets==3.5.0",
# "huggingface-hub==0.24.0",
# "pandas==2.2.0",
# "requests==2.32.3",
# ]
# ///
"""Create the KPWr NER dataset and upload it to the HF Hub."""
import pandas as pd
from datasets import Dataset, DatasetDict, Split, load_dataset
from huggingface_hub import HfApi
def main() -> None:
    """Create the KPWr NER dataset and uploads it to the HF Hub."""
    # Source dataset on the Hugging Face Hub
    repo_id = "clarin-pl/kpwr-ner"

    # Fetch the raw dataset (requires an authenticated HF token)
    raw = load_dataset(path=repo_id, token=True)
    assert isinstance(raw, DatasetDict)

    # Turn each split into a pandas dataframe
    train_df = raw["train"].to_pandas()
    test_df = raw["test"].to_pandas()
    assert isinstance(train_df, pd.DataFrame)
    assert isinstance(test_df, pd.DataFrame)

    # Keep only the token and NER-tag columns
    surplus = [col for col in train_df.columns if col not in ["tokens", "ner"]]
    train_df.drop(columns=surplus, inplace=True)
    test_df.drop(columns=surplus, inplace=True)

    # Build a whitespace-joined `text` column from the tokens
    for frame in (train_df, test_df):
        frame["text"] = frame["tokens"].map(" ".join)

    # Rename the tag column to the EuroEval-standard `labels`
    train_df.rename(columns={"ner": "labels"}, inplace=True)
    test_df.rename(columns={"ner": "labels"}, inplace=True)

    # Translate integer NER tags into BIO label strings; unknown IDs
    # fall back to the outside tag
    names = raw["train"].features["ner"].feature.names
    id_to_label = create_label_mapping(names)

    def to_strings(tags) -> list[str]:
        return [id_to_label.get(tag, "O") for tag in tags]

    train_df["labels"] = train_df["labels"].map(to_strings)
    test_df["labels"] = test_df["labels"].map(to_strings)

    # Carve a fixed-size validation split out of the training data
    val_df = train_df.sample(n=256, random_state=4242)

    # The remaining training samples form the (capped) train split
    leftover = train_df.drop(val_df.index.tolist())
    train_df = leftover.sample(n=min(1024, len(leftover)), random_state=4242)

    # Cap the test split as well
    test_df = test_df.sample(n=min(2048, len(test_df)), random_state=4242)

    # Re-index all splits from zero
    train_df = train_df.reset_index(drop=True)
    val_df = val_df.reset_index(drop=True)
    test_df = test_df.reset_index(drop=True)
    assert isinstance(train_df, pd.DataFrame)
    assert isinstance(val_df, pd.DataFrame)
    assert isinstance(test_df, pd.DataFrame)

    # Assemble the final dataset dictionary
    final = DatasetDict(
        {
            "train": Dataset.from_pandas(train_df, split=Split.TRAIN),
            "val": Dataset.from_pandas(val_df, split=Split.VALIDATION),
            "test": Dataset.from_pandas(test_df, split=Split.TEST),
        }
    )

    # Target repository; wipe any stale copy before pushing the new one
    dataset_id = "EuroEval/kpwr-ner"
    HfApi().delete_repo(dataset_id, repo_type="dataset", missing_ok=True)
    final.push_to_hub(dataset_id, private=True)
def create_label_mapping(label_names: list[str]) -> dict[int, str]:
    """Create mapping from KPWr labels to standard BIO labels.

    Collapses KPWr's fine-grained categories into the four CoNLL entity
    types (PER / LOC / ORG / MISC) while preserving the B-/I- prefix.

    Args:
        label_names: The list of label names.

    Returns:
        The mapping from KPWr labels to standard BIO labels. Every index
        in `label_names` is guaranteed to have an entry; non-BIO labels
        (including "O") map to "O".
    """
    # KPWr category substring -> coarse CoNLL entity type
    category_to_entity = {
        "nam_liv": "PER",  # living beings (persons, characters, etc.)
        "nam_loc": "LOC",  # locations (cities, countries, etc.)
        "nam_org": "ORG",  # organizations (companies, institutions, etc.)
    }
    mapping: dict[int, str] = {}
    for i, label_name in enumerate(label_names):
        prefix, sep, _ = label_name.partition("-")
        if not sep or prefix not in ("B", "I"):
            # "O", or a malformed label without a B-/I- prefix. The previous
            # implementation left prefix-less entity labels unmapped entirely
            # (its inner if/elif had no fallback); mapping them to "O" here
            # matches the `.get(..., "O")` default used by callers, so
            # end-to-end behaviour is unchanged while the mapping is complete.
            mapping[i] = "O"
            continue
        # First matching category wins (same priority order as before);
        # everything else (events, products, etc.) becomes MISC.
        entity = next(
            (ent for cat, ent in category_to_entity.items() if cat in label_name),
            "MISC",
        )
        mapping[i] = f"{prefix}-{entity}"
    return mapping
# Run the full build-and-upload pipeline when executed as a script.
if __name__ == "__main__":
    main()