forked from EuroEval/EuroEval
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcreate_fullstack_ner.py
More file actions
252 lines (199 loc) · 7.44 KB
/
create_fullstack_ner.py
File metadata and controls
252 lines (199 loc) · 7.44 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
# /// script
# requires-python = ">=3.10,<4.0"
# dependencies = [
# "datasets==3.5.0",
# "huggingface-hub==0.24.0",
# "pandas==2.2.0",
# "requests==2.32.3",
# ]
# ///
"""Create and upload the FullStack-LV-mini NER dataset from CoNLL-U format."""
import glob
import logging
import os
import shutil
import subprocess
from collections import defaultdict
from pathlib import Path
import pandas as pd
from datasets import Dataset, DatasetDict, Split
from huggingface_hub import HfApi
logger = logging.getLogger(__name__)
def main() -> None:
    """Create the FullStack-LV-mini NER dataset and upload it to the HF Hub.

    Steps:
        1. Clone the LUMII-AILab/FullStack repository.
        2. Parse all CoNLL-2003 files into sentence records.
        3. Map the corpus-specific NER labels to the standard
           PER/ORG/LOC/MISC tag set.
        4. Create train/val/test splits and push them to the Hub.
        5. Delete the cloned repository.
    """
    # Configure logging so the `logger.info` calls in the helper functions
    # are actually emitted when this file is run as a script; without this,
    # the root logger's default WARNING level swallows them.
    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

    # Clone the FullStack repository
    repo_path = clone_fullstack_repository()

    # Load and parse the data
    records = load_fullstack_data(repo_path=repo_path)

    # Convert to DataFrame
    df = pd.DataFrame.from_records(records)

    # Map the corpus labels to the standard NER tags. Categories with no
    # standard counterpart (entity/event/product/money/time) collapse to
    # MISC, and geopolitical entities are treated as locations.
    label_mapping = {
        "O": "O",
        "B-person": "B-PER",
        "I-person": "I-PER",
        "B-organization": "B-ORG",
        "I-organization": "I-ORG",
        "B-location": "B-LOC",
        "I-location": "I-LOC",
        "B-GPE": "B-LOC",  # Geopolitical entity -> Location
        "I-GPE": "I-LOC",
        "B-entity": "B-MISC",  # Generic entity -> Miscellaneous
        "I-entity": "I-MISC",
        "B-event": "B-MISC",  # Event -> Miscellaneous
        "I-event": "I-MISC",
        "B-product": "B-MISC",  # Product -> Miscellaneous
        "I-product": "I-MISC",
        "B-money": "B-MISC",  # Money -> Miscellaneous
        "I-money": "I-MISC",
        "B-time": "B-MISC",  # Time -> Miscellaneous
        "I-time": "I-MISC",
    }
    df["labels"] = df["labels"].map(
        lambda ner_tags: [label_mapping[ner_tag] for ner_tag in ner_tags]
    )

    # Create splits. The validation and test sets are sampled first and
    # removed from the pool so the three splits are disjoint; the fixed
    # random_state makes the sampling reproducible.
    val_size = 256
    test_size = 2048
    train_size = 1024

    val_df = df.sample(n=val_size, random_state=4242)
    df_filtered = df[~df.index.isin(val_df.index)]

    test_df = df_filtered.sample(n=test_size, random_state=4242)
    df_filtered = df_filtered[~df_filtered.index.isin(test_df.index)]

    assert isinstance(df_filtered, pd.DataFrame)
    train_df = df_filtered.sample(n=train_size, random_state=4242)

    # Reset indices
    train_df = train_df.reset_index(drop=True)
    val_df = val_df.reset_index(drop=True)
    test_df = test_df.reset_index(drop=True)
    assert isinstance(train_df, pd.DataFrame)
    assert isinstance(val_df, pd.DataFrame)
    assert isinstance(test_df, pd.DataFrame)

    # Collect datasets in a dataset dictionary
    dataset = DatasetDict(
        {
            "train": Dataset.from_pandas(train_df, split=Split.TRAIN),
            "val": Dataset.from_pandas(val_df, split=Split.VALIDATION),
            "test": Dataset.from_pandas(test_df, split=Split.TEST),
        }
    )

    # Create dataset ID
    dataset_id = "EuroEval/fullstack-ner-lv-mini"

    # Remove the dataset from Hugging Face Hub if it already exists
    HfApi().delete_repo(dataset_id, repo_type="dataset", missing_ok=True)

    # Push the dataset to the Hugging Face Hub
    dataset.push_to_hub(dataset_id, private=True)

    # Delete the repository
    delete_fullstack_repository(repo_path=repo_path)
def clone_fullstack_repository(repo_name: str = "FullStack") -> Path:
    """Clone the FullStack repository if it doesn't already exist.

    Args:
        repo_name:
            Name of the directory to clone into.

    Returns:
        Path to the cloned repository.

    Raises:
        RuntimeError:
            If `git clone` exits with a non-zero status.
    """
    if not Path(repo_name).exists():
        # Use the module logger instead of print() for consistency with the
        # rest of this script's status messages.
        logger.info("Cloning FullStack repository...")
        try:
            subprocess.run(
                [
                    "git",
                    "clone",
                    "https://github.com/LUMII-AILab/FullStack.git",
                    repo_name,
                ],
                check=True,
                capture_output=True,
                text=True,
            )
            logger.info("Successfully cloned repository")
        except subprocess.CalledProcessError as e:
            # Surface git's stderr, which holds the actual failure reason
            raise RuntimeError(f"Failed to clone repository: {e.stderr}") from e
    else:
        logger.info(f"Repository '{repo_name}' already exists, using existing copy")
    return Path(repo_name)
def load_fullstack_data(repo_path: Path) -> list[dict[str, list[str] | str]]:
"""Load and parse all FullStack NER data from the specified repository path.
Args:
repo_path:
Path to the FullStack repository directory.
Returns:
A list of sentence records.
"""
# Path to the data directory within the repository
data_dir = repo_path / "NamedEntities" / "data"
if not data_dir.exists():
raise FileNotFoundError(
f"Data directory '{data_dir}' not found in the repository."
)
# Find all .conll2003 files in the directory
conll_files = glob.glob(os.path.join(data_dir, "*.conll2003"))
all_records = []
for file_path in sorted(conll_files):
# Read the file
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
# Parse the CoNLL-U data
records = parse_conllu_data(content)
all_records.extend(records)
return all_records
def parse_conllu_data(raw_data: str) -> list[dict[str, list[str] | str]]:
"""Parse CoNLL-U format data and return a list of sentence records.
Args:
raw_data: The raw data in CoNLL-U format.
Returns:
A list of sentence records.
"""
records = []
lines = raw_data.strip().split("\n")
# Initialize data dictionary for current sentence
data_dict: dict[str, list[str]] = defaultdict(list) # type: ignore[assignment]
for line in lines:
line = line.strip()
# Skip comments (lines starting with #)
if line.startswith("#"):
continue
# Empty line indicates end of sentence
elif line == "":
if len(data_dict["tokens"]) > 0:
# Create record with tokens, labels, and text
record = {
"tokens": data_dict["tokens"],
"labels": data_dict["labels"],
"text": " ".join(data_dict["tokens"]),
}
records.append(record)
# Reset for next sentence
data_dict = defaultdict(list)
# Parse token line
else:
columns = line.split("\t")
if len(columns) >= 7: # Ensure we have enough columns
token_id = columns[0]
token = columns[1]
# Use column 6 for NER tags (0-indexed, so columns[6] is column 7)
ner_tag = columns[6] if len(columns) > 6 else "O"
# Skip multi-word tokens (those with "-" in ID)
if "-" not in token_id and "." not in token_id:
data_dict["tokens"].append(token)
data_dict["labels"].append(ner_tag)
# Handle last sentence if data doesn't end with empty line
if len(data_dict["tokens"]) > 0:
record = {
"tokens": data_dict["tokens"],
"labels": data_dict["labels"],
"text": " ".join(data_dict["tokens"]),
}
records.append(record)
return records # type: ignore[return-value]
def delete_fullstack_repository(repo_path: Path) -> None:
    """Delete the FullStack repository.

    Args:
        repo_path:
            Path to the FullStack repository. A no-op when the path does
            not exist.
    """
    if not repo_path.exists():
        return
    shutil.rmtree(repo_path)
if __name__ == "__main__":
main()