# textclassification_informativeness_search.py
# -*- coding: utf-8 -*-
import sys
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
import evaluate
from sklearn.metrics import classification_report
import numpy as np
import json
from config import my_cache_dir, my_output_dir
import ray

# Initialize Ray; ignore_reinit_error makes repeated runs in one session safe
ray.init(ignore_reinit_error=True, num_cpus=4)
print("Ray initialized")

# Tokenize text from dataset
def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True)

# Metrics for evaluation
def compute_metrics(eval_pred):
    metric1 = evaluate.load("precision")
    metric2 = evaluate.load("recall")
    metric3 = evaluate.load("f1")
    metric4 = evaluate.load("accuracy")
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    precision = metric1.compute(predictions=predictions, references=labels, average="weighted")["precision"]
    recall = metric2.compute(predictions=predictions, references=labels, average="weighted")["recall"]
    f1 = metric3.compute(predictions=predictions, references=labels, average="weighted")["f1"]
    accuracy = metric4.compute(predictions=predictions, references=labels)["accuracy"]
    return {"precision": precision, "recall": recall, "f1": f1, "accuracy": accuracy}

# Encode the string class labels as integer ids
def transform_labels(label):
    label = label['informativeness']
    num = 0
    if label == 'Personal_Experience':
        num = 0
    elif label == 'Informative':
        num = 1
    elif label == 'None':
        num = 2
    return {'labels': num}

# Map integer ids back to their class names
id2label = {0: 'Personal_Experience', 1: 'Informative', 2: 'None'}

# Tokenize with fixed-length padding; truncation added so long texts
# cannot exceed the model's maximum input length
def tokenize_data(example):
    return tokenizer(example['text'], padding='max_length', truncation=True)

# Append one fold's nested metric scores to the running CV results
def sum_cv_scores(cv_results, fold_scores):
    for item in fold_scores.keys():
        if item != 'accuracy':
            for score in fold_scores[item].keys():
                cv_results[item][score].append(fold_scores[item][score])
    cv_results['accuracy'].append(fold_scores['accuracy'])
    return cv_results

# Average the accumulated scores over all folds
def mean_cv_scores(cv_results):
    for item in cv_results.keys():
        if item != 'accuracy':
            for score in cv_results[item].keys():
                cv_results[item][score] = sum(cv_results[item][score]) / len(cv_results[item][score])
    cv_results['accuracy'] = sum(cv_results['accuracy']) / len(cv_results['accuracy'])
    return cv_results
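
# Illustrative sketch (not executed): how the two helpers above would
# aggregate per-fold reports from sklearn's classification_report with
# output_dict=True; `fold_reports` is a hypothetical list with one
# report per CV fold.
#
#   cv_results = {k: ({s: [] for s in v} if isinstance(v, dict) else [])
#                 for k, v in fold_reports[0].items()}
#   for report in fold_reports:
#       cv_results = sum_cv_scores(cv_results, report)
#   print(mean_cv_scores(cv_results))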

# A fresh model is built for every hyperparameter-search trial
def model_init():
    return AutoModelForSequenceClassification.from_pretrained(
        checkpoint, num_labels=3, cache_dir=my_cache_dir)
    #, return_dict=True)

# Optuna-style search space, sampled once per trial
def my_hp_space(trial):
    # gc.collect()
    # torch.cuda.empty_cache()
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-5, 1e-2, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 15),
        "seed": trial.suggest_int("seed", 1, 40),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32]),
        "weight_decay": trial.suggest_float("weight_decay", 1e-12, 1e-1, log=True),
        "adam_epsilon": trial.suggest_float("adam_epsilon", 1e-10, 1e-6, log=True),
        "gradient_accumulation_steps": trial.suggest_categorical("gradient_accumulation_steps", [1, 2, 4, 8, 16]),
    }
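
# Note: the effective batch size is per_device_train_batch_size ×
# gradient_accumulation_steps, so the space above spans 4 to 512.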

# Objective to maximize: the weighted F1 returned by compute_metrics
def my_objective(metrics):
    return metrics['eval_f1']

#########################################################################################################
#
# Hyperparameter search on fold 0 of the 5-fold cross-validation split
#
#########################################################################################################

if __name__ == "__main__":
    # Model checkpoints to search are passed as command-line arguments
    model_list = sys.argv[1:]
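    # Example invocation (model names are illustrative):
    #   python textclassification_informativeness_search.py bert-base-uncased roberta-base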
    for checkpoint in model_list:
        print("****************************Training of {}****************************".format(checkpoint))
        num_lab = 3
        # Load the pre-split fold (fold 0); load_dataset places the JSON
        # contents under the 'train' split key
        dataset_train = load_dataset("json", data_files="data/train_cv0")
        dataset_valid = load_dataset("json", data_files="data/val_cv0")
        dataset_train = dataset_train['train']
        dataset_valid = dataset_valid['train']
        tokenizer = AutoTokenizer.from_pretrained(checkpoint, cache_dir=my_cache_dir)
        dataset_train = dataset_train.map(tokenize_data, batched=True)
        dataset_valid = dataset_valid.map(tokenize_data, batched=True)
        valid_labels = dataset_valid["informativeness"]
        # Keep only the tokenized inputs and the integer labels
        remove_columns = ['title', 'tokens', 'named_entity_recognition', 'relations', 'informativeness', 'topic', 'credibility']
        dataset_train = dataset_train.map(transform_labels, remove_columns=remove_columns)
        dataset_valid = dataset_valid.map(transform_labels, remove_columns=remove_columns)
        training_args = TrainingArguments(
            evaluation_strategy="steps",
            eval_steps=500,
            disable_tqdm=True,
            output_dir=my_output_dir,
        )
        trainer = Trainer(
            args=training_args,
            train_dataset=dataset_train,
            eval_dataset=dataset_valid,
            tokenizer=tokenizer,
            model_init=model_init,
            compute_metrics=compute_metrics,
        )
        # Search 200 trials for the configuration that maximizes weighted F1
        result = trainer.hyperparameter_search(direction="maximize", n_trials=200,
                                               hp_space=my_hp_space, compute_objective=my_objective)
        print(result)
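        # `result` is a transformers BestRun: result.hyperparameters holds
        # the winning trial's settings and result.objective its eval_f1.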
        # scores = open("bestrun/informativeness_{}.txt".format(checkpoint.split("/")[-1]), "w", encoding="utf-8")
        # scores.write(str(result))
        # scores.close()