Commit 7dd809a

fix bugs from test3.9

1 parent d8ce095 commit 7dd809a

File tree: 18 files changed, +287 -75 lines
datasets/Netflix_deeprec/netflix_data_convert.py

Lines changed: 204 additions & 0 deletions
@@ -0,0 +1,204 @@
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import listdir, path, makedirs
import random
import sys
import time
import datetime


def print_stats(data):
    total_ratings = 0
    print("STATS")
    for user in data:
        total_ratings += len(data[user])
    print("Total Ratings: {}".format(total_ratings))
    print("Total User count: {}".format(len(data.keys())))


def save_data_to_file(data, filename):
    with open(filename, 'w') as out:
        for userId in data:
            for record in data[userId]:
                out.write("{}\t{}\t{}\n".format(userId, record[0], record[1]))


def create_NETFLIX_data_timesplit(all_data, train_min, train_max, test_min,
                                  test_max):
    """
    Creates a time-based split of NETFLIX data into train and (validation, test).
    :param all_data: dict of userId -> list of (itemId, rating, timestamp)
    :param train_min: start date of the training window, "YYYY-MM-DD"
    :param train_max: end date of the training window, "YYYY-MM-DD"
    :param test_min: start date of the test window, "YYYY-MM-DD"
    :param test_max: end date of the test window, "YYYY-MM-DD"
    :return: (training_data, validation_data, test_data)
    """
    train_min_ts = time.mktime(
        datetime.datetime.strptime(train_min, "%Y-%m-%d").timetuple())
    train_max_ts = time.mktime(
        datetime.datetime.strptime(train_max, "%Y-%m-%d").timetuple())
    test_min_ts = time.mktime(
        datetime.datetime.strptime(test_min, "%Y-%m-%d").timetuple())
    test_max_ts = time.mktime(
        datetime.datetime.strptime(test_max, "%Y-%m-%d").timetuple())

    training_data = dict()
    validation_data = dict()
    test_data = dict()

    train_set_items = set()

    for userId, userRatings in all_data.items():
        time_sorted_ratings = sorted(
            userRatings, key=lambda x: x[2])  # sort by timestamp
        for rating_item in time_sorted_ratings:
            if rating_item[2] >= train_min_ts and rating_item[
                    2] <= train_max_ts:
                if userId not in training_data:
                    training_data[userId] = []
                training_data[userId].append(rating_item)
                train_set_items.add(
                    rating_item[0])  # keep track of items from training set
            elif rating_item[2] >= test_min_ts and rating_item[
                    2] <= test_max_ts:
                if userId not in training_data:
                    # only include users seen in the training set
                    continue
                p = random.random()
                if p <= 0.5:
                    if userId not in validation_data:
                        validation_data[userId] = []
                    validation_data[userId].append(rating_item)
                else:
                    if userId not in test_data:
                        test_data[userId] = []
                    test_data[userId].append(rating_item)

    # remove items not seen in the training set
    for userId, userRatings in test_data.items():
        test_data[userId] = [
            rating for rating in userRatings if rating[0] in train_set_items
        ]
    for userId, userRatings in validation_data.items():
        validation_data[userId] = [
            rating for rating in userRatings if rating[0] in train_set_items
        ]

    return training_data, validation_data, test_data


def main(args):
    user2id_map = dict()
    item2id_map = dict()
    userId = 0
    itemId = 0
    all_data = dict()

    folder = args[1]
    out_folder = args[2]
    # create necessary folders:
    for output_dir in [(out_folder + f)
                       for f in ["/NF_TRAIN", "/NF_VALID", "/NF_TEST"]]:
        makedirs(output_dir, exist_ok=True)

    text_files = [
        path.join(folder, f) for f in listdir(folder)
        if path.isfile(path.join(folder, f)) and ('.txt' in f)
    ]

    for text_file in text_files:
        with open(text_file, 'r') as f:
            print("Processing: {}".format(text_file))
            lines = f.readlines()
            item = int(lines[0][:-2])  # remove newline and :
            if item not in item2id_map:
                item2id_map[item] = itemId
                itemId += 1

            for rating in lines[1:]:
                parts = rating.strip().split(",")
                user = int(parts[0])
                if user not in user2id_map:
                    user2id_map[user] = userId
                    userId += 1
                rating = float(parts[1])
                ts = int(
                    time.mktime(
                        datetime.datetime.strptime(parts[2], "%Y-%m-%d")
                        .timetuple()))
                if user2id_map[user] not in all_data:
                    all_data[user2id_map[user]] = []
                all_data[user2id_map[user]].append(
                    (item2id_map[item], rating, ts))

    print("STATS FOR ALL INPUT DATA")
    print_stats(all_data)

    # Netflix full
    (nf_train, nf_valid, nf_test) = create_NETFLIX_data_timesplit(
        all_data, "1999-12-01", "2005-11-30", "2005-12-01", "2005-12-31")
    print("Netflix full train")
    print_stats(nf_train)
    save_data_to_file(nf_train, out_folder + "/NF_TRAIN/nf.train.txt")
    print("Netflix full valid")
    print_stats(nf_valid)
    save_data_to_file(nf_valid, out_folder + "/NF_VALID/nf.valid.txt")
    print("Netflix full test")
    print_stats(nf_test)
    save_data_to_file(nf_test, out_folder + "/NF_TEST/nf.test.txt")
    '''
    (n3m_train, n3m_valid, n3m_test) = create_NETFLIX_data_timesplit(
        all_data, "2005-09-01", "2005-11-30", "2005-12-01", "2005-12-31")

    print("Netflix 3m train")
    print_stats(n3m_train)
    save_data_to_file(n3m_train, out_folder + "/N3M_TRAIN/n3m.train.txt")
    print("Netflix 3m valid")
    print_stats(n3m_valid)
    save_data_to_file(n3m_valid, out_folder + "/N3M_VALID/n3m.valid.txt")
    print("Netflix 3m test")
    print_stats(n3m_test)
    save_data_to_file(n3m_test, out_folder + "/N3M_TEST/n3m.test.txt")

    (n6m_train, n6m_valid, n6m_test) = create_NETFLIX_data_timesplit(
        all_data, "2005-06-01", "2005-11-30", "2005-12-01", "2005-12-31")
    print("Netflix 6m train")
    print_stats(n6m_train)
    save_data_to_file(n6m_train, out_folder + "/N6M_TRAIN/n6m.train.txt")
    print("Netflix 6m valid")
    print_stats(n6m_valid)
    save_data_to_file(n6m_valid, out_folder + "/N6M_VALID/n6m.valid.txt")
    print("Netflix 6m test")
    print_stats(n6m_test)
    save_data_to_file(n6m_test, out_folder + "/N6M_TEST/n6m.test.txt")

    # Netflix 1 year
    (n1y_train, n1y_valid, n1y_test) = create_NETFLIX_data_timesplit(
        all_data, "2004-06-01", "2005-05-31", "2005-06-01", "2005-06-30")
    print("Netflix 1y train")
    print_stats(n1y_train)
    save_data_to_file(n1y_train, out_folder + "/N1Y_TRAIN/n1y.train.txt")
    print("Netflix 1y valid")
    print_stats(n1y_valid)
    save_data_to_file(n1y_valid, out_folder + "/N1Y_VALID/n1y.valid.txt")
    print("Netflix 1y test")
    print_stats(n1y_test)
    save_data_to_file(n1y_test, out_folder + "/N1Y_TEST/n1y.test.txt")
    '''


if __name__ == "__main__":
    main(sys.argv)
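For intuition, here is a small sketch (toy ids and timestamps of my own choosing, not from the commit) of how create_NETFLIX_data_timesplit behaves: ratings inside the training window go to train, ratings inside the test window are coin-flipped between validation and test, and users or items absent from the training window are dropped. Note that time.mktime interprets the dates in local time.

```python
import random

random.seed(0)  # makes the 50/50 validation/test coin flips reproducible

# toy input: {userId: [(itemId, rating, timestamp), ...]}
toy = {
    0: [(0, 4.0, 1120000000),   # 2005-06-29, inside the training window
        (0, 3.0, 1134000000)],  # 2005-12-08, inside the test window
    1: [(0, 5.0, 1134500000)],  # user has no training ratings -> skipped
}
train, valid, test = create_NETFLIX_data_timesplit(
    toy, "1999-12-01", "2005-11-30", "2005-12-01", "2005-12-31")
# train == {0: [(0, 4.0, 1120000000)]}; user 0's December rating lands in
# either valid or test; user 1 does not appear anywhere.
```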

datasets/Netflix_deeprec/run.sh

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
wget https://paddlerec.bj.bcebos.com/datasets/Netflix-DeepRec/nf_prize_dataset.tar.gz
tar -xvf nf_prize_dataset.tar.gz
tar -xf download/training_set.tar
python netflix_data_convert.py training_set Netflix
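As a hedged illustration (file and directory names below are made up; the real training_set/ comes from the tarball fetched above): the converter expects Netflix Prize-style per-movie files whose first line is the movie id followed by a colon, and whose remaining lines are userId,rating,YYYY-MM-DD records.

```python
import os

# build a one-movie toy input set (hypothetical names)
os.makedirs("toy_set", exist_ok=True)
with open("toy_set/mv_0000001.txt", "w") as f:
    f.write("1:\n")                # first line: "<movieId>:"
    f.write("6,3.0,2005-09-06\n")  # then "userId,rating,YYYY-MM-DD" rows
    f.write("7,5.0,2005-12-15\n")

import netflix_data_convert
netflix_data_convert.main(["netflix_data_convert.py", "toy_set", "toy_out"])
# writes toy_out/NF_TRAIN/nf.train.txt etc. as tab-separated
# "userId<TAB>itemId<TAB>rating" lines
```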

models/rank/autofis/config.yaml

Lines changed: 22 additions & 24 deletions
@@ -12,41 +12,39 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+# global settings

 runner:
   train_data_dir: "data/sample_data/train"
   train_reader_path: "criteo_reader" # importlib format
   use_gpu: False
-  use_auc: True
-  train_batch_size: 2
-  epochs: 3
-  print_interval: 2
-  # model_init_path: "output_model_deepfm/2" # init model
-  model_save_path: "output_model_deepfm"
-  test_data_dir: "data/sample_data/train"
+  use_auc: False
+  train_batch_size: 200
+  epochs: 1
+  print_interval: 1
+  #model_init_path: "output_model/0" # init model
+  model_save_path: "output_model_autofis"
+  test_data_dir: "data/sample_data/test"
   infer_reader_path: "criteo_reader" # importlib format
-  infer_batch_size: 5
-  infer_load_path: "output_model_deepfm"
+  infer_batch_size: 200
+  infer_load_path: "output_model_autofis"
   infer_start_epoch: 0
-  infer_end_epoch: 3
-  #use inference save model
-  use_inference: False
-  save_inference_feed_varnames: ["C1","C2","C3","C4","C5","C6","C7","C8","C9","C10","C11","C12","C13","C14","C15","C16","C17","C18","C19","C20","C21","C22","C23","C24","C25","C26","dense_input"]
-  save_inference_fetch_varnames: ["sigmoid_0.tmp_0"]
-  # use fleet
-  use_fleet: False
-
+  infer_end_epoch: 1
+
 # hyper parameters of user-defined network
 hyper_parameters:
   # optimizer config
   optimizer:
     class: Adam
     learning_rate: 0.001
-    strategy: async
+    gamma: 0.7
   # user-defined <key, value> pairs
-  sparse_inputs_slots: 27
-  sparse_feature_number: 1000001
-  sparse_feature_dim: 9
-  dense_input_dim: 13
-  fc_sizes: [512, 256, 128, 32]
-  distributed_embedding: 0
+  num_inputs: 39
+  input_size: 1178909
+  embedding_size: 40
+  width: 700
+  depth: 5
+  n_col: 741
+  grad_c: 0.0005
+  grad_mu: 0.8
+  pairs: 741
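One sanity check worth noting (my reading of the new values, not stated in the commit): with num_inputs: 39 Criteo fields (13 dense + 26 sparse), the 741 in n_col and pairs is exactly the number of unordered field pairs, C(39, 2).

```python
# C(39, 2) pairwise feature interactions over 39 Criteo input fields
num_inputs = 39
pairs = num_inputs * (num_inputs - 1) // 2
assert pairs == 741  # matches n_col and pairs in config.yaml
```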

models/rank/autofis/config_bigdata.yaml

Lines changed: 3 additions & 3 deletions
@@ -23,11 +23,11 @@ runner:
   epochs: 1
   print_interval: 50
   #model_init_path: "output_model/0" # init model
-  model_save_path: "output_model_all_autodeepfm"
+  model_save_path: "output_model_autofis_all"
   test_data_dir: "../../../datasets/criteo_autofis/data/whole_data/test"
   infer_reader_path: "criteo_reader" # importlib format
-  infer_batch_size: 2000
-  infer_load_path: "output_model_all_autodeepfm"
+  infer_batch_size: 200
+  infer_load_path: "output_model_autofis_all"
   infer_start_epoch: 0
   infer_end_epoch: 1

4 binary files changed (305 KB, 7.94 KB, 305 KB, 7.94 KB); contents not shown.

models/rank/autofis/readme.md

Lines changed: 11 additions & 9 deletions
@@ -12,7 +12,7 @@
 ├── README.md # documentation
 ├── config.yaml # sample-data configuration
 ├── config_bigdata.yaml # full-data configuration
-├── net.py # core model network (unified dynamic/static graph)
+├── net.py # core model network
 ├── criteo_reader.py # data reader
 ├── dygraph_model.py # builds the dynamic graph
 ├── trainer.py # training script
@@ -33,11 +33,11 @@
 - [FAQ](#FAQ)

 ## Model Introduction
-Automatic Feature Interaction Selection in Factorization Models (automatic feature-interaction selection mode for factorization models in CTR prediction) is a new CTR-prediction method proposed by Huawei at KDD 2020. The paper points out that many CTR models rely on feature interactions, but the traditional approaches are simple brute-force enumeration or manual selection. Manual selection depends on prior knowledge, while brute-force enumeration is not in fact always beneficial to model performance: some interactions help little or even hurt the model, and the many useless interactions add a large number of parameters and waste memory. Building on AutoML techniques, the authors propose AutoFIS, which, as the name suggests, automatically searches for the best feature interactions.
+Automatic Feature Interaction Selection in Factorization Models (automatic feature-interaction selection model for factorization models in CTR prediction) is a new CTR-prediction method proposed by Huawei at KDD 2020. The paper points out that many CTR models rely on feature interactions, but the traditional approaches are simple brute-force enumeration or manual selection. Manual selection depends on prior knowledge, while brute-force enumeration is not always beneficial to model performance: some interactions help little or even hurt the model, and the many useless interactions add a large number of parameters and waste memory. Building on AutoML techniques, the authors propose AutoFIS, which, as the name suggests, automatically searches for the best feature interactions.

 ## Data Preparation

-The data is [Criteo](http://labs.criteo.com/downloads/download-terabyte-click-log); days 6-12 were chosen as the training set, with the low 13 days' data as the test set. After sampling, the positive/negative ratio is about 1:1.
+The data is [Criteo](http://labs.criteo.com/downloads/download-terabyte-click-log); days 6-12 were chosen as the training set, with day 13's data as the test set. After sampling, the positive/negative ratio is about 1:1.
 Sample data for a quick run is provided in the data directory of the model; to use the full dataset, see the [Reproducing Results](#效果复现) section below.

 ## Runtime Environment
@@ -48,14 +48,16 @@ python 2.7/3.5/3.6/3.7
 os : windows/linux/macos

 ## Quick Start
-This doc provides sample data so you can try the model quickly; the commands can be run from any directory. The quick-start commands in the deepfm model directory are as follows:
+This doc provides sample data so you can try the model quickly; the commands can be run from any directory. The quick-start commands in the autofis model directory are as follows:
 ```bash
 # enter the model directory
-# cd models/rank/deepfm # can be run from any directory
+# cd models/rank/autofis # can be run from any directory
 # dygraph training
-python -u ../../../tools/trainer.py -m config.yaml # run config_bigdata.yaml for the full dataset
+python trainer.py -m config.yaml # stage 0: automatically search the best feature interactions; run config_bigdata.yaml for the full dataset
+python trainer.py -m config.yaml -o stage=1 # stage 1: train the final model; run config_bigdata.yaml for the full dataset
+
 # dygraph inference
-python -u ../../../tools/infer.py -m config.yaml
+python -u ../../../tools/infer.py -m config.yaml -o stage=1 # run config_bigdata.yaml for the full dataset
 ```
 ## Reproducing Results
 To make it easy to run every model end to end, sample data is provided under each model. To reproduce the results in this readme, follow the steps below in order.
@@ -65,7 +67,7 @@
 | AutodeepFM | 0.8009 | 0.5403 | 2000 | 1 | about 3 hours |

 1. Make sure your current directory is PaddleRec/models/rank/autofis
-2. Enter paddlerec/datasets/criteo_autofis
+2. Enter Paddlerec/datasets/criteo_autofis
 3. Run the commands to process the full dataset

 ``` bash
@@ -77,7 +79,7 @@ cd - # switch back to the model directory
 # dygraph training
 python trainer.py -m config_bigdata.yaml # stage 0: automatically search the best feature interactions
 python trainer.py -m config_bigdata.yaml -o stage=1 # stage 1: train the final model
-python -u ../../../tools/infer.py -m config_bigdata.yaml -o stage=1 # run config_bigdata.yaml for the full dataset
+python -u ../../../tools/infer.py -m config_bigdata.yaml -o stage=1 # inference
 ```
 ## Advanced Usage
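To make the two-stage procedure above concrete, here is a minimal NumPy sketch of the AutoFIS idea as I read the paper: one learnable gate per pairwise interaction, with gates driven to exact zero during stage 0 and only the surviving pairs retrained in stage 1. Names and shapes are mine; this is not the repo's net.py.

```python
import numpy as np

rng = np.random.default_rng(0)
num_inputs, embedding_size = 39, 40      # as in config.yaml
i, j = np.triu_indices(num_inputs, k=1)  # 741 field pairs (i < j)

emb = rng.normal(size=(num_inputs, embedding_size))  # one sample's fields
alpha = rng.normal(size=i.size)          # stage-0 gates, one per pair

def gated_fm_logit(emb, alpha):
    """Sum over field pairs of alpha[k] * <e_i, e_j>."""
    inner = np.sum(emb[i] * emb[j], axis=1)  # <e_i, e_j> for every pair
    return float(np.dot(alpha, inner))

logit = gated_fm_logit(emb, alpha)
# Stage 0 trains alpha with a sparsity-inducing optimizer (GRDA in the
# paper; grad_c / grad_mu in config.yaml are its constants). Gates that
# reach exactly zero are dropped; the 0/1 pattern is what trainer.py
# saves as comb_mask.npy, and stage 1 retrains the surviving pairs.
mask = (alpha != 0).astype(int)
```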

models/rank/autofis/trainer.py

Lines changed: 1 addition & 1 deletion
@@ -224,7 +224,7 @@ def main(args):
     mask_val = (mask_val != 0).astype(int)
     np.save('comb_mask.npy', mask_val)

-    print(mask_val)
+    #print(mask_val)


 if __name__ == '__main__':
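For reference, a quick way to inspect the saved mask (assuming, as the code above suggests, one 0/1 entry per candidate interaction pair):

```python
import numpy as np

mask = np.load('comb_mask.npy')
print("kept {} of {} candidate interaction pairs".format(
    int(mask.sum()), mask.size))
```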
