Skip to content

Commit 0955645

Browse files
authored
Change NGCF Batch Size Param (#532)
1 parent a470d5c commit 0955645

File tree

2 files changed

+10
-14
lines changed

2 files changed

+10
-14
lines changed

cornac/models/ngcf/recom_ngcf.py

Lines changed: 9 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -42,13 +42,10 @@ class NGCF(Recommender):
4242
Maximum number of iterations or the number of epochs.
4343
4444
learning_rate: float, default: 0.001
45-
The learning rate that determines the step size at each iteration
45+
The learning rate that determines the step size at each iteration.
4646
47-
train_batch_size: int, default: 1024
48-
Mini-batch size used for train set
49-
50-
test_batch_size: int, default: 100
51-
Mini-batch size used for test set
47+
batch_size: int, default: 1024
48+
Mini-batch size used for training.
5249
5350
early_stopping: {min_delta: float, patience: int}, optional, default: None
5451
If `None`, no early stopping. Meaning of the arguments:
@@ -62,7 +59,7 @@ class NGCF(Recommender):
6259
training should be stopped.
6360
6461
lambda_reg: float, default: 1e-4
65-
Weight decay for the L2 normalization
62+
Weight decay for the L2 normalization.
6663
6764
trainable: boolean, optional, default: True
6865
When False, the model is not trained and Cornac assumes that the model
@@ -87,8 +84,7 @@ def __init__(
8784
dropout_rates=[0.1, 0.1, 0.1],
8885
num_epochs=1000,
8986
learning_rate=0.001,
90-
train_batch_size=1024,
91-
test_batch_size=100,
87+
batch_size=1024,
9288
early_stopping=None,
9389
lambda_reg=1e-4,
9490
trainable=True,
@@ -101,8 +97,7 @@ def __init__(
10197
self.dropout_rates = dropout_rates
10298
self.num_epochs = num_epochs
10399
self.learning_rate = learning_rate
104-
self.train_batch_size = train_batch_size
105-
self.test_batch_size = test_batch_size
100+
self.batch_size = batch_size
106101
self.early_stopping = early_stopping
107102
self.lambda_reg = lambda_reg
108103
self.seed = seed
@@ -163,11 +158,11 @@ def fit(self, train_set, val_set=None):
163158
accum_loss = 0.0
164159
for batch_u, batch_i, batch_j in tqdm(
165160
train_set.uij_iter(
166-
batch_size=self.train_batch_size,
161+
batch_size=self.batch_size,
167162
shuffle=True,
168163
),
169164
desc="Epoch",
170-
total=train_set.num_batches(self.train_batch_size),
165+
total=train_set.num_batches(self.batch_size),
171166
leave=False,
172167
position=1,
173168
disable=not self.verbose,
@@ -221,6 +216,7 @@ def monitor_value(self):
221216
metrics=[Recall(k=20)],
222217
train_set=self.train_set,
223218
test_set=self.val_set,
219+
verbose=True
224220
)[0][0]
225221

226222
return recall_20 # Section 4.2.3 in the paper

examples/ngcf_example.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
layer_sizes=[64, 64, 64],
4242
dropout_rates=[0.1, 0.1, 0.1],
4343
early_stopping={"min_delta": 1e-4, "patience": 50},
44-
train_batch_size=1024,
44+
batch_size=1024,
4545
learning_rate=0.001,
4646
lambda_reg=1e-5,
4747
verbose=True,

0 commit comments

Comments (0)