Skip to content

Commit 2b41782

Browse files
authored
fix(gbdt): correct dtrain assignment in finetune() to use Dataset instead of tuple (#2049)
1 parent ac3fe94 commit 2b41782

File tree

1 file changed

+5
-3
lines changed

1 file changed

+5
-3
lines changed

qlib/contrib/model/gbdt.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:
                 w = reweighter.reweight(df)
             else:
                 raise ValueError("Unsupported reweighter type.")
-            ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))
+            ds_l.append((lgb.Dataset(x.values, label=y, weight=w, free_raw_data=False), key))
         return ds_l

     def fit(
@@ -109,8 +109,10 @@ def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):
             verbose level
         """
         # Based on existing model and finetune by train more rounds
-        dtrain, _ = self._prepare_data(dataset, reweighter)  # pylint: disable=W0632
-        if dtrain.empty:
+        ds_l = self._prepare_data(dataset, reweighter)
+        dtrain, _ = ds_l[0]
+
+        if dtrain.construct().num_data() == 0:
             raise ValueError("Empty data from dataset, please check your dataset config.")
         verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
         self.model = lgb.train(

0 commit comments

Comments (0)