@@ -156,23 +156,25 @@ class IALSRecommender(
156156 BaseRecommenderWithUserEmbedding ,
157157 BaseRecommenderWithItemEmbedding ,
158158):
159- r"""Implementation of Implicit Alternating Least Squares(IALS ) or Weighted Matrix Factorization(WMF).
159+ r"""Implementation of implicit Alternating Least Squares (iALS) or Weighted Matrix Factorization (WMF).
160160
161161 By default, it tries to minimize the following loss:
162162
163163 .. math ::
164164
165- \frac{1}{2} \sum _{u, i \in S} X_ {ui} (\mathbf{u}_u \cdot \mathbf{v}_i - 1) ^ 2
166- + \frac{\alpha_0}{2} \sum_{u, i} X_{ui} (\mathbf{u}_u \cdot \mathbf{v}_i) ^ 2 +
165+ \frac{1}{2} \sum_{u, i \in S} c_{ui} (\mathbf{u}_u \cdot \mathbf{v}_i - 1) ^ 2
166+ + \frac{\alpha_0}{2} \sum_{u, i} (\mathbf{u}_u \cdot \mathbf{v}_i) ^ 2 +
167167 \frac{\text{reg}}{2} \left( \sum_u (\alpha_0 I + N_u) ^ \nu || \mathbf{u}_u || ^2 + \sum_i (\alpha_0 U + N_i) ^ \nu || \mathbf{v}_i || ^2 \right)
168168
169+ where :math:`S` denotes the set of all pairs where :math:`X_{ui}` is non-zero.
170+
169171 See the seminal paper:
170172
171173 - `Collaborative filtering for implicit feedback datasets
172174 <http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.167.5120&rep=rep1&type=pdf>`_
173175
174176
175- To speed up the learning procedure, we have also implemented the conjugate gradient descent version following :
177+ By default it uses a conjugate gradient descent version:
176178
177179 - `Applications of the conjugate gradient method for implicit feedback collaborative filtering
178180 <https://dl.acm.org/doi/abs/10.1145/2043932.2043987>`_
@@ -188,20 +190,26 @@ class IALSRecommender(
188190 n_components (int, optional):
189191 The dimension for latent factor. Defaults to 20.
190192 alpha0 (float, optional):
191- The "unovserved " weight
193+ The "unobserved " weight.
192194 reg (float, optional) :
193195 Regularization coefficient for both user & item factors. Defaults to 1e-3.
194196 nu (float, optional) :
195197 Controls frequency regularization introduced in the paper,
196198 "Revisiting the Performance of iALS on Item Recommendation Benchmarks".
197199 confidence_scaling (str, optional) :
198200 Specifies how to scale confidence scaling :math:`c_{ui}`. Must be either "none" or "log".
199- If "none", the non-zero "rating" :math:`r_{ui}` yields
201+ If "none", the non-zero (not-necessarily 1) :math:`X_{ui}` yields
202+
200203 .. math ::
201- c_{ui} = 1 + \alpha r_{ui}
204+ c_{ui} = A + X_{ui}
205+
202206 If "log",
207+
203208 .. math ::
204- c_{ui} = 1 + \alpha \log (1 + r_{ui} / \epsilon )
209+ c_{ui} = A + \log (1 + X_{ui} / \epsilon )
210+
211+ The constant :math:`A` above will be 0 if ``loss_type`` is ``"IALSPP"``, :math:`\alpha_0` if ``loss_type`` is ``"ORIGINAL"``.
212+
205213 Defaults to "none".
206214 epsilon (float, optional):
207215 The :math:`\epsilon` parameter for log-scaling described above.
@@ -240,6 +248,18 @@ class IALSRecommender(
240248 Maximal number of conjugate gradient descent steps during the prediction time,
241249 i.e., the case when a user unseen at the training time is given as a history matrix.
242250 Defaults to 5.
251+
252+ Examples:
253+
254+ >>> from irspack import IALSRecommender, rowwise_train_test_split, Evaluator
255+ >>> from irspack.utils.sample_data import mf_example_data
256+ >>> X = mf_example_data(100, 30, random_state=1)
257+ >>> X_train, X_test = rowwise_train_test_split(X, random_state=0)
258+ >>> rec = IALSRecommender(X_train)
259+ >>> rec.learn()
260+ >>> evaluator=Evaluator(X_test)
261+ >>> print(evaluator.get_scores(rec, [20]))
262+ OrderedDict([('hit@20', 1.0), ('recall@20', 0.9003412698412698), ('ndcg@20', 0.6175493479217139), ('map@20', 0.3848785870622406), ('precision@20', 0.3385), ('gini_index@20', 0.0814), ('entropy@20', 3.382497875272383), ('appeared_item@20', 30.0)])
243263 """
244264
245265 config_class = IALSConfig
0 commit comments