@@ -248,6 +248,8 @@ def fit(self, S):
         print(f"Alpha values match: {self.alpha}")
         return self
 
+# TODO: remove this class and use AdaptiveGraphicalLassoPenalty instead
+
 
 class AdaptiveGraphicalLasso():
     """An adaptive version of the Graphical Lasso with non-convex penalties.
@@ -310,7 +312,22 @@ def fit(self, S):
 
 
 def update_weights(Theta, alpha, strategy="log"):
313- """Update weights for adaptive graphical lasso based on strategy."""
315+ """Update weights for adaptive graphical lasso based on strategy.
316+
317+ Parameters
318+ ----------
319+ Theta : array-like
320+ Precision matrix.
321+ alpha : float
322+ Regularization parameter.
323+ strategy : str, default="log"
324+ Reweighting strategy: "log", "sqrt", or "mcp".
325+
326+ Returns
327+ -------
328+ array-like
329+ Updated weights.
330+ """
314331 if strategy == "log" :
315332 return 1 / (np .abs (Theta ) + 1e-10 )
316333 elif strategy == "sqrt" :
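The hunk shows the "log" branch but cuts off before "sqrt" and "mcp". For orientation, here is a self-contained sketch of all three rules; the "log" branch mirrors the code above, while the "sqrt" and "mcp" branches are assumptions based on the standard derivative-of-the-penalty reweighting, not necessarily this file's exact code:

```python
import numpy as np

def update_weights_sketch(Theta, alpha, strategy="log"):
    abs_Theta = np.abs(Theta)
    if strategy == "log":
        # Derivative of log(|t| + eps): weights blow up as entries shrink,
        # so near-zero entries get pushed harder toward zero.
        return 1 / (abs_Theta + 1e-10)
    elif strategy == "sqrt":
        # Derivative of sqrt(|t|) up to a constant, guarded at zero
        # (assumed form, hedged: not shown in this hunk).
        return 1 / (2 * np.sqrt(abs_Theta) + 1e-10)
    elif strategy == "mcp":
        # MCP gradient: linearly decaying penalty that vanishes for entries
        # larger than gamma * alpha (assumed form with gamma = 3).
        gamma = 3.0
        return np.maximum(alpha - abs_Theta / gamma, 0)
    raise ValueError(f"Unknown strategy {strategy}")
```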
@@ -326,16 +343,15 @@ def update_weights(Theta, alpha, strategy="log"):
     else:
         raise ValueError(f"Unknown strategy {strategy}")
 
-
+# TODO: remove this testing code
 # Testing
 
-def frobenius_norm_diff(A, B):
-    """Relative Frobenius norm difference between A and B."""
+
+def _frobenius_norm_diff(A, B):
     return np.linalg.norm(A - B, ord='fro') / np.linalg.norm(B, ord='fro')
 
 
-def generate_problem(dim=20, n_samples=100, seed=42):
-    """Generate data from a known sparse precision matrix."""
+def _generate_problem(dim=20, n_samples=100, seed=42):
     np.random.seed(seed)
 
     # Ground-truth sparse precision matrix (positive definite)
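The hunk truncates `_generate_problem` right after this comment. A plausible completion under stated assumptions (the sparsity pattern, the diagonal-dominance trick, and the use of `default_rng` are illustrative choices, not necessarily the file's actual code):

```python
import numpy as np

def _generate_problem_sketch(dim=20, n_samples=100, seed=42):
    rng = np.random.default_rng(seed)

    # Sparse symmetric off-diagonal structure (~10% nonzeros, assumed density)
    A = rng.uniform(-1, 1, size=(dim, dim)) * (rng.random((dim, dim)) < 0.1)
    Theta_true = (A + A.T) / 2

    # Diagonal dominance guarantees positive definiteness
    np.fill_diagonal(Theta_true, np.abs(Theta_true).sum(axis=1) + 1.0)

    # Draw samples from N(0, Theta_true^{-1}), form the empirical covariance
    X = rng.multivariate_normal(np.zeros(dim), np.linalg.inv(Theta_true),
                                size=n_samples)
    S = np.cov(X, rowvar=False)
    return S, Theta_true
```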
@@ -357,7 +373,7 @@ def generate_problem(dim=20, n_samples=100, seed=42):
 seed = 42
 
 # Get empirical covariance and ground truth
-S, Theta_true = generate_problem(dim=dim, seed=seed)
+S, Theta_true = _generate_problem(dim=dim, seed=seed)
 
 # Define non-convex penalty; this is consistent with the 'log' strategy
 penalty = LogSumPenalty(alpha=alpha, eps=1e-10)
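Why the two parameterizations should line up, assuming `LogSumPenalty` applies `alpha * log(|t| + eps)` elementwise: its derivative with respect to `|t|` is `alpha / (|t| + eps)`, which is exactly `alpha` times the weights `1 / (|Theta| + 1e-10)` returned by the "log" strategy when `eps = 1e-10`. Each reweighted-L1 step therefore applies the same effective penalty in both models, which is what the comparison below checks.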
@@ -383,16 +399,16 @@ def generate_problem(dim=20, n_samples=100, seed=42):
 Theta_strategy = model_strategy.precision_
 
 # Compare the two estimated models
-rel_diff_between_models = frobenius_norm_diff(Theta_penalty, Theta_strategy)
+rel_diff_between_models = _frobenius_norm_diff(Theta_penalty, Theta_strategy)
 print(
     f"\nFrobenius norm relative difference between models: "
     f"{rel_diff_between_models:.2e}")
 print("Matrices are close?", np.allclose(
     Theta_penalty, Theta_strategy, atol=1e-4))
 
 # Compare both to ground truth
-rel_diff_penalty_vs_true = frobenius_norm_diff(Theta_penalty, Theta_true)
-rel_diff_strategy_vs_true = frobenius_norm_diff(Theta_strategy, Theta_true)
+rel_diff_penalty_vs_true = _frobenius_norm_diff(Theta_penalty, Theta_true)
+rel_diff_strategy_vs_true = _frobenius_norm_diff(Theta_strategy, Theta_true)
 
 print(
     f"\nPenalty vs true Θ: Frobenius norm diff = {rel_diff_penalty_vs_true:.2e}")