 )


-def test_graphical_lassos(random_state=1):
-    """Test the graphical lasso solvers.
-
-    This checks is unstable for some random seeds where the covariance found with "cd"
-    and "lars" solvers are different (4 cases / 100 tries).
-    """
+def test_graphical_lassos(global_random_seed):
+    """Test the graphical lasso solvers."""
     # Sample data from a sparse multivariate normal
-    dim = 20
+    dim = 10
     n_samples = 100
-    random_state = check_random_state(random_state)
+    random_state = check_random_state(global_random_seed)
     prec = make_sparse_spd_matrix(dim, alpha=0.95, random_state=random_state)
     cov = linalg.inv(prec)
     X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
@@ -45,24 +41,29 @@ def test_graphical_lassos(random_state=1):
     icovs = dict()
     for method in ("cd", "lars"):
         cov_, icov_, costs = graphical_lasso(
-            emp_cov, return_costs=True, alpha=alpha, mode=method
+            emp_cov,
+            return_costs=True,
+            alpha=alpha,
+            mode=method,
+            tol=1e-7,
+            enet_tol=1e-11,
+            max_iter=100,
         )
         covs[method] = cov_
         icovs[method] = icov_
         costs, dual_gap = np.array(costs).T
         # Check that the costs always decrease (doesn't hold if alpha == 0)
         if not alpha == 0:
-            # use 1e-12 since the cost can be exactly 0
-            assert_array_less(np.diff(costs), 1e-12)
+            # use 1e-10 since the cost can be exactly 0
+            assert_array_less(np.diff(costs), 1e-10)
     # Check that the 2 approaches give similar results
-    assert_allclose(covs["cd"], covs["lars"], atol=5e-4)
-    assert_allclose(icovs["cd"], icovs["lars"], atol=5e-4)
+    assert_allclose(covs["cd"], covs["lars"], atol=1e-3)
+    assert_allclose(icovs["cd"], icovs["lars"], atol=1e-3)

     # Smoke test the estimator
-    model = GraphicalLasso(alpha=0.25).fit(X)
+    model = GraphicalLasso(alpha=0.25, tol=1e-7, enet_tol=1e-11, max_iter=100).fit(X)
     model.score(X)
-    assert_array_almost_equal(model.covariance_, covs["cd"], decimal=4)
-    assert_array_almost_equal(model.covariance_, covs["lars"], decimal=4)
+    assert_allclose(model.covariance_, covs["cd"], rtol=1e-6)

     # For a centered matrix, assume_centered could be chosen True or False
     # Check that this returns indeed the same result for centered data
@@ -87,6 +88,7 @@ def test_graphical_lasso_when_alpha_equals_0(global_random_seed):


 @pytest.mark.parametrize("mode", ["cd", "lars"])
+@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
 def test_graphical_lasso_n_iter(mode):
     X, _ = datasets.make_classification(n_samples=5_000, n_features=20, random_state=0)
     emp_cov = empirical_covariance(X)
@@ -138,12 +140,25 @@ def test_graph_lasso_2D():
     assert_array_almost_equal(icov, icov_skggm)


-def test_graphical_lasso_iris_singular():
+@pytest.mark.parametrize("method", ["cd", "lars"])
+def test_graphical_lasso_iris_singular(method):
     # Small subset of rows to test the rank-deficient case
     # Need to choose samples such that none of the variances are zero
     indices = np.arange(10, 13)

     # Hard-coded solution from R glasso package for alpha=0.01
+    # library(glasso)
+    # X = t(array(c(
+    #     5.4, 3.7, 1.5, 0.2,
+    #     4.8, 3.4, 1.6, 0.2,
+    #     4.8, 3. , 1.4, 0.1),
+    #     dim = c(4, 3)
+    # ))
+    # n = nrow(X)
+    # emp_cov = cov(X) * (n - 1)/n  # without Bessel correction
+    # sol = glasso(emp_cov, 0.01, penalize.diagonal = FALSE)
+    # # print cov_R
+    # print(noquote(format(sol$w, scientific=FALSE, digits = 10)))
     cov_R = np.array(
         [
             [0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
@@ -162,12 +177,9 @@ def test_graphical_lasso_iris_singular():
     )
     X = datasets.load_iris().data[indices, :]
     emp_cov = empirical_covariance(X)
-    for method in ("cd", "lars"):
-        cov, icov = graphical_lasso(
-            emp_cov, alpha=0.01, return_costs=False, mode=method
-        )
-        assert_array_almost_equal(cov, cov_R, decimal=5)
-        assert_array_almost_equal(icov, icov_R, decimal=5)
+    cov, icov = graphical_lasso(emp_cov, alpha=0.01, return_costs=False, mode=method)
+    assert_allclose(cov, cov_R, atol=1e-6)
+    assert_allclose(icov, icov_R, atol=1e-5)


 def test_graphical_lasso_cv(global_random_seed):