Commit 3a5b528

Author: Joao Felipe Rocha
Commit message: corrected indentation on graph operator
1 parent 599968f, commit 3a5b528

File tree: 1 file changed (+17, -18 lines)

graphtools/graphs.py

Lines changed: 17 additions & 18 deletions
@@ -84,7 +84,6 @@ def __init__(
         n_pca=None,
         **kwargs,
     ):
-
         if decay is not None:
             if thresh <= 0 and knn_max is None:
                 raise ValueError(
@@ -491,7 +490,9 @@ class LandmarkGraph(DataGraph):
     >>> X_full = G.interpolate(X_landmark)
     """
 
-    def __init__(self, data, n_landmark=2000, n_svd=100, random_landmarking = False, **kwargs):
+    def __init__(
+        self, data, n_landmark=2000, n_svd=100, random_landmarking=False, **kwargs
+    ):
         """Initialize a landmark graph.
 
         Raises
@@ -641,28 +642,28 @@ def build_landmark_op(self):
         """Build the landmark operator
 
 
-        Calculates spectral clusters on the kernel, and calculates transition
-        probabilities between cluster centers by using transition probabilities
-        between samples assigned to each cluster.
-
-        random_landmarking:
-            This method randomly selects n_landmark points and assigns each sample to its nearest landmark
-            using Euclidean distance .
+        Calculates spectral clusters on the kernel, and calculates transition
+        probabilities between cluster centers by using transition probabilities
+        between samples assigned to each cluster.
+
+        random_landmarking:
+            This method randomly selects n_landmark points and assigns each sample to its nearest landmark
+            using Euclidean distance .
+
 
-
         """
         with _logger.log_task("landmark operator"):
-            is_sparse = sparse.issparse(self.kernel)
-
+            is_sparse = sparse.issparse(self.kernel)
+
             if self.random_landmark:
                 n_samples = self.data.shape[0]
                 rng = np.random.default_rng(self.random_state)
                 landmark_indices = rng.choice(n_samples, self.n_landmark, replace=False)
-                data = self.data if not hasattr(self, 'data_nu') else self.data_nu
-                # if n_samples > 5000 and self.distance == "euclidean": ( sklearn.euclidean_distances is faster than cdist for big dataset)
+                data = self.data if not hasattr(self, "data_nu") else self.data_nu
+                # if n_samples > 5000 and self.distance == "euclidean": ( sklearn.euclidean_distances is faster than cdist for big dataset)
                 # distances = euclidean_distances(data, data[landmark_indices])
-                # this is a futur optimization for the euclidean case
-                #
+                # this is a futur optimization for the euclidean case
+                #
                 distances = cdist(data, data[landmark_indices], metric=self.distance)
                 self._clusters = np.argmin(distances, axis=1)
 
@@ -683,8 +684,6 @@ def build_landmark_op(self):
                 )
                 self._clusters = kmeans.fit_predict(self.diff_op.dot(VT.T))
 
-
-
             # transition matrices
             pmn = self._landmarks_to_data()
 
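For readers who want to see the behavior the new docstring describes in isolation, the following standalone sketch mirrors the random-landmarking path: draw n_landmark row indices at random, then assign every sample to its nearest landmark. The data, seed, and sizes (1000 samples, 50 landmarks) are illustrative assumptions, not values from the commit; this is a simplified sketch, not the LandmarkGraph implementation itself.

# Illustrative sketch of the random-landmarking assignment described in the
# docstring above; graphtools does the equivalent inside
# LandmarkGraph.build_landmark_op() when the random_landmark flag is set.
import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(42)        # stands in for self.random_state
data = rng.normal(size=(1000, 20))     # synthetic (n_samples, n_features) matrix
n_landmark = 50                        # illustrative; the signature's default is 2000

# Randomly select n_landmark rows (without replacement) to act as landmarks.
landmark_indices = rng.choice(data.shape[0], n_landmark, replace=False)

# Assign each sample to its nearest landmark under the chosen metric.
distances = cdist(data, data[landmark_indices], metric="euclidean")
clusters = np.argmin(distances, axis=1)    # label = index of the nearest landmark

print(clusters.shape)                      # (1000,): one assignment per sample
print(np.unique(clusters).size, "landmarks received at least one sample")

Compared with the spectral/KMeans path visible in the final hunk, this random selection trades cluster quality for speed; the commented-out euclidean_distances line in the diff suggests a further planned speed-up for the Euclidean case on large datasets.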