@@ -60,103 +60,4 @@ def run(self):
         # Hard-coded `False` is for "save_extra_vars", which isn't an option
         # in the GUI right now (and isn't likely to be added).
 
-        # try:
-        #     logger.info(f"Kilosort version {kilosort.__version__}")
-        #     logger.info(f"Sorting {self.data_path}")
-        #     clear_cache = settings['clear_cache']
-        #     if clear_cache:
-        #         logger.info('clear_cache=True')
-        #     logger.info('-'*40)
-
-        #     tic0 = time.time()
-
-        #     if probe['chanMap'].max() >= settings['n_chan_bin']:
-        #         raise ValueError(
-        #             f'Largest value of chanMap exceeds channel count of data, '
-        #             'make sure chanMap is 0-indexed.'
-        #         )
-
-        #     if settings['nt0min'] is None:
-        #         settings['nt0min'] = int(20 * settings['nt']/61)
-        #     data_dtype = settings['data_dtype']
-        #     device = self.device
-        #     save_preprocessed_copy = settings['save_preprocessed_copy']
-        #     do_CAR = settings['do_CAR']
-        #     invert_sign = settings['invert_sign']
-        #     if not do_CAR:
-        #         logger.info("Skipping common average reference.")
-
-        #     ops = initialize_ops(settings, probe, data_dtype, do_CAR,
-        #                          invert_sign, device, save_preprocessed_copy)
-
-        #     # Pretty-print ops and probe for log
-        #     logger.debug(f"Initial ops:\n\n{ops_as_string(ops)}\n")
-        #     logger.debug(f"Probe dictionary:\n\n{probe_as_string(ops['probe'])}\n")
-
-        #     # TODO: add support for file object through data conversion
-        #     # Set preprocessing and drift correction parameters
-        #     ops = compute_preprocessing(ops, self.device, tic0=tic0,
-        #                                 file_object=self.file_object)
-        #     np.random.seed(1)
-        #     torch.cuda.manual_seed_all(1)
-        #     torch.random.manual_seed(1)
-        #     ops, bfile, st0 = compute_drift_correction(
-        #         ops, self.device, tic0=tic0, progress_bar=self.progress_bar,
-        #         file_object=self.file_object, clear_cache=clear_cache
-        #     )
-
-        #     # Check scale of data for log file
-        #     b1 = bfile.padded_batch_to_torch(0).cpu().numpy()
-        #     logger.debug(f"First batch min, max: {b1.min(), b1.max()}")
-
-        #     if save_preprocessed_copy:
-        #         save_preprocessing(results_dir / 'temp_wh.dat', ops, bfile)
-
-        #     # Will be None if nblocks = 0 (no drift correction)
-        #     if st0 is not None:
-        #         self.dshift = ops['dshift']
-        #         self.st0 = st0
-        #         self.plotDataReady.emit('drift')
-
-        #     # Sort spikes and save results
-        #     st, tF, Wall0, clu0 = detect_spikes(
-        #         ops, self.device, bfile, tic0=tic0,
-        #         progress_bar=self.progress_bar, clear_cache=clear_cache,
-        #         verbose=verbose
-        #     )
-
-        #     self.Wall0 = Wall0
-        #     self.wPCA = torch.clone(ops['wPCA'].cpu()).numpy()
-        #     self.clu0 = clu0
-        #     self.plotDataReady.emit('diagnostics')
-
-        #     clu, Wall, _ = cluster_spikes(
-        #         st, tF, ops, self.device, bfile, tic0=tic0,
-        #         progress_bar=self.progress_bar, clear_cache=clear_cache,
-        #         verbose=verbose
-        #     )
-        #     ops, similar_templates, is_ref, est_contam_rate, kept_spikes = \
-        #         save_sorting(ops, results_dir, st, clu, tF, Wall, bfile.imin, tic0)
-
-        # except Exception as e:
-        #     if isinstance(e, torch.cuda.OutOfMemoryError):
-        #         logger.exception('Out of memory error, printing performance...')
-        #         log_performance(logger, level='info')
-        #         log_cuda_details(logger)
-        #     # This makes sure the full traceback is written to log file.
-        #     logger.exception('Encountered error in `run_kilosort`:')
-        #     # Annoyingly, this will print the error message twice for console
-        #     # but I haven't found a good way around that.
-        #     raise
-
-        # finally:
-        #     close_logger()
-
-        # self.ops = ops
-        # self.st = st[kept_spikes]
-        # self.clu = clu[kept_spikes]
-        # self.tF = tF[kept_spikes]
-        # self.is_refractory = is_ref
-        # self.plotDataReady.emit('probe')
-
         self.finishedSpikesort.emit(self.context)
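
Every removed line above was already commented out, so this hunk is dead-code cleanup: the live `run()` path now ends by emitting `finishedSpikesort`. For reference, the deleted block mirrored the step-by-step pipeline from `kilosort.run_kilosort`, using the same helpers and signatures that appear in the removed lines. Below is a minimal sketch of that sequence outside the GUI; `settings`, `probe`, and `results_dir` are placeholders the caller must supply, and the exact import path is an assumption:

```python
# Sketch only: reproduces the call sequence from the deleted block,
# assuming the Kilosort 4 helpers it referenced. `settings`, `probe`,
# and `results_dir` are placeholders to fill in before running.
import time

import numpy as np
import torch
from kilosort.run_kilosort import (
    initialize_ops, compute_preprocessing, compute_drift_correction,
    detect_spikes, cluster_spikes, save_sorting,
)

tic0 = time.time()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Build the ops dict that every later stage reads from.
ops = initialize_ops(settings, probe, settings['data_dtype'],
                     settings['do_CAR'], settings['invert_sign'],
                     device, settings['save_preprocessed_copy'])

# Set preprocessing parameters, then run drift correction over the
# binary file. Seeding mirrors the deleted code for reproducibility.
ops = compute_preprocessing(ops, device, tic0=tic0)
np.random.seed(1)
torch.cuda.manual_seed_all(1)
torch.random.manual_seed(1)
ops, bfile, st0 = compute_drift_correction(ops, device, tic0=tic0)

# Detect spikes, cluster them, and save the results; `kept_spikes`
# marks the spikes retained by save_sorting.
st, tF, Wall0, clu0 = detect_spikes(ops, device, bfile, tic0=tic0)
clu, Wall, _ = cluster_spikes(st, tF, ops, device, bfile, tic0=tic0)
ops, similar_templates, is_ref, est_contam_rate, kept_spikes = \
    save_sorting(ops, results_dir, st, clu, tF, Wall, bfile.imin, tic0)
```

Outside the GUI, the supported entry point is the top-level `kilosort.run_kilosort(settings=..., probe=...)`, which wraps this same sequence together with the logging and error handling the deleted block had copied.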