@@ -8,8 +8,9 @@
 
 import kilosort
 from kilosort.run_kilosort import (
-    setup_logger, initialize_ops, compute_preprocessing, compute_drift_correction,
-    detect_spikes, cluster_spikes, save_sorting, close_logger
+    # setup_logger, initialize_ops, compute_preprocessing, compute_drift_correction,
+    # detect_spikes, cluster_spikes, save_sorting, close_logger
+    setup_logger, _sort
 )
 from kilosort.io import save_preprocessing
 from kilosort.utils import (
@@ -46,105 +47,116 @@ def run(self): |
                 results_dir.mkdir(parents=True)
 
             setup_logger(results_dir)
-            verbose = settings['verbose_log']
-
-            try:
-                logger.info(f"Kilosort version {kilosort.__version__}")
-                logger.info(f"Sorting {self.data_path}")
-                clear_cache = settings['clear_cache']
-                if clear_cache:
-                    logger.info('clear_cache=True')
-                logger.info('-'*40)
-
-                tic0 = time.time()
-
-                if probe['chanMap'].max() >= settings['n_chan_bin']:
-                    raise ValueError(
-                        f'Largest value of chanMap exceeds channel count of data, '
-                        'make sure chanMap is 0-indexed.'
-                    )
-
-                if settings['nt0min'] is None:
-                    settings['nt0min'] = int(20 * settings['nt']/61)
-                data_dtype = settings['data_dtype']
-                device = self.device
-                save_preprocessed_copy = settings['save_preprocessed_copy']
-                do_CAR = settings['do_CAR']
-                invert_sign = settings['invert_sign']
-                if not do_CAR:
-                    logger.info("Skipping common average reference.")
-
-                ops = initialize_ops(settings, probe, data_dtype, do_CAR,
-                                     invert_sign, device, save_preprocessed_copy)
-
-                # Pretty-print ops and probe for log
-                logger.debug(f"Initial ops:\n\n{ops_as_string(ops)}\n")
-                logger.debug(f"Probe dictionary:\n\n{probe_as_string(ops['probe'])}\n")
-
-                # TODO: add support for file object through data conversion
-                # Set preprocessing and drift correction parameters
-                ops = compute_preprocessing(ops, self.device, tic0=tic0,
-                                            file_object=self.file_object)
-                np.random.seed(1)
-                torch.cuda.manual_seed_all(1)
-                torch.random.manual_seed(1)
-                ops, bfile, st0 = compute_drift_correction(
-                    ops, self.device, tic0=tic0, progress_bar=self.progress_bar,
-                    file_object=self.file_object, clear_cache=clear_cache
-                )
-
-                # Check scale of data for log file
-                b1 = bfile.padded_batch_to_torch(0).cpu().numpy()
-                logger.debug(f"First batch min, max: {b1.min(), b1.max()}")
-
-                if save_preprocessed_copy:
-                    save_preprocessing(results_dir / 'temp_wh.dat', ops, bfile)
-
-                # Will be None if nblocks = 0 (no drift correction)
-                if st0 is not None:
-                    self.dshift = ops['dshift']
-                    self.st0 = st0
-                    self.plotDataReady.emit('drift')
-
-                # Sort spikes and save results
-                st, tF, Wall0, clu0 = detect_spikes(
-                    ops, self.device, bfile, tic0=tic0,
-                    progress_bar=self.progress_bar, clear_cache=clear_cache,
-                    verbose=verbose
-                )
-
-                self.Wall0 = Wall0
-                self.wPCA = torch.clone(ops['wPCA'].cpu()).numpy()
-                self.clu0 = clu0
-                self.plotDataReady.emit('diagnostics')
-
-                clu, Wall, _ = cluster_spikes(
-                    st, tF, ops, self.device, bfile, tic0=tic0,
-                    progress_bar=self.progress_bar, clear_cache=clear_cache,
-                    verbose=verbose
-                )
-                ops, similar_templates, is_ref, est_contam_rate, kept_spikes = \
-                    save_sorting(ops, results_dir, st, clu, tF, Wall, bfile.imin, tic0)
-
-            except Exception as e:
-                if isinstance(e, torch.cuda.OutOfMemoryError):
-                    logger.exception('Out of memory error, printing performance...')
-                    log_performance(logger, level='info')
-                    log_cuda_details(logger)
-                # This makes sure the full traceback is written to log file.
-                logger.exception('Encountered error in `run_kilosort`:')
-                # Annoyingly, this will print the error message twice for console
-                # but I haven't found a good way around that.
-                raise
-
-            finally:
-                close_logger()
-
-            self.ops = ops
-            self.st = st[kept_spikes]
-            self.clu = clu[kept_spikes]
-            self.tF = tF[kept_spikes]
-            self.is_refractory = is_ref
-            self.plotDataReady.emit('probe')
+
+            # NOTE: All but `gui_sorter` are positional args;
+            # don't reorder them.
+            _ = _sort(
+                settings['filename'], results_dir, probe, settings,
+                settings['data_dtype'], self.device, settings['do_CAR'],
+                settings['clear_cache'], settings['invert_sign'],
+                settings['save_preprocessed_copy'], settings['verbose_log'],
+                False, self.file_object, self.progress_bar, gui_sorter=self
+            )
+            # The hard-coded `False` is for `save_extra_vars`, which isn't an
+            # option in the GUI right now (and isn't likely to be added).
+
+            # try:
+            #     logger.info(f"Kilosort version {kilosort.__version__}")
+            #     logger.info(f"Sorting {self.data_path}")
+            #     clear_cache = settings['clear_cache']
+            #     if clear_cache:
+            #         logger.info('clear_cache=True')
+            #     logger.info('-'*40)
+
+            #     tic0 = time.time()
+
+            #     if probe['chanMap'].max() >= settings['n_chan_bin']:
+            #         raise ValueError(
+            #             f'Largest value of chanMap exceeds channel count of data, '
+            #             'make sure chanMap is 0-indexed.'
+            #         )
+
+            #     if settings['nt0min'] is None:
+            #         settings['nt0min'] = int(20 * settings['nt']/61)
+            #     data_dtype = settings['data_dtype']
+            #     device = self.device
+            #     save_preprocessed_copy = settings['save_preprocessed_copy']
+            #     do_CAR = settings['do_CAR']
+            #     invert_sign = settings['invert_sign']
+            #     if not do_CAR:
+            #         logger.info("Skipping common average reference.")
+
+            #     ops = initialize_ops(settings, probe, data_dtype, do_CAR,
+            #                          invert_sign, device, save_preprocessed_copy)
+
+            #     # Pretty-print ops and probe for log
+            #     logger.debug(f"Initial ops:\n\n{ops_as_string(ops)}\n")
+            #     logger.debug(f"Probe dictionary:\n\n{probe_as_string(ops['probe'])}\n")
+
+            #     # TODO: add support for file object through data conversion
+            #     # Set preprocessing and drift correction parameters
+            #     ops = compute_preprocessing(ops, self.device, tic0=tic0,
+            #                                 file_object=self.file_object)
+            #     np.random.seed(1)
+            #     torch.cuda.manual_seed_all(1)
+            #     torch.random.manual_seed(1)
+            #     ops, bfile, st0 = compute_drift_correction(
+            #         ops, self.device, tic0=tic0, progress_bar=self.progress_bar,
+            #         file_object=self.file_object, clear_cache=clear_cache
+            #     )
+
+            #     # Check scale of data for log file
+            #     b1 = bfile.padded_batch_to_torch(0).cpu().numpy()
+            #     logger.debug(f"First batch min, max: {b1.min(), b1.max()}")
+
+            #     if save_preprocessed_copy:
+            #         save_preprocessing(results_dir / 'temp_wh.dat', ops, bfile)
+
+            #     # Will be None if nblocks = 0 (no drift correction)
+            #     if st0 is not None:
+            #         self.dshift = ops['dshift']
+            #         self.st0 = st0
+            #         self.plotDataReady.emit('drift')
+
+            #     # Sort spikes and save results
+            #     st, tF, Wall0, clu0 = detect_spikes(
+            #         ops, self.device, bfile, tic0=tic0,
+            #         progress_bar=self.progress_bar, clear_cache=clear_cache,
+            #         verbose=verbose
+            #     )
+
+            #     self.Wall0 = Wall0
+            #     self.wPCA = torch.clone(ops['wPCA'].cpu()).numpy()
+            #     self.clu0 = clu0
+            #     self.plotDataReady.emit('diagnostics')
+
+            #     clu, Wall, _ = cluster_spikes(
+            #         st, tF, ops, self.device, bfile, tic0=tic0,
+            #         progress_bar=self.progress_bar, clear_cache=clear_cache,
+            #         verbose=verbose
+            #     )
+            #     ops, similar_templates, is_ref, est_contam_rate, kept_spikes = \
+            #         save_sorting(ops, results_dir, st, clu, tF, Wall, bfile.imin, tic0)
+
+            # except Exception as e:
+            #     if isinstance(e, torch.cuda.OutOfMemoryError):
+            #         logger.exception('Out of memory error, printing performance...')
+            #         log_performance(logger, level='info')
+            #         log_cuda_details(logger)
+            #     # This makes sure the full traceback is written to log file.
+            #     logger.exception('Encountered error in `run_kilosort`:')
+            #     # Annoyingly, this will print the error message twice for console
+            #     # but I haven't found a good way around that.
+            #     raise
+
+            # finally:
+            #     close_logger()
+
+            # self.ops = ops
+            # self.st = st[kept_spikes]
+            # self.clu = clu[kept_spikes]
+            # self.tF = tF[kept_spikes]
+            # self.is_refractory = is_ref
+            # self.plotDataReady.emit('probe')
 
             self.finishedSpikesort.emit(self.context)
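
The new call hands `_sort` fourteen positional arguments, so the order is load-bearing (only `gui_sorter` is passed by keyword). A minimal sketch of a keyword-style wrapper that makes the mapping explicit; the parameter comments below are inferred from the call order above and the commented-out pipeline, not confirmed from `_sort`'s actual signature:

```python
# Hypothetical helper: the per-argument comments are inferred from the
# positional call in run(), NOT taken from kilosort's _sort() signature.
from kilosort.run_kilosort import _sort

def sort_for_gui(sorter, settings, probe, results_dir):
    """Spell out the positional arguments that run() passes to _sort."""
    return _sort(
        settings['filename'],                # raw binary data file
        results_dir,                         # output directory for results
        probe,                               # probe / channel-map dictionary
        settings,                            # full settings dict
        settings['data_dtype'],              # dtype of the raw data
        sorter.device,                       # torch device (CPU or CUDA)
        settings['do_CAR'],                  # common average referencing on/off
        settings['clear_cache'],             # clear GPU cache between stages
        settings['invert_sign'],             # flip signal polarity
        settings['save_preprocessed_copy'],  # write temp_wh.dat
        settings['verbose_log'],             # verbose logging
        False,                               # save_extra_vars (not exposed in GUI)
        sorter.file_object,                  # optional in-memory file object
        sorter.progress_bar,                 # GUI progress-bar hook
        gui_sorter=sorter,                   # lets _sort emit the GUI plot signals
    )
```

If `_sort`'s parameter order ever changes, a wrapper like this breaks in one visible place instead of silently shuffling arguments.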