Skip to content
This repository was archived by the owner on May 31, 2023. It is now read-only.

Commit 9b561f8

Browse files
committed
Code Cleanup, README update
1 parent 74463b3 commit 9b561f8

File tree

10 files changed

+86
-60
lines changed

10 files changed

+86
-60
lines changed

README.md

Lines changed: 28 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,30 @@
11
# MicroSPAT
22

3-
#To Do
4-
1. Add in controls for probabilistic allele identification at the project level
5-
2. refactor and pull control of a project into service
6-
3. refactor REST API into sockets based API
7-
4. Add in notification functionality
8-
3+
## Introduction
4+
MicroSPAT is a collection of tools for semi-automated analysis of raw capillary electrophoresis (CE) data output by the ABI 3730. MicroSPAT integrates several features including a plate view for quality checking, automated ladder identification, sample based association of FSA data to keep data organized in a logical manner, automated bin generation using a clustering algorithm, automated artifact estimator generation, automated quantification bias estimation, and automated genotyping of samples with the option of manual curation.
5+
6+
## DEV WARNING
7+
Under Very Heavy Development -- Features will change, things WILL break
8+
9+
## Installation
10+
Download the latest build zip, unzip it, and navigate to the folder. Using either pip or conda, install the required packages either through requirements.txt or using the conda environment manager and the environment.yml file. If you plan to install through pip, I highly suggest using an environment manager, but this is beyond the scope of this tutorial. Once all packages are installed, execute the following:
11+
12+
```
13+
python manage.py initDB
14+
```
15+
16+
This initializes the application with the HDROX400 Ladder.
17+
18+
## Running
19+
While in the MicroSPAT directory, execute the following:
20+
21+
```
22+
python manage.py runsockets
23+
```
24+
25+
Then open Chrome (only tested in Chrome to date; no guarantee for other browsers at this time) and navigate to:
26+
```
27+
http://localhost:5000
28+
```
29+
30+
This will bring up MicroSPAT for the first time. At this point, you can then start to populate the application with your data.

app/microspat/events.py

Lines changed: 0 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -87,9 +87,6 @@ def update_peak_scanner(target, update_dict):
8787
:type update_dict: dict
8888
:type target: PeakScanner
8989
"""
90-
print update_dict
91-
print type(update_dict)
92-
print update_dict['scanning_method']
9390
if update_dict['scanning_method'] == 'relmax':
9491
target.scanning_method = 'relmax'
9592
target.argrelmax_window = update_dict['argrelmax_window']
@@ -258,7 +255,6 @@ def load_plate_map(plate_map_file, plate, create_samples_if_not_exist=False):
258255
plate.check_contamination()
259256
for project_id in new_channels.keys():
260257
project = Project.query.get(project_id)
261-
print "Adding New Channels"
262258
project.add_channels(new_channels[project_id])
263259
db.session.commit()
264260
plate = Plate.query.get(plate.id)
@@ -298,7 +294,6 @@ def test_message(message=None):
298294

299295
@socketio.on('client_test')
300296
def client_test(message=None):
301-
print message
302297
emit('server_test', 'Success')
303298

304299

@@ -322,7 +317,6 @@ def get_or_post_genotyping_projects():
322317
return table_list_all(GenotypingProject)
323318
elif request.method == 'POST':
324319
project_params = json.loads(request.get_json())
325-
print project_params
326320
try:
327321
project = GenotypingProject(**project_params)
328322
db.session.add(project)
@@ -473,7 +467,6 @@ def get_peak_data(id):
473467
res["In Bin"] = bool(peak['bin_id'])
474468
res["Relative Peak Height"] = peak['relative_peak_height']
475469
if res["In Bin"]:
476-
print peak['bin_id']
477470
res["Called Allele"] = str(peak['bin_id']) in bin_ids
478471
res["Allele Label"] = str(peak['bin'])
479472
res["Bleedthrough Ratio"] = peak['bleedthrough_ratio']
@@ -562,7 +555,6 @@ def get_or_post_bias_estimator_projects():
562555
return table_list_all(QuantificationBiasEstimatorProject)
563556
elif request.method == 'POST':
564557
project_params = json.loads(request.get_json())
565-
print project_params
566558
try:
567559
project = QuantificationBiasEstimatorProject(**project_params)
568560
db.session.add(project)
@@ -715,7 +707,6 @@ def get_or_create_bin_estimators():
715707
elif request.method == 'POST':
716708
project_params = json.loads(request.get_json())
717709
try:
718-
print "Adding a new project"
719710
project = BinEstimatorProject(**project_params)
720711
db.session.add(project)
721712
db.session.flush()
@@ -777,7 +768,6 @@ def create_or_update_bins(id, locus_id):
777768
# Remove deleted bins
778769
old_bin_ids = [_['id'] for _ in old_bins]
779770
for b in locus_bin_set.bins:
780-
print b.id
781771
if b.id not in old_bin_ids:
782772
db.session.delete(b)
783773

@@ -823,7 +813,6 @@ def batch_update_locus_parameters():
823813
proj_id = locus_params_update_dict['project_id']
824814
project = Project.query.get(proj_id)
825815
locus_parameters = ProjectLocusParams.query.filter(ProjectLocusParams.project_id == proj_id).all()
826-
print project, locus_parameters
827816
if locus_parameters:
828817
try:
829818
updater = update_fns.get(locus_parameters[0].discriminator, update_locus_params)
@@ -858,7 +847,6 @@ def get_or_update_locus_parameters(id):
858847
return table_get_details(ProjectLocusParams, id)
859848
elif request.method == 'PUT':
860849
locus_params_update_dict = json.loads(request.get_json())
861-
print type(locus_params_update_dict)
862850
locus_params = ProjectLocusParams.query.get(id)
863851
assert isinstance(locus_params, ProjectLocusParams)
864852
project = Project.query.get(locus_params.project_id)
@@ -868,7 +856,6 @@ def get_or_update_locus_parameters(id):
868856
try:
869857
locus_params = updater(locus_params, locus_params_update_dict)
870858
except StaleParametersError as e:
871-
print "STALE PARAMETER ERROR"
872859
return handle_error("{} is stale at locus {}, analyze that first!".format(e.project, e.locus))
873860
db.session.flush()
874861
send_notification('info', 'Beginning Analysis: {}'.format(locus_params.locus.label))
@@ -938,8 +925,6 @@ def get_or_post_locus_sets():
938925
return table_list_all(LocusSet)
939926
elif request.method == 'POST':
940927
request_json = request.get_json()
941-
print request_json
942-
print type(request_json)
943928
locus_set_params = json.loads(request_json['locus_set'])
944929
locus_ids = request_json['locus_ids']
945930
try:
@@ -979,7 +964,6 @@ def get_or_post_ladders():
979964
elif request.method == 'POST':
980965
ladder_params = json.loads(request.get_json())
981966
try:
982-
print ladder_params
983967
if ladder_params.get('id', None):
984968
l = Ladder.query.get(ladder_params['id'])
985969
else:
@@ -1066,7 +1050,6 @@ def save_plate():
10661050
files.append(os.path.join('./tmp', filename))
10671051
ladder = Ladder.query.get(ladder_id)
10681052
plate_zips = [open(_, 'rb') for _ in files]
1069-
print plate_zips
10701053
try:
10711054
if os.name == 'nt': # Currently no support for multiprocessing in windows at this time.
10721055
extracted_plates = load_plate_zips(plate_zips, ladder, parallel=False)
@@ -1243,7 +1226,6 @@ def get_project_sample_locus_annotations_by_sample(project_id, sample_id):
12431226
def update_locus_annotations():
12441227
annotations = map(json.loads, request.get_json())
12451228
for annotation in annotations:
1246-
print annotation
12471229
sample_annotation = SampleLocusAnnotation.query.get(annotation['id'])
12481230
assert isinstance(sample_annotation, SampleLocusAnnotation)
12491231
sample_annotation.alleles = annotation['alleles']
@@ -1259,7 +1241,6 @@ def get_controls():
12591241
elif request.method == 'POST':
12601242
try:
12611243
ctrl_info = json.loads(request.get_json())
1262-
print ctrl_info
12631244
ctrl = Control(barcode=ctrl_info['barcode'], bin_estimator_id=ctrl_info['bin_estimator_id'])
12641245
for k in ctrl_info['alleles'].keys():
12651246
if ctrl_info['alleles'][k] == 'null':
@@ -1284,8 +1265,6 @@ def get_control(id):
12841265
for k in update_control['alleles'].keys():
12851266
if update_control['alleles'][k] == 'null':
12861267
update_control['alleles'][k] = None
1287-
print ctrl
1288-
print update_control
12891268
ctrl.bin_estimator_id = update_control['bin_estimator_id']
12901269
db.session.flush()
12911270
ctrl.initialize_alleles()
@@ -1296,7 +1275,6 @@ def get_control(id):
12961275
handle_error(e)
12971276
elif request.method == 'DELETE':
12981277
ctrl = Control.query.get(id)
1299-
print ctrl
13001278
db.session.delete(ctrl)
13011279
db.session.flush()
13021280
return jsonify(wrap_data({'status': 'Success'}))

app/microspat/fsa_extractor/PlateExtractor.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,6 @@ def parallel_from_zip(cls, zip_files, ladder, color, base_size_precision, sq_lim
209209
filter_parameters, scanning_parameters, creator=None, comments=None):
210210

211211
pool = multiprocessing.Pool()
212-
print "MultiProcessing Plate Loading with {} Processes".format(pool._processes)
213212
fn = partial(cls.from_zip_and_calculate_base_sizes, ladder=ladder, color=color,
214213
base_size_precision=base_size_precision,
215214
sq_limit=sq_limit,

app/microspat/models.py

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66
from ..custom_sql_types.custom_types import JSONEncodedData, MutableDict, MutableList, CompressedJSONEncodedData
77

88
from app import db, socketio
9-
from app.microspat.statistics import calculate_moi
10-
from app.microspat.statistics.utils import calculate_prob_negative, calculate_prob_pos_if_observed
9+
from app.microspat.statistics import calculate_moi, calculate_prob_negative, calculate_prob_pos_if_observed
1110
from quantification_bias.BiasCalculator import correct_peak_proportion, calculate_beta
1211
from fsa_extractor.PlateExtractor import PlateExtractor, WellExtractor, ChannelExtractor
1312
from statistics import calculate_allele_frequencies, calculate_peak_probability
@@ -659,10 +658,7 @@ def add_sample(self, sample_id):
659658

660659
def add_samples(self, sample_ids):
661660
present_sample_ids = set([_.id for _ in self.associated_samples])
662-
print present_sample_ids
663-
print set(sample_ids)
664661
full_sample_ids = list(set(sample_ids) - present_sample_ids)
665-
print full_sample_ids
666662
n = 0
667663
while n * 100 < len(full_sample_ids) + 100:
668664
sample_ids = full_sample_ids[n * 100: (n + 1) * 100]
@@ -1327,7 +1323,7 @@ def from_peaks(cls, locus_id, peak_sets, min_artifact_peak_frequency):
13271323
artifact_estimator = ArtifactEstimator(artifact_distance=estimator.artifact_distance,
13281324
artifact_distance_buffer=estimator.artifact_distance_buffer,
13291325
peak_data=estimator.peak_data, label=estimator.label)
1330-
print artifact_estimator.label
1326+
13311327
for eqn in estimator.artifact_equations:
13321328
eventlet.sleep()
13331329
assert isinstance(eqn, AE.ArtifactEquation)
@@ -1679,8 +1675,6 @@ def serialize(self):
16791675
return res
16801676

16811677
def serialize_details(self):
1682-
print "Serializing QBE"
1683-
print self.locus_parameters.all()
16841678
res = super(QuantificationBiasEstimatorProject, self).serialize_details()
16851679
res.update({
16861680
'locus_parameters': {_.locus_id: _.serialize() for _ in self.locus_parameters.all()},
@@ -1939,7 +1933,6 @@ def bootstrap_allele_frequencies(self):
19391933
while alleles_changed:
19401934

19411935
cycles += 1
1942-
print cycles
19431936
alleles_changed = False
19441937
allele_frequency_locus_annotations = format_locus_annotations(all_locus_annotations, peak_filters)
19451938
allele_frequencies = calculate_allele_frequencies(allele_frequency_locus_annotations)
@@ -2055,7 +2048,6 @@ def probabilistic_peak_annotation(self, allele_frequencies=None):
20552048
sample_annotation.moi = moi_dict[sample_annotation.id]
20562049
locus_annotations = locus_annotation_dict[sample_annotation.id]
20572050

2058-
verbose = False
20592051

20602052
for locus_annotation in locus_annotations:
20612053
for peak in locus_annotation.annotated_peaks:
@@ -2064,10 +2056,8 @@ def probabilistic_peak_annotation(self, allele_frequencies=None):
20642056
locus_annotation.annotated_peaks = calculate_prob_negative(locus_annotation.annotated_peaks,
20652057
sample_annotation.moi,
20662058
allele_frequencies[
2067-
locus_annotation.locus.label],
2068-
verbose)
2069-
locus_annotation.annotated_peaks = calculate_prob_pos_if_observed(locus_annotation.annotated_peaks,
2070-
verbose)
2059+
locus_annotation.locus.label])
2060+
locus_annotation.annotated_peaks = calculate_prob_pos_if_observed(locus_annotation.annotated_peaks)
20712061
self.recalculate_alleles(locus_annotation)
20722062
return self
20732063

@@ -2425,7 +2415,6 @@ def serialize(self):
24252415
'min_bias_quantifier_peak_proportion': self.min_bias_quantifier_peak_proportion,
24262416
'quantification_bias_paramters_stale': self.quantification_bias_parameters_stale
24272417
})
2428-
print res
24292418
return res
24302419

24312420

@@ -2504,7 +2493,6 @@ def copy_project_sample_annotations(cls, psa):
25042493
def serialize(self):
25052494
if self.sample_id and not self.sample:
25062495
self.sample = Sample.query.get(self.sample_id)
2507-
print self.sample_id
25082496
res = {
25092497
'id': self.id,
25102498
'sample': self.sample.serialize(),

app/microspat/quantification_bias/BiasCalculator.py

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -96,16 +96,11 @@ def correct_peak_proportion(beta, peak_set):
9696
else:
9797
corrected_total_peak_height = total_peak_height
9898

99-
print peak_set
100-
101-
print "Corrected Total: {}".format(corrected_total_peak_height)
102-
10399
for peak in peak_set:
104100
peak_height = max(1, peak['peak_height'] - peak.get('artifact_contribution', 0))
105101

106102
if beta:
107103
corrected_peak_height = peak_height * np.e ** (beta * (peak['peak_size'] - pivot_point))
108-
print "Corrected Peak Height: {}".format(corrected_peak_height)
109104
else:
110105
corrected_peak_height = peak_height
111106

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
from utils import calculate_allele_frequencies, calculate_peak_probability, calculate_moi
1+
from utils import calculate_allele_frequencies, calculate_peak_probability, calculate_moi, calculate_prob_pos_if_observed, calculate_prob_negative

app/microspat/statistics/utils.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -125,21 +125,18 @@ def calculate_peak_probability(peak_set, num_possible, locus_allele_frequencies=
125125
return recalculated_probabilities
126126

127127

128-
def calculate_prob_pos_if_observed(peak_set, verbose=False):
128+
def calculate_prob_pos_if_observed(peak_set):
129129
for peak in peak_set:
130130
prob_neg = peak['prob_negative']
131131
prob_observed_if_neg = st.norm.sf((peak['peak_height'] - peak['artifact_contribution']) / float(max(peak['artifact_error'], 1e-6)))
132132

133133
prob_neg_if_observed = (prob_observed_if_neg * prob_neg) / float(max(((prob_observed_if_neg * prob_neg) + 1 - prob_neg), 1e-6))
134134

135135
peak['probability'] = max(1 - prob_neg_if_observed, 0)
136-
if verbose:
137-
print "Peak Height: {} Artifact: {} Error: {}".format(peak['peak_height'], peak['artifact_contribution'], peak['artifact_error'])
138-
print "Prob neg: {} Prob Observed if Neg: {} Prob Neg if Observed: {}".format(prob_neg, prob_observed_if_neg, prob_neg_if_observed)
139136
return peak_set
140137

141138

142-
def calculate_prob_negative(peak_set, moi, allele_frequencies, verbose=False):
139+
def calculate_prob_negative(peak_set, moi, allele_frequencies):
143140

144141
prob_all_perms = sum([_['probability'] * allele_frequencies.get(str(_['bin_id']), 0) for _ in peak_set]) ** moi
145142
if prob_all_perms < 1e-6:

app/static/plasmotrack-js/app/plasmomapper/services/sample-based-project/sample-based-project.service.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,15 @@ export class SampleBasedProjectService extends ProjectService {
2929
};
3030

3131
this.getSampleLocusAnnotations = (project_id: number, sample_id: number) => {
32+
console.log("Getting Sample Locus Annotations");
33+
3234
let url = this._locusAnnotationUrl + project_id + "/sample/" + sample_id + "/";
3335
return this._commonServerMethods.getList(SampleLocusAnnotation, url)
3436
};
3537

3638
this.getSampleChannelAnnotations = (project_id: number, sample_id: number) => {
39+
console.log("Getting Sample Channel Annotations");
40+
3741
let url = this._channelAnnotationsUrl + project_id + "/sample/" + sample_id;
3842
return this._commonServerMethods.getList(ChannelAnnotation, url)
3943
}

config.py

Lines changed: 15 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,11 +29,24 @@ def init_app(app):
2929

3030
class DevelopmentConfig(Config):
3131
DEBUG = True
32-
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
32+
SQLALCHEMY_TRACK_MODIFICATIONS = False
33+
34+
if os.name == 'nt':
35+
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
36+
'sqlite:///' + os.path.join(basedir, 'win-data-dev.sqlite')
37+
CELERY_RESULT_BACKEND = os.environ.get('DEV_RESULT_BACKEND_URL') or \
38+
'db+sqlite:///' + os.path.join(basedir, 'win-result-backend-dev.sqlite')
39+
40+
else:
41+
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
3342
'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
3443

35-
CELERY_RESULT_BACKEND = os.environ.get('DEV_RESULT_BACKEND_URL') or \
44+
CELERY_RESULT_BACKEND = os.environ.get('DEV_RESULT_BACKEND_URL') or \
3645
'db+sqlite:///' + os.path.join(basedir, 'result-backend-dev.sqlite')
46+
47+
# SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
48+
# CELERY_RESULT_BACKEND = 'db+sqlite:///' + os.path.join(basedir, 'result-backend-dev.sqlite')
49+
3750
SQLALCHEMY_ECHO = False
3851
LOGGING_LEVEL = logging.DEBUG
3952

0 commit comments

Comments
 (0)