-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathAnalysis.py
More file actions
123 lines (98 loc) · 4.12 KB
/
Analysis.py
File metadata and controls
123 lines (98 loc) · 4.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plot

plot.style.use('fivethirtyeight')
# NOTE(review): the original had `%matplotlib inline` here. That is IPython
# magic, not Python — it is a SyntaxError when this file is run as a plain
# script. With '#%%' cells (VS Code / Spyder) plots render without it, so it
# has been removed.
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics

# Folder containing data.csv (presumably the Wisconsin breast-cancer CSV —
# TODO confirm; path is machine-specific).
data_folder = '~/Downloads/Python_Projects/Cancer/Data/'
# %%
# Load the raw CSV, keep only the named columns (the export often carries a
# trailing 'Unnamed' artifact column), and drop the non-feature 'id' column.
raw_data = pd.read_csv(data_folder + 'data.csv', sep=',', encoding='latin-1')
named_cols = ~raw_data.columns.str.contains('^Unnamed')
data = raw_data.loc[:, named_cols]
data.drop(['id'], axis=1, inplace=True)
# %%
data.head()               # eyeball the first rows
# %%
data.diagnosis.unique()   # expect only 'M' and 'B'
# %%
len(data)                 # sample count
# create a function to change diagnosis column to 0,1 binary. 1 = M. 0 = B
def diagnosis_converter(diagnosis):
    """Map a diagnosis label to its binary encoding.

    Parameters
    ----------
    diagnosis : str
        'M' (malignant) or 'B' (benign).

    Returns
    -------
    int
        1 for malignant, 0 for benign.

    Raises
    ------
    ValueError
        For any other label. (The original returned the string 'Error',
        which would silently mix str values into an otherwise-int column
        and corrupt its dtype; failing loudly is safer, and the data set
        contains only 'M'/'B' so behavior on real input is unchanged.)
    """
    if diagnosis == 'M':
        return 1
    elif diagnosis == 'B':
        return 0
    raise ValueError("unexpected diagnosis label: %r" % (diagnosis,))
# Encode the label as 0/1 and make 'diagnosis' the binary column.
data['diagnosis_binary'] = data['diagnosis'].apply(diagnosis_converter)
data.diagnosis_binary.unique()  # check to see function worked properly
# %%
data.drop(['diagnosis'], axis = 1, inplace = True)  # drop original diagnosis column
data = data.rename(columns={'diagnosis_binary': 'diagnosis'})  # rename new 0,1 binary column as "diagnosis"
# BUG FIX: this line runs AFTER 'diagnosis' was dropped, so the feature
# columns now start at index 0. The original `columns[1:11]` skipped
# 'radius_mean' and pulled in 'radius_se' instead of the ten *_mean
# features. Assumes the standard WDBC column order (ten *_mean columns
# first) — TODO confirm against data.csv.
var_names = list(data.columns[0:10])
# %%
# Visualize the class balance of the binary diagnosis label and save the
# figure to histogram.png.
figure = plot.gcf()
plot.hist(data['diagnosis'])
plot.xticks([0, 1])
plot.title('Diagnosis Graph', y=1)
plot.xlabel('1 = Malignant, 0 = Benign')
plot.ylabel('Frequency')
figure.set_size_inches(18.5, 10.5)
figure.savefig('histogram.png', dpi=100)
plot.show()
# %%
data.shape[0]  # sample count (same as len(data))
#%%
# Separate the feature matrix from the single-column target frame.
ind_var = data.drop(columns='diagnosis')
depen_var = data.loc[:, ['diagnosis']]
# %%
#
def classification_model(model, data_x, data_y, predictors, outcome):
    """Fit `model` and print training accuracy plus 5-fold CV accuracy.

    Parameters
    ----------
    model : sklearn-style estimator
        Any object with fit / predict / score.
    data_x : pandas.DataFrame
        Candidate feature columns.
    data_y : pandas.DataFrame
        Single-column frame holding the target.
    predictors : list of str
        Names of the columns in `data_x` to use.
    outcome : str
        Name of the target column in `data_y`.

    Side effects: prints both scores; on return `model` is fitted on the
    full data set.
    """
    # BUG FIX: the original fitted and scored on the module-level `data`
    # frame, ignoring the data_x/data_y arguments entirely.
    X = data_x[predictors]
    y = data_y[outcome]

    # Training (resubstitution) accuracy — optimistic by construction.
    model.fit(X, y)
    predictions = model.predict(X)
    accuracy = metrics.accuracy_score(y, predictions)  # y_true first by convention
    print("Accuracy : %s" % "{0:.3%}".format(accuracy))

    # Stratified 5-fold cross-validation for an honest estimate.
    fold_scores = []  # renamed: the original reused `accuracy` for this list
    k_fold = StratifiedKFold(n_splits=5)
    for train_idx, test_idx in k_fold.split(X, y):
        train_x = X.iloc[train_idx, :]
        train_y = y.iloc[train_idx]
        model.fit(train_x, train_y)
        fold_scores.append(model.score(X.iloc[test_idx, :], y.iloc[test_idx]))
    print("Cross-Validation Score : %s" % "{0:.3%}".format(np.mean(fold_scores)))

    # BUG FIX: the original refit on only the LAST fold's training split,
    # so the model handed back was trained on ~80% of the data. Refit on
    # everything instead.
    model.fit(X, y)
#%%
## LOGISTIC REGRESSION
# Hand-typed list of *_mean features (note: 'concavity_mean' is absent —
# presumably intentional, verify with the author).
outcome_var = 'diagnosis'
predictor_var = [
    'radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean',
    'smoothness_mean', 'compactness_mean', 'concave points_mean',
    'symmetry_mean', 'fractal_dimension_mean',
]
model = LogisticRegression()
classification_model(model, ind_var, depen_var, predictor_var, outcome_var)
#%%
## RANDOM FOREST
# Same predictor set, random forest instead of logistic regression.
predictor_var = [
    'radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean',
    'smoothness_mean', 'compactness_mean', 'concave points_mean',
    'symmetry_mean', 'fractal_dimension_mean',
]
model = RandomForestClassifier(n_estimators=100, min_samples_split=25,
                               max_depth=7, max_features=2)
classification_model(model, ind_var, depen_var, predictor_var, outcome_var)
#%%
## RANDOM FOREST NO SIZE
# Forest with the size-related features (radius/perimeter/area) removed.
predictor_var = [
    'texture_mean', 'smoothness_mean', 'compactness_mean',
    'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean',
]
model = RandomForestClassifier(n_estimators=100, min_samples_split=25,
                               max_depth=7, max_features=2)
classification_model(model, ind_var, depen_var, predictor_var, outcome_var)
#%%
## RANDOM FOREST Size
# Forest with the size-related features only.
predictor_var = ['radius_mean', 'perimeter_mean', 'area_mean']
model = RandomForestClassifier(n_estimators=100, min_samples_split=25,
                               max_depth=7, max_features=2)
classification_model(model, ind_var, depen_var, predictor_var, outcome_var)
#%%
## RANDOM FOREST ALL
# Forest over the full var_names feature list built earlier in the file.
predictor_var = var_names
model = RandomForestClassifier(n_estimators=100, min_samples_split=25,
                               max_depth=7, max_features=2)
classification_model(model, ind_var, depen_var, predictor_var, outcome_var)
# %%