@@ -1294,7 +1294,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
"version": "3.7.6"
}
},
"nbformat": 4,
@@ -0,0 +1,97 @@
6.1101,17.592
5.5277,9.1302
8.5186,13.662
7.0032,11.854
5.8598,6.8233
8.3829,11.886
7.4764,4.3483
8.5781,12
6.4862,6.5987
5.0546,3.8166
5.7107,3.2522
14.164,15.505
5.734,3.1551
8.4084,7.2258
5.6407,0.71618
5.3794,3.5129
6.3654,5.3048
5.1301,0.56077
6.4296,3.6518
7.0708,5.3893
6.1891,3.1386
20.27,21.767
5.4901,4.263
6.3261,5.1875
5.5649,3.0825
18.945,22.638
12.828,13.501
10.957,7.0467
13.176,14.692
22.203,24.147
5.2524,-1.22
6.5894,5.9966
9.2482,12.134
5.8918,1.8495
8.2111,6.5426
7.9334,4.5623
8.0959,4.1164
5.6063,3.3928
12.836,10.117
6.3534,5.4974
5.4069,0.55657
6.8825,3.9115
11.708,5.3854
5.7737,2.4406
7.8247,6.7318
7.0931,1.0463
5.0702,5.1337
5.8014,1.844
11.7,8.0043
5.5416,1.0179
7.5402,6.7504
5.3077,1.8396
7.4239,4.2885
7.6031,4.9981
6.3328,1.4233
6.3589,-1.4211
6.2742,2.4756
5.6397,4.6042
9.3102,3.9624
9.4536,5.4141
8.8254,5.1694
5.1793,-0.74279
21.279,17.929
14.908,12.054
18.959,17.054
7.2182,4.8852
8.2951,5.7442
10.236,7.7754
5.4994,1.0173
20.341,20.992
10.136,6.6799
7.3345,4.0259
6.0062,1.2784
7.2259,3.3411
5.0269,-2.6807
6.5479,0.29678
7.5386,3.8845
5.0365,5.7014
10.274,6.7526
5.1077,2.0576
5.7292,0.47953
5.1884,0.20421
6.3557,0.67861
9.7687,7.5435
6.5159,5.3436
8.5172,4.2415
9.1802,6.7981
6.002,0.92695
5.5204,0.152
5.0594,2.8214
5.7077,1.8451
7.6366,4.2959
5.8707,7.2029
5.3054,1.9869
8.2934,0.14454
13.394,9.0551
5.4369,0.61705
@@ -0,0 +1,47 @@
2104,3,399900
1600,3,329900
2400,3,369000
1416,2,232000
3000,4,539900
1985,4,299900
1534,3,314900
1427,3,198999
1380,3,212000
1494,3,242500
1940,4,239999
2000,3,347000
1890,3,329999
4478,5,699900
1268,3,259900
2300,4,449900
1320,2,299900
1236,3,199900
2609,4,499998
3031,4,599000
1767,3,252900
1888,2,255000
1604,3,242900
1962,4,259900
3890,3,573900
1100,3,249900
1458,3,464500
2526,3,469000
2200,3,475000
2637,3,299900
1839,2,349900
1000,1,169900
2040,4,314900
3137,3,579900
1811,4,285900
1437,3,249900
1239,3,229900
2132,4,345000
4215,4,549000
2162,4,287000
1664,2,368500
2238,3,329900
2567,4,314000
1200,3,299000
852,2,179900
1852,4,299900
1203,3,239500
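
The two files added above are plain comma-separated training sets: the first pairs a single feature with a target value, and the second appears to hold a house size, a bedroom count, and a price per row. A minimal loading sketch; the inline rows are copied from the diff above, since the page does not show the actual file paths:

import io
import numpy as np

# first rows of each file, copied from the diff; the full files load the same way
csv1 = "6.1101,17.592\n5.5277,9.1302\n8.5186,13.662\n"
data1 = np.loadtxt(io.StringIO(csv1), delimiter=',')
X1, y1 = data1[:, :1], data1[:, 1]      # single feature, target

csv2 = "2104,3,399900\n1600,3,329900\n2400,3,369000\n"
data2 = np.loadtxt(io.StringIO(csv2), delimiter=',')
X2, y2 = data2[:, :2], data2[:, 2]      # two features, target
print(X1.shape, y1.shape, X2.shape, y2.shape)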

@@ -33,7 +33,7 @@
"from matplotlib import pyplot\n",
"\n",
"# Optimization module in scipy\n",
"from scipy import optimize\n",
"from scipy import optimize as opt\n",
"\n",
"# will be used to load MATLAB mat datafile format\n",
"from scipy.io import loadmat\n",
@@ -42,7 +42,7 @@
"import utils\n",
"\n",
"# define the submission/grader object for this exercise\n",
"grader = utils.Grader()\n",
"#grader = utils.Grader()\n",
"\n",
"# tells matplotlib to embed plots within the notebook\n",
"%matplotlib inline"
@@ -348,7 +348,14 @@
" grad = np.zeros(theta.shape)\n",
" \n",
" # ====================== YOUR CODE HERE ======================\n",
"\n",
" hyp = utils.sigmoid(X@theta)\n",
" reg = (lambda_/(2*m)) * (theta[1:].T @ theta[1:])\n",
" J = (-1/m) * (y.T @ np.log(hyp) + (1-y.T) @ np.log(1 - hyp)) + reg\n",
" #J=J+reg\n",
" grad = (1/m)*(hyp-y)@X\n",
" temp = theta\n",
" temp[0] = 0\n",
" grad[1:] = grad[1:] + (lambda_/m)*temp[1:]\n",
"\n",
" \n",
" # =============================================================\n",
@@ -422,10 +429,10 @@
"outputs": [],
"source": [
"# appends the implemented function in part 1 to the grader object\n",
"grader[1] = lrCostFunction\n",
"# grader[1] = lrCostFunction\n",
"\n",
"# send the added functions to coursera grader for getting a grade on this part\n",
"grader.grade()"
"# grader.grade()"
]
},
{
@@ -452,6 +459,16 @@
"metadata": {},
"outputs": [],
"source": [
"def GraDes(X,y,theta,alpha,num_iters,Lambda):\n",
" m=len(y)\n",
" J_his=[]\n",
" \n",
" for i in range(num_iters):\n",
" cost,grad = lrCostFunction(theta,X,y,Lambda)\n",
" theta = theta - (alpha*grad)\n",
" J_his.append(J)\n",
" return theta, J_history\n",
"\n",
"def oneVsAll(X, y, num_labels, lambda_):\n",
" \"\"\"\n",
" Trains num_labels logistic regression classifiers and returns\n",
@@ -526,7 +543,17 @@
" X = np.concatenate([np.ones((m, 1)), X], axis=1)\n",
"\n",
" # ====================== YOUR CODE HERE ======================\n",
" \n",
" Options = {'maxiter': 50}\n",
" for c in range(num_labels):\n",
" initial_theta = np.zeros((n+1,1))\n",
" res = opt.minimize(lrCostFunction, \n",
" initial_theta, \n",
" (X, (y == c),lambda_), \n",
" jac=True, \n",
" method='CG',\n",
" options=Options) \n",
" all_theta[c,:] = res[\"x\"]\n",
" \n",
"\n",
"\n",
" # ============================================================\n",
@@ -563,8 +590,8 @@
"metadata": {},
"outputs": [],
"source": [
"grader[2] = oneVsAll\n",
"grader.grade()"
"# grader[2] = oneVsAll\n",
"# grader.grade()"
]
},
{
@@ -625,7 +652,6 @@
" of the max for each row.\n",
" \"\"\"\n",
" m = X.shape[0];\n",
" num_labels = all_theta.shape[0]\n",
"\n",
" # You need to return the following variables correctly \n",
" p = np.zeros(m)\n",
@@ -634,9 +660,7 @@
" X = np.concatenate([np.ones((m, 1)), X], axis=1)\n",
"\n",
" # ====================== YOUR CODE HERE ======================\n",
"\n",
"\n",
" \n",
" p = np.argmax(utils.sigmoid( np.dot(X,all_theta.T) ), axis=1) \n",
" # ============================================================\n",
" return p"
]
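
Prediction then reduces to scoring every class for every example and taking the per-row argmax; because the sigmoid is monotonic, argmax over the raw scores X @ all_theta.T picks the same labels as argmax over the probabilities. A minimal sketch with made-up three-class parameters:

import numpy as np

def predict_one_vs_all(all_theta, X):
    # for each example, pick the class whose classifier scores highest
    m = X.shape[0]
    X = np.concatenate([np.ones((m, 1)), X], axis=1)
    return np.argmax(X @ all_theta.T, axis=1)

all_theta = np.array([[ 1.0, -2.0,  0.5],
                      [-1.0,  2.0, -0.5],
                      [ 0.0,  0.5,  2.0]])
X = np.array([[0.0, 0.0], [3.0, 0.0], [0.0, 3.0]])
print(predict_one_vs_all(all_theta, X))  # -> [0 1 2]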
@@ -671,8 +695,8 @@
"metadata": {},
"outputs": [],
"source": [
"grader[3] = predictOneVsAll\n",
"grader.grade()"
"# grader[3] = predictOneVsAll\n",
"# grader.grade()"
]
},
{
@@ -828,13 +852,22 @@
" # useful variables\n",
" m = X.shape[0]\n",
" num_labels = Theta2.shape[0]\n",
" X = np.concatenate([np.ones((m, 1)), X], axis=1)\n",
"\n",
"\n",
" # You need to return the following variables correctly \n",
" p = np.zeros(X.shape[0])\n",
"\n",
" # ====================== YOUR CODE HERE ======================\n",
"\n",
"\n",
" a2 = utils.sigmoid(np.dot(X,Theta1.T))\n",
" \n",
" #Adding bias unit\n",
" a2 = np.column_stack((np.ones((a2.shape[0],1)), a2))\n",
" \n",
" a3 = utils.sigmoid(np.dot(a2,Theta2.T))\n",
" \n",
" p = np.argmax(a3, axis=1)\n",
"\n",
" # =============================================================\n",
" return p"
@@ -897,6 +930,13 @@
"grader[4] = predict\n",
"grader.grade()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@@ -915,7 +955,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
"version": "3.7.6"
}
},
"nbformat": 4,