@@ -1,7 +1,7 @@
 import sklearn
 from sklearn.datasets import make_circles
 # Make 10,000 samples
-n_samples = 25000
+n_samples = 10000
 X,y = make_circles(n_samples,noise=0.0625,random_state=42)
 
 
@@ -237,7 +237,7 @@ def forward(self,X):
 optimizer = torch.optim.Adam(model.parameters())
 
 
-epochs = 150
+epochs = 1
 batch_size = 32
 
 
@@ -279,10 +279,150 @@ def forward(self,X):
 plt.figure(figsize=(12,6))
 plt.subplot(1,2,1)
 plt.title("Train")
-plot_decision_boundary(model_0,X_train,y_train)
+plot_decision_boundary(model,X_train,y_train)
 plt.subplot(1,2,2)
 plt.title("Test")
-plot_decision_boundary(model_0,X_test,y_test)
+plot_decision_boundary(model,X_test,y_test)
+
+
+torch.max(torch.tensor(0),torch.tensor(4))
+
+
+# Create a tensor
+A = torch.arange(-10,10)
+
+
+A.dtype
+
+
+A
+
+
+plt.plot(A)
+
+
+plt.plot(torch.relu(A))
+
+
+def relu(X):
+    return torch.max(torch.tensor(0),X)
+
+
+plt.plot(relu(A))
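+
+
+# Illustrative sanity check: the hand-rolled relu should agree with torch.relu elementwise
+torch.equal(relu(A), torch.relu(A))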
+
+
+def sigmoid(X):
+    return 1 / (1 + torch.exp(-X))
+
+
+plt.plot(sigmoid(A))
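+
+
+# Illustrative sanity check: the hand-rolled sigmoid should agree with torch.sigmoid
+torch.allclose(sigmoid(A.float()), torch.sigmoid(A.float()))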
+
+
+torch.exp(torch.tensor(1))
+
+
+from sklearn.datasets import make_blobs
+
+
+NUM_CLASSES = 4
+NUM_FEATURES = 2
+
+
+X,y = make_blobs(n_samples=1000,n_features=NUM_FEATURES,centers=NUM_CLASSES,cluster_std=1,random_state=42)
+plt.figure(figsize=(10,7))
+plt.scatter(X[:,0],X[:,1],c=y,cmap=plt.cm.RdYlBu)
+# X holds the 2-D coordinates and y assigns each point a class (label)
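+
+
+# Quick shape check (illustrative): 1,000 points in 2-D, one integer label per point
+X.shape, y.shape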
+
+
+# CrossEntropyLoss expects integer class indices, so y becomes long rather than float
+X,y = torch.from_numpy(X).type(torch.float).to(device),torch.tensor(y).type(torch.long).to(device)
+
+
+X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.125)
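+
+
+# With test_size=0.125 on 1,000 samples: 875 training points, 125 test points
+len(X_train), len(X_test)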
+
+
+class BlobModel(nn.Module):
+    def __init__(self,input_features=2,output_features=4,hidden_units=1024):
+        super().__init__()
+        self.linear_layer_stack = nn.Sequential(
+            nn.Linear(input_features,hidden_units),
+            nn.ReLU(),
+            nn.Linear(hidden_units,hidden_units * 2),
+            nn.ReLU(),
+            nn.Linear(hidden_units * 2, output_features)
+        )
+
+    def forward(self,X):
+        return self.linear_layer_stack(X)
+
+
+model = BlobModel().to(device)
+criterion = nn.CrossEntropyLoss().to(device)
+optimizer = torch.optim.Adam(model.parameters(),lr=0.01)
+
+
+epochs = 100
+batch_size = 32
+
+
+# Forward pass with the untrained model to inspect its raw logits
+y_preds = model(X_test)
+
+
+# Convert logits to prediction probabilities across the class dimension
+y_pred_probs = torch.softmax(y_preds,dim=1)
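+
+
+# Illustrative check: softmax yields a probability distribution, so each row sums to 1
+y_pred_probs.sum(dim=1)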
+
+
+torch.argmax(y_pred_probs[0]),y_pred_probs[0]
+
+
+# Convert the model's prediction probabilities to prediction labels
+y_preds = torch.argmax(y_pred_probs,dim=1)
+
+
+y_pred_probs.shape
+
+
+y_preds
+
+
+# Fit the multi-class model to the data
+torch.manual_seed(42)
+torch.cuda.manual_seed(42)
+
+# Set number of epochs
+epochs = 100
+
+# Put data on the target device
+X_blob_train, y_blob_train = X_train.to(device), y_train.to(device)
+X_blob_test, y_blob_test = X_test.to(device), y_test.to(device)
+
+# Loop through data
+for epoch in range(epochs):
+    ### Training
+    model.train()
+
+    y_logits = model(X_blob_train)
+    y_pred = torch.softmax(y_logits, dim=1).argmax(dim=1)
+
+    # The loss function was created above as `criterion`
+    loss = criterion(y_logits, y_blob_train)
+    acc = accuracy_fn(y_true=y_blob_train,
+                      y_pred=y_pred)
+
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
+
+    ### Testing
+    model.eval()
+    with torch.inference_mode():
+        test_logits = model(X_blob_test)
+        test_preds = torch.softmax(test_logits, dim=1).argmax(dim=1)
+
+        test_loss = criterion(test_logits, y_blob_test)
+        test_acc = accuracy_fn(y_true=y_blob_test,
+                               y_pred=test_preds)
+
+    # Print out what's happenin'
+    if epoch % 10 == 0:
+        print(f"Epoch: {epoch} | Loss: {loss:.4f}, Acc: {acc:.2f}% | Test loss: {test_loss:.4f}, Test acc: {test_acc:.2f}%")
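+
+
+# Illustrative follow-up (sketch): visualise the trained multi-class model with the
+# same plot_decision_boundary helper used for the circles model above
+plt.figure(figsize=(12,6))
+plt.subplot(1,2,1)
+plt.title("Train")
+plot_decision_boundary(model, X_blob_train, y_blob_train)
+plt.subplot(1,2,2)
+plt.title("Test")
+plot_decision_boundary(model, X_blob_test, y_blob_test)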
 
 
 