-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathcuda neural network
More file actions
111 lines (86 loc) · 2.84 KB
/
cuda neural network
File metadata and controls
111 lines (86 loc) · 2.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
# --- Runtime device selection and MNIST training pipeline ---
# Ask the user whether to run on the GPU; any answer other than 1 forces CPU.
choice = int(input("1 pour utiliser CUDA et autre pour utiliser le CPU : "))
use_gpu = (choice == 1) and torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")
print(f'utilisation de : {device}')

# Map pixel values from [0, 1] to [-1, 1] (mean 0.5, std 0.5 on the single channel).
mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# NOTE(review): batch_size=41096 looks like a typo for 4096 — confirm intent
# before changing; it is preserved here as written.
mnist_train = datasets.MNIST(root='./data', train=True, download=True,
                             transform=mnist_transform)
trainloader = DataLoader(mnist_train, batch_size=41096, shuffle=True)
class MLP(nn.Module):
    """Small fully-connected classifier for 28x28 MNIST digits.

    Architecture: flatten -> Linear(784, 128) -> ReLU -> Linear(128, 64)
    -> ReLU -> Linear(64, 10). Outputs raw logits (no softmax); intended
    to be paired with ``nn.CrossEntropyLoss``.
    """

    def __init__(self):
        super().__init__()
        # Attribute names are kept stable so saved state dicts remain loadable.
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return class logits of shape (batch, 10) for a batch of images."""
        hidden = self.relu(self.fc1(self.flatten(x)))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
def entrainer(optimiseur_type, nom, epochs=10, lr=0.08):
    """Train a fresh MLP on MNIST with the given optimizer class.

    Generalization: the learning rate, previously hard-coded to 0.08, is now
    a keyword parameter with the same default, so existing calls are unchanged.

    Args:
        optimiseur_type: optimizer class (e.g. ``optim.SGD``); instantiated
            with the model's parameters and ``lr``.
        nom: display name shown in the tqdm progress bar.
        epochs: number of full passes over the training set.
        lr: learning rate passed to the optimizer (default 0.08).

    Returns:
        list[float]: mean training loss per epoch (one entry per epoch).

    NOTE(review): relies on module-level globals ``MLP``, ``device`` and
    ``trainloader`` defined earlier in this script.
    """
    model = MLP().to(device)
    loss_fn = nn.CrossEntropyLoss()
    optimiseur = optimiseur_type(model.parameters(), lr=lr)
    pertes = []
    for _ in tqdm(range(epochs), desc=f"apprentissage avec l'optimiseur {nom}"):
        model.train()
        perte_totale = 0.0
        for images, labels in trainloader:
            images = images.to(device)
            labels = labels.to(device)
            pred = model(images)
            perte = loss_fn(pred, labels)
            # Standard step: clear stale grads, backprop, apply the update.
            optimiseur.zero_grad()
            perte.backward()
            optimiseur.step()
            perte_totale += perte.item()
        # Average the summed batch losses into one value per epoch.
        pertes.append(perte_totale / len(trainloader))
    return pertes
# --- Compare optimizers: train once per optimizer, then plot losses ---
types_optimiseurs = {
    "SGD": optim.SGD,
    "Adam": optim.Adam,
    "ASGD": optim.ASGD
}
resultats = {}


def _sync_si_cuda():
    """Block until pending GPU work finishes so time.time() measures real work.

    Bug fix: the original called torch.cuda.synchronize() unconditionally,
    which raises a RuntimeError on CPU-only PyTorch installs (and whenever
    the user chose the CPU on a machine without CUDA). Only synchronize
    when the selected device is actually CUDA.
    """
    if device.type == "cuda":
        torch.cuda.synchronize()


_sync_si_cuda()
debut = time.time()
for nom, opt in types_optimiseurs.items():
    resultats[nom] = entrainer(opt, nom)
_sync_si_cuda()
fin = time.time()
print(f'durée total : {f"{(fin-debut)} secondes" if (fin-debut) < 60 else f"{((fin-debut)/60)} minutes" }')

plt.figure(figsize=(10, 6))
for nom, pertes in resultats.items():
    plt.plot(pertes, label=nom)
plt.xlabel("Époques")
plt.ylabel("Perte moyenne")
plt.title("Comparaison des optimiseurs")
plt.legend()
plt.grid(True)
plt.show()
# --- Standalone benchmark: time one large matrix product on the best device ---
import torch
import time

print("🖥️ Utilisation de CUDA :", torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Appareil :", device)


def _sync():
    """Wait for queued GPU kernels so the wall-clock timing is accurate.

    Bug fix: the original called torch.cuda.synchronize() unconditionally,
    which raises on CPU-only PyTorch builds — exactly the fallback case the
    device selection above is meant to support. Guard on the device type.
    """
    if device.type == "cuda":
        torch.cuda.synchronize()


# GROS produit matriciel
A = torch.rand(10000, 10000, device=device)
B = torch.rand(10000, 10000, device=device)

_sync()
start = time.time()
C = torch.matmul(A, B)
_sync()  # matmul is async on CUDA; sync before reading the clock
end = time.time()
print(f"Temps GPU : {end - start:.4f} s")