
hello #196

Binary file added .venv/Scripts/python.exe
Binary file not shown.
Binary file added .venv/Scripts/pythonw.exe
Binary file not shown.
3 changes: 3 additions & 0 deletions .venv/pyvenv.cfg
@@ -0,0 +1,3 @@
home = d:\Anaconda\envs\jupyter
include-system-site-packages = false
version = 3.9.16
5 changes: 5 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,5 @@
{
"githubPullRequests.ignoredPullRequestBranches": [
"master"
]
}
Binary files not shown (8 files).
8 changes: 5 additions & 3 deletions ML/Pytorch/Basics/pytorch_simple_fullynet.py
@@ -95,16 +95,18 @@ def forward(self, x):
for epoch in range(num_epochs):
for batch_idx, (data, targets) in enumerate(tqdm(train_loader)):
# Get data to cuda if possible
print(data.shape)
print(targets.shape)
data = data.to(device=device)
targets = targets.to(device=device)

# Get to correct shape
data = data.reshape(data.shape[0], -1)

# Forward
scores = model(data)
scores = model.forward(data)
loss = criterion(scores, targets)

print(f"Loss at epoch {epoch}, batch {batch_idx}: {loss.item()}")
# Backward
optimizer.zero_grad()
loss.backward()
@@ -131,7 +133,7 @@ def check_accuracy(loader, model):

num_correct = 0
num_samples = 0
model.eval()
model.eval()  # evaluation mode: this turns off dropout etc.

# We don't need to keep track of gradients here so we wrap it in torch.no_grad()
with torch.no_grad():
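For context, a minimal sketch of the full training step this diff touches, using the surrounding names from the file (model, criterion, optimizer, train_loader, device). Note that model(data) goes through nn.Module.__call__, which also runs any registered hooks, so it is generally preferred over calling model.forward(data) directly:

for data, targets in train_loader:
    data = data.to(device=device).reshape(data.shape[0], -1)  # flatten each image to a vector
    targets = targets.to(device=device)
    scores = model(data)               # forward pass (runs hooks, unlike model.forward)
    loss = criterion(scores, targets)
    optimizer.zero_grad()              # clear gradients left over from the previous step
    loss.backward()                    # backpropagate
    optimizer.step()                   # update the parameters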
17 changes: 10 additions & 7 deletions ML/Pytorch/Basics/pytorch_tensorbasics.py
@@ -163,11 +163,12 @@
values, indices = torch.min(x, dim=0) # Can also do x.min(dim=0)
abs_x = torch.abs(x) # Returns x with the abs function applied to every element
z = torch.argmax(x, dim=0) # Gets index of the maximum value
z = torch.argmin(x, dim=0) # Gets index of the minimum value
z = torch.argmin(x, dim=0)
print(z)  # Gets index of the minimum value
mean_x = torch.mean(x.float(), dim=0) # mean requires x to be float
z = torch.eq(x, y) # Element wise comparison, in this case z = [False, False, False]
sorted_y, indices = torch.sort(y, dim=0, descending=False)

print(indices)
z = torch.clamp(x, min=0)
# All values < 0 are set to 0 and values > 0 are unchanged (this is exactly the ReLU function)
# If you want values over max_val to be clamped, do torch.clamp(x, min=min_val, max=max_val)
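Since the comment equates clamp(min=0) with ReLU, a quick self-contained check (an illustration, not part of the diff):

import torch

x = torch.tensor([-2.0, -0.5, 0.0, 1.5])
print(torch.clamp(x, min=0))          # tensor([0.0000, 0.0000, 0.0000, 1.5000])
print(torch.relu(x))                  # identical output: clamp(min=0) is exactly ReLU
print(torch.clamp(x, min=-1, max=1))  # also caps values above max: tensor([-1.0000, -0.5000, 0.0000, 1.0000])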
@@ -207,7 +208,7 @@
rows = torch.tensor([1, 0])
cols = torch.tensor([4, 0])
print(x[rows, cols]) # Gets second row fifth column and first row first column

# which is the same as doing: [x[1,4], x[0,0]] (advanced indexing)
# More advanced indexing
x = torch.arange(10)
print(x[(x < 2) | (x > 8)]) # will be [0, 1, 9]
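To make the indexing comment above concrete: indexing with two index tensors pairs them elementwise, which matches stacking the individually indexed elements. A small self-contained sketch:

import torch

x = torch.rand(3, 5)
rows = torch.tensor([1, 0])
cols = torch.tensor([4, 0])
# x[rows, cols] picks x[1, 4] and x[0, 0]:
assert torch.equal(x[rows, cols], torch.stack([x[1, 4], x[0, 0]]))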
@@ -216,7 +217,9 @@
# Useful operations for indexing
print(
torch.where(x > 5, x, x * 2)
) # gives [0, 2, 4, 6, 8, 10, 6, 7, 8, 9], all values x > 5 yield x, else x*2
)
# where the first argument (the condition) is satisfied, take the second argument; otherwise take the third
# gives [0, 2, 4, 6, 8, 10, 6, 7, 8, 9], all values x > 5 yield x, else x*2
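# As an aside (illustration, not part of the diff): torch.where also has a
# single-argument form that returns the indices where the condition is True
print(torch.where(x > 7))  # (tensor([8, 9]),) since x is torch.arange(10) here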
x = torch.tensor([0, 0, 1, 2, 2, 3, 4]).unique() # x = [0, 1, 2, 3, 4]
print(
x.ndimension()
@@ -231,7 +234,7 @@
# ============================================================= #

x = torch.arange(9)

print(x.shape) # Shape is [9]
# Let's say we want to reshape it to be 3x3
x_3x3 = x.view(3, 3)

@@ -256,7 +259,7 @@
# using pointers to construct these matrices). This is a bit complicated and I need to explore this more
# as well, at least you know it's a problem to be cautious of! A solution is to do the following
print(y.contiguous().view(9)) # Calling .contiguous() before view and it works

# the memory strides "jump around" (the transpose left the tensor non-contiguous)
# Moving on to another operation, let's say we want to concatenate two tensors along a dimension
x1 = torch.rand(2, 5)
x2 = torch.rand(2, 5)
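To illustrate the contiguity issue described above (a sketch assuming current PyTorch semantics): view requires contiguous memory, a transpose breaks that by only changing strides, .contiguous() copies into a dense layout, and .reshape() performs that copy automatically when needed:

import torch

x = torch.arange(9).view(3, 3)
y = x.t()                      # transpose: same storage, strides now jump around
try:
    y.view(9)                  # fails: the transposed layout is not contiguous
except RuntimeError as err:
    print("view failed:", err)
print(y.contiguous().view(9))  # copy into contiguous memory first, then view works
print(y.reshape(9))            # reshape falls back to a copy when necessary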
@@ -284,7 +287,7 @@
z = torch.chunk(x, chunks=2, dim=1)
print(z[0].shape)
print(z[1].shape)

# splits the tensor into several sub-tensors
# Let's say we want to add an additional dimension
x = torch.arange(
10
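A short self-contained sketch of the two operations in this hunk, torch.chunk and adding a dimension (the unsqueeze calls are an assumption, since the hunk is truncated here):

import torch

x = torch.arange(12).reshape(2, 6)
z = torch.chunk(x, chunks=2, dim=1)  # splits into sub-tensors along dim 1
print(z[0].shape, z[1].shape)        # torch.Size([2, 3]) torch.Size([2, 3])

x = torch.arange(10)
print(x.unsqueeze(0).shape)          # torch.Size([1, 10]): new leading dimension
print(x.unsqueeze(1).shape)          # torch.Size([10, 1]): new trailing dimension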
3 changes: 2 additions & 1 deletion ML/Pytorch/GANs/1. SimpleGAN/fc_gan.py
@@ -81,7 +81,8 @@ def forward(self, x):
noise = torch.randn(batch_size, z_dim).to(device)
fake = gen(noise)
disc_real = disc(real).view(-1)
lossD_real = criterion(disc_real, torch.ones_like(disc_real))
lossD_real = criterion(disc_real, torch.ones_like(disc_real))
disc_fake = disc(fake).view(-1)
lossD_fake = criterion(disc_fake, torch.zeros_like(disc_fake))
lossD = (lossD_real + lossD_fake) / 2
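For reference, the standard GAN discriminator objective this hunk computes: with BCE loss, real scores are pushed toward a target of 1 and fake scores toward 0, and the two terms are averaged. A self-contained sketch mirroring the file's names (the random score tensors are stand-ins):

import torch
import torch.nn as nn

criterion = nn.BCELoss()
disc_real = torch.rand(8)  # stand-in for disc(real).view(-1), values in [0, 1)
disc_fake = torch.rand(8)  # stand-in for disc(fake).view(-1)
lossD_real = criterion(disc_real, torch.ones_like(disc_real))   # -log(D(x))
lossD_fake = criterion(disc_fake, torch.zeros_like(disc_fake))  # -log(1 - D(G(z)))
lossD = (lossD_real + lossD_fake) / 2
print(lossD.item())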
8 changes: 8 additions & 0 deletions ML/Pytorch/GANs/2. DCGAN/model.py
@@ -6,6 +6,14 @@
* 2022-12-20: Small revision of code, checked that it works with latest PyTorch version
"""

import torch"""
Discriminator and Generator implementation from DCGAN paper

Programmed by Aladdin Persson <aladdin.persson at hotmail dot com>
* 2020-11-01: Initial coding
* 2022-12-20: Small revision of code, checked that it works with latest PyTorch version
"""

import torch
import torch.nn as nn
