# Forked from Harold-lkk/d2lzh_pytorch_lkk
# File: 3.7_softmax-regression-pytorch.py — 38 lines (31 loc), 986 bytes
# -*- coding: utf-8 -*-
'''
@Author: lkk
@Date: 2019-12-17 15:52:28
@LastEditTime: 2019-12-17 17:50:04
@LastEditors: lkk
@Description:
'''
import torch
from torch import nn
from torch.nn import init
import numpy as np
import d2lzh_pytorch as d2l
# --- Hyperparameters for softmax regression on Fashion-MNIST ---
num_input = 28 * 28   # each 1x28x28 image is flattened to a 784-dim vector
num_output = 10       # Fashion-MNIST has 10 clothing classes
num_epochs = 5
batch_size = 256

# Mini-batch iterators over the training and test splits.
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
class LinearNet(nn.Module):
    """Softmax-regression model: flatten each image, apply one affine layer.

    Returns raw logits of shape (batch, num_outputs); no softmax is applied
    here, since the training loss is expected to handle it.
    """

    def __init__(self, num_inputs, num_outputs):
        super().__init__()
        self.linear = nn.Linear(num_inputs, num_outputs)

    def forward(self, x):
        # x arrives as (batch, 1, 28, 28); collapse everything but the
        # batch dimension before the linear map.
        flat = x.view(x.shape[0], -1)
        return self.linear(flat)
# Build the model and initialize its single linear layer:
# weights drawn from N(0, 0.01), biases set to zero.
net = LinearNet(num_input, num_output)
init.normal_(net.linear.weight, mean=0, std=0.01)
init.constant_(net.linear.bias, val=0)
# CrossEntropyLoss combines log-softmax and NLL internally, which is why
# the model's forward returns raw logits.
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)
# Run d2l's chapter-3 training loop. The two None arguments appear to be
# the (params, lr) pair used only by the from-scratch SGD path and are
# unused when an optimizer is supplied — NOTE(review): signature assumed
# from d2l conventions; confirm against d2lzh_pytorch.train_ch3.
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)