
Commit 337f878

format code with black

Signed-off-by: reiase <[email protected]>
1 parent 09c7fa3

File tree

11 files changed: +348 −169 lines changed

.pre-commit-config.yaml

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
+repos:
+  - repo: https://github.com/ambv/black
+    rev: 22.3.0
+    hooks:
+      - id: black
+        args: [--line-length=88]
+
+  - repo: https://github.com/john-hen/Flake8-pyproject
+    rev: 1.0.1
+    hooks:
+      - id: Flake8-pyproject
+        additional_dependencies: [flake8-docstrings, Flake8-pyproject]
+        args: [--max-line-length=88, --exit-zero]
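
This config wires black (pinned at 22.3.0, line length 88) and flake8 (via Flake8-pyproject, with --exit-zero so lint findings report but never block a commit) into pre-commit, so both run on every commit once the hooks are registered with pre-commit install. Below is a minimal sketch of invoking the same tools by hand with the exact flags from the config above; it assumes black and flake8 are installed on PATH, which this commit itself does not set up.

# Minimal sketch: run the formatter and linter manually, outside pre-commit.
# Assumes `black` and `flake8` are on PATH (not part of this commit),
# e.g. pip install black==22.3.0 flake8 flake8-docstrings.
import subprocess

# Format in place with the hook's pinned line length.
subprocess.run(["black", "--line-length=88", "."], check=True)

# Lint with the matching limit; --exit-zero makes flake8 report findings
# but exit successfully, mirroring the hook's non-blocking configuration.
subprocess.run(["flake8", "--max-line-length=88", "--exit-zero", "."], check=True)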

examples/mnist/main.py

Lines changed: 94 additions & 44 deletions
@@ -44,9 +44,15 @@ def train(args, model, device, train_loader, optimizer, epoch):
         loss.backward()
         optimizer.step()
         if batch_idx % args.log_interval == 0:
-            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
-                epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.item()))
+            print(
+                "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
+                    epoch,
+                    batch_idx * len(data),
+                    len(train_loader.dataset),
+                    100.0 * batch_idx / len(train_loader),
+                    loss.item(),
+                )
+            )
         if args.dry_run:
             break

@@ -59,65 +65,109 @@ def test(model, device, test_loader):
         for data, target in test_loader:
             data, target = data.to(device), target.to(device)
             output = model(data)
-            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
-            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
+            test_loss += F.nll_loss(
+                output, target, reduction="sum"
+            ).item()  # sum up batch loss
+            pred = output.argmax(
+                dim=1, keepdim=True
+            )  # get the index of the max log-probability
             correct += pred.eq(target.view_as(pred)).sum().item()

     test_loss /= len(test_loader.dataset)

-    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
-        test_loss, correct, len(test_loader.dataset),
-        100. * correct / len(test_loader.dataset)))
+    print(
+        "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
+            test_loss,
+            correct,
+            len(test_loader.dataset),
+            100.0 * correct / len(test_loader.dataset),
+        )
+    )


 def main():
     # Training settings
-    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
-    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
-                        help='input batch size for training (default: 64)')
-    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
-                        help='input batch size for testing (default: 1000)')
-    parser.add_argument('--epochs', type=int, default=14, metavar='N',
-                        help='number of epochs to train (default: 14)')
-    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
-                        help='learning rate (default: 1.0)')
-    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
-                        help='Learning rate step gamma (default: 0.7)')
-    parser.add_argument('--no-cuda', action='store_true', default=False,
-                        help='disables CUDA training')
-    parser.add_argument('--dry-run', action='store_true', default=False,
-                        help='quickly check a single pass')
-    parser.add_argument('--seed', type=int, default=1, metavar='S',
-                        help='random seed (default: 1)')
-    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
-                        help='how many batches to wait before logging training status')
-    parser.add_argument('--save-model', action='store_true', default=False,
-                        help='For Saving the current Model')
+    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
+    parser.add_argument(
+        "--batch-size",
+        type=int,
+        default=64,
+        metavar="N",
+        help="input batch size for training (default: 64)",
+    )
+    parser.add_argument(
+        "--test-batch-size",
+        type=int,
+        default=1000,
+        metavar="N",
+        help="input batch size for testing (default: 1000)",
+    )
+    parser.add_argument(
+        "--epochs",
+        type=int,
+        default=14,
+        metavar="N",
+        help="number of epochs to train (default: 14)",
+    )
+    parser.add_argument(
+        "--lr",
+        type=float,
+        default=1.0,
+        metavar="LR",
+        help="learning rate (default: 1.0)",
+    )
+    parser.add_argument(
+        "--gamma",
+        type=float,
+        default=0.7,
+        metavar="M",
+        help="Learning rate step gamma (default: 0.7)",
+    )
+    parser.add_argument(
+        "--no-cuda", action="store_true", default=False, help="disables CUDA training"
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        default=False,
+        help="quickly check a single pass",
+    )
+    parser.add_argument(
+        "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
+    )
+    parser.add_argument(
+        "--log-interval",
+        type=int,
+        default=10,
+        metavar="N",
+        help="how many batches to wait before logging training status",
+    )
+    parser.add_argument(
+        "--save-model",
+        action="store_true",
+        default=False,
+        help="For Saving the current Model",
+    )
     args = parser.parse_args()
     use_cuda = not args.no_cuda and torch.cuda.is_available()

     torch.manual_seed(args.seed)

     device = torch.device("cuda" if use_cuda else "cpu")

-    train_kwargs = {'batch_size': args.batch_size}
-    test_kwargs = {'batch_size': args.test_batch_size}
+    train_kwargs = {"batch_size": args.batch_size}
+    test_kwargs = {"batch_size": args.test_batch_size}
     if use_cuda:
-        cuda_kwargs = {'num_workers': 1,
-                       'pin_memory': True,
-                       'shuffle': True}
+        cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
         train_kwargs.update(cuda_kwargs)
         test_kwargs.update(cuda_kwargs)

-    transform=transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize((0.1307,), (0.3081,))
-        ])
-    dataset1 = datasets.MNIST('../data', train=True, download=True,
-                       transform=transform)
-    dataset2 = datasets.MNIST('../data', train=False,
-                       transform=transform)
-    train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
+    transform = transforms.Compose(
+        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
+    )
+    dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
+    dataset2 = datasets.MNIST("../data", train=False, transform=transform)
+    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
     test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

     model = Net().to(device)
@@ -133,5 +183,5 @@ def main():
         torch.save(model.state_dict(), "mnist_cnn.pt")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()

examples/mnist/main_with_hp.py

Lines changed: 95 additions & 45 deletions
@@ -47,9 +47,15 @@ def train(args, model, device, train_loader, optimizer, epoch):
         loss.backward()
         optimizer.step()
         if batch_idx % args.log_interval == 0:
-            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
-                epoch, batch_idx * len(data), len(train_loader.dataset),
-                100. * batch_idx / len(train_loader), loss.item()))
+            print(
+                "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
+                    epoch,
+                    batch_idx * len(data),
+                    len(train_loader.dataset),
+                    100.0 * batch_idx / len(train_loader),
+                    loss.item(),
+                )
+            )
         if args.dry_run:
             break

@@ -62,66 +68,110 @@ def test(model, device, test_loader):
         for data, target in test_loader:
             data, target = data.to(device), target.to(device)
             output = model(data)
-            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
-            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
+            test_loss += F.nll_loss(
+                output, target, reduction="sum"
+            ).item()  # sum up batch loss
+            pred = output.argmax(
+                dim=1, keepdim=True
+            )  # get the index of the max log-probability
             correct += pred.eq(target.view_as(pred)).sum().item()

     test_loss /= len(test_loader.dataset)

-    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
-        test_loss, correct, len(test_loader.dataset),
-        100. * correct / len(test_loader.dataset)))
+    print(
+        "\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
+            test_loss,
+            correct,
+            len(test_loader.dataset),
+            100.0 * correct / len(test_loader.dataset),
+        )
+    )


 def main():
     # Training settings
-    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
-    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
-                        help='input batch size for training (default: 64)')
-    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
-                        help='input batch size for testing (default: 1000)')
-    parser.add_argument('--epochs', type=int, default=14, metavar='N',
-                        help='number of epochs to train (default: 14)')
-    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
-                        help='learning rate (default: 1.0)')
-    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
-                        help='Learning rate step gamma (default: 0.7)')
-    parser.add_argument('--no-cuda', action='store_true', default=False,
-                        help='disables CUDA training')
-    parser.add_argument('--dry-run', action='store_true', default=False,
-                        help='quickly check a single pass')
-    parser.add_argument('--seed', type=int, default=1, metavar='S',
-                        help='random seed (default: 1)')
-    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
-                        help='how many batches to wait before logging training status')
-    parser.add_argument('--save-model', action='store_true', default=False,
-                        help='For Saving the current Model')
-    parser.add_argument('-D', '--define', nargs='*', default=[], action="extend")
+    parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
+    parser.add_argument(
+        "--batch-size",
+        type=int,
+        default=64,
+        metavar="N",
+        help="input batch size for training (default: 64)",
+    )
+    parser.add_argument(
+        "--test-batch-size",
+        type=int,
+        default=1000,
+        metavar="N",
+        help="input batch size for testing (default: 1000)",
+    )
+    parser.add_argument(
+        "--epochs",
+        type=int,
+        default=14,
+        metavar="N",
+        help="number of epochs to train (default: 14)",
+    )
+    parser.add_argument(
+        "--lr",
+        type=float,
+        default=1.0,
+        metavar="LR",
+        help="learning rate (default: 1.0)",
+    )
+    parser.add_argument(
+        "--gamma",
+        type=float,
+        default=0.7,
+        metavar="M",
+        help="Learning rate step gamma (default: 0.7)",
+    )
+    parser.add_argument(
+        "--no-cuda", action="store_true", default=False, help="disables CUDA training"
+    )
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        default=False,
+        help="quickly check a single pass",
+    )
+    parser.add_argument(
+        "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)"
+    )
+    parser.add_argument(
+        "--log-interval",
+        type=int,
+        default=10,
+        metavar="N",
+        help="how many batches to wait before logging training status",
+    )
+    parser.add_argument(
+        "--save-model",
+        action="store_true",
+        default=False,
+        help="For Saving the current Model",
+    )
+    parser.add_argument("-D", "--define", nargs="*", default=[], action="extend")
     args = parser.parse_args()
     use_cuda = not args.no_cuda and torch.cuda.is_available()

     torch.manual_seed(args.seed)

     device = torch.device("cuda" if use_cuda else "cpu")

-    train_kwargs = {'batch_size': args.batch_size}
-    test_kwargs = {'batch_size': args.test_batch_size}
+    train_kwargs = {"batch_size": args.batch_size}
+    test_kwargs = {"batch_size": args.test_batch_size}
     if use_cuda:
-        cuda_kwargs = {'num_workers': 1,
-                       'pin_memory': True,
-                       'shuffle': True}
+        cuda_kwargs = {"num_workers": 1, "pin_memory": True, "shuffle": True}
         train_kwargs.update(cuda_kwargs)
         test_kwargs.update(cuda_kwargs)

-    transform=transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize((0.1307,), (0.3081,))
-        ])
-    dataset1 = datasets.MNIST('../data', train=True, download=True,
-                       transform=transform)
-    dataset2 = datasets.MNIST('../data', train=False,
-                       transform=transform)
-    train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
+    transform = transforms.Compose(
+        [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
+    )
+    dataset1 = datasets.MNIST("../data", train=True, download=True, transform=transform)
+    dataset2 = datasets.MNIST("../data", train=False, transform=transform)
+    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
     test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

     with param_scope(*args.define):
@@ -138,5 +188,5 @@ def main():
         torch.save(model.state_dict(), "mnist_cnn.pt")


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
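
A note on what distinguishes main_with_hp.py from main.py: the extra -D/--define option (nargs="*", action="extend") accumulates key=value strings from the command line, and main() unpacks them into param_scope so hyperparameters can be overridden without editing code. The sketch below is a hypothetical illustration of that flow: the parameter names are invented, and it assumes param_scope is importable from this repo's hyperparameter package.

# Hypothetical sketch of the -D/--define flow; parameter names are illustrative only.
# Assumes param_scope comes from this repo's hyperparameter package.
from hyperparameter import param_scope

# e.g. `python main_with_hp.py -D train.lr=0.01 -D train.gamma=0.5` produces:
defines = ["train.lr=0.01", "train.gamma=0.5"]

# main_with_hp.py wraps training in `with param_scope(*args.define):`,
# so parameters read inside the scope pick up the command-line overrides.
with param_scope(*defines):
    pass  # training would run here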
