
Commit 9b7b27b

update examples
1 parent 0ba17a6 commit 9b7b27b

7 files changed: +19 -42 lines changed


examples/basic_tutorials/cifar10_cnn.py

Lines changed: 5 additions & 8 deletions
@@ -2,12 +2,12 @@
 # -*- coding: utf-8 -*-
 
 import os
-# os.environ['TL_BACKEND'] = 'paddle'
-os.environ['TL_BACKEND'] = 'tensorflow'
+os.environ['TL_BACKEND'] = 'paddle'
+# os.environ['TL_BACKEND'] = 'tensorflow'
 # os.environ['TL_BACKEND'] = 'mindspore'
 
 import time
-from tensorlayerx.dataflow import Dataset, Dataloader
+from tensorlayerx.dataflow import Dataset, DataLoader
 from tensorlayerx.vision.transforms import (
     Compose, Resize, RandomFlipHorizontal, RandomContrast, RandomBrightness, StandardizePerImage, RandomCrop
 )
@@ -110,11 +110,8 @@ def __len__(self):
 train_dataset = make_dataset(data=X_train, label=y_train, transforms=train_transforms)
 test_dataset = make_dataset(data=X_test, label=y_test, transforms=test_transforms)
 
-train_dataset = tlx.dataflow.FromGenerator(train_dataset, output_types=(tlx.float32, tlx.int64))
-test_dataset = tlx.dataflow.FromGenerator(test_dataset, output_types=(tlx.float32, tlx.int64))
-
-train_dataset = Dataloader(train_dataset, batch_size=batch_size, shuffle=True, shuffle_buffer_size=128)
-test_dataset = Dataloader(test_dataset, batch_size=batch_size)
+train_dataset = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+test_dataset = DataLoader(test_dataset, batch_size=batch_size)
 
 
 class WithLoss(Module):
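
All seven examples converge on the same pattern: build a map-style Dataset and hand it straight to DataLoader, with no FromGenerator wrapper in between. The following standalone sketch is not part of the commit; it only assumes the tensorlayerx.dataflow API as used in the diffs (a Dataset subclass with __getitem__/__len__, consumed directly by DataLoader) and uses random stand-in data in place of CIFAR-10. ToyDataset, the data shapes, and the batch size are illustration choices.

import os
# TL_BACKEND must be set before tensorlayerx is imported; this commit switches the
# examples to 'paddle', but any installed backend should work for this sketch.
os.environ['TL_BACKEND'] = 'paddle'

import numpy as np
from tensorlayerx.dataflow import Dataset, DataLoader


class ToyDataset(Dataset):
    """Map-style dataset: __getitem__ returns one (data, label) pair, __len__ its size."""

    def __init__(self, data, label):
        super(ToyDataset, self).__init__()
        self.data = data.astype('float32')
        self.label = label.astype('int64')

    def __getitem__(self, index):
        return self.data[index], self.label[index]

    def __len__(self):
        return len(self.data)


# Random stand-in data; the real examples load CIFAR-10 / MNIST here.
data = np.random.rand(256, 784)
label = np.random.randint(0, 10, size=(256,))

# The Dataset is passed straight to DataLoader; no FromGenerator wrapper is needed.
loader = DataLoader(ToyDataset(data, label), batch_size=32, shuffle=True)
for x, y in loader:
    print(x.shape, y.shape)  # one shuffled mini-batch per iteration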

examples/basic_tutorials/cifar10_cnn_mixed_mindspore.py

Lines changed: 3 additions & 10 deletions
@@ -12,7 +12,7 @@
 from tensorlayerx.vision.transforms import (
     Compose, Resize, RandomFlipHorizontal, RandomContrast, RandomBrightness, StandardizePerImage, RandomCrop, HWC2CHW
 )
-from tensorlayerx.dataflow import Dataset, Dataloader
+from tensorlayerx.dataflow import Dataset, DataLoader
 from mindspore.nn import WithLossCell, Adam
 from mindspore import ParameterTuple
 import mindspore.nn as nn
@@ -99,15 +99,8 @@ def __len__(self):
 train_dataset = make_dataset(data=X_train, label=y_train, transforms=train_transforms)
 test_dataset = make_dataset(data=X_test, label=y_test, transforms=test_transforms)
 
-train_dataset = tlx.dataflow.FromGenerator(
-    train_dataset, output_types=(tlx.float32, tlx.int64), column_names=['data', 'label']
-)
-test_dataset = tlx.dataflow.FromGenerator(
-    test_dataset, output_types=(tlx.float32, tlx.int64), column_names=['data', 'label']
-)
-
-train_dataset = Dataloader(train_dataset, batch_size=batch_size, shuffle=True, shuffle_buffer_size=128)
-test_dataset = Dataloader(test_dataset, batch_size=batch_size)
+train_dataset = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+test_dataset = DataLoader(test_dataset, batch_size=batch_size)
 
 
 class GradWrap(Module):

examples/basic_tutorials/cifar10_cnn_mixed_paddle.py

Lines changed: 3 additions & 6 deletions
@@ -8,7 +8,7 @@
 import paddle as pd
 from tensorlayerx.nn import Module
 import tensorlayerx as tlx
-from tensorlayerx.dataflow import Dataset, Dataloader
+from tensorlayerx.dataflow import Dataset, DataLoader
 from tensorlayerx.nn import (Conv2d, Dense, Flatten, MaxPool2d, BatchNorm2d)
 from tensorlayerx.vision.transforms import (
     Compose, Resize, RandomFlipHorizontal, RandomContrast, RandomBrightness, StandardizePerImage, RandomCrop
@@ -108,11 +108,8 @@ def __len__(self):
 train_dataset = make_dataset(data=X_train, label=y_train, transforms=train_transforms)
 test_dataset = make_dataset(data=X_test, label=y_test, transforms=test_transforms)
 
-train_dataset = tlx.dataflow.FromGenerator(train_dataset, output_types=(tlx.float32, tlx.int64))
-test_dataset = tlx.dataflow.FromGenerator(test_dataset, output_types=(tlx.float32, tlx.int64))
-
-train_dataset = Dataloader(train_dataset, batch_size=batch_size, shuffle=True, shuffle_buffer_size=128)
-test_dataset = Dataloader(test_dataset, batch_size=batch_size)
+train_dataset = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
+test_dataset = DataLoader(test_dataset, batch_size=batch_size)
 
 for epoch in range(n_epoch):
     train_loss, train_acc, n_iter = 0, 0, 0

examples/basic_tutorials/imdb_LSTM_simple.py

Lines changed: 1 addition & 4 deletions
@@ -60,10 +60,7 @@ def forward(self, x):
 print_freq = 2
 
 train_dataset = imdbdataset(X=X_train, y=y_train)
-train_dataset = tlx.dataflow.FromGenerator(
-    train_dataset, output_types=[tlx.int64, tlx.int64], column_names=['data', 'label']
-)
-train_loader = tlx.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True)
+train_loader = tlx.dataflow.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 
 net = ImdbNet()
 train_weights = net.trainable_weights

examples/basic_tutorials/mnist_dataflow.py

Lines changed: 1 addition & 4 deletions
@@ -75,10 +75,7 @@ def forward(self, x):
 
 
 train_dataset = mnistdataset1(data=X_train, label=y_train, transform=transform)
-train_dataset = tlx.dataflow.FromGenerator(
-    train_dataset, output_types=[tlx.float32, tlx.int64], column_names=['data', 'label']
-)
-train_loader = tlx.dataflow.Dataloader(train_dataset, batch_size=128, shuffle=False)
+train_loader = tlx.dataflow.DataLoader(train_dataset, batch_size=128, shuffle=False)
 
 for i in train_loader:
     print(i[0].shape, i[1])

examples/basic_tutorials/mnist_gan.py

Lines changed: 2 additions & 5 deletions
@@ -10,7 +10,7 @@
 import numpy as np
 import tensorlayerx as tlx
 from tensorlayerx.nn import Module, Dense
-from tensorlayerx.dataflow import Dataset
+from tensorlayerx.dataflow import Dataset, DataLoader
 from tensorlayerx.model import TrainOneStep
 
 X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 784))
@@ -33,10 +33,7 @@ def __len__(self):
 
 batch_size = 128
 train_dataset = mnistdataset(data=X_train, label=y_train)
-train_dataset = tlx.dataflow.FromGenerator(
-    train_dataset, output_types=[tlx.float32, tlx.int64], column_names=['data', 'label']
-)
-train_loader = tlx.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True)
+train_loader = tlx.dataflow.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 
 
 class generator(Module):

examples/basic_tutorials/mnist_mlp.py

Lines changed: 4 additions & 5 deletions
@@ -10,7 +10,7 @@
 import tensorlayerx as tlx
 from tensorlayerx.nn import Module
 from tensorlayerx.nn import Dense, Dropout
-from tensorlayerx.dataflow import Dataset
+from tensorlayerx.dataflow import Dataset, DataLoader
 
 X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 784))
 
@@ -63,12 +63,11 @@ def forward(self, x, foo=None):
 metric = tlx.metrics.Accuracy()
 loss_fn = tlx.losses.softmax_cross_entropy_with_logits
 train_dataset = mnistdataset(data=X_train, label=y_train)
-train_dataset = tlx.dataflow.FromGenerator(
-    train_dataset, output_types=[tlx.float32, tlx.int64], column_names=['data', 'label']
-)
-train_loader = tlx.dataflow.Dataloader(train_dataset, batch_size=batch_size, shuffle=True)
+train_loader = tlx.dataflow.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
 
 model = tlx.model.Model(network=MLP, loss_fn=loss_fn, optimizer=optimizer, metrics=metric)
 model.train(n_epoch=n_epoch, train_dataset=train_loader, print_freq=print_freq, print_train_batch=False)
 model.save_weights('./model.npz', format='npz_dict')
 model.load_weights('./model.npz', format='npz_dict')
+
+
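Taken together, the data path in mnist_mlp.py after this commit is short enough to read in one place. The following is a sketch only, not runnable on its own: mnistdataset (the Dataset subclass), MLP, optimizer, batch_size, n_epoch and print_freq are defined earlier in that example file and are assumed here; every call shown appears verbatim in the diffs above.

# Assumed from mnist_mlp.py: mnistdataset, MLP, optimizer, batch_size, n_epoch, print_freq.
import tensorlayerx as tlx

X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 784))

metric = tlx.metrics.Accuracy()
loss_fn = tlx.losses.softmax_cross_entropy_with_logits

train_dataset = mnistdataset(data=X_train, label=y_train)
train_loader = tlx.dataflow.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

# High-level trainer: the DataLoader is passed directly as train_dataset.
model = tlx.model.Model(network=MLP, loss_fn=loss_fn, optimizer=optimizer, metrics=metric)
model.train(n_epoch=n_epoch, train_dataset=train_loader, print_freq=print_freq, print_train_batch=False)
model.save_weights('./model.npz', format='npz_dict')  # weights round-trip as an npz dict
model.load_weights('./model.npz', format='npz_dict')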