Replies: 4 comments 1 reply
-
Thanks for sharing. Putting all the computation in one ...
1 reply
-
Buddy, you might want to check the English forum first; the first few small assignments have answers posted there.
-
Thanks for sharing! Sharing mine as well: https://github.com/LucienXian/mlc-cource/blob/main/mlc_assignment1.ipynb
-
Sharing mine:
# %%
import numpy as np
import pickle as pkl
import torch
import torch.nn.functional as F
import torchvision
import tvm
import tvm.testing
from matplotlib import pyplot as plt
from torch import nn
from torchvision import transforms
from tvm import topi, relax, te
from tvm.script import tir as T
# %%
# Load the weight map from file.
# With these weights, the model reaches about 83.3% accuracy on the test data.
weight_map = pkl.load(open("fasionmnist_mlp_assignment_params.pkl", "rb"))
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
batch_size = 4
input_shape = (batch_size, 1, 28, 28) # NCHW layout
def pytorch_model():
    layers = []  # avoid shadowing the built-in `list`
    layers.append(nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3), bias=True))
    layers.append(nn.ReLU())
    layers.append(nn.MaxPool2d(kernel_size=(2, 2)))
    layers.append(nn.Flatten())
    layers.append(nn.Linear(in_features=5408, out_features=100, bias=True))
    layers.append(nn.ReLU())
    layers.append(nn.Linear(in_features=100, out_features=10, bias=True))
    layers.append(nn.Softmax(dim=1))
    model = nn.Sequential(*layers).cpu()
name_map = {
"0.weight": "conv2d_weight",
"0.bias": "conv2d_bias",
"4.weight": "linear0_weight",
"4.bias": "linear0_bias",
"6.weight": "linear1_weight",
"6.bias": "linear1_bias",
}
for name, param in model.named_parameters():
param.data = torch.from_numpy(weight_map[name_map[name]]).cpu()
return model
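# %%
# Optional sanity check (my addition, not part of the assignment): print the
# shape of every array in the loaded weight map to confirm it matches the
# layers above, e.g. conv2d_weight should be (32, 1, 3, 3) and linear0_weight
# (100, 5408).
for name, value in weight_map.items():
    print(name, value.shape)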
# %%
def test(model, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
print_img = True
for data, label in test_loader:
data, label = data.cpu(), label.cpu()
output = model(data)
            # sum up batch loss; the model outputs probabilities (Softmax),
            # so take the log before nll_loss
            test_loss += F.nll_loss(torch.log(output), label, reduction="sum").item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
if print_img:
imshow(data[0])
print("predict: {}, label: {}".format(class_names[pred[0][0]], class_names[label[0]]))
print_img = False
correct += pred.eq(label.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print("\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def imshow(img):
    # the transform below applies only ToTensor (no Normalize),
    # so no un-normalization is needed before display
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
test_data = torchvision.datasets.FashionMNIST(
"./data",
download=True,
train=False,
transform=transforms.Compose([transforms.ToTensor()])
)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, shuffle=False)
test(pytorch_model(), test_loader)
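# %%
# A small optional extension (my own addition, not required by the assignment):
# per-class accuracy over the test set, using the class_names defined above.
# Assumes every class appears at least once in the test split, which holds for
# FashionMNIST.
def per_class_accuracy(model, test_loader):
    model.eval()
    correct = np.zeros(len(class_names))
    total = np.zeros(len(class_names))
    with torch.no_grad():
        for data, label in test_loader:
            pred = model(data).argmax(dim=1)
            for p, l in zip(pred.tolist(), label.tolist()):
                total[l] += 1
                correct[l] += int(p == l)
    for i, name in enumerate(class_names):
        print("{}: {:.1f}%".format(name, 100.0 * correct[i] / total[i]))

per_class_accuracy(pytorch_model(), test_loader)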
# %% [markdown]
# # Part 2
# %%
def create_model_via_emit_te():
bb = relax.BlockBuilder()
    # DynTensorType's first argument is the tensor rank (4 for NCHW), not the batch size
    x = relax.Var("x", input_shape, relax.DynTensorType(4, "float32"))
conv2d_weight = relax.const(weight_map["conv2d_weight"], "float32")
conv2d_bias = relax.const(weight_map["conv2d_bias"].reshape(1, 32, 1, 1), "float32")
linear0_weight = relax.const(weight_map["linear0_weight"], "float32")
linear0_bias = relax.const(weight_map["linear0_bias"].reshape(1, 100), "float32")
linear1_weight = relax.const(weight_map["linear1_weight"], "float32")
linear1_bias = relax.const(weight_map["linear1_bias"].reshape(1, 10), "float32")
with bb.function("main", [x]):
with bb.dataflow():
conv1 = bb.emit_te(topi.nn.conv2d, x, conv2d_weight, (1, 1), (0, 0), (1, 1))
conv1 = bb.emit_te(topi.add, conv1, conv2d_bias)
relu1 = bb.emit_te(topi.nn.relu, conv1)
            # by convention, max-pooling uses a stride equal to the kernel size
pool1 = bb.emit_te(topi.nn.pool2d, relu1, (2, 2), (2, 2), (1, 1), (0, 0, 0, 0), 'max')
flatten = bb.emit_te(topi.nn.flatten, pool1)
fc1 = bb.emit_te(topi.nn.dense, flatten, linear0_weight)
fc1 = bb.emit_te(topi.add, fc1, linear0_bias)
relu2 = bb.emit_te(topi.nn.relu, fc1)
fc2 = bb.emit_te(topi.nn.dense, relu2, linear1_weight)
fc2 = bb.emit_te(topi.add, fc2, linear1_bias)
            probs = bb.emit_te(topi.nn.softmax, fc2)  # softmax output: probabilities, not logits
            gv = bb.emit_output(probs)
bb.emit_func_output(gv)
return bb.get()
def build_mod(mod):
    ex = relax.vm.build(mod, "llvm")  # avoid shadowing the built-in `exec`
    dev = tvm.cpu()
    vm = relax.VirtualMachine(ex, dev)
    return vm
def check_equivalence(mod, torch_model, test_loader):
torch_model.eval()
with torch.no_grad():
rt_mod = build_mod(mod)
for data, label in test_loader:
data, label = data.cpu(), label.cpu()
output_from_pytorch = torch_model(data).numpy()
output_from_relax = rt_mod["main"](tvm.nd.array(data, tvm.cpu())).numpy()
tvm.testing.assert_allclose(output_from_pytorch, output_from_relax, rtol=1e-4)
# reuse the test_data already loaded in Part 1
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False)
mod = create_model_via_emit_te()
torch_model = pytorch_model()
check_equivalence(mod, torch_model, test_loader)
print(mod.script())
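# %%
# Optional eyeball check (my addition): run a single batch through the Relax VM
# directly and print predicted vs. expected classes. A minimal sketch; it
# reuses mod, build_mod, test_loader and class_names defined above.
vm = build_mod(mod)
data, label = next(iter(test_loader))
relax_out = vm["main"](tvm.nd.array(data, tvm.cpu())).numpy()
print("predicted:", [class_names[i] for i in relax_out.argmax(axis=1)])
print("expected: ", [class_names[i] for i in label.tolist()])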
# %% [markdown]
# # Part 3
# %%
import torch
@tvm.register_func("env.conv2d", override=True)
def torch_conv2d(x: tvm.nd.NDArray,
w: tvm.nd.NDArray,
bias: tvm.nd.NDArray,
out: tvm.nd.NDArray):
x_torch = torch.from_dlpack(x)
w_torch = torch.from_dlpack(w)
bias_torch = torch.from_dlpack(bias)
out_torch = torch.from_dlpack(out)
conv2d = torch.nn.functional.conv2d(x_torch, w_torch, bias_torch)
out_torch.copy_(conv2d)
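# %%
# Optional unit test (my addition): exercise the registered PackedFunc directly
# on data of the expected shapes and compare against torch's conv2d, before
# wiring it into the graph. A minimal sketch assuming float32 NCHW input as above.
env_conv2d = tvm.get_global_func("env.conv2d")
x_np = np.random.rand(batch_size, 1, 28, 28).astype("float32")
w_np = weight_map["conv2d_weight"]
b_np = weight_map["conv2d_bias"].reshape(32)
out_nd = tvm.nd.empty((batch_size, 32, 26, 26), "float32")
env_conv2d(tvm.nd.array(x_np), tvm.nd.array(w_np), tvm.nd.array(b_np), out_nd)
expected = torch.nn.functional.conv2d(
    torch.from_numpy(x_np), torch.from_numpy(w_np), torch.from_numpy(b_np))
tvm.testing.assert_allclose(out_nd.numpy(), expected.numpy(), rtol=1e-5)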
def create_model_with_torch_func():
bb = relax.BlockBuilder()
x = relax.Var("x", input_shape, relax.DynTensorType(4, "float32"))
conv2d_weight = relax.const(weight_map["conv2d_weight"], "float32")
conv2d_bias = relax.const(weight_map["conv2d_bias"].reshape(32), "float32")
linear0_weight = relax.const(weight_map["linear0_weight"], "float32")
linear0_bias = relax.const(weight_map["linear0_bias"].reshape(1, 100), "float32")
linear1_weight = relax.const(weight_map["linear1_weight"], "float32")
linear1_bias = relax.const(weight_map["linear1_bias"].reshape(1, 10), "float32")
with bb.function("main", [x]):
with bb.dataflow():
# ref: https://github.com/mlc-ai/mlc-en/discussions/31
conv1 = bb.emit(relax.op.call_tir(relax.extern("env.conv2d"), \
(x, conv2d_weight, conv2d_bias), (batch_size, 32, 26, 26), dtype="float32"))
relu1 = bb.emit_te(topi.nn.relu, conv1)
            # by convention, max-pooling uses a stride equal to the kernel size
pool1 = bb.emit_te(topi.nn.pool2d, relu1, (2, 2), (2, 2), (1, 1), (0, 0, 0, 0), 'max')
flatten = bb.emit_te(topi.nn.flatten, pool1)
fc1 = bb.emit_te(topi.nn.dense, flatten, linear0_weight)
fc1 = bb.emit_te(topi.add, fc1, linear0_bias)
relu2 = bb.emit_te(topi.nn.relu, fc1)
fc2 = bb.emit_te(topi.nn.dense, relu2, linear1_weight)
fc2 = bb.emit_te(topi.add, fc2, linear1_bias)
            probs = bb.emit_te(topi.nn.softmax, fc2)  # softmax output: probabilities, not logits
            gv = bb.emit_output(probs)
bb.emit_func_output(gv)
return bb.get()
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False)
mod = create_model_with_torch_func()
check_equivalence(mod, torch_model, test_loader)
import IPython
IPython.display.Code(mod.script(), language="python")
# %% [markdown]
# # Part 4
# %%
import IPython
mod = create_model_via_emit_te()
sch = tvm.tir.Schedule(mod)
# Step 1. Get blocks
pad_block = sch.get_block(name="pad_temp", func_name="conv2d")
conv_block = sch.get_block(name="conv2d_nchw", func_name="conv2d")
# Step 2. Inline the padding block (if exists)
sch.compute_inline(pad_block)
# Step 3. Get loops
i, j, k, l, m, n, o = sch.get_loops(conv_block)
# Step 4. Organize the loops
sch.fuse(i, j, k, l)
i, j, k, l = sch.get_loops(conv_block)
i0, i1, i2 = sch.split(i, factors=[None, 8, 4])
sch.reorder(i0, j, k, l, i1, i2)
# Step 5. fuse + vectorize / fuse + parallel / fuse + unroll
sch.unroll(i1)
sch.vectorize(i2)
sch.parallel(i0)
# Step 6. decompose reduction
conv_init = sch.decompose_reduction(conv_block, j)
IPython.display.Code(sch.mod.script(), language="python")
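# %%
# Optional (my addition): a rough wall-clock comparison between the unscheduled
# module and the scheduled one. This is a crude single-batch measurement, not a
# rigorous benchmark, but it is enough to see whether the schedule helps.
import time

def measure(mod, runs=50):
    vm = build_mod(mod)
    data, _ = next(iter(test_loader))
    x_nd = tvm.nd.array(data, tvm.cpu())
    vm["main"](x_nd)  # warm-up run to exclude one-time costs
    start = time.time()
    for _ in range(runs):
        vm["main"](x_nd)
    return (time.time() - start) / runs

print("before schedule: {:.3f} ms".format(measure(create_model_via_emit_te()) * 1e3))
print("after schedule:  {:.3f} ms".format(measure(sch.mod) * 1e3))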
# %%
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=False)
check_equivalence(sch.mod, torch_model, test_loader)
-
Sharing my code; if there are any problems, please help point them out:
https://colab.research.google.com/gist/hbsun2113/1329eba39ab1dda46015f9c5244f9d2d/-assignment1-ipynb.ipynb