-import torch
+import math
+import random
+import unittest
 import time
+
+from functools import reduce
+import torch
 import intel_pytorch_extension as ipex
+
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+from torch.nn import Parameter
+import torch.nn.functional as F
+from torch.autograd import gradcheck
+from torch.autograd.gradcheck import gradgradcheck
+from torch._six import inf, nan
+
+from common_utils import TestCase, iter_indices, TEST_NUMPY, TEST_SCIPY, TEST_MKL, \
+    TEST_LIBROSA, run_tests, download_file, skipIfNoLapack, suppress_warnings, \
+    IS_WINDOWS, PY3, NO_MULTIPROCESSING_SPAWN, do_test_dtypes, do_test_empty_full, \
+    IS_SANDCASTLE, load_tests, brute_pdist, brute_cdist, slowTest, \
+    skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf
+
 K=1 #128
 C=16 #64
 MB = 28

-def get_rand_seed():
-    return int(time.time() * 1000000000)
-
-def _ipxex_linear(random_seed, data_type = torch.float32):
-    torch.manual_seed(random_seed)
-    fc = ipex.IpexMLPLinear(C, K).to(data_type)
-    return fc
-
-def _cpu_linear(random_seed, data_type = torch.float32):
-    torch.manual_seed(random_seed)
-    fc = torch.nn.Linear(C, K).to(data_type)
-    return fc
-
-def _run_mlp(random_seed, fc_module, data_type = torch.float32):
-    torch.manual_seed(random_seed)
-    x1 = torch.randn(MB, C, requires_grad=True).to(data_type).requires_grad_(True)
-    y1 = fc_module(x1)
-    z1 = y1.mean()
-    z1.backward()
-    return x1.grad, fc_module.weight.grad, fc_module.bias.grad
-
-for data_type in [torch.float32, torch.bfloat16]:
-    seed = get_rand_seed()
-    ipex_fc = _ipxex_linear(seed, data_type)
-    cpu_fc = _cpu_linear(seed, data_type)
-
-    rtol = 1e-5
-    atol = rtol
-    if data_type == torch.bfloat16:
-        rtol = 1e-2
-        atol = rtol
-
-    seed = get_rand_seed()
-    input_grad_ipex, weight_grad_ipex, bias_grad_ipex = _run_mlp(seed, ipex_fc, data_type)
-    input_grad_cpu, weight_grad_cpu, bias_grad_cpu = _run_mlp(seed, cpu_fc, data_type)
-
-    if input_grad_ipex is None:
-        if input_grad_cpu is not None:
-            print("##################### {} MLP input grad FAIL".format(str(data_type)))
-        else:
-            print("##################### {} MLP input grad PASS".format(str(data_type)))
-    else:
-        if not input_grad_ipex.to(torch.float32).allclose(input_grad_cpu.to(torch.float32), rtol=rtol, atol=atol):
-            print("##################### {} MLP input grad FAIL".format(str(data_type)))
-        else:
-            print("##################### {} MLP input grad PASS".format(str(data_type)))
-
-    if not weight_grad_ipex.to(torch.float32).allclose(weight_grad_cpu.to(torch.float32), rtol=rtol, atol=atol):
-        print("##################### {} MLP weight grad FAIL".format(str(data_type)))
-    else:
-        print("##################### {} MLP weight grad PASS".format(str(data_type)))
-
-    if not bias_grad_ipex.to(torch.float32).allclose(bias_grad_cpu.to(torch.float32), rtol=rtol, atol=atol):
-        print("##################### {} MLP bias grad FAIL".format(str(data_type)))
-    else:
-        print("##################### {} MLP bias grad PASS".format(str(data_type)))
+
+class TestMLPCases(TestCase):
+    def get_rand_seed(self):
+        return int(time.time() * 1000000000)
+
+    def _ipex_linear(self, random_seed, data_type=torch.float32):
+        torch.manual_seed(random_seed)
+        fc = ipex.IpexMLPLinear(C, K).to(data_type)
+        return fc
+
+    def _cpu_linear(self, random_seed, data_type=torch.float32):
+        torch.manual_seed(random_seed)
+        fc = torch.nn.Linear(C, K).to(data_type)
+        return fc
+
+    def _run_mlp(self, random_seed, fc_module, data_type=torch.float32):
+        torch.manual_seed(random_seed)
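+        # note: when data_type is bfloat16, .to() below makes x1 a non-leaf copy,
+        # so x1.grad stays None after backward(); test_mlp accounts for that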
+        x1 = torch.randn(MB, C, requires_grad=True).to(data_type).requires_grad_(True)
+        y1 = fc_module(x1)
+        z1 = y1.mean()
+        z1.backward()
+        return x1.grad, fc_module.weight.grad, fc_module.bias.grad
+
+    def test_mlp(self):
+        for data_type in [torch.float32, torch.bfloat16]:
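+            # bfloat16 carries only an 8-bit significand (eps = 2**-7, about 7.8e-3),
+            # so its comparison tolerance is relaxed below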
+            prec = 1e-5
+            if data_type == torch.bfloat16:
+                prec = 1.2e-2
+            seed = self.get_rand_seed()
+            ipex_fc = self._ipex_linear(seed, data_type)
+            cpu_fc = self._cpu_linear(seed, data_type)
+
+            seed = self.get_rand_seed()
+            input_grad_ipex, weight_grad_ipex, bias_grad_ipex = self._run_mlp(seed, ipex_fc, data_type)
+            input_grad_cpu, weight_grad_cpu, bias_grad_cpu = self._run_mlp(seed, cpu_fc, data_type)
+
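+            # for bfloat16 the inputs are non-leaf casts, so both input grads should be None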
+            if input_grad_ipex is None:
+                self.assertTrue(input_grad_cpu is None)
+            else:
+                self.assertEqual(input_grad_ipex.to(torch.float32), input_grad_cpu.to(torch.float32), prec)
+            self.assertEqual(weight_grad_ipex.to(torch.float32), weight_grad_cpu.to(torch.float32), prec)
+            self.assertEqual(bias_grad_ipex.to(torch.float32), bias_grad_cpu.to(torch.float32), prec)
+
+if __name__ == '__main__':
+    test = unittest.main()
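
A note on the comparison technique the test depends on: calling torch.manual_seed with the same value before constructing each module gives both modules identical initial parameters, and reseeding before drawing the input gives both the same batch, so the resulting gradients can be compared elementwise. Below is a minimal self-contained sketch of that pattern, with two stock torch.nn.Linear modules standing in for the ipex/cpu pair (the helper names are illustrative, not part of the test above):

    import torch

    C, K, MB = 16, 1, 28  # mirror the dimensions used in the test

    def make_linear(seed):
        # identical seed -> identical Parameter initialization
        torch.manual_seed(seed)
        return torch.nn.Linear(C, K)

    def grads(seed, fc):
        # reseed so each module sees the same input batch
        torch.manual_seed(seed)
        x = torch.randn(MB, C, requires_grad=True)
        fc(x).mean().backward()
        return x.grad, fc.weight.grad, fc.bias.grad

    fc_a, fc_b = make_linear(42), make_linear(42)
    for g_a, g_b in zip(grads(7, fc_a), grads(7, fc_b)):
        assert g_a.allclose(g_b, rtol=1e-5, atol=1e-5)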