
Commit a302c94

Define device string as constant variable in python package
1 parent de118df commit a302c94

12 files changed, +379 -403 lines

intel_pytorch_extension_py/__init__.py
Lines changed: 2 additions & 0 deletions

@@ -6,6 +6,8 @@
 from .ops import *
 import _torch_ipex as core
 
+DEVICE = 'dpcpp'
+
 def get_auto_optimization():
     return core.get_auto_dnnl()
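For reference, a minimal usage sketch of the new constant, assuming the package is installed and importable as intel_pytorch_extension (as in the test changes below); the tensor and model names here are illustrative only:

import torch
import intel_pytorch_extension as ipex

# ipex.DEVICE resolves to 'dpcpp', so callers no longer hard-code the device string.
x = torch.randn(2, 3).to(ipex.DEVICE)
model = torch.nn.Linear(3, 4).to(ipex.DEVICE)
y = model(x)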

setup.py
Lines changed: 1 addition & 1 deletion

@@ -254,7 +254,7 @@ def make_relative_rpath(path):
           'intel_pytorch_extension',
           'intel_pytorch_extension.optim',
           'intel_pytorch_extension.ops'],
-      package_dir={'torch_ipex': 'torch_ipex_py', 'intel_pytorch_extension': 'intel_pytorch_extension_py'},
+      package_dir={'intel_pytorch_extension': 'intel_pytorch_extension_py'},
       zip_safe=False,
       ext_modules=[DPCPPExt('_torch_ipex')],
       cmdclass={

tests/cpu/common_device_type.py
Lines changed: 9 additions & 9 deletions

@@ -17,23 +17,23 @@ (the -/+ pairs below are whitespace-only: trailing spaces stripped from blank lines in the license header)
 
 All contributions by Facebook:
 Copyright (c) 2016 Facebook Inc.
-
+
 All contributions by Google:
 Copyright (c) 2015 Google Inc.
 All rights reserved.
-
+
 All contributions by Yangqing Jia:
 Copyright (c) 2015 Yangqing Jia
 All rights reserved.
-
+
 All contributions from Caffe:
 Copyright(c) 2013, 2014, 2015, the respective contributors
 All rights reserved.
-
+
 All other contributions:
 Copyright(c) 2015, 2016 the respective contributors
 All rights reserved.
-
+
 Caffe2 uses a copyright model similar to Caffe: each contributor holds
 copyright over their contributions to Caffe2. The project versioning records
 all such contribution and copyright details. If a contributor wants to further

@@ -49,7 +49,7 @@
 from functools import wraps
 import unittest
 import torch
-import _torch_ipex as ipex
+import intel_pytorch_extension as ipex
 import copy
 from common_utils import TestCase, TEST_WITH_ROCM, TEST_MKL, \
     skipCUDANonDefaultStreamIf

@@ -254,15 +254,15 @@ class CPUTestBase(DeviceTypeTestBase):
     device_type = 'cpu'
 
 class DPCPPTestBase(DeviceTypeTestBase):
-    device_type = 'dpcpp'
+    device_type = ipex.DEVICE
 
     @classmethod
     def get_primary_device(cls):
         return cls.primary_device
 
     @classmethod
     def get_all_devices(cls):
-        return ['dpcpp']
+        return [ipex.DEVICE]
 
     # Returns the dtypes the test has requested.
     # Prefers device-specific dtype specifications over generic ones.

@@ -276,7 +276,7 @@ def _get_dtypes(cls, test):
 
     @classmethod
     def setUpClass(cls):
-        cls.primary_device = 'dpcpp'
+        cls.primary_device = ipex.DEVICE
 
 class CUDATestBase(DeviceTypeTestBase):
     device_type = 'cuda'
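For orientation, a rough sketch of how a device-generic test would pick up the DPCPP device through this framework. It assumes the local common_device_type.py keeps PyTorch's instantiate_device_type_tests helper (not shown in these hunks) and that intel_pytorch_extension is importable; the test class and method are hypothetical:

import torch
import intel_pytorch_extension as ipex
from common_utils import TestCase
from common_device_type import instantiate_device_type_tests  # assumed helper, as in PyTorch's original file

class TestAddGeneric(TestCase):
    def test_add(self, device):
        # 'device' is supplied per registered test base; for DPCPPTestBase it is ipex.DEVICE ('dpcpp').
        a = torch.ones(4, device=device)
        b = torch.ones(4, device=device)
        self.assertEqual((a + b).to('cpu'), torch.full((4,), 2.0))

# Generates one concrete class per registered device base, e.g. TestAddGenericDPCPP.
instantiate_device_type_tests(TestAddGeneric, globals())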

tests/cpu/test_bf16_lazy_reorder.py
Lines changed: 1 addition & 1 deletion

@@ -32,7 +32,7 @@
 def get_rand_seed():
     return int(time.time() * 1000000000)
 
-device = torch.device("dpcpp:0")
+device = ipex.DEVICE
 class TestConv(TestCase):
     def test_Conv2d_with_cpu(self):
         rand_seed = int(get_rand_seed())

tests/cpu/test_emb.py
Lines changed: 4 additions & 4 deletions

@@ -9,14 +9,14 @@ class TestEMB(TestCase):
     def test_emb(self):
         #E = nn.EmbeddingBag(10, 5, mode="sum", sparse=True)
         cpu_emb = nn.EmbeddingBag(10, 3, mode='sum', sparse=True)
-        dpcpp_emb = copy.deepcopy(cpu_emb).to('dpcpp:0')
-        bf16_emb = copy.deepcopy(cpu_emb).to('dpcpp').bfloat16()
+        dpcpp_emb = copy.deepcopy(cpu_emb).to(ipex.DEVICE)
+        bf16_emb = copy.deepcopy(cpu_emb).to(ipex.DEVICE).bfloat16()
         # a batch of 2 samples of 4 indices each
         cpu_input = torch.LongTensor([1,2,4,5,4,3,2,9])
-        dpcpp_input = cpu_input.clone().detach().to('dpcpp:0')
+        dpcpp_input = cpu_input.clone().detach().to(ipex.DEVICE)
 
         cpu_offsets = torch.LongTensor([0,1,2,3,4,5,6,7])
-        dpcpp_offsets = cpu_offsets.clone().detach().to('dpcpp:0')
+        dpcpp_offsets = cpu_offsets.clone().detach().to(ipex.DEVICE)
 
         cpu_out = cpu_emb(cpu_input, cpu_offsets)

tests/cpu/test_interaction.py
Lines changed: 4 additions & 4 deletions

@@ -16,21 +16,21 @@ def interact_features(x, ly):
     Z = torch.bmm(T, torch.transpose(T, 1, 2))
     _, ni, nj = Z.shape
     offset = 0
-    li = torch.tensor([i for i in range(ni) for j in range(i + offset)], device='dpcpp')
-    lj = torch.tensor([j for i in range(nj) for j in range(i + offset)], device='dpcpp')
+    li = torch.tensor([i for i in range(ni) for j in range(i + offset)], device=ipex.DEVICE)
+    lj = torch.tensor([j for i in range(nj) for j in range(i + offset)], device=ipex.DEVICE)
     Zflat = Z[:, li, lj]
     # concatenate dense features and interactions
     R = torch.cat([x] + [Zflat], dim=1)
     return R
 
 def run(dtype='float32'):
     print("##################### testing with %s"% str(dtype))
-    x1 = torch.randn([2048, 128], device='dpcpp').to(dtype).clone().detach().requires_grad_()
+    x1 = torch.randn([2048, 128], device=ipex.DEVICE).to(dtype).clone().detach().requires_grad_()
     x2 = x1.clone().detach().requires_grad_()
     ly1 = []
     ly2 = []
     for i in range(0, 26):
-        V = torch.randn([2048, 128], device='dpcpp').to(dtype).clone().detach().requires_grad_()
+        V = torch.randn([2048, 128], device=ipex.DEVICE).to(dtype).clone().detach().requires_grad_()
         ly1.append(V)
         ly2.append(V.clone().detach().requires_grad_())

tests/cpu/test_jit.py
Lines changed: 6 additions & 6 deletions

@@ -59,7 +59,7 @@
 from torch.jit._recursive import wrap_cpp_module
 import copy
 
-import intel_pytorch_extension
+import intel_pytorch_extension as ipex
 from intel_pytorch_extension import core
 
 import torch.nn as nn

@@ -76,7 +76,7 @@
     IS_SANDCASTLE, load_tests, brute_pdist, brute_cdist, slowTest, \
     skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf
 
-device = 'dpcpp:0'
+device = ipex.DEVICE
 #device = 'cpu:0'
 SIZE = 100

@@ -210,8 +210,8 @@ def _test_output(self, model, x, kind=None):
         core.disable_jit()
         core.disable_mix_bf16_fp32()
 
-        model = model.to('dpcpp').eval()
-        x = x.to('dpcpp')
+        model = model.to(device).eval()
+        x = x.to(device)
         with torch.no_grad():
             result = model(x)

@@ -247,8 +247,8 @@ def _test_output_bf16(self, model, x, kind=None, prec=None):
         core.enable_jit()
         core.disable_mix_bf16_fp32()
 
-        model = model.to('dpcpp').eval()
-        x = x.to('dpcpp')
+        model = model.to(ipex.DEVICE).eval()
+        x = x.to(ipex.DEVICE)
         x2 = x.clone()
 
         fused_model = torch.jit.script(copy.deepcopy(model))
