Commit 648b71b

Merge branch 'beta' into cloud
2 parents ecbc09c + c14d6a5 commit 648b71b

9 files changed: +131 -82 lines changed

check_all.sh

Lines changed: 2 additions & 0 deletions
@@ -8,6 +8,8 @@ echo "pylint check"
 pylint tensorcircuit tests examples/*.py
 echo "pytest check"
 pytest -n auto --cov=tensorcircuit -vv -W ignore::DeprecationWarning
+# for test on gpu machine, please set `export TF_FORCE_GPU_ALLOW_GROWTH=true` for tf
+# and `export XLA_PYTHON_CLIENT_PREALLOCATE=false` for jax to avoid OOM in testing
 echo "sphinx check"
 cd docs && sphinx-build source build/html && sphinx-build source -D language="zh" build/html_cn
 echo "all checks passed, congratulation! 💐"

examples/variational_dynamics_circuit.py

Lines changed: 48 additions & 42 deletions
@@ -184,10 +184,10 @@ def numdiff(i):
     f.append(-0.5j)
     door.append([0, i, numdiff(i)])
 for i in range(N - 1):
-    f.append(-1j)
+    f.append(-0.5j)
     door.append([5, i, i + 1])
 for i in range(N - 1):
-    f.append(-1j)
+    f.append(-0.5j)
     door.append([3, i, i + 1])
 for i in range(N):
     h.append(1)
@@ -235,54 +235,60 @@ def numdiff(i):
 
 # variation realize
 ODE_theta = tf.zeros(len(door), dtype="float64")
+
+a_batch_theta = []
+a_batch_is_k = []
+a_batch_is_q = []
+for k in range(len(door)):
+    for q in range(len(door)):
+        is_k = [0 for _ in range(len(door))]
+        is_k[k] = 1
+        is_q = [0 for _ in range(len(door))]
+        is_q[q] = 1
+        if how_variation == 0:
+            a_batch_theta.append(np.angle(f[q]) - np.angle(f[k]))
+        else:
+            a_batch_theta.append(np.angle(f[q]) - np.angle(f[k]) - math.pi / 2)
+        a_batch_is_k.append(is_k)
+        a_batch_is_q.append(is_q)
+a_batch_theta = tc.array_to_tensor(a_batch_theta)
+a_batch_is_k = tf.constant(a_batch_is_k)
+a_batch_is_q = tf.constant(a_batch_is_q)
+
+c_batch_theta = []
+c_batch_is_k = []
+c_batch_is_q = []
+for k in range(len(door)):
+    for q in range(len(h_door)):
+        is_k = [0 for _ in range(len(door))]
+        is_k[k] = 1
+        is_q = [0 for _ in range(len(door))]
+        is_q[q] = 1
+        c_batch_is_k.append(is_k)
+        c_batch_is_q.append(is_q)
+        if how_variation == 0:
+            c_batch_theta.append(np.angle(h[q]) - np.angle(f[k]) - math.pi / 2)
+        else:
+            c_batch_theta.append(np.angle(h[q]) - np.angle(f[k]) + math.pi)
+c_batch_theta = tc.array_to_tensor(c_batch_theta)
+c_batch_is_k = tf.constant(c_batch_is_k)
+c_batch_is_q = tf.constant(c_batch_is_q)
+
 for T in range(int(t / dt)):
     # calculate coefficient in paper
-    A = np.zeros((len(door), len(door)))
-    C = np.zeros(len(door))
-    batch_theta = []
-    batch_is_k = []
-    batch_is_q = []
-    for k in range(len(door)):
-        for q in range(len(door)):
-            is_k = [0 for _ in range(len(door))]
-            is_k[k] = 1
-            is_q = [0 for _ in range(len(door))]
-            is_q[q] = 1
-            if how_variation == 0:
-                batch_theta.append(np.angle(f[q]) - np.angle(f[k]))
-            else:
-                batch_theta.append(np.angle(f[q]) - np.angle(f[k]) - math.pi / 2)
-            batch_is_k.append(is_k)
-            batch_is_q.append(is_q)
-    batch_theta = tc.array_to_tensor(batch_theta)
-    batch_is_k = tf.constant(batch_is_k)
-    batch_is_q = tf.constant(batch_is_q)
-    vmap_result = Calculation_A_vmap(batch_theta, batch_is_k, batch_is_q, ODE_theta)
+
+    vmap_result = Calculation_A_vmap(
+        a_batch_theta, a_batch_is_k, a_batch_is_q, ODE_theta
+    )
     A = tf.cast(
         tf.tensordot(tf.abs(f), tf.abs(f), 0), dtype="float64"
     ) * tf.reshape(
         tc.backend.cast(vmap_result, dtype="float64"), [len(door), len(door)]
     )
 
-    batch_theta = []
-    batch_is_k = []
-    batch_is_q = []
-    for k in range(len(door)):
-        for q in range(len(h_door)):
-            is_k = [0 for _ in range(len(door))]
-            is_k[k] = 1
-            is_q = [0 for _ in range(len(door))]
-            is_q[q] = 1
-            if how_variation == 0:
-                batch_theta.append(np.angle(h[q]) - np.angle(f[k]) - math.pi / 2)
-            else:
-                batch_theta.append(np.angle(h[q]) - np.angle(f[k]) + math.pi)
-            batch_is_k.append(is_k)
-            batch_is_q.append(is_q)
-    batch_theta = tc.array_to_tensor(batch_theta)
-    batch_is_k = tf.constant(batch_is_k)
-    batch_is_q = tf.constant(batch_is_q)
-    vmap_result = Calculation_C_vmap(batch_theta, batch_is_k, batch_is_q, ODE_theta)
+    vmap_result = Calculation_C_vmap(
+        c_batch_theta, c_batch_is_k, c_batch_is_q, ODE_theta
+    )
     C = tf.reduce_sum(
         tf.cast(tf.tensordot(tf.abs(f), tf.abs(h), 0), dtype="float64")
         * tf.reshape(
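The rewrite above is loop-invariant hoisting: the one-hot masks and phase offsets fed to Calculation_A_vmap/Calculation_C_vmap depend only on f, h, and how_variation, never on the time step, so they are now built once before the for T loop instead of being rebuilt (and re-wrapped in tf.constant) on every iteration. A minimal self-contained sketch of the same pattern, with illustrative names not taken from the example:

    import numpy as np
    import tensorflow as tf

    n = 4
    # Loop-invariant input: build the one-hot masks once, outside the loop.
    masks = tf.constant(np.eye(n), dtype=tf.float64)

    def step(theta):
        # Only the theta-dependent work remains inside the hot loop.
        return tf.linalg.matvec(masks, tf.cos(theta))

    theta = tf.zeros([n], dtype=tf.float64)
    for _ in range(100):
        theta = theta + 0.01 * step(theta)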

tensorcircuit/backends/cupy_backend.py

Lines changed: 26 additions & 13 deletions
@@ -8,6 +8,7 @@
 from typing import Any, Callable, Optional, Sequence, Tuple, Union
 
 import numpy as np
+import scipy
 
 try:  # old version tn compatiblity
     from tensornetwork.backends import base_backend
@@ -62,6 +63,18 @@ def sum(
     ) -> Tensor:
         return cp.sum(a, axis=axis, keepdims=keepdims)
 
+    def conj(self, tensor: Tensor) -> Tensor:
+        return tensor.conj()
+
+    def sign(self, tensor: Tensor) -> Tensor:
+        return cp.sign(tensor)
+
+    def multiply(self, tensor1: Tensor, tensor2: Tensor) -> Tensor:
+        return tensor1 * tensor2
+
+    def norm(self, tensor: Tensor) -> Tensor:
+        return cp.linalg.norm(tensor)
+
     def shape_tuple(self, tensor: Tensor) -> Tuple[int]:
         return tensor.shape  # type:ignore
 
@@ -100,7 +113,7 @@ def copy(self, a: Tensor) -> Tensor:
         return a.copy()
 
     def expm(self, a: Tensor) -> Tensor:
-        raise NotImplementedError
+        return self.convert_to_tensor(scipy.linalg.expm(self.numpy(a)))
 
     def abs(self, a: Tensor) -> Tensor:
         return cp.abs(a)
@@ -307,7 +320,7 @@ def stateful_randu(
         dtype = dtype[-2:]
         if isinstance(shape, int):
             shape = (shape,)
-        r = g.uniform(low=low, high=high, size=shape)
+        r = g.random(shape) * (high - low) + low
         if dtype == "32":
            r = r.astype(np.float32)
        elif dtype == "64":
@@ -368,22 +381,22 @@ def switch(self, index: Tensor, branches: Sequence[Callable[[], Tensor]]) -> Tensor:
         return branches[index]()
 
     def device(self, a: Tensor) -> str:
-        return "gpu"
+        return self._dev2str(a.device)
 
     def device_move(self, a: Tensor, dev: Any) -> Tensor:
-        if dev == "gpu":
-            return a
-        raise ValueError("CuPy backend only support GPU device")
+        if isinstance(dev, str):
+            dev = self._str2dev(dev)
+        with dev:
+            return cp.asarray(a)
 
     def _dev2str(self, dev: Any) -> str:
-        if dev == "gpu":
-            return "gpu"
-        raise ValueError("CuPy backend only support GPU device")
+        return f"gpu:{dev.id}"
 
     def _str2dev(self, str_: str) -> Any:
-        if str_ == "gpu":
-            return "gpu"
-        raise ValueError("CuPy backend only support GPU device")
+        if str_ == "cpu":
+            raise ValueError("CuPy backend only support GPU device")
+        else:
+            return cp.cuda.Device(int(str_.split(":")[-1]))
 
     def stop_gradient(self, a: Tensor) -> Tensor:
         raise NotImplementedError("CuPy backend doesn't support AD")
@@ -409,7 +422,7 @@ def jit(
         f: Callable[..., Any],
         static_argnums: Optional[Union[int, Sequence[int]]] = None,
         jit_compile: Optional[bool] = None,
-        **kws: Any
+        **kws: Any,
     ) -> Callable[..., Any]:
         logger.warning("CuPy backend has no jit interface, just do nothing")
         return f
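With these changes the CuPy backend reports concrete device strings of the form "gpu:<id>", and device_move materializes the array on the requested device via cp.asarray inside that device context. A minimal usage sketch, assuming a CUDA-capable machine with cupy installed:

    import tensorcircuit as tc

    tc.set_backend("cupy")
    a = tc.backend.ones([2, 2])
    print(tc.backend.device(a))             # e.g. "gpu:0"
    b = tc.backend.device_move(a, "gpu:0")  # re-materialize a on device 0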

tensorcircuit/gates.py

Lines changed: 1 addition & 1 deletion
@@ -812,7 +812,7 @@ def exponential_gate(unitary: Tensor, theta: float, name: str = "none") -> Gate:
     :return: Exponential Gate
     :rtype: Gate
     """
-    theta = num_to_tensor(theta)
+    theta, unitary = num_to_tensor(theta, unitary)
     mat = backend.expm(-backend.i() * theta * unitary)
     dimension = reduce(mul, mat.shape)
     nolegs = int(np.log(dimension) / np.log(2))
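The one-line fix converts unitary to a backend tensor alongside theta, so exponential_gate can take a plain numpy matrix regardless of the active backend. A minimal sketch, assuming the TensorFlow backend is available:

    import numpy as np
    import tensorcircuit as tc

    tc.set_backend("tensorflow")
    xx = np.kron(tc.gates._x_matrix, tc.gates._x_matrix)  # plain numpy input
    g = tc.gates.exponential_gate(xx, 0.5)  # unitary now cast to a backend tensor with matching dtype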

tests/conftest.py

Lines changed: 12 additions & 0 deletions
@@ -48,6 +48,18 @@ def torchb():
         pytest.skip("****** No torch backend found, skipping test suit *******")
 
 
+@pytest.fixture(scope="function")
+def cpb():
+    try:
+        tc.set_backend("cupy")
+        yield
+        tc.set_backend("numpy")
+    except ImportError as e:
+        print(e)
+        tc.set_backend("numpy")
+        pytest.skip("****** No cupy backend found, skipping test suit *******")
+
+
 @pytest.fixture(scope="function")
 def highp():
     tc.set_dtype("complex128")
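A test opts into the new fixture by naming it as an argument; pytest then runs the body on the cupy backend and auto-skips when cupy is missing. A hypothetical example test, not part of this commit:

    def test_ones_on_cupy(cpb):
        a = tc.backend.ones([2, 2])
        assert tc.backend.shape_tuple(a) == (2, 2)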

tests/test_backends.py

Lines changed: 1 addition & 1 deletion
@@ -300,7 +300,7 @@ def test_backend_methods_2(backend):
 def test_device_cpu_only(backend):
     a = tc.backend.ones([])
     dev_str = tc.backend.device(a)
-    assert dev_str == "cpu"
+    assert dev_str in ["cpu", "gpu:0"]
     tc.backend.device_move(a, dev_str)