"""
Slicing the output wavefunction to save memory in the VQA context.
"""

from itertools import product
import numpy as np
import tensorcircuit as tc

K = tc.set_backend("jax")


def circuit(param, n, nlayers):
    c = tc.Circuit(n)
    for i in range(n):
        c.h(i)
    c = tc.templates.blocks.example_block(c, param, nlayers)
    return c


def sliced_state(c, cut, mask):
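    """
    Contract the circuit wavefunction with the cut qubits projected onto the
    computational-basis bitstring ``mask``, returning the resulting slice as a
    tensor over the remaining ``n - len(cut)`` qubits.
    """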
    # mask = Tensor([0, 1, 0])
    # cut = [0, 1, 2]
    n = c._nqubits
    ncut = len(cut)
    end0 = tc.array_to_tensor(np.array([1.0, 0.0]))
    end1 = tc.array_to_tensor(np.array([0.0, 1.0]))
    ends = [tc.Gate(mask[i] * end1 + (1 - mask[i]) * end0) for i in range(ncut)]
    nodes, front = c._copy()
    for j, i in enumerate(cut):
        front[i] ^ ends[j][0]
    oeo = []
    for i in range(n):
        if i not in cut:
            oeo.append(front[i])
    ss = tc.contractor(nodes + ends, output_edge_order=oeo)
    return ss


def sliced_op(ps, cut, mask1, mask2):
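    """
    Build the tensor network nodes for the Pauli string ``ps`` (encoded per qubit
    as 0 = I, 1 = X, 2 = Y, 3 = Z), with the legs on the cut qubits closed by the
    basis vectors selected by ``mask2`` (bra side) and ``mask1`` (ket side).
    Returns the nodes together with the dangling edges, ordered as
    [bra legs of uncut qubits] + [ket legs of uncut qubits].
    """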
    # ps: Tensor([0, 0, 1, 1])
    n = K.shape_tuple(ps)[-1]
    ncut = len(cut)
    end0 = tc.array_to_tensor(np.array([1.0, 0.0]))
    end1 = tc.array_to_tensor(np.array([0.0, 1.0]))
    endsr = [tc.Gate(mask1[i] * end1 + (1 - mask1[i]) * end0) for i in range(ncut)]
    endsl = [tc.Gate(mask2[i] * end1 + (1 - mask2[i]) * end0) for i in range(ncut)]

    structuresc = K.cast(ps, dtype="int32")
    structuresc = K.onehot(structuresc, num=4)
    structuresc = K.cast(structuresc, dtype=tc.dtypestr)
    obs = []
    for i in range(n):
        obs.append(
            tc.Gate(
                sum(
                    [
                        structuresc[i, k] * g.tensor
                        for k, g in enumerate(tc.gates.pauli_gates)
                    ]
                )
            )
        )
    for j, i in enumerate(cut):
        obs[i][0] ^ endsl[j][0]
        obs[i][1] ^ endsr[j][0]
    oeo = []
    for i in range(n):
        if i not in cut:
            oeo.append(obs[i][0])
    for i in range(n):
        if i not in cut:
            oeo.append(obs[i][1])
    return obs + endsl + endsr, oeo


def sliced_core(param, n, nlayers, ps, cut, mask1, mask2):
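    """
    Evaluate one slice of <psi|P|psi>: contract the ket slice selected by
    ``mask1`` with the conjugated bra slice selected by ``mask2`` through the
    sliced Pauli-string operator, and return the real part of the scalar.
    """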
    # param, ps, mask1 and mask2 are all tensors
    c = circuit(param, n, nlayers)
    ss = sliced_state(c, cut, mask1)
    ssc = sliced_state(c, cut, mask2)
    ssc, _ = tc.Circuit.copy([ssc], conj=True)
    op_nodes, op_edges = sliced_op(ps, cut, mask1, mask2)
    nodes = [ss] + ssc + op_nodes
    ssc = ssc[0]
    n = c._nqubits
    nleft = n - len(cut)
    for i in range(nleft):
        op_edges[i + nleft] ^ ss[i]
        op_edges[i] ^ ssc[i]
    scalar = tc.contractor(nodes)
    return K.real(scalar.tensor)


sliced_core_vvg = K.jit(
    K.vectorized_value_and_grad(sliced_core, argnums=0, vectorized_argnums=(5, 6)),
    static_argnums=(1, 2, 4),
)  # vmapped version, used when memory is sufficient

sliced_core_vg = K.jit(
    K.value_and_grad(sliced_core, argnums=0),
    static_argnums=(1, 2, 4),
)  # non-vmap version, used when memory is tight; the per-slice workload can then be distributed


def sliced_expectation_and_grad(param, n, nlayers, ps, cut, is_vmap=True):
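    """
    Sum the sliced expectation <psi|P|psi> and its gradient w.r.t. ``param``
    over all 2**len(cut) slices. For each ket mask, the matching bra mask flips
    the bits where the Pauli string acts as X or Y. With ``is_vmap=True`` the
    slices are batched via vmap; otherwise they are evaluated one by one.
    """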
    pst = tc.array_to_tensor(ps)
    res = 0.0
    mask1s = []
    mask2s = []
    for mask1 in product(*[(0, 1) for _ in cut]):
        mask1t = tc.array_to_tensor(np.array(mask1))
        mask1s.append(mask1t)
        mask2 = list(mask1)
        for j, i in enumerate(cut):
            if ps[i] in [1, 2]:
                mask2[j] = 1 - mask1[j]
        mask2t = tc.array_to_tensor(np.array(mask2))
        mask2s.append(mask2t)
    if is_vmap:
        mask1s = K.stack(mask1s)
        mask2s = K.stack(mask2s)
        res = sliced_core_vvg(param, n, nlayers, pst, cut, mask1s, mask2s)
        res = list(res)
        res[0] = K.sum(res[0])
        res = tuple(res)
    else:
        # memory bounded
        # can be modified to an adaptive pmap
        vs = 0.0
        gs = 0.0
        for i in range(len(mask1s)):
            mask1t = mask1s[i]
            mask2t = mask2s[i]
            v, g = sliced_core_vg(param, n, nlayers, pst, cut, mask1t, mask2t)
            vs += v
            gs += g
        res = (vs, gs)
    return res


def sliced_expectation_ref(c, ps, cut):
    """
    Reference implementation: a plain Python loop over all slices,
    without jit or vmap.
    """
    # ps: [0, 2, 1]
    res = 0.0
    for mask1 in product(*[(0, 1) for _ in cut]):
        mask1t = tc.array_to_tensor(np.array(mask1))
        ss = sliced_state(c, cut, mask1t)
        mask2 = list(mask1)
        for j, i in enumerate(cut):
            if ps[i] in [1, 2]:
                mask2[j] = 1 - mask1[j]
        mask2t = tc.array_to_tensor(np.array(mask2))
        ssc = sliced_state(c, cut, mask2t)
        ssc, _ = tc.Circuit.copy([ssc], conj=True)
        ps = tc.array_to_tensor(ps)
        op_nodes, op_edges = sliced_op(ps, cut, mask1t, mask2t)
        nodes = [ss] + ssc + op_nodes
        ssc = ssc[0]
        n = c._nqubits
        nleft = n - len(cut)
        for i in range(nleft):
            op_edges[i + nleft] ^ ss[i]
            op_edges[i] ^ ssc[i]
        scalar = tc.contractor(nodes)
        res += scalar.tensor
    return res


if __name__ == "__main__":
    n = 10
    nlayers = 5
    param = K.ones([n, 2 * nlayers], dtype="float32")
    cut = (0, 2, 5, 9)
    ops = [2, 0, 3, 1, 0, 0, 1, 2, 0, 1]
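    # Pauli string encoding: 0 = I, 1 = X, 2 = Y, 3 = Z, i.e. Y0 Z2 X3 X6 Y7 X9 here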
    ops_dict = tc.quantum.ps2xyz(ops)

    def trivial_core(param, n, nlayers):
        c = circuit(param, n, nlayers)
        return K.real(c.expectation_ps(**ops_dict))

    trivial_vg = K.jit(K.value_and_grad(trivial_core, argnums=0), static_argnums=(1, 2))

    print("reference impl")
    r0 = tc.utils.benchmark(trivial_vg, param, n, nlayers)
    print("vmapped slice")
    r1 = tc.utils.benchmark(
        sliced_expectation_and_grad, param, n, nlayers, ops, cut, True
    )
    print("naive for slice")
    r2 = tc.utils.benchmark(
        sliced_expectation_and_grad, param, n, nlayers, ops, cut, False
    )

    np.testing.assert_allclose(r0[0][0], r1[0][0], atol=1e-5)
    np.testing.assert_allclose(r2[0][0], r1[0][0], atol=1e-5)
    np.testing.assert_allclose(r0[0][1], r1[0][1], atol=1e-5)
    np.testing.assert_allclose(r2[0][1], r1[0][1], atol=1e-5)