diff --git a/tensorcircuit/abstractcircuit.py b/tensorcircuit/abstractcircuit.py index 3f3d5951..6dd6d07d 100644 --- a/tensorcircuit/abstractcircuit.py +++ b/tensorcircuit/abstractcircuit.py @@ -68,6 +68,7 @@ class AbstractCircuit: _nqubits: int + _d: int = 2 _qir: List[Dict[str, Any]] _extra_qir: List[Dict[str, Any]] inputs: Tensor diff --git a/tensorcircuit/basecircuit.py b/tensorcircuit/basecircuit.py index 765b4587..f996cf08 100644 --- a/tensorcircuit/basecircuit.py +++ b/tensorcircuit/basecircuit.py @@ -1,5 +1,9 @@ """ Quantum circuit: common methods for all circuit classes as MixIn + +Note: + - Supports qubit (d = 2) and qudit (d >= 2) systems. + - For string-encoded samples/counts when d <= 36, digits use base-d characters 0–9A–Z (A = 10, …, Z = 35). """ # pylint: disable=invalid-name @@ -20,6 +24,9 @@ measurement_counts, sample_int2bin, sample2all, + _infer_num_sites, + _decode_basis_label, + onehot_d_tensor, ) from .abstractcircuit import AbstractCircuit from .cons import npdtype, backend, dtypestr, contractor, rdtypestr @@ -40,8 +47,9 @@ class BaseCircuit(AbstractCircuit): is_mps = False @staticmethod - def all_zero_nodes(n: int, d: int = 2, prefix: str = "qb-") -> List[tn.Node]: - l = [0.0 for _ in range(d)] + def all_zero_nodes(n: int, prefix: str = "qb-", dim: int = 2) -> List[tn.Node]: + prefix = "qd-" if dim > 2 else prefix + l = [0.0 for _ in range(dim)] l[0] = 1.0 nodes = [ tn.Node( @@ -288,7 +296,7 @@ def expectation_before( for op, index in ops: if not isinstance(op, tn.Node): # op is only a matrix - op = backend.reshape2(op) + op = backend.reshaped(op, d=self._d) op = backend.cast(op, dtype=dtypestr) op = gates.Gate(op) else: @@ -354,12 +362,12 @@ def to_qir(self) -> List[Dict[str, Any]]: def perfect_sampling(self, status: Optional[Tensor] = None) -> Tuple[str, float]: """ - Sampling bistrings from the circuit output based on quantum amplitudes. + Sampling base-d strings (0–9A–Z when d <= 36) from the circuit output based on quantum amplitudes. Reference: arXiv:1201.3974. :param status: external randomness, with shape [nqubits], defaults to None :type status: Optional[Tensor] - :return: Sampled bit string and the corresponding theoretical probability. + :return: Sampled base-d string and the corresponding theoretical probability. :rtype: Tuple[str, float] """ return self.measure_jit(*range(self._nqubits), with_prob=True, status=status) @@ -368,10 +376,10 @@ def measure_jit( self, *index: int, with_prob: bool = False, status: Optional[Tensor] = None ) -> Tuple[Tensor, Tensor]: """ - Take measurement to the given quantum lines. + Take measurement on the given site indices (computational basis). This method is jittable is and about 100 times faster than unjit version! - :param index: Measure on which quantum line. + :param index: Measure on which site (wire) index. :type index: int :param with_prob: If true, theoretical probability is also returned. :type with_prob: bool, optional @@ -382,9 +390,8 @@ def measure_jit( """ # finally jit compatible ! and much faster than unjit version ! 
(100x) sample: List[Tensor] = [] - p = 1.0 - p = backend.convert_to_tensor(p) - p = backend.cast(p, dtype=rdtypestr) + one_r = backend.cast(backend.convert_to_tensor(1.0), rdtypestr) + p = one_r for k, j in enumerate(index): if self.is_dm is False: nodes1, edge1 = self._copy() @@ -399,40 +406,71 @@ def measure_jit( if i != j: e ^ edge2[i] for i in range(k): - m = (1 - sample[i]) * gates.array_to_tensor(np.array([1, 0])) + sample[ - i - ] * gates.array_to_tensor(np.array([0, 1])) - newnodes.append(Gate(m)) - newnodes[-1].id = id(newnodes[-1]) - newnodes[-1].is_dagger = False - newnodes[-1].flag = "measurement" - newnodes[-1].get_edge(0) ^ edge1[index[i]] - newnodes.append(Gate(m)) - newnodes[-1].id = id(newnodes[-1]) - newnodes[-1].is_dagger = True - newnodes[-1].flag = "measurement" - newnodes[-1].get_edge(0) ^ edge2[index[i]] + if self._d == 2: + m = (1 - sample[i]) * gates.array_to_tensor( + np.array([1, 0]) + ) + sample[i] * gates.array_to_tensor(np.array([0, 1])) + else: + m = onehot_d_tensor(sample[i], d=self._d) + g1 = Gate(m) + g1.id = id(g1) + g1.is_dagger = False + g1.flag = "measurement" + newnodes.append(g1) + g1.get_edge(0) ^ edge1[index[i]] + g2 = Gate(m) + g2.id = id(g2) + g2.is_dagger = True + g2.flag = "measurement" + newnodes.append(g2) + g2.get_edge(0) ^ edge2[index[i]] + rho = ( 1 / backend.cast(p, dtypestr) * contractor(newnodes, output_edge_order=[edge1[j], edge2[j]]).tensor ) - pu = backend.real(rho[0, 0]) - if status is None: - r = backend.implicit_randu()[0] + if self._d == 2: + pu = backend.real(rho[0, 0]) + if status is None: + r = backend.implicit_randu()[0] + else: + r = status[k] + r = backend.real(backend.cast(r, dtypestr)) + eps = 0.31415926 * 1e-12 + sign = ( + backend.sign(r - pu + eps) / 2 + 0.5 + ) # in case status is exactly 0.5 + sign = backend.convert_to_tensor(sign) + sign = backend.cast(sign, dtype=rdtypestr) + sign_complex = backend.cast(sign, dtypestr) + sample.append(sign_complex) + p = p * (pu * (-1) ** sign + sign) else: - r = status[k] - r = backend.real(backend.cast(r, dtypestr)) - eps = 0.31415926 * 1e-12 - sign = backend.sign(r - pu + eps) / 2 + 0.5 # in case status is exactly 0.5 - sign = backend.convert_to_tensor(sign) - sign = backend.cast(sign, dtype=rdtypestr) - sign_complex = backend.cast(sign, dtypestr) - sample.append(sign_complex) - p = p * (pu * (-1) ** sign + sign) - - sample = backend.stack(sample) - sample = backend.real(sample) + pu = backend.clip( + backend.real(backend.diagonal(rho)), + backend.convert_to_tensor(0.0), + backend.convert_to_tensor(1.0), + ) + pu = pu / backend.sum(pu) + if status is None: + ind = backend.implicit_randc( + a=backend.arange(self._d), + shape=1, + p=backend.cast(pu, rdtypestr), + ) + else: + one_r = backend.cast(backend.convert_to_tensor(1.0), rdtypestr) + st = backend.cast(status[k : k + 1], rdtypestr) + ind = backend.probability_sample( + shots=1, + p=backend.cast(pu, rdtypestr), + status=one_r - st, + ) + k_out = backend.cast(ind[0], "int32") + sample.append(backend.cast(k_out, rdtypestr)) + p = p * backend.cast(pu[k_out], rdtypestr) + sample = backend.real(backend.stack(sample)) if with_prob: return sample, p else: @@ -442,35 +480,48 @@ def measure_jit( def amplitude_before(self, l: Union[str, Tensor]) -> List[Gate]: r""" - Returns the tensornetwor nodes for the amplitude of the circuit given the bitstring l. 
- For state simulator, it computes :math:`\langle l\vert \psi\rangle`, - for density matrix simulator, it computes :math:`Tr(\rho \vert l\rangle \langle 1\vert)` + Returns the tensornetwor nodes for the amplitude of the circuit given a computational-basis label ``l``. + For a state simulator, it computes :math:`\langle l \vert \psi\rangle`; + for a density-matrix simulator, it computes :math:`\mathrm{Tr}(\rho \vert l\rangle\langle l\vert)`. Note how these two are different up to a square operation. - :param l: The bitstring of 0 and 1s. + :Example: + + >>> c = tc.Circuit(2) + >>> c.X(0) + >>> c.amplitude("10") # d=2, per-qubit digits + array(1.+0.j, dtype=complex64) + >>> c.CNOT(0, 1) + >>> c.amplitude("11") + array(1.+0.j, dtype=complex64) + + For qudits (d>2, d<=36): + >>> c = tc.Circuit(3, dim=12) + >>> c.amplitude("0A2") # base-12 string, A stands for 10 + + :param l: Basis label. + - If a string: it must be a base-d string of length ``nqubits``, using 0–9A–Z (A=10,…,Z=35) when ``d<=36``. + - If a tensor/array/list: it should contain per-site integers in ``[0, d-1]`` with length ``nqubits``. :type l: Union[str, Tensor] :return: The tensornetwork nodes for the amplitude of the circuit. :rtype: List[Gate] """ + no, d_edges = self._copy() ms = [] if self.is_dm: msconj = [] if isinstance(l, str): - for s in l: - if s == "1": - endn = np.array([0, 1], dtype=npdtype) - elif s == "0": - endn = np.array([1, 0], dtype=npdtype) - ms.append(tn.Node(endn)) + symbols = _decode_basis_label(l, n=self._nqubits, dim=self._d) + for k in symbols: + n = onehot_d_tensor(k, d=self._d) + ms.append(tn.Node(n)) if self.is_dm: - msconj.append(tn.Node(endn)) - else: # l is Tensor + msconj.append(tn.Node(n)) + else: l = backend.cast(l, dtype=dtypestr) for i in range(self._nqubits): - endn = l[i] * gates.array_to_tensor(np.array([0, 1])) + ( - 1 - l[i] - ) * gates.array_to_tensor(np.array([1, 0])) + endn = onehot_d_tensor(l[i], d=self._d) ms.append(tn.Node(endn)) if self.is_dm: msconj.append(tn.Node(endn)) @@ -521,17 +572,18 @@ def amplitude(self, l: Union[str, Tensor]) -> Tensor: def probability(self) -> Tensor: """ - get the 2^n length probability vector over computational basis + get the d^n length probability vector over computational basis - :return: probability vector + :return: probability vector of shape [dim**n] :rtype: Tensor """ s = self.state() # type: ignore if self.is_dm is False: - p = backend.abs(s) ** 2 - + amp = backend.reshape(s, [-1]) + p = backend.real(backend.abs(amp) ** 2) else: - p = backend.abs(backend.diagonal(s)) + diag = backend.diagonal(s) + p = backend.real(backend.reshape(diag, [-1])) return p @partial(arg_alias, alias_dict={"format": ["format_"]}) @@ -545,7 +597,7 @@ def sample( status: Optional[Tensor] = None, jittable: bool = True, ) -> Any: - """ + r""" batched sampling from state or circuit tensor network directly :param batch: number of samples, defaults to None @@ -568,6 +620,7 @@ def sample( "count_tuple": # (np.array([0]), np.array([2])) "count_dict_bin": # {"00": 2, "01": 0, "10": 0, "11": 0} + for cases d\in [11, 36], use 0–9A–Z digits (e.g., 'A' -> 10, …, 'Z' -> 35); "count_dict_int": # {0: 2, 1: 0, 2: 0, 3: 0} @@ -619,7 +672,7 @@ def perfect_sampling(key: Any) -> Any: return r r = backend.stack([ri[0] for ri in r]) # type: ignore ch = backend.cast(r, "int32") - # ch = sample_bin2int(r, self._nqubits) + # ch = sample_bin2int(r, self._nqubits, dim=self._d) else: # allow_state if batch is None: nbatch = 1 @@ -644,7 +697,7 @@ def perfect_sampling(key: Any) -> Any: # 2, # ) 
if format is None: # for backward compatibility - confg = sample_int2bin(ch, self._nqubits) + confg = sample_int2bin(ch, self._nqubits, dim=self._d) prob = backend.gather1d(p, ch) r = list(zip(confg, prob)) # type: ignore if batch is None: @@ -652,7 +705,9 @@ def perfect_sampling(key: Any) -> Any: return r if self._nqubits > 35: jittable = False - return sample2all(sample=ch, n=self._nqubits, format=format, jittable=jittable) + return sample2all( + sample=ch, n=self._nqubits, format=format, jittable=jittable, dim=self._d + ) def sample_expectation_ps( self, @@ -852,9 +907,9 @@ def replace_inputs(self, inputs: Tensor) -> None: """ inputs = backend.reshape(inputs, [-1]) N = inputs.shape[0] - n = int(np.log(N) / np.log(2)) + n = _infer_num_sites(N, self._d) assert n == self._nqubits - inputs = backend.reshape(inputs, [2 for _ in range(n)]) + inputs = backend.reshape(inputs, [self._d for _ in range(n)]) if self.inputs is not None: self._nodes[0].tensor = inputs if self.is_dm: @@ -885,9 +940,9 @@ def cond_measurement(self, index: int, status: Optional[float] = None) -> Tensor - :param index: the qubit for the z-basis measurement + :param index: the site index for the Z-basis measurement :type index: int - :return: 0 or 1 for z measurement on up and down freedom + :return: 0 or 1 for Z-basis measurement outcome :rtype: Tensor """ return self.general_kraus( # type: ignore @@ -966,8 +1021,8 @@ def get_quvector(self) -> QuVector: def projected_subsystem(self, traceout: Tensor, left: Tuple[int, ...]) -> Tensor: """ - remaining wavefunction or density matrix on qubits in left, with other qubits - fixed in 0 or 1 indicated by traceout + remaining wavefunction or density matrix on sites in ``left``, with other sites + fixed to given digits (0..d-1) as indicated by ``traceout`` :param traceout: can be jitted :type traceout: Tensor @@ -976,15 +1031,14 @@ def projected_subsystem(self, traceout: Tensor, left: Tuple[int, ...]) -> Tensor :return: _description_ :rtype: Tensor """ - end0, end1 = gates.array_to_tensor(np.array([1.0, 0]), np.array([0, 1.0])) + traceout = backend.cast(traceout, dtypestr) nodes, front = self._copy() L = self._nqubits edges = [] for i in range(len(traceout)): if i not in left: - b = traceout[i] - n = gates.Gate((1 - b) * end0 + b * end1) + n = Gate(onehot_d_tensor(traceout[i], d=self._d)) nodes.append(n) front[i] ^ n[0] else: @@ -993,8 +1047,7 @@ def projected_subsystem(self, traceout: Tensor, left: Tuple[int, ...]) -> Tensor if self.is_dm: for i in range(len(traceout)): if i not in left: - b = traceout[i] - n = gates.Gate((1 - b) * end0 + b * end1) + n = Gate(onehot_d_tensor(traceout[i], d=self._d)) nodes.append(n) front[i + L] ^ n[0] else: diff --git a/tensorcircuit/circuit.py b/tensorcircuit/circuit.py index b18bdf49..2347bdca 100644 --- a/tensorcircuit/circuit.py +++ b/tensorcircuit/circuit.py @@ -1,5 +1,7 @@ """ -Quantum circuit: the state simulator +Quantum circuit: the state simulator. +Supports qubit (dim=2) and qudit (3 <= dim <= 36) systems. +For string-encoded samples/counts, digits use 0–9A–Z where A=10, …, Z=35. """ # pylint: disable=invalid-name @@ -13,8 +15,8 @@ from . import gates from . 
import channels -from .cons import backend, contractor, dtypestr, npdtype -from .quantum import QuOperator, identity +from .cons import backend, contractor, dtypestr, npdtype, _ALPHABET +from .quantum import QuOperator, identity, _infer_num_sites, onehot_d_tensor from .simplify import _full_light_cone_cancel from .basecircuit import BaseCircuit @@ -23,7 +25,7 @@ class Circuit(BaseCircuit): - """ + r""" ``Circuit`` class. Simple usage demo below. @@ -45,14 +47,18 @@ def __init__( inputs: Optional[Tensor] = None, mps_inputs: Optional[QuOperator] = None, split: Optional[Dict[str, Any]] = None, + dim: Optional[int] = None, ) -> None: - """ + r""" Circuit object based on state simulator. + Do not use this class with d!=2 directly, use tc.QuditCircuit instead for qudit systems. :param nqubits: The number of qubits in the circuit. :type nqubits: int + :param dim: The local Hilbert space dimension per site. Qudit is supported for 2 <= d <= 36. + :type dim: If None, the dimension of the circuit will be `2`, which is a qubit system. :param inputs: If not None, the initial state of the circuit is taken as ``inputs`` - instead of :math:`\\vert 0\\rangle^n` qubits, defaults to None. + instead of :math:`\vert 0 \rangle^n` qubits, defaults to None. :type inputs: Optional[Tensor], optional :param mps_inputs: QuVector for a MPS like initial wavefunction. :type mps_inputs: Optional[QuOperator] @@ -60,6 +66,7 @@ def __init__( ``max_singular_values`` and ``max_truncation_err``. :type split: Optional[Dict[str, Any]] """ + self._d = 2 if dim is None else dim self.inputs = inputs self.mps_inputs = mps_inputs self.split = split @@ -70,18 +77,19 @@ def __init__( "inputs": inputs, "mps_inputs": mps_inputs, "split": split, + "dim": dim, } if (inputs is None) and (mps_inputs is None): - nodes = self.all_zero_nodes(nqubits) + nodes = self.all_zero_nodes(nqubits, dim=self._d) self._front = [n.get_edge(0) for n in nodes] elif inputs is not None: # provide input function inputs = backend.convert_to_tensor(inputs) inputs = backend.cast(inputs, dtype=dtypestr) inputs = backend.reshape(inputs, [-1]) N = inputs.shape[0] - n = int(np.log(N) / np.log(2)) + n = _infer_num_sites(N, dim=self._d) assert n == nqubits or n == 2 * nqubits - inputs = backend.reshape(inputs, [2 for _ in range(n)]) + inputs = backend.reshape(inputs, [self._d for _ in range(n)]) inputs = Gate(inputs) nodes = [inputs] self._front = [inputs.get_edge(i) for i in range(n)] @@ -178,27 +186,14 @@ def mid_measurement(self, index: int, keep: int = 0) -> Tensor: :param index: The index of qubit that the Z direction postselection applied on. :type index: int - :param keep: 0 for spin up, 1 for spin down, defaults to be 0. + :param keep: the post-selected digit in {0, ..., d-1}, defaults to be 0. 
:type keep: int, optional """ # normalization not guaranteed - # assert keep in [0, 1] - if keep < 0.5: - gate = np.array( - [ - [1.0], - [0.0], - ], - dtype=npdtype, - ) - else: - gate = np.array( - [ - [0.0], - [1.0], - ], - dtype=npdtype, - ) + gate = np.array( + [[0.0] if _idx != keep else [1.0] for _idx in range(self._d)], + dtype=npdtype, + ) mg1 = tn.Node(gate) mg2 = tn.Node(gate) @@ -479,7 +474,7 @@ def step_function(x: Tensor) -> Tensor: if get_gate_from_index is None: raise ValueError("no `get_gate_from_index` implementation is provided") g = get_gate_from_index(r, kraus) - g = backend.reshape(g, [2 for _ in range(sites * 2)]) + g = backend.reshape(g, [self._d for _ in range(sites * 2)]) self.any(*index, unitary=g, name=name) # type: ignore return r @@ -680,7 +675,7 @@ def _meta_apply_channels(cls) -> None: Apply %s quantum channel on the circuit. See :py:meth:`tensorcircuit.channels.%schannel` - :param index: Qubit number that the gate applies on. + :param index: Site index that the gate applies on. :type index: int. :param status: uniform external random number between 0 and 1 :type status: Tensor @@ -737,8 +732,8 @@ def get_quoperator(self) -> QuOperator: :return: ``QuOperator`` object for the circuit unitary (open indices for the input state) :rtype: QuOperator """ - mps = identity([2 for _ in range(self._nqubits)]) - c = Circuit(self._nqubits) + mps = identity([self._d for _ in range(self._nqubits)]) + c = Circuit(self._nqubits, dim=self._d) ns, es = self._copy() c._nodes = ns c._front = es @@ -758,8 +753,8 @@ def matrix(self) -> Tensor: :return: The circuit unitary matrix :rtype: Tensor """ - mps = identity([2 for _ in range(self._nqubits)]) - c = Circuit(self._nqubits) + mps = identity([self._d for _ in range(self._nqubits)]) + c = Circuit(self._nqubits, dim=self._d) ns, es = self._copy() c._nodes = ns c._front = es @@ -772,6 +767,9 @@ def measure_reference( """ Take measurement on the given quantum lines by ``index``. + Return format: + - For d <= 36, the sample is a base-d string using 0–9A–Z (A=10,…). + :Example: >>> c = tc.Circuit(3) @@ -800,10 +798,7 @@ def measure_reference( if i != j: e ^ edge2[i] for i in range(len(sample)): - if sample[i] == "0": - m = np.array([1, 0], dtype=npdtype) - else: - m = np.array([0, 1], dtype=npdtype) + m = onehot_d_tensor(sample[i], d=self._d) nodes1.append(tn.Node(m)) nodes1[-1].get_edge(0) ^ edge1[index[i]] nodes2.append(tn.Node(m)) @@ -814,15 +809,13 @@ def measure_reference( / p * contractor(nodes1, output_edge_order=[edge1[j], edge2[j]]).tensor ) - pu = rho[0, 0] - r = backend.random_uniform([]) - r = backend.real(backend.cast(r, dtypestr)) - if r < backend.real(pu): - sample += "0" - p = p * pu - else: - sample += "1" - p = p * (1 - pu) + probs = backend.real(backend.diagonal(rho)) + probs /= backend.sum(probs) + outcome = backend.implicit_randc(self._d, shape=1, p=probs) + + sample += _ALPHABET[outcome] + p *= float(probs[outcome]) + if with_prob: return sample, p else: @@ -842,6 +835,10 @@ def expectation( ) -> Tensor: """ Compute the expectation of corresponding operators. + For qudit (d > 2), + ensure that operator tensor shapes are consistent with d (each site contributes two axes of size d). + + Noise shorthand (via noise_conf) is qubit-only; for d>2, use explicit operators. 
:Example: @@ -883,8 +880,6 @@ def expectation( :return: Tensor with one element :rtype: Tensor """ - from .noisemodel import expectation_noisfy - if noise_conf is None: # if not reuse: # nodes1, edge1 = self._copy() @@ -899,6 +894,8 @@ def expectation( nodes1 = _full_light_cone_cancel(nodes1) return contractor(nodes1).tensor else: + from .noisemodel import expectation_noisfy + return expectation_noisfy( self, *ops, @@ -919,9 +916,11 @@ def expectation( bra: Optional[Tensor] = None, conj: bool = True, normalization: bool = False, + dim: Optional[int] = None, ) -> Tensor: """ Compute :math:`\\langle bra\\vert ops \\vert ket\\rangle`. + For qudit systems (d>2), ops must be reshaped with per-site axes of length d. Example 1 (:math:`bra` is same as :math:`ket`) @@ -966,6 +965,8 @@ def expectation( :type ket: Tensor :param bra: :math:`bra`, defaults to None, which is the same as ``ket``. :type bra: Optional[Tensor], optional + :param dim: dimension of the circuit (defaults to 2) + :type dim: int, optional :param conj: :math:`bra` changes to the adjoint matrix of :math:`bra`, defaults to True. :type conj: bool, optional :param normalization: Normalize the :math:`ket` and :math:`bra`, defaults to False. @@ -974,6 +975,7 @@ def expectation( :return: The result of :math:`\\langle bra\\vert ops \\vert ket\\rangle`. :rtype: Tensor """ + dim = 2 if dim is None else dim if bra is None: bra = ket if isinstance(ket, QuOperator): @@ -987,7 +989,7 @@ def expectation( for op, index in ops: if not isinstance(op, tn.Node): # op is only a matrix - op = backend.reshape2(op) + op = backend.reshaped(op, dim) op = gates.Gate(op) if isinstance(index, int): index = [index] @@ -1011,8 +1013,8 @@ def expectation( if conj is True: bra = backend.conj(bra) ket = backend.reshape(ket, [-1]) - ket = backend.reshape2(ket) - bra = backend.reshape2(bra) + ket = backend.reshaped(ket, dim) + bra = backend.reshaped(bra, dim) n = len(backend.shape_tuple(ket)) ket = Gate(ket) bra = Gate(bra) @@ -1024,7 +1026,7 @@ def expectation( for op, index in ops: if not isinstance(op, tn.Node): # op is only a matrix - op = backend.reshape2(op) + op = backend.reshaped(op, dim) op = gates.Gate(op) if isinstance(index, int): index = [index] diff --git a/tensorcircuit/cons.py b/tensorcircuit/cons.py index c7db79ac..660b1c2a 100644 --- a/tensorcircuit/cons.py +++ b/tensorcircuit/cons.py @@ -63,6 +63,7 @@ def sorted_edges(edges: Iterator[tn.Edge]) -> List[tn.Edge]: npdtype = np.complex64 backend: NumpyBackend = get_backend("numpy") contractor = tn.contractors.auto +_ALPHABET = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" # these above lines are just for mypy, it is not very good at evaluating runtime object diff --git a/tensorcircuit/densitymatrix.py b/tensorcircuit/densitymatrix.py index f819e058..5fbfa420 100644 --- a/tensorcircuit/densitymatrix.py +++ b/tensorcircuit/densitymatrix.py @@ -17,7 +17,7 @@ from .circuit import Circuit from .cons import backend, contractor, dtypestr from .basecircuit import BaseCircuit -from .quantum import QuOperator +from .quantum import QuOperator, _infer_num_sites Gate = gates.Gate Tensor = Any @@ -35,9 +35,11 @@ def __init__( dminputs: Optional[Tensor] = None, mpo_dminputs: Optional[QuOperator] = None, split: Optional[Dict[str, Any]] = None, + dim: Optional[int] = None, ) -> None: """ The density matrix simulator based on tensornetwork engine. + Do not use this class with d!=2 directly :param nqubits: Number of qubits :type nqubits: int @@ -55,6 +57,7 @@ def __init__( ``max_singular_values`` and ``max_truncation_err``. 
:type split: Optional[Dict[str, Any]] """ + self._d = 2 if dim is None else dim if not empty: if ( (inputs is None) @@ -73,9 +76,9 @@ def __init__( inputs = backend.cast(inputs, dtype=dtypestr) inputs = backend.reshape(inputs, [-1]) N = inputs.shape[0] - n = int(np.log(N) / np.log(2)) + n = _infer_num_sites(N, self._d) assert n == nqubits - inputs = backend.reshape(inputs, [2 for _ in range(n)]) + inputs = backend.reshape(inputs, [self._d for _ in range(n)]) inputs_gate = Gate(inputs) self._nodes = [inputs_gate] self.coloring_nodes(self._nodes) @@ -94,7 +97,9 @@ def __init__( elif dminputs is not None: dminputs = backend.convert_to_tensor(dminputs) dminputs = backend.cast(dminputs, dtype=dtypestr) - dminputs = backend.reshape(dminputs, [2 for _ in range(2 * nqubits)]) + dminputs = backend.reshape( + dminputs, [self._d for _ in range(2 * nqubits)] + ) dminputs_gate = Gate(dminputs) nodes = [dminputs_gate] self._front = [dminputs_gate.get_edge(i) for i in range(2 * nqubits)] @@ -217,7 +222,7 @@ def apply_general_kraus( dd = dmc.densitymatrix() circuits.append(dd) tensor = reduce(add, circuits) - tensor = backend.reshape(tensor, [2 for _ in range(2 * self._nqubits)]) + tensor = backend.reshaped(tensor, d=self._d) self._nodes = [Gate(tensor)] dangling = [e for e in self._nodes[0]] self._front = dangling @@ -255,7 +260,7 @@ def densitymatrix(self, check: bool = False, reuse: bool = True) -> Tensor: t = contractor(nodes, output_edge_order=d_edges) else: t = nodes[0] - dm = backend.reshape(t.tensor, shape=[2**self._nqubits, 2**self._nqubits]) + dm = backend.reshapem(t.tensor) if check: self.check_density_matrix(dm) return dm @@ -274,7 +279,7 @@ def wavefunction(self) -> Tensor: dm = self.densitymatrix() e, v = backend.eigh(dm) np.testing.assert_allclose( - e[:-1], backend.zeros([2**self._nqubits - 1]), atol=1e-5 + e[:-1], backend.zeros([self._d**self._nqubits - 1]), atol=1e-5 ) return v[:, -1] @@ -375,7 +380,7 @@ def apply_general_kraus( # index = [index[0] for _ in range(len(kraus))] super_op = kraus_to_super_gate(kraus) nlegs = 4 * len(index) - super_op = backend.reshape(super_op, [2 for _ in range(nlegs)]) + super_op = backend.reshape(super_op, [self._d for _ in range(nlegs)]) super_op = Gate(super_op) o2i = int(nlegs / 2) r2l = int(nlegs / 4) diff --git a/tensorcircuit/gates.py b/tensorcircuit/gates.py index 459666ab..e9f80ae3 100644 --- a/tensorcircuit/gates.py +++ b/tensorcircuit/gates.py @@ -34,6 +34,12 @@ plus_state = 1.0 / np.sqrt(2) * (zero_state + one_state) minus_state = 1.0 / np.sqrt(2) * (zero_state - one_state) +# Common elements as np.ndarray objects +_i00 = np.array([[1.0, 0.0], [0.0, 0.0]]) +_i01 = np.array([[0.0, 1.0], [0.0, 0.0]]) +_i10 = np.array([[0.0, 0.0], [1.0, 0.0]]) +_i11 = np.array([[0.0, 0.0], [0.0, 1.0]]) + # Common single qubit gates as np.ndarray objects _h_matrix = 1 / np.sqrt(2) * np.array([[1.0, 1.0], [1.0, -1.0]]) _i_matrix = np.array([[1.0, 0.0], [0.0, 1.0]]) @@ -229,7 +235,7 @@ def num_to_tensor(*num: Union[float, Tensor], dtype: Optional[str] = None) -> An # TODO(@YHPeter): fix __doc__ for same function with different names l = [] - if not dtype: + if dtype is None: dtype = dtypestr for n in num: if not backend.is_tensor(n): @@ -245,7 +251,7 @@ def num_to_tensor(*num: Union[float, Tensor], dtype: Optional[str] = None) -> An def gate_wrapper(m: Tensor, n: Optional[str] = None) -> Gate: - if not n: + if n is None: n = "unknowngate" m = m.astype(npdtype) return Gate(deepcopy(m), name=n) @@ -255,7 +261,7 @@ class GateF: def __init__( self, m: Tensor, n: 
Optional[str] = None, ctrl: Optional[List[int]] = None ): - if not n: + if n is None: n = "unknowngate" self.m = m self.n = n @@ -310,7 +316,7 @@ def f(*args: Any, **kws: Any) -> Any: return Gate(cu, name="c" + self.n) - if not self.ctrl: + if self.ctrl is None: ctrl = [1] else: ctrl = [1] + self.ctrl @@ -330,7 +336,7 @@ def f(*args: Any, **kws: Any) -> Any: # TODO(@refraction-ray): ctrl convention to be finally determined return Gate(ocu, name="o" + self.n) - if not self.ctrl: + if self.ctrl is None: ctrl = [0] else: ctrl = [0] + self.ctrl @@ -349,7 +355,7 @@ def __init__( n: Optional[str] = None, ctrl: Optional[List[int]] = None, ): - if not n: + if n is None: n = "unknowngate" self.f = f self.n = n @@ -483,7 +489,7 @@ def phase_gate(theta: float = 0) -> Gate: :rtype: Gate """ theta = array_to_tensor(theta) - i00, i11 = array_to_tensor(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, 1]])) + i00, i11 = array_to_tensor(_i00, _i11) unitary = i00 + backend.exp(1.0j * theta) * i11 return Gate(unitary) @@ -512,7 +518,7 @@ def get_u_parameter(m: Tensor) -> Tuple[float, float, float]: return theta, phi, lbd -def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate: +def u_gate(theta: float = 0.0, phi: float = 0.0, lbd: float = 0.0) -> Gate: r""" IBMQ U gate following the converntion of OpenQASM3.0. See `OpenQASM doc `_ @@ -533,12 +539,7 @@ def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate: :rtype: Gate """ theta, phi, lbd = array_to_tensor(theta, phi, lbd) - i00, i01, i10, i11 = array_to_tensor( - np.array([[1, 0], [0, 0]]), - np.array([[0, 1], [0, 0]]), - np.array([[0, 0], [1, 0]]), - np.array([[0, 0], [0, 1]]), - ) + i00, i01, i10, i11 = array_to_tensor(_i00, _i01, _i10, _i11) unitary = ( backend.cos(theta / 2) * i00 - backend.exp(1.0j * lbd) * backend.sin(theta / 2) * i01 @@ -548,7 +549,7 @@ def u_gate(theta: float = 0, phi: float = 0, lbd: float = 0) -> Gate: return Gate(unitary) -def r_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: +def r_gate(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate: r""" General single qubit rotation gate @@ -582,7 +583,7 @@ def r_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: # r = r_gate -def rx_gate(theta: float = 0) -> Gate: +def rx_gate(theta: float = 0.0) -> Gate: r""" Rotation gate along :math:`x` axis. @@ -603,7 +604,7 @@ def rx_gate(theta: float = 0) -> Gate: # rx = rx_gate -def ry_gate(theta: float = 0) -> Gate: +def ry_gate(theta: float = 0.0) -> Gate: r""" Rotation gate along :math:`y` axis. @@ -624,7 +625,7 @@ def ry_gate(theta: float = 0) -> Gate: # ry = ry_gate -def rz_gate(theta: float = 0) -> Gate: +def rz_gate(theta: float = 0.0) -> Gate: r""" Rotation gate along :math:`z` axis. @@ -645,7 +646,7 @@ def rz_gate(theta: float = 0) -> Gate: # rz = rz_gate -def rgate_theoretical(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: +def rgate_theoretical(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate: r""" Rotation gate implemented by matrix exponential. The output is the same as `rgate`. @@ -723,7 +724,7 @@ def iswap_gate(theta: float = 1.0) -> Gate: # iswap = iswap_gate -def cr_gate(theta: float = 0, alpha: float = 0, phi: float = 0) -> Gate: +def cr_gate(theta: float = 0.0, alpha: float = 0.0, phi: float = 0.0) -> Gate: r""" Controlled rotation gate. When the control qubit is 1, `rgate` is applied to the target qubit. 
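Note on the checks switched above from truthiness tests (`if not x:`) to explicit `if x is None:`: the point is to distinguish "argument not provided" from falsy-but-valid values such as an empty name string or an empty control list. A minimal, library-independent illustration of the distinction (plain Python, no tensorcircuit names assumed):

name = ""                 # falsy, but explicitly provided by the caller
print(not name)           # True  -- a truthiness check would fall back to the default
print(name is None)       # False -- the explicit check respects the provided value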
@@ -775,7 +776,7 @@ def random_two_qubit_gate() -> Gate: return Gate(deepcopy(unitary), name="R2Q") -def any_gate(unitary: Tensor, name: str = "any") -> Gate: +def any_gate(unitary: Tensor, name: str = "any", dim: Optional[int] = None) -> Gate: """ Note one should provide the gate with properly reshaped. @@ -783,6 +784,8 @@ def any_gate(unitary: Tensor, name: str = "any") -> Gate: :type unitary: Tensor :param name: The name of the gate. :type name: str + :param dim: The dimension of the gate. + :type dim: int :return: the resulted gate :rtype: Gate """ @@ -791,7 +794,10 @@ def any_gate(unitary: Tensor, name: str = "any") -> Gate: unitary.tensor = backend.cast(unitary.tensor, dtypestr) return unitary unitary = backend.cast(unitary, dtypestr) - unitary = backend.reshape2(unitary) + if dim is None or dim == 2: + unitary = backend.reshape2(unitary) + else: + unitary = backend.reshaped(unitary, dim) # nleg = int(np.log2(backend.sizen(unitary))) # unitary = backend.reshape(unitary, [2 for _ in range(nleg)]) return Gate(unitary, name=name) diff --git a/tensorcircuit/mpscircuit.py b/tensorcircuit/mpscircuit.py index dcd8f0c8..7b6e8d7b 100644 --- a/tensorcircuit/mpscircuit.py +++ b/tensorcircuit/mpscircuit.py @@ -8,13 +8,14 @@ from typing import Any, List, Optional, Sequence, Tuple, Dict, Union from copy import copy import logging +import types import numpy as np import tensornetwork as tn from . import gates from .cons import backend, npdtype, contractor, rdtypestr, dtypestr -from .quantum import QuOperator, QuVector, extract_tensors_from_qop +from .quantum import QuOperator, QuVector, extract_tensors_from_qop, _decode_basis_label from .mps_base import FiniteMPS from .abstractcircuit import AbstractCircuit from .utils import arg_alias @@ -91,12 +92,16 @@ def __init__( tensors: Optional[Sequence[Tensor]] = None, wavefunction: Optional[Union[QuVector, Tensor]] = None, split: Optional[Dict[str, Any]] = None, + dim: Optional[int] = None, ) -> None: """ MPSCircuit object based on state simulator. + Do not use this class with d!=2 directly :param nqubits: The number of qubits in the circuit. :type nqubits: int + :param dim: The local Hilbert space dimension per site. Qudit is supported for 2 <= d <= 36. + :type dim: If None, the dimension of the circuit will be `2`, which is a qubit system. 
:param center_position: The center position of MPS, default to 0 :type center_position: int, optional :param tensors: If not None, the initial state of the circuit is taken as ``tensors`` @@ -109,6 +114,7 @@ def __init__( :param split: Split rules :type split: Any """ + self._d = 2 if dim is None else dim self.circuit_param = { "nqubits": nqubits, "center_position": center_position, @@ -137,7 +143,9 @@ def __init__( wavefunction, split=self.split ) else: # full wavefunction - tensors = self.wavefunction_to_tensors(wavefunction, split=self.split) + tensors = self.wavefunction_to_tensors( + wavefunction, dim_phys=self._d, split=self.split + ) assert len(tensors) == nqubits self._mps = FiniteMPS(tensors, canonicalize=False) self._mps.center_position = 0 @@ -151,8 +159,13 @@ def __init__( self._mps = FiniteMPS(tensors, canonicalize=True, center_position=0) else: tensors = [ - np.array([1.0, 0.0], dtype=npdtype)[None, :, None] - for i in range(nqubits) + np.concatenate( + [ + np.array([1.0], dtype=npdtype), + np.zeros((self._d - 1,), dtype=npdtype), + ] + )[None, :, None] + for _ in range(nqubits) ] self._mps = FiniteMPS(tensors, canonicalize=False) if center_position is not None: @@ -370,42 +383,51 @@ def gate_to_MPO( # b # index must be ordered - assert np.all(np.diff(index) > 0) - index_left = np.min(index) + if len(index) == 0: + raise ValueError("`index` must contain at least one site.") + if not all(index[i] < index[i + 1] for i in range(len(index) - 1)): + raise ValueError("`index` must be strictly increasing.") + + index_left = int(np.min(index)) if isinstance(gate, tn.Node): gate = backend.copy(gate.tensor) - index = np.array(index) - index_left + nindex = len(index) + in_dims = tuple(backend.shape_tuple(gate))[:nindex] + dim = int(in_dims[0]) + dim_phys_mpo = dim * dim + gate = backend.reshape(gate, (dim,) * nindex + (dim,) * nindex) # transform gate from (in1, in2, ..., out1, out2 ...) to # (in1, out1, in2, out2, ...) 
- order = tuple(np.arange(2 * nindex).reshape((2, nindex)).T.flatten()) - shape = (4,) * nindex - gate = backend.reshape(backend.transpose(gate, order), shape) - argsort = np.argsort(index) + order = tuple(np.arange(2 * nindex).reshape(2, nindex).T.flatten().tolist()) + gate = backend.transpose(gate, order) # reorder the gate according to the site positions - gate = backend.transpose(gate, tuple(argsort)) - index = index[argsort] # type: ignore + gate = backend.reshape(gate, (dim_phys_mpo,) * nindex) # split the gate into tensors assuming they are adjacent - main_tensors = cls.wavefunction_to_tensors(gate, dim_phys=4, norm=False) + main_tensors = cls.wavefunction_to_tensors( + gate, dim_phys=dim_phys_mpo, norm=False + ) # each tensor is in shape of (i, a, b, j) - tensors = [] - previous_i = None - for i, main_tensor in zip(index, main_tensors): - # insert identites in the middle + tensors: list[Tensor] = [] + previous_i: Optional[int] = None + index_arr = np.array(index, dtype=int) - index_left + + for i, main_tensor in zip(index_arr, main_tensors): if previous_i is not None: - for _ in range(previous_i + 1, i): - bond_dim = tensors[-1].shape[-1] - I = ( - np.eye(bond_dim * 2) - .reshape((bond_dim, 2, bond_dim, 2)) - .transpose((0, 1, 3, 2)) - .astype(dtypestr) + for _gap_site in range(int(previous_i) + 1, int(i)): + bond_dim = int(backend.shape_tuple(tensors[-1])[-1]) + eye2d = backend.eye( + bond_dim * dim, dtype=backend.dtype(tensors[-1]) ) - tensors.append(backend.convert_to_tensor(I)) - nleft, _, nright = main_tensor.shape - tensor = backend.reshape(main_tensor, (nleft, 2, 2, nright)) + I4 = backend.reshape(eye2d, (bond_dim, dim, bond_dim, dim)) + I4 = backend.transpose(I4, (0, 1, 3, 2)) + tensors.append(I4) + + nleft, _, nright = backend.shape_tuple(main_tensor) + tensor = backend.reshape(main_tensor, (int(nleft), dim, dim, int(nright))) tensors.append(tensor) - previous_i = i + previous_i = int(i) + return tensors, index_left @classmethod @@ -448,15 +470,15 @@ def reduce_tensor_dimension( """ if split is None: split = {} - ni = tensor_left.shape[0] - nk = tensor_right.shape[-1] + ni, di = tensor_left.shape[0], tensor_right.shape[1] + nk, dk = tensor_right.shape[-1], tensor_right.shape[-2] T = backend.einsum("iaj,jbk->iabk", tensor_left, tensor_right) - T = backend.reshape(T, (ni * 2, nk * 2)) + T = backend.reshape(T, (ni * di, nk * dk)) new_tensor_left, new_tensor_right = split_tensor( T, center_left=center_left, split=split ) - new_tensor_left = backend.reshape(new_tensor_left, (ni, 2, -1)) - new_tensor_right = backend.reshape(new_tensor_right, (-1, 2, nk)) + new_tensor_left = backend.reshape(new_tensor_left, (ni, di, -1)) + new_tensor_right = backend.reshape(new_tensor_right, (-1, dk, nk)) return new_tensor_left, new_tensor_right def reduce_dimension( @@ -550,10 +572,11 @@ def apply_MPO( for i, idx in zip(i_list, idx_list): O = tensors[i] T = self._mps.tensors[idx] - ni, _, _, nj = O.shape + ni, d_in, _, nj = O.shape nk, _, nl = T.shape OT = backend.einsum("iabj,kbl->ikajl", O, T) - OT = backend.reshape(OT, (ni * nk, 2, nj * nl)) + OT = backend.reshape(OT, (ni * nk, d_in, nj * nl)) + self._mps.tensors[idx] = OT # canonicalize @@ -660,8 +683,7 @@ def mid_measurement(self, index: int, keep: int = 0) -> None: :type keep: int, optional """ # normalization not guaranteed - assert keep in [0, 1] - gate = backend.zeros((2, 2), dtype=dtypestr) + gate = backend.zeros((self._d, self._d), dtype=dtypestr) gate = backend.scatter( gate, backend.convert_to_tensor([[keep, keep]]), @@ -692,7 
+714,7 @@ def is_valid(self) -> bool: def wavefunction_to_tensors( cls, wavefunction: Tensor, - dim_phys: int = 2, + dim_phys: Optional[int] = None, norm: bool = True, split: Optional[Dict[str, Any]] = None, ) -> List[Tensor]: @@ -710,6 +732,7 @@ def wavefunction_to_tensors( :return: The tensors :rtype: List[Tensor] """ + dim_phys = dim_phys if dim_phys is not None else 2 if split is None: split = {} wavefunction = backend.reshape(wavefunction, (-1, 1)) @@ -768,10 +791,16 @@ def copy_without_tensor(self) -> "MPSCircuit": for key in vars(self): if key == "_mps": continue - if backend.is_tensor(info[key]): - copied_value = backend.copy(info[key]) + val = info[key] + if backend.is_tensor(val): + copied_value = backend.copy(val) + elif isinstance(val, types.ModuleType): + copied_value = val else: - copied_value = copy(info[key]) + try: + copied_value = copy(val) + except TypeError: + copied_value = val setattr(result, key, copied_value) return result @@ -815,7 +844,8 @@ def normalize(self) -> None: def amplitude(self, l: str) -> Tensor: assert len(l) == self._nqubits - tensors = [self._mps.tensors[i][:, int(s), :] for i, s in enumerate(l)] + idx_list = _decode_basis_label(l, n=self._nqubits, dim=self._d) + tensors = [self._mps.tensors[i][:, idx, :] for i, idx in enumerate(idx_list)] return reduce(backend.matmul, tensors)[0, 0] def proj_with_mps(self, other: "MPSCircuit", conj: bool = True) -> Tensor: @@ -873,6 +903,7 @@ def slice(self, begin: int, end: int) -> "MPSCircuit": mps = self.__class__( nqubits, + dim=self._d, tensors=tensors, center_position=center_position, split=self.split.copy(), @@ -1000,36 +1031,35 @@ def measure( # set the center to the left side, then gradually move to the right and do measurement at sites """ mps = self.copy() - up = backend.convert_to_tensor(np.array([1, 0]).astype(dtypestr)) - down = backend.convert_to_tensor(np.array([0, 1]).astype(dtypestr)) p = 1.0 p = backend.convert_to_tensor(p) p = backend.cast(p, dtype=rdtypestr) - sample = [] + sample: Tensor = [] for k, site in enumerate(index): mps.position(site) - # do measurement tensor = mps._mps.tensors[site] ps = backend.real( backend.einsum("iaj,iaj->a", tensor, backend.conj(tensor)) ) ps /= backend.sum(ps) - pu = ps[0] if status is None: - r = backend.implicit_randu()[0] + outcome = backend.implicit_randc( + self._d, shape=1, p=backend.cast(ps, rdtypestr) + )[0] else: - r = status[k] - r = backend.real(backend.cast(r, dtypestr)) - eps = 0.31415926 * 1e-12 - sign = backend.sign(r - pu + eps) / 2 + 0.5 # in case status is exactly 0.5 - sign = backend.convert_to_tensor(sign) - sign = backend.cast(sign, dtype=rdtypestr) - sign_complex = backend.cast(sign, dtypestr) - sample.append(sign_complex) - p = p * (pu * (-1) ** sign + sign) - m = (1 - sign_complex) * up + sign_complex * down + one_r = backend.cast(backend.convert_to_tensor(1.0), rdtypestr) + st = backend.cast(status[k : k + 1], rdtypestr) + ind = backend.probability_sample( + shots=1, p=backend.cast(ps, rdtypestr), status=one_r - st + ) + outcome = backend.cast(ind[0], "int32") + + p = p * ps[outcome] + basis = backend.convert_to_tensor(np.eye(self._d).astype(dtypestr)) + m = basis[outcome] mps._mps.tensors[site] = backend.einsum("iaj,a->ij", tensor, m)[:, None, :] + sample.append(outcome) sample = backend.stack(sample) sample = backend.real(sample) if with_prob: diff --git a/tensorcircuit/quantum.py b/tensorcircuit/quantum.py index b4cb3d0c..1755a7bb 100644 --- a/tensorcircuit/quantum.py +++ b/tensorcircuit/quantum.py @@ -32,7 +32,7 @@ remove_node, ) 
-from .cons import backend, contractor, dtypestr, npdtype, rdtypestr +from .cons import backend, contractor, dtypestr, npdtype, rdtypestr, _ALPHABET from .gates import Gate, num_to_tensor from .utils import arg_alias @@ -57,6 +57,91 @@ def get_all_nodes(edges: Iterable[Edge]) -> List[Node]: return nodes +def onehot_d_tensor(_k: Union[int, Tensor], d: int = 2) -> Tensor: + """ + Construct a one-hot vector (or matrix) of local dimension ``d``. + + :param _k: index or indices to set as 1. Can be an int or a backend Tensor. + :type _k: int or Tensor + :param d: local dimension (number of categories), defaults to 2 + :type d: int, optional + :return: one-hot encoded vector (shape [d]) or matrix (shape [len(_k), d]) + :rtype: Tensor + """ + if isinstance(_k, int): + vec = backend.one_hot(_k, d) + else: + vec = backend.one_hot(backend.cast(_k, "int32"), d) + return backend.cast(vec, dtypestr) + + +def _decode_basis_label(label: str, n: int, dim: int) -> List[int]: + """ + Decode a string basis label into a list of integer digits. + + The label is interpreted in base-``dim`` using characters ``0–9A–Z``. + Only dimensions up to 36 are supported. + + :param label: basis label string, e.g. "010" or "A9F" + :type label: str + :param n: number of sites (expected length of the label) + :type n: int + :param dim: local dimension (2 <= dim <= 36) + :type dim: int + :return: list of integer digits of length ``n``, each in ``[0, dim-1]`` + :rtype: List[int] + + :raises NotImplementedError: if ``dim > 36`` + :raises ValueError: if the label length mismatches ``n``, + or contains invalid/out-of-range characters + """ + if dim > 36: + raise NotImplementedError( + f"String basis label supports d<=36 (0–9A–Z). Got dim={dim}. " + "Use an integer array/tensor of length n instead." + ) + s = label.upper() + if len(s) != n: + raise ValueError(f"Basis label length mismatch: expect {n}, got {len(s)}") + digits = [] + for ch in s: + if ch not in _ALPHABET: + raise ValueError( + f"Invalid character '{ch}' in basis label (allowed 0–9A–Z)." + ) + v = _ALPHABET.index(ch) + if v >= dim: + raise ValueError( + f"Digit '{ch}' (= {v}) out of range for base-d with dim={dim}." + ) + digits.append(v) + return digits + + +def _infer_num_sites(D: int, dim: int) -> int: + """ + Infer the number of sites (n) from a Hilbert space dimension D + and local dimension d, assuming D = d**n. + + :param D: total Hilbert space dimension (int) + :param dim: local dimension per site (int) + :return: n such that D == d**n + :raises ValueError: if D is not an exact power of d + """ + if not (isinstance(D, int) and D > 0): + raise ValueError(f"D must be a positive integer, got {D}") + if not (isinstance(dim, int) and dim >= 2): + raise ValueError(f"d must be an integer >= 2, got {dim}") + + tmp, n = D, 0 + while tmp % dim == 0 and tmp > 1: + tmp //= dim + n += 1 + if tmp != 1: + raise ValueError(f"Dimension {D} is not a power of local dim {dim}") + return n + + def _reachable(nodes: List[AbstractNode]) -> List[AbstractNode]: if not nodes: raise ValueError("Reachable requires at least 1 node.") @@ -2151,7 +2236,10 @@ def entanglement_entropy(state: Tensor, cut: Union[int, List[int]]) -> Tensor: def reduced_wavefunction( - state: Tensor, cut: List[int], measure: Optional[List[int]] = None + state: Tensor, + cut: List[int], + measure: Optional[List[int]] = None, + dim: Optional[int] = None, ) -> Tensor: """ Compute the reduced wavefunction from the quantum state ``state``. 
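Since several call sites below now route string basis labels and Hilbert-space sizes through these helpers, a small usage sketch may help reviewers; it assumes the three functions stay importable from tensorcircuit.quantum exactly as defined in this patch:

from tensorcircuit.quantum import _decode_basis_label, _infer_num_sites, onehot_d_tensor

# base-12 label "0A2" -> per-site digits; 'A' stands for 10
print(_decode_basis_label("0A2", n=3, dim=12))   # [0, 10, 2]

# recover the number of sites from a total Hilbert-space dimension
print(_infer_num_sites(12 ** 3, dim=12))         # 3; raises ValueError if not an exact power of dim

# complex one-hot vector of length d, used for projections and amplitudes
print(onehot_d_tensor(10, d=12))                 # 1.0 at index 10, zeros elsewhere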
@@ -2166,20 +2254,22 @@ def reduced_wavefunction( :type measure: List[int] :return: _description_ :rtype: Tensor + :param dim: dimension of qudit system + :type dim: int """ + dim = 2 if dim is None else dim if measure is None: measure = [0 for _ in cut] - s = backend.reshape2(state) + s = backend.reshaped(state, dim) n = len(backend.shape_tuple(s)) s_node = Gate(s) end_nodes = [] for c, m in zip(cut, measure): - rt = backend.cast(backend.convert_to_tensor(1 - m), dtypestr) * backend.cast( - backend.convert_to_tensor(np.array([1.0, 0.0])), dtypestr - ) + backend.cast(backend.convert_to_tensor(m), dtypestr) * backend.cast( - backend.convert_to_tensor(np.array([0.0, 1.0])), dtypestr + oh = backend.cast( + backend.one_hot(backend.cast(backend.convert_to_tensor(m), "int32"), dim), + dtypestr, ) - end_node = Gate(rt) + end_node = Gate(backend.convert_to_tensor(oh)) end_nodes.append(end_node) s_node[c] ^ end_node[0] new_node = contractor( @@ -2194,8 +2284,9 @@ def reduced_density_matrix( cut: Union[int, List[int]], p: Optional[Tensor] = None, normalize: bool = True, + dim: Optional[int] = None, ) -> Union[Tensor, QuOperator]: - """ + r""" Compute the reduced density matrix from the quantum state ``state``. :param state: The quantum state in form of Tensor or QuOperator. @@ -2207,8 +2298,12 @@ def reduced_density_matrix( :type p: Optional[Tensor] :return: The reduced density matrix. :rtype: Union[Tensor, QuOperator] - :normalize: if True, returns a trace 1 density matrix. Otherwise does not normalize. + :param normalize: if True, returns a trace 1 density matrix. Otherwise, does not normalize. + :type normalize: bool + :param dim: dimension of qudit system + :type dim: int """ + dim = 2 if dim is None else dim if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set): traceout = list(cut) else: @@ -2221,21 +2316,19 @@ def reduced_density_matrix( return state.partial_trace(traceout) if len(state.shape) == 2 and state.shape[0] == state.shape[1]: # density operator - freedomexp = backend.sizen(state) - # traceout = sorted(traceout)[::-1] - freedom = int(np.log2(freedomexp) / 2) - # traceout2 = [i + freedom for i in traceout] + freedom = _infer_num_sites(state.shape[0], dim) left = traceout + [i for i in range(freedom) if i not in traceout] right = [i + freedom for i in left] - rho = backend.reshape(state, [2 for _ in range(2 * freedom)]) + + rho = backend.reshape(state, [dim] * (2 * freedom)) rho = backend.transpose(rho, perm=left + right) rho = backend.reshape( rho, [ - 2 ** len(traceout), - 2 ** (freedom - len(traceout)), - 2 ** len(traceout), - 2 ** (freedom - len(traceout)), + dim ** len(traceout), + dim ** (freedom - len(traceout)), + dim ** len(traceout), + dim ** (freedom - len(traceout)), ], ) if p is None: @@ -2248,20 +2341,20 @@ def reduced_density_matrix( p = backend.reshape(p, [-1]) rho = backend.einsum("a,aiaj->ij", p, rho) rho = backend.reshape( - rho, [2 ** (freedom - len(traceout)), 2 ** (freedom - len(traceout))] + rho, [dim ** (freedom - len(traceout)), dim ** (freedom - len(traceout))] ) if normalize: rho /= backend.trace(rho) else: w = state / backend.norm(state) - freedomexp = backend.sizen(state) - freedom = int(np.log(freedomexp) / np.log(2)) + size = int(backend.sizen(state)) + freedom = _infer_num_sites(size, dim) perm = [i for i in range(freedom) if i not in traceout] perm = perm + traceout - w = backend.reshape(w, [2 for _ in range(freedom)]) + w = backend.reshape(w, [dim for _ in range(freedom)]) w = backend.transpose(w, perm=perm) - w = 
backend.reshape(w, [-1, 2 ** len(traceout)]) + w = backend.reshape(w, [-1, dim ** len(traceout)]) if p is None: rho = w @ backend.adjoint(w) else: @@ -2404,7 +2497,9 @@ def truncated_free_energy( @op2tensor -def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor: +def partial_transpose( + rho: Tensor, transposed_sites: List[int], dim: Optional[int] = None +) -> Tensor: """ _summary_ @@ -2412,10 +2507,13 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor: :type rho: Tensor :param transposed_sites: sites int list to be transposed :type transposed_sites: List[int] + :param dim: dimension of qudit system + :type dim: int :return: _description_ :rtype: Tensor """ - rho = backend.reshape2(rho) + dim = 2 if dim is None else dim + rho = backend.reshaped(rho, dim) rho_node = Gate(rho) n = len(rho.shape) // 2 left_edges = [] @@ -2433,7 +2531,9 @@ def partial_transpose(rho: Tensor, transposed_sites: List[int]) -> Tensor: @op2tensor -def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor: +def entanglement_negativity( + rho: Tensor, transposed_sites: List[int], dim: Optional[int] = None +) -> Tensor: """ _summary_ @@ -2441,17 +2541,21 @@ def entanglement_negativity(rho: Tensor, transposed_sites: List[int]) -> Tensor: :type rho: Tensor :param transposed_sites: _description_ :type transposed_sites: List[int] + :param dim: dimension of qudit system + :type dim: int :return: _description_ :rtype: Tensor """ - rhot = partial_transpose(rho, transposed_sites) + rhot = partial_transpose(rho, transposed_sites, dim=dim) es = backend.eigvalsh(rhot) rhot_m = backend.sum(backend.abs(es)) return (rhot_m - 1.0) / 2.0 @op2tensor -def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") -> Tensor: +def log_negativity( + rho: Tensor, transposed_sites: List[int], base: str = "e", dim: Optional[int] = None +) -> Tensor: """ _summary_ @@ -2461,10 +2565,13 @@ def log_negativity(rho: Tensor, transposed_sites: List[int], base: str = "e") -> :type transposed_sites: List[int] :param base: whether use 2 based log or e based log, defaults to "e" :type base: str, optional + :param dim: dimension of qudit system + :type dim: int :return: _description_ :rtype: Tensor """ - rhot = partial_transpose(rho, transposed_sites) + dim = 2 if dim is None else dim + rhot = partial_transpose(rho, transposed_sites, dim) es = backend.eigvalsh(rhot) rhot_m = backend.sum(backend.abs(es)) een = backend.log(rhot_m) @@ -2550,7 +2657,9 @@ def double_state(h: Tensor, beta: float = 1) -> Tensor: @op2tensor -def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: +def mutual_information( + s: Tensor, cut: Union[int, List[int]], dim: Optional[int] = None +) -> Tensor: """ Mutual information between AB subsystem described by ``cut``. @@ -2558,9 +2667,12 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: :type s: Tensor :param cut: The AB subsystem. :type cut: Union[int, List[int]] + :param dim: The diagonal matrix in form of Tensor. + :type dim: Tensor :return: The mutual information between AB subsystem described by ``cut``. 
:rtype: Tensor """ + dim = 2 if dim is None else dim if isinstance(cut, list) or isinstance(cut, tuple) or isinstance(cut, set): traceout = list(cut) else: @@ -2568,22 +2680,22 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: if len(s.shape) == 2 and s.shape[0] == s.shape[1]: # mixed state - n = int(np.log2(backend.sizen(s)) / 2) + n = _infer_num_sites(s.shape[0], dim=dim) hab = entropy(s) # subsystem a - rhoa = reduced_density_matrix(s, traceout) + rhoa = reduced_density_matrix(s, traceout, dim=dim) ha = entropy(rhoa) # need subsystem b as well other = tuple(i for i in range(n) if i not in traceout) - rhob = reduced_density_matrix(s, other) # type: ignore + rhob = reduced_density_matrix(s, other, dim=dim) # type: ignore hb = entropy(rhob) # pure system else: hab = 0.0 - rhoa = reduced_density_matrix(s, traceout) + rhoa = reduced_density_matrix(s, traceout, dim=dim) ha = hb = entropy(rhoa) return ha + hb - hab @@ -2592,7 +2704,9 @@ def mutual_information(s: Tensor, cut: Union[int, List[int]]) -> Tensor: # measurement results and transformations and correlations below -def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor: +def count_s2d( + srepr: Tuple[Tensor, Tensor], n: int, dim: Optional[int] = None +) -> Tensor: """ measurement shots results, sparse tuple representation to dense representation count_vector to count_tuple @@ -2601,11 +2715,14 @@ def count_s2d(srepr: Tuple[Tensor, Tensor], n: int) -> Tensor: :type srepr: Tuple[Tensor, Tensor] :param n: number of qubits :type n: int + :param dim: [description], defaults to None + :type dim: int, optional :return: [description] :rtype: Tensor """ + dim = 2 if dim is None else dim return backend.scatter( - backend.cast(backend.zeros([2**n]), srepr[1].dtype), + backend.cast(backend.zeros([dim**n]), srepr[1].dtype), backend.reshape(srepr[0], [-1, 1]), srepr[1], ) @@ -2648,117 +2765,146 @@ def count_d2s(drepr: Tensor, eps: float = 1e-7) -> Tuple[Tensor, Tensor]: count_t2v = count_d2s -def sample_int2bin(sample: Tensor, n: int) -> Tensor: +def sample_int2bin(sample: Tensor, n: int, dim: Optional[int] = None) -> Tensor: """ - int sample to bin sample + Convert linear-index samples to per-site digits (base-d). 
- :param sample: in shape [trials] of int elements in the range [0, 2**n) + :param sample: shape [trials], integers in [0, d**n) :type sample: Tensor - :param n: number of qubits + :param n: number of sites :type n: int - :return: in shape [trials, n] of element (0, 1) + :param dim: local dimension, defaults to 2 + :type dim: int, optional + :return: shape [trials, n], entries in [0, d-1] :rtype: Tensor """ - confg = backend.mod( - backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))), - 2, - ) - return confg + dim = 2 if dim is None else dim + if dim == 2: + return backend.mod( + backend.right_shift(sample[..., None], backend.reverse(backend.arange(n))), + 2, + ) + else: + pos = backend.reverse(backend.arange(n)) + base = backend.power(dim, pos) + digits = backend.mod( + backend.floor_divide(sample[..., None], base), # ⌊sample / d**pos⌋ + dim, + ) + return backend.cast(digits, "int32") -def sample_bin2int(sample: Tensor, n: int) -> Tensor: +def sample_bin2int(sample: Tensor, n: int, dim: Optional[int] = None) -> Tensor: """ bin sample to int sample :param sample: in shape [trials, n] of elements (0, 1) :type sample: Tensor - :param n: number of qubits + :param n: number of sites :type n: int + :param dim: local dimension, defaults to 2 + :type dim: int, optional :return: in shape [trials] :rtype: Tensor """ - power = backend.convert_to_tensor([2**j for j in reversed(range(n))]) + dim = 2 if dim is None else dim + power = backend.convert_to_tensor([dim**j for j in reversed(range(n))]) return backend.sum(sample * power, axis=-1) def sample2count( - sample: Tensor, n: int, jittable: bool = True + sample: Tensor, + n: int, + jittable: bool = True, + dim: Optional[int] = None, ) -> Tuple[Tensor, Tensor]: """ - sample_int to count_tuple + sample_int to count_tuple (indices, counts), size = d**n - :param sample: _description_ + :param sample: linear-index samples, shape [shots] :type sample: Tensor - :param n: _description_ + :param n: number of sites :type n: int - :param jittable: _description_, defaults to True - :type jittable: bool, optional - :return: _description_ + :param jittable: whether to return fixed-size outputs (backend dependent) + :type jittable: bool + :param dim: local dimension per site, default 2 (qubit) + :type dim: int, optional + :return: (unique_indices, counts) :rtype: Tuple[Tensor, Tensor] """ - d = 2**n + dim = 2 if dim is None else dim + size = dim**n if not jittable: results = backend.unique_with_counts(sample) # non-jittable - else: # jax specified - results = backend.unique_with_counts(sample, size=d, fill_value=-1) + else: # jax specified / fixed-size + results = backend.unique_with_counts(sample, size=size, fill_value=-1) return results -def count_vector2dict(count: Tensor, n: int, key: str = "bin") -> Dict[Any, int]: +def count_vector2dict( + count: Tensor, n: int, key: str = "bin", dim: Optional[int] = None +) -> Dict[Any, int]: """ - convert_vector to count_dict_bin or count_dict_int + Convert count_vector to count_dict_bin or count_dict_int. + For d>10 cases, a base-d string (0-9A-Z) is used. 
- :param count: tensor in shape [2**n] + :param count: tensor in shape [d**n] :type count: Tensor - :param n: number of qubits + :param n: number of sites :type n: int :param key: can be "int" or "bin", defaults to "bin" :type key: str, optional - :return: _description_ - :rtype: _type_ + :param dim: local dimension (default 2) + :type dim: int, optional + :return: mapping from configuration to count + :rtype: Dict[Any, int] """ from .interfaces import which_backend + dim = 2 if dim is None else dim b = which_backend(count) - d = {i: b.numpy(count[i]).item() for i in range(2**n)} + out_int = {i: b.numpy(count[i]).item() for i in range(dim**n)} if key == "int": - return d + return out_int else: - dn = {} - for k, v in d.items(): - kn = str(bin(k))[2:].zfill(n) - dn[kn] = v - return dn + out_str = {} + for k, v in out_int.items(): + kn = np.base_repr(k, base=dim).zfill(n) + out_str[kn] = v + return out_str def count_tuple2dict( - count: Tuple[Tensor, Tensor], n: int, key: str = "bin" + count: Tuple[Tensor, Tensor], n: int, key: str = "bin", dim: Optional[int] = None ) -> Dict[Any, int]: """ count_tuple to count_dict_bin or count_dict_int - :param count: count_tuple format + :param count: count_tuple format (indices, counts) :type count: Tuple[Tensor, Tensor] - :param n: number of qubits + :param n: number of sites (qubits or qudits) :type n: int :param key: can be "int" or "bin", defaults to "bin" :type key: str, optional + :param dim: local dimension, defaults to 2 + :type dim: int, optional :return: count_dict - :rtype: _type_ + :rtype: Dict[Any, int] """ - d = { + dim = 2 if dim is None else dim + out_int = { backend.numpy(i).item(): backend.numpy(j).item() for i, j in zip(count[0], count[1]) if i >= 0 } if key == "int": - return d + return out_int else: - dn = {} - for k, v in d.items(): - kn = str(bin(k))[2:].zfill(n) - dn[kn] = v - return dn + out_str = {} + for k, v in out_int.items(): + kn = np.base_repr(k, base=dim).zfill(n) + out_str[kn] = v + return out_str @partial(arg_alias, alias_dict={"counts": ["shots"], "format": ["format_"]}) @@ -2770,8 +2916,9 @@ def measurement_counts( random_generator: Optional[Any] = None, status: Optional[Tensor] = None, jittable: bool = False, + dim: Optional[int] = None, ) -> Any: - """ + r""" Simulate the measuring of each qubit of ``p`` in the computational basis, thus producing output like that of ``qiskit``. 
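A short sketch of the base-d sample/count conversions introduced above, assuming the default numpy backend (where plain numpy arrays pass through tc.backend unchanged) and the signatures as added in this patch:

import numpy as np
import tensorcircuit as tc
from tensorcircuit.quantum import sample_int2bin, sample_bin2int, count_vector2dict

tc.set_backend("numpy")

# linear indices -> per-site digits for n=3 qutrits (d=3)
print(sample_int2bin(np.array([21, 5]), n=3, dim=3))        # [[2, 1, 0], [0, 1, 2]]

# and back: per-site digits -> linear index
print(sample_bin2int(np.array([[2, 1, 0]]), n=3, dim=3))    # [21]

# count vector -> dict keyed by base-d strings (np.base_repr covers digits 0-9A-Z)
print(count_vector2dict(np.array([0, 2, 1]), n=1, key="bin", dim=3))  # {'0': 0, '1': 2, '2': 1}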
@@ -2786,6 +2933,7 @@ def measurement_counts(

         "count_tuple": # (np.array([0]), np.array([2]))

         "count_dict_bin": # {"00": 2, "01": 0, "10": 0, "11": 0}
+            (for 10 < d <= 36, digit values 10-35 are shown as "A"-"Z")

         "count_dict_int": # {0: 2, 1: 0, 2: 0, 3: 0}

@@ -2837,21 +2985,22 @@ def measurement_counts(
         state /= backend.norm(state)
         pi = backend.real(backend.conj(state) * state)
         pi = backend.reshape(pi, [-1])
-    d = int(backend.shape_tuple(pi)[0])
-    n = int(np.log(d) / np.log(2) + 1e-8)
+
+    local_d = 2 if dim is None else dim
+    total_dim = int(backend.shape_tuple(pi)[0])
+    n = _infer_num_sites(total_dim, local_d)
+
     if (counts is None) or counts <= 0:
         if format == "count_vector":
             return pi
         elif format == "count_tuple":
             return count_d2s(pi)
         elif format == "count_dict_bin":
-            return count_vector2dict(pi, n, key="bin")
+            return count_vector2dict(pi, n, key="bin", dim=local_d)
         elif format == "count_dict_int":
-            return count_vector2dict(pi, n, key="int")
+            return count_vector2dict(pi, n, key="int", dim=local_d)
         else:
-            raise ValueError(
-                "unsupported format %s for analytical measurement" % format
-            )
+            raise ValueError(f"unsupported format {format} for analytical measurement")
     else:
         raw_counts = backend.probability_sample(
             counts, pi, status=status, g=random_generator
@@ -2862,7 +3011,7 @@
         # raw_counts = backend.stateful_randc(
         #     random_generator, a=drange, shape=counts, p=pi
         # )
-        return sample2all(raw_counts, n, format=format, jittable=jittable)
+        return sample2all(raw_counts, n, format=format, jittable=jittable, dim=local_d)


 measurement_results = measurement_counts

@@ -2870,27 +3019,34 @@
 @partial(arg_alias, alias_dict={"format": ["format_"]})
 def sample2all(
-    sample: Tensor, n: int, format: str = "count_vector", jittable: bool = False
+    sample: Tensor,
+    n: int,
+    format: str = "count_vector",
+    jittable: bool = False,
+    dim: Optional[int] = None,
 ) -> Any:
     """
-    transform ``sample_int`` or ``sample_bin`` form results to other forms specified by ``format``
+    transform ``sample_int`` or ``sample_bin`` results to other forms specified by ``format``

-    :param sample: measurement shots results in ``sample_int`` or ``sample_bin`` format
+    :param sample: measurement shots results in ``sample_int`` (shape [shots]) or ``sample_bin`` (shape [shots, n])
     :type sample: Tensor
-    :param n: number of qubits
+    :param n: number of sites
     :type n: int
-    :param format: see the doc in the doc in :py:meth:`tensorcircuit.quantum.measurement_results`,
-        defaults to "count_vector"
+    :param format: see :py:meth:`tensorcircuit.quantum.measurement_results`, defaults to "count_vector"
     :type format: str, optional
     :param jittable: only applicable to count transformation in jax backend, defaults to False
     :type jittable: bool, optional
+    :param dim: local dimension (2 for qubit; >2 for qudit), defaults to 2
+    :type dim: int, optional
     :return: measurement results specified as ``format``
     :rtype: Any
     """
-    if n > 32:
+    dim = 2 if dim is None else int(dim)
+    n_max_d = int(32 / np.log2(dim))
+    if n > n_max_d:
         assert (
             len(backend.shape_tuple(sample)) == 2
-        ), "n>32 is only supported for ``sample_bin``"
+        ), f"n>{n_max_d} is only supported for ``sample_bin``"
         if format == "sample_bin":
             return sample
         if format == "count_dict_bin":
@@ -2900,26 +3056,27 @@ def sample2all(

     if len(backend.shape_tuple(sample)) == 1:
         sample_int = sample
-        sample_bin = sample_int2bin(sample, n)
+        sample_bin = sample_int2bin(sample, n, dim=dim)
     elif len(backend.shape_tuple(sample)) == 2:
-        sample_int = sample_bin2int(sample, n)
+        sample_int = sample_bin2int(sample, n, dim=dim)
         sample_bin = sample
     else:
         raise ValueError("unrecognized tensor shape for sample")
+
     if format == "sample_int":
         return sample_int
     elif format == "sample_bin":
         return sample_bin
     else:
-        count_tuple = sample2count(sample_int, n, jittable)
+        count_tuple = sample2count(sample_int, n, jittable=jittable, dim=dim)
         if format == "count_tuple":
             return count_tuple
         elif format == "count_vector":
-            return count_s2d(count_tuple, n)
+            return count_s2d(count_tuple, n, dim=dim)
         elif format == "count_dict_bin":
-            return count_tuple2dict(count_tuple, n, key="bin")
+            return count_tuple2dict(count_tuple, n, key="bin", dim=dim)
         elif format == "count_dict_int":
-            return count_tuple2dict(count_tuple, n, key="int")
+            return count_tuple2dict(count_tuple, n, key="int", dim=dim)
         else:
             raise ValueError(
                 "unsupported format %s for finite shots measurement" % format
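A minimal sketch of the analytical branch of ``measurement_counts`` above for two qutrits (d = 3), assuming the numpy backend; the ``counts``/``format`` keyword names follow the aliases declared on this function, and the probabilities are hand-computed:

import numpy as np
from tensorcircuit.quantum import measurement_counts

psi = np.zeros(9)
psi[5] = 1.0  # the basis state |12>, since 1 * 3 + 2 == 5
res = measurement_counts(psi, counts=-1, format="count_dict_bin", dim=3)
print(res["12"])  # expected 1.0; all other keys map to 0.0
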
diff --git a/tensorcircuit/results/counts.py b/tensorcircuit/results/counts.py
index 5bf0e870..662da8d9 100644
--- a/tensorcircuit/results/counts.py
+++ b/tensorcircuit/results/counts.py
@@ -6,6 +6,7 @@

 import numpy as np

+from ..quantum import _decode_basis_label

 Tensor = Any
 ct = Dict[str, int]
@@ -89,14 +90,24 @@ def marginal_count(count: ct, keep_list: Sequence[int]) -> ct:
     return reverse_count(ncount)


-def count2vec(count: ct, normalization: bool = True) -> Tensor:
+def count2vec(
+    count: ct, normalization: bool = True, dim: Optional[int] = None
+) -> Tensor:
     """
-    Convert count dictionary to probability vector.
+    Convert a dictionary of counts (with string keys) to a probability/count vector.
+
+    Supported keys:
+        - base-d strings (d <= 36), characters taken from 0-9A-Z (case-insensitive)
+    For example:
+        qubit: '0101'
+        qudit: '012' or '09A' ('A' is the digit 10, so '09A' means [0, 9, 10])

     :param count: A dictionary mapping bit strings to counts
     :type count: ct
     :param normalization: Whether to normalize the counts to probabilities, defaults to True
     :type normalization: bool, optional
+    :param dim: local dimension d of each site, defaults to 2
+    :type dim: int, optional
     :return: Probability vector as numpy array
     :rtype: Tensor

@@ -105,44 +116,47 @@ def count2vec(count: ct, normalization: bool = True) -> Tensor:
     >>> count2vec({"00": 2, "10": 3, "11": 5})
     array([0.2, 0. , 0.3, 0.5])
     """
-    nqubit = len(list(count.keys())[0])
-    probability = [0] * 2**nqubit
-    shots = sum([v for k, v in count.items()])
-    for k, v in count.items():
-        if normalization is True:
-            v /= shots  # type: ignore
-        probability[int(k, 2)] = v
-    return np.array(probability)
+    if not count:
+        return np.array([], dtype=float)


-def vec2count(vec: Tensor, prune: bool = False) -> ct:
-    """
-    Convert probability vector to count dictionary.
+    dim = 2 if dim is None else dim

-    :param vec: Probability vector
-    :type vec: Tensor
-    :param prune: Whether to remove near-zero probabilities, defaults to False
-    :type prune: bool, optional
-    :return: Count dictionary
-    :rtype: ct
+    n = len(next(iter(count)).upper())
+    prob = np.zeros(dim**n, dtype=float)
+    shots = float(sum(count.values())) if normalization else 1.0
+    if shots == 0:
+        return prob
+
+    powers = [dim**p for p in range(n)][::-1]
+    for k, v in count.items():
+        digits = _decode_basis_label(k, n, dim)
+        idx = sum(dig * p for dig, p in zip(digits, powers))
+        prob[idx] = (v / shots) if normalization else v
+
+    return prob

-    :Example:

-    >>> vec2count(np.array([0.2, 0.3, 0.1, 0.4]))
-    {'00': 0.2, '01': 0.3, '10': 0.1, '11': 0.4}
+def vec2count(vec: Tensor, prune: bool = False, dim: Optional[int] = None) -> ct:
     """
-    from ..quantum import count_vector2dict
+    Map a count/probability vector of length D = d**n to a dictionary with base-d string keys (0-9A-Z).
+    String keys are only generated for d <= 36; if the inferred d exceeds 36, a NotImplementedError is raised.
+
+    :param vec: A one-dimensional vector of length D = d**n
+    :param prune: Whether to prune near-zero elements (threshold 1e-8)
+    :param dim: local dimension d of each site, defaults to 2
+    :return: {base-d string key: value}, key length n
+    """
+    from ..quantum import count_vector2dict, _infer_num_sites
+
+    dim = 2 if dim is None else dim
     if isinstance(vec, list):
         vec = np.array(vec)
-    n = int(np.log(vec.shape[0]) / np.log(2) + 1e-9)
-    c = count_vector2dict(vec, n, key="bin")
-    if prune is True:
-        nc = c.copy()
-        for k, v in c.items():
-            if np.abs(v) < 1e-8:
-                del nc[k]
-        return nc
+    n = _infer_num_sites(int(vec.shape[0]), dim)
+    c: ct = count_vector2dict(vec, n, key="bin", dim=dim)  # type: ignore
+    if prune:
+        c = {k: v for k, v in c.items() if np.abs(v) >= 1e-8}
+    return c
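A round-trip sketch for the base-d string keys described above, using d = 16 so that letter digits appear; this assumes ``_decode_basis_label`` accepts 0-9A-Z as documented, and the expected values are hand-computed:

from tensorcircuit.results import counts

c = {"0A": 3, "1F": 1}              # digit strings [0, 10] and [1, 15]
vec = counts.count2vec(c, normalization=False, dim=16)
print(vec.shape, vec[10], vec[31])  # (256,) 3.0 1.0
print(counts.vec2count(vec, prune=True, dim=16))
# expected: {'0A': 3.0, '1F': 1.0}
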
diff --git a/tensorcircuit/simplify.py b/tensorcircuit/simplify.py
index 1cbcfc6a..917e6d54 100644
--- a/tensorcircuit/simplify.py
+++ b/tensorcircuit/simplify.py
@@ -121,7 +121,9 @@ def _split_two_qubit_gate(
         if fixed_choice == 2:  # swap one
             return n3, n4, True  # swap
     s2 = n3.tensor.shape[-1]
-    if (s1 >= 4) and (s2 >= 4):
+    if (s1 >= n[0].dimension * n[2].dimension) and (
+        s2 >= n[1].dimension * n[3].dimension
+    ):
         # jax jit unspport split_node with trun_err anyway
         # tf function doesn't work either, though I believe it may work on tf side
         # CANNOT DONE(@refraction-ray): tf.function version with trun_err set
diff --git a/tests/test_quantum.py b/tests/test_quantum.py
index 816f6dba..8cbf13a4 100644
--- a/tests/test_quantum.py
+++ b/tests/test_quantum.py
@@ -1034,3 +1034,122 @@ def test_u1_project(backend):
     s1 = tc.quantum.u1_project(s, 8, 3)
     assert s1.shape[-1] == 56
     np.testing.assert_allclose(tc.quantum.u1_enlarge(s1, 8, 3), s)
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
+def test_sample_int2bin(backend):
+    for n, dim in [(3, 2), (3, 3), (4, 5), (4, 10), (5, 36)]:
+        trials = 10
+        max_val = dim**n
+        samples = np.random.randint(0, max_val, size=trials)
+
+        digits = np.asarray(tc.quantum.sample_int2bin(samples, n=n, dim=dim))
+        print(digits)
+
+        assert digits.shape == (trials, n)
+
+        assert digits.min() >= 0 and digits.max() < dim
+
+        weights = dim ** np.arange(n - 1, -1, -1)
+        recon = (digits * weights).sum(axis=1)
+        assert np.array_equal(recon, samples)
+
+
+@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")])
+def test_count_s2d(backend):
+    cases = [
+        (3, None, [0, 3, 7], [5, 2, 1]),
+        (2, 3, [0, 1, 8], [1, 2, 3]),
+ (3, 5, [0, 57, 124], [4, 1, 6]), + (2, 10, [7, 42, 99], [2, 5, 1]), + (2, 36, [0, 35, 1295], [9, 8, 7]), + ] + + for n, dim, idx, cnt in cases: + D = 2 if dim is None else dim + size = D**n + + s_indices = np.asarray(idx, dtype=np.int64) + s_counts_i = np.asarray(cnt, dtype=np.int64) + + dense_i = np.asarray( + tc.quantum.count_s2d((s_indices, s_counts_i), n=n, dim=dim) + ) + print("int case:", n, dim, dense_i) + + assert dense_i.shape == (size,) + expected_i = np.zeros(size, dtype=np.int64) + expected_i[s_indices] = s_counts_i + np.testing.assert_array_equal(dense_i, expected_i) + + s_counts_f = np.asarray(cnt, dtype=np.float32) + dense_f = np.asarray( + tc.quantum.count_s2d((s_indices, s_counts_f), n=n, dim=dim) + ) + print("float case:", n, dim, dense_f) + + assert dense_f.shape == (size,) + assert dense_f.dtype == np.float32 + expected_f = np.zeros(size, dtype=np.float32) + expected_f[s_indices] = s_counts_f + np.testing.assert_array_equal(dense_f, expected_f) + + +@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) +def test_sample_bin2int(backend): + for n, dim in [(3, 2), (3, 3), (4, 5), (4, 10), (5, 36)]: + trials = 10 + digits = np.random.randint(0, dim, size=(trials, n)) + ints = np.asarray(tc.quantum.sample_bin2int(digits, n=n, dim=dim)) + + assert ints.shape == (trials,) + weights = dim ** np.arange(n - 1, -1, -1) + expected = (digits * weights).sum(axis=1) + np.testing.assert_array_equal(ints, expected) + + +@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) +def test_count_vector2dict(backend): + for n, dim in [(3, 2), (3, 3), (4, 5), (3, 10), (3, 16), (2, 36)]: + size = dim**n + counts_np = np.random.randint(0, 7, size=size, dtype=np.int64) + counts = tc.backend.convert_to_tensor(counts_np) + + out_int = tc.quantum.count_vector2dict(counts, n=n, key="int", dim=dim) + assert isinstance(out_int, dict) + assert len(out_int) == size + for i in range(size): + assert out_int[i] == int(counts_np[i]) + + out_bin = tc.quantum.count_vector2dict(counts, n=n, key="bin", dim=dim) + assert isinstance(out_bin, dict) + assert len(out_bin) == size + expected_keys = [np.base_repr(i, base=dim).zfill(n) for i in range(size)] + assert set(out_bin.keys()) == set(expected_keys) + for i, k in enumerate(expected_keys): + assert out_bin[k] == int(counts_np[i]) + + +@pytest.mark.parametrize("backend", [lf("npb"), lf("tfb"), lf("jaxb")]) +def test_count_tuple2dict(backend): + for n, dim in [(3, 2), (3, 3), (2, 5), (3, 10), (2, 16), (2, 36)]: + size = dim**n + indices = np.random.choice(size, size=min(5, size), replace=False) + counts = np.random.randint(1, 10, size=len(indices)) + + idx_tensor = tc.backend.cast(tc.backend.convert_to_tensor(indices), "int64") + cnt_tensor = tc.backend.cast(tc.backend.convert_to_tensor(counts), "int64") + count_tuple = (idx_tensor, cnt_tensor) + + out_int = tc.quantum.count_tuple2dict(count_tuple, n=n, key="int", dim=dim) + assert isinstance(out_int, dict) + assert set(out_int.keys()) == set(indices) + for i, c in zip(indices, counts): + assert out_int[int(i)] == int(c) + + out_bin = tc.quantum.count_tuple2dict(count_tuple, n=n, key="bin", dim=dim) + assert isinstance(out_bin, dict) + expected_keys = [np.base_repr(int(i), base=dim).zfill(n) for i in indices] + assert set(out_bin.keys()) == set(expected_keys) + for k, c in zip(expected_keys, counts): + assert out_bin[k] == int(c) diff --git a/tests/test_results.py b/tests/test_results.py index 9be81889..3d4f1199 100644 --- a/tests/test_results.py +++ b/tests/test_results.py @@ 
-7,6 +7,7 @@ from tensorcircuit.results.readout_mitigation import ReadoutMit d = {"000": 2, "101": 3, "100": 4} +d_higher = {"A00": 2, "9BC": 3, "XYZ": 4} def test_marginal_count(): @@ -49,6 +50,12 @@ def test_merge_count(): def test_count2vec(): assert counts.vec2count(counts.count2vec(d, normalization=False), prune=True) == d + assert ( + counts.vec2count( + counts.count2vec(d, normalization=False, dim=36), prune=True, dim=36 + ) + == d + ) def test_kl():