Changes from all commits (25 commits)
7437768
feat: implemented custom gradient for keras
BrunoLiegiBastonLiegi Mar 21, 2025
5a83823
fix: small fix
BrunoLiegiBastonLiegi Mar 21, 2025
867b4db
fix: fixed custom grad
BrunoLiegiBastonLiegi Mar 21, 2025
e95f2e7
feat: minot modifications
BrunoLiegiBastonLiegi Mar 24, 2025
a8882ef
Merge branch 'main' into keras_custom_gradient
BrunoLiegiBastonLiegi Mar 24, 2025
edb2a8a
fix: trying other keras backends
BrunoLiegiBastonLiegi Mar 24, 2025
6b27136
fix: minor modifications
BrunoLiegiBastonLiegi Mar 25, 2025
ab1bf7f
fix: trying to run eagerly
BrunoLiegiBastonLiegi Mar 25, 2025
d3276eb
feat: managed to train for the first time
BrunoLiegiBastonLiegi Mar 27, 2025
ee371d2
fix: some fixes to PSR
BrunoLiegiBastonLiegi Mar 27, 2025
57b6842
fix: some fixes to tests
BrunoLiegiBastonLiegi Mar 31, 2025
cddc8e1
fix: fixing pylint and tests
BrunoLiegiBastonLiegi Apr 1, 2025
699c219
fix: trying to better fix the seed
BrunoLiegiBastonLiegi Apr 1, 2025
1645c59
fix: fixes to PSR test + separated composition test
BrunoLiegiBastonLiegi Apr 1, 2025
8665590
fix: tidying up
BrunoLiegiBastonLiegi Apr 1, 2025
8af5ddc
fix: some fixes for symbolic execution and tests
BrunoLiegiBastonLiegi Apr 2, 2025
fe7734b
fix: using tf from interface
BrunoLiegiBastonLiegi Apr 2, 2025
20bcb69
Apply suggestions from code review
BrunoLiegiBastonLiegi Apr 2, 2025
037c42d
fix: replaced uniform with normal
BrunoLiegiBastonLiegi Apr 2, 2025
0d67c1f
Merge pull request #77 from qiboteam/keras_custom_gradient
BrunoLiegiBastonLiegi Apr 17, 2025
e376421
remove unnessary methods
renatomello Apr 29, 2025
5c03955
Merge pull request #86 from qiboteam/remove_methods
BrunoLiegiBastonLiegi May 5, 2025
a408094
fix: working
BrunoLiegiBastonLiegi May 15, 2025
0b7ac85
feat: added batched result and improved batched execution
BrunoLiegiBastonLiegi May 20, 2025
414851a
feat: added batched result and improved batched execution
BrunoLiegiBastonLiegi May 20, 2025
62 changes: 62 additions & 0 deletions src/qiboml/backends/einsum_utils.py
@@ -0,0 +1,62 @@
from functools import cache

from qibo.backends.einsum_utils import EINSUM_CHARS
from qibo.config import raise_error


@cache
def prepare_strings(qubits, nqubits):
if nqubits + len(qubits) + 1 > len(EINSUM_CHARS): # pragma: no cover
raise_error(NotImplementedError, "Not enough einsum characters.")

inp = list(EINSUM_CHARS[: nqubits + 1])
out = inp[:]
trans = list(EINSUM_CHARS[nqubits + 1 : nqubits + len(qubits) + 1])
for i, q in enumerate(qubits):
q += 1
trans.append(inp[q])
out[q] = trans[i]

inp = "".join(inp)
out = "".join(out)
trans = "".join(trans)
rest = EINSUM_CHARS[nqubits + len(qubits) + 1 :]
return inp, out, trans, rest


@cache
def prepare_strings_same_qubits(qubits, nqubits):
    inp, out, trans, rest = prepare_strings(qubits, nqubits)
    # The stacked gate matrices share the batch index (inp[0]) with the
    # state, so that circuit k is contracted with its own matrix k rather
    # than summed over all matrices.
    trans = inp[0] + trans
    return inp, out, trans, rest


@cache
def control_order(control_qubits, target_qubits, nqubits):
loop_start = 0
order = list(control_qubits)
targets = list(target_qubits)
for control in control_qubits:
for i in range(loop_start, control):
order.append(i)
loop_start = control + 1
for i, t in enumerate(target_qubits):
if t > control:
targets[i] -= 1
for i in range(loop_start, nqubits):
order.append(i)
    # Shift every axis by one to make room for the leading batch dimension,
    # which stays in front; the targets are left as plain qubit positions
    # because `prepare_strings` already skips the batch axis. Tuples keep
    # the outputs hashable for the cached string builders.
    return (0,) + tuple(o + 1 for o in order), tuple(targets)


@cache
def apply_gate_string(qubits, nqubits):
inp, out, trans, _ = prepare_strings(qubits, nqubits)
return f"{inp},{trans}->{out}"


@cache
def apply_gates_same_qubits_string(qubits, nqubits):
inp, out, trans, _ = prepare_strings_same_qubits(qubits, nqubits)
return f"{inp},{trans}->{out}"
135 changes: 134 additions & 1 deletion src/qiboml/backends/pytorch.py
@@ -3,9 +3,14 @@
from typing import Union

import numpy as np
from qibo import __version__
import qibo.backends.einsum_utils as einsum_utils
from qibo import Circuit, __version__, gates
from qibo.backends.npmatrices import NumpyMatrices
from qibo.backends.numpy import NumpyBackend
from qibo.result import CircuitResult, MeasurementOutcomes, QuantumState

import qiboml.backends.einsum_utils as batched_einsum_utils
from qiboml.result import BatchedResult


class TorchMatrices(NumpyMatrices):
@@ -309,3 +314,131 @@ def _test_regressions(self, name):
{5: 17, 4: 5, 7: 4, 1: 2, 6: 2},
{4: 9, 2: 5, 5: 5, 3: 4, 6: 4, 0: 1, 1: 1, 7: 1},
]

def apply_gate_batched(self, gate, state, nqubits):
state = self.np.reshape(state, (state.shape[0],) + nqubits * (2,))
matrix = gate.matrix(self)
if gate.is_controlled_by:
matrix = self.np.reshape(matrix, 2 * len(gate.target_qubits) * (2,))
ncontrol = len(gate.control_qubits)
nactive = nqubits - ncontrol
            order, targets = batched_einsum_utils.control_order(
                gate.control_qubits, gate.target_qubits, nqubits
            )
state = self.np.transpose(state, order)
            # Apply `einsum` only to the part of the state where all controls
            # are active, i.e. `state[:, -1]` in the batched layout.
state = self.np.reshape(
state, (state.shape[0],) + (2**ncontrol,) + nactive * (2,)
)
opstring = batched_einsum_utils.apply_gate_string(targets, nactive)
            updates = self.np.einsum(opstring, state[:, -1], matrix)
            # Concatenate the updated part of the state `updates` with the
            # part of the state that remained unaffected, `state[:, :-1]`.
            state = self.np.concatenate([state[:, :-1], updates[:, None]], axis=1)
state = self.np.reshape(state, (state.shape[0],) + nqubits * (2,))
# Put qubit indices back to their proper places
            state = self.np.transpose(state, einsum_utils.reverse_order(order))
else:
matrix = self.np.reshape(matrix, 2 * len(gate.qubits) * (2,))
opstring = batched_einsum_utils.apply_gate_string(gate.qubits, nqubits)
state = self.np.einsum(opstring, state, matrix)
return self.np.reshape(state, (state.shape[0], -1) + (2**nqubits,))

def apply_gates_same_qubits_batched(self, gate_list, state, nqubits):
qubits = gate_list[0].qubits
state = self.np.reshape(state, (state.shape[0],) + nqubits * (2,))
matrix = self.np.reshape(
self.np.vstack([g.matrix(self) for g in gate_list]),
(len(gate_list),) + 2 * len(gate_list[0].qubits) * (2,),
)
        if gate_list[0].is_controlled_by:
            raise NotImplementedError(
                "Batched application of `controlled_by` gates is not supported."
            )
        opstring = batched_einsum_utils.apply_gates_same_qubits_string(
            qubits, nqubits
        )
        state = self.np.einsum(opstring, state, matrix)
return self.np.reshape(state, (state.shape[0], -1) + (2**nqubits,))

def execute_batch_of_circuits(
self, circuits: list[Circuit], initial_state=None, nshots: int = 1000
):

try:
nqubits = circuits[0].nqubits

            if circuits[0].density_matrix:
                # Batched evolution of density matrices is not implemented;
                # such circuits are executed one by one via the fallback below.
                raise NotImplementedError(
                    "Batched execution of density-matrix circuits is not supported."
                )

else:
if initial_state is None:
state = self.zero_state(nqubits)
state = self.np.vstack(
len(circuits) * (state.reshape(1, -1, 2**nqubits),)
)
else:
state = self.cast(initial_state)

for i in range(len(circuits[0].queue)):
_gates = [c.queue[i] for c in circuits]
first_gate = _gates[0]
same_gate = all(isinstance(g, first_gate.__class__) for g in _gates)
same_qubits = all(g.qubits == first_gate.qubits for g in _gates)
par_gate = isinstance(first_gate, gates.ParametrizedGate)
if same_qubits:
if same_gate and not par_gate:
state = self.apply_gate_batched(first_gate, state, nqubits)
else:
state = self.apply_gates_same_qubits_batched(
_gates, state, nqubits
)
else:
raise NotImplementedError

            circuit = circuits[0]
            if circuit.measurements:
                circuit._final_state = BatchedResult(
                    [
                        CircuitResult(s, c.measurements, backend=self, nshots=nshots)
                        for s, c in zip(state, circuits)
                    ]
                )
            else:
                circuit._final_state = BatchedResult(
                    [QuantumState(s, backend=self) for s in state]
                )
            return circuit._final_state
        except NotImplementedError:
            # Fallback: when a layer cannot be vectorized (e.g. gates of
            # different circuits acting on different qubits), execute the
            # circuits one by one and wrap the results in a BatchedResult.
            return BatchedResult(
                [
                    self.execute_circuit(c, initial_state, nshots=nshots)
                    for c in circuits
                ]
            )
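A minimal usage sketch of the batched execution path (hypothetical driver code, assuming the backend class is exported as PyTorchBackend from this module):

    from qibo import Circuit, gates
    from qiboml.backends.pytorch import PyTorchBackend

    backend = PyTorchBackend()

    # Three circuits with identical layout: layer 0 holds parametrized RY
    # gates (one stacked matrix per circuit), layer 1 an identical CNOT
    # (a single matrix applied to the whole batch at once).
    circuits = []
    for theta in (0.1, 0.2, 0.3):
        c = Circuit(2)
        c.add(gates.RY(0, theta=theta))
        c.add(gates.CNOT(0, 1))
        circuits.append(c)

    # Returns a BatchedResult wrapping one QuantumState per circuit.
    result = backend.execute_batch_of_circuits(circuits)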
20 changes: 0 additions & 20 deletions src/qiboml/backends/tensorflow.py
@@ -244,26 +244,6 @@ def calculate_jacobian_matrix(

return tape.jacobian(real, parameters)

def calculate_hamiltonian_matrix_product(self, matrix1, matrix2):
if self.is_sparse(matrix1) or self.is_sparse(matrix2):
raise_error(
NotImplementedError,
"Multiplication of sparse matrices is not supported with Tensorflow.",
)
return super().calculate_hamiltonian_matrix_product(matrix1, matrix2)

def calculate_hamiltonian_state_product(self, matrix, state):
rank = len(tuple(state.shape))
if rank == 1: # vector
return self.np.matmul(matrix, state[:, self.np.newaxis])[:, 0]
elif rank == 2: # matrix
return self.np.matmul(matrix, state)
else:
raise_error(
ValueError,
f"Cannot multiply Hamiltonian with rank-{rank} tensor.",
)

def _test_regressions(self, name):
if name == "test_measurementresult_apply_bitflips":
return [