28 changes: 23 additions & 5 deletions include/na/zoned/Compiler.hpp
@@ -12,6 +12,7 @@

#include "Architecture.hpp"
#include "code_generator/CodeGenerator.hpp"
#include "decomposer/NoOpDecomposer.hpp"
#include "ir/QuantumComputation.hpp"
#include "ir/operations/Operation.hpp"
#include "layout_synthesizer/PlaceAndRouteSynthesizer.hpp"
@@ -43,9 +44,10 @@ namespace na::zoned {
* allowing for better performance than having the components as members of the
* compiler and setting them at runtime.
*/
template <class ConcreteType, class Scheduler, class ReuseAnalyzer,
class LayoutSynthesizer, class CodeGenerator>
template <class ConcreteType, class Scheduler, class Decomposer,
class ReuseAnalyzer, class LayoutSynthesizer, class CodeGenerator>
class Compiler : protected Scheduler,
protected Decomposer,
protected ReuseAnalyzer,
protected LayoutSynthesizer,
protected CodeGenerator {
@@ -59,6 +61,8 @@ class Compiler : protected Scheduler,
struct Config {
/// Configuration for the scheduler
typename Scheduler::Config schedulerConfig{};
/// Configuration for the decomposer
typename Decomposer::Config decomposerConfig{};
/// Configuration for the reuse analyzer
typename ReuseAnalyzer::Config reuseAnalyzerConfig{};
/// Configuration for the layout synthesizer
@@ -68,6 +72,7 @@ class Compiler : protected Scheduler,
/// Log level for the compiler
spdlog::level::level_enum logLevel = spdlog::level::info;
NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT(Config, schedulerConfig,
decomposerConfig,
reuseAnalyzerConfig,
layoutSynthesizerConfig,
codeGeneratorConfig, logLevel);
@@ -78,6 +83,7 @@
*/
struct Statistics {
int64_t schedulingTime; ///< Time taken for scheduling in us
int64_t decomposingTime; ///< Time taken for decomposing in us
int64_t reuseAnalysisTime; ///< Time taken for reuse analysis in us
/// Statistics collected during layout synthesis.
typename LayoutSynthesizer::Statistics layoutSynthesizerStatistics;
@@ -106,6 +112,7 @@
*/
Compiler(const Architecture& architecture, const Config& config)
: Scheduler(architecture, config.schedulerConfig),
Decomposer(architecture, config.decomposerConfig),
ReuseAnalyzer(architecture, config.reuseAnalyzerConfig),
LayoutSynthesizer(architecture, config.layoutSynthesizerConfig),
CodeGenerator(architecture, config.codeGeneratorConfig),
@@ -195,6 +202,17 @@
}
#endif // SPDLOG_ACTIVE_LEVEL <= SPDLOG_LEVEL_DEBUG

SPDLOG_DEBUG("Decomposing...");
const auto decomposingStart = std::chrono::system_clock::now();
const auto& decomposedSingleQubitGateLayers =
SELF.decompose(singleQubitGateLayers);
const auto decomposingEnd = std::chrono::system_clock::now();
statistics_.decomposingTime =
std::chrono::duration_cast<std::chrono::microseconds>(decomposingEnd -
decomposingStart)
.count();
SPDLOG_INFO("Time for decomposing: {}us", statistics_.decomposingTime);

SPDLOG_DEBUG("Analyzing reuse...");
const auto reuseAnalysisStart = std::chrono::system_clock::now();
const auto& reuseQubits = SELF.analyzeReuse(twoQubitGateLayers);
@@ -222,7 +240,7 @@
SPDLOG_DEBUG("Generating code...");
const auto codeGenerationStart = std::chrono::system_clock::now();
NAComputation code =
SELF.generate(singleQubitGateLayers, placement, routing);
SELF.generate(decomposedSingleQubitGateLayers, placement, routing);
const auto codeGenerationEnd = std::chrono::system_clock::now();
assert(code.validate().first);
statistics_.codeGenerationTime =
@@ -257,7 +275,7 @@ class RoutingAgnosticSynthesizer
: PlaceAndRouteSynthesizer(architecture) {}
};
class RoutingAgnosticCompiler final
: public Compiler<RoutingAgnosticCompiler, ASAPScheduler,
: public Compiler<RoutingAgnosticCompiler, ASAPScheduler, NoOpDecomposer,
VertexMatchingReuseAnalyzer, RoutingAgnosticSynthesizer,
CodeGenerator> {
public:
@@ -279,7 +297,7 @@ class RoutingAwareSynthesizer
: PlaceAndRouteSynthesizer(architecture) {}
};
class RoutingAwareCompiler final
: public Compiler<RoutingAwareCompiler, ASAPScheduler,
: public Compiler<RoutingAwareCompiler, ASAPScheduler, NoOpDecomposer,
VertexMatchingReuseAnalyzer, RoutingAwareSynthesizer,
CodeGenerator> {
public:
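Note on the new template parameter: the compiler now mixes in a Decomposer between scheduling and reuse analysis, and both concrete compilers plug in NoOpDecomposer. A minimal sketch of how a different decomposer could be slotted in — MyCompiler and MyDecomposer are hypothetical names, not part of this change; the decomposer only needs the same (Architecture, Config) constructor and decompose() signature as NoOpDecomposer, and its Config must be nlohmann-JSON (de)serializable because Compiler::Config embeds it via NLOHMANN_DEFINE_TYPE_INTRUSIVE_WITH_DEFAULT:

// Sketch only: swapping in a custom decomposer (MyDecomposer is hypothetical).
class MyCompiler final
    : public Compiler<MyCompiler, ASAPScheduler, MyDecomposer,
                      VertexMatchingReuseAnalyzer, RoutingAwareSynthesizer,
                      CodeGenerator> {
public:
  // Inherit the (Architecture, Config) constructors from the Compiler mixin.
  using Compiler::Compiler;
};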
4 changes: 3 additions & 1 deletion include/na/zoned/Types.hpp
@@ -20,8 +20,10 @@

namespace na::zoned {
/// A list of single-qubit gates representing a single-qubit gate layer.
using SingleQubitGateLayer =
using SingleQubitGateRefLayer =
std::vector<std::reference_wrapper<const qc::Operation>>;
/// A list of owned single-qubit gates representing a single-qubit gate layer.
using SingleQubitGateLayer = std::vector<std::unique_ptr<const qc::Operation>>;
/// A pair of qubits as an array that allows iterating over the qubits.
using QubitPair = std::array<qc::Qubit, 2>;
/// A list of two-qubit gates representing a two-qubit gate layer.
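The type split above separates ownership: the scheduler keeps handing out non-owning views into the input circuit (SingleQubitGateRefLayer), while decomposition produces new operations that the layer itself must own (SingleQubitGateLayer). A small sketch of the intended usage, assuming a const qc::Operation& op that lives in the input qc::QuantumComputation and the clone() member used in NoOpDecomposer.cpp below:

// Non-owning layer: references operations owned by the input circuit.
SingleQubitGateRefLayer refLayer;
refLayer.emplace_back(std::cref(op));

// Owning layer: holds operations created during decomposition.
SingleQubitGateLayer ownedLayer;
ownedLayer.emplace_back(op.clone());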
35 changes: 35 additions & 0 deletions include/na/zoned/decomposer/DecomposerBase.hpp
@@ -0,0 +1,35 @@
/*
* Copyright (c) 2023 - 2026 Chair for Design Automation, TUM
* Copyright (c) 2025 - 2026 Munich Quantum Software Company GmbH
* All rights reserved.
*
* SPDX-License-Identifier: MIT
*
* Licensed under the MIT License
*/

#pragma once

#include "na/zoned/Types.hpp"

#include <vector>

namespace na::zoned {
/**
 * The Abstract Base Class for the Decomposer of MQT's Zoned Neutral Atom
* Compiler.
*/
class DecomposerBase {
public:
virtual ~DecomposerBase() = default;
/**
* This function defines the interface of the decomposer.
   * @param singleQubitGateLayers are the layers of single-qubit gates that are
   * to be decomposed into the native gate set.
* @return the new single-qubit gate layers
*/
[[nodiscard]] virtual auto decompose(
const std::vector<SingleQubitGateRefLayer>& singleQubitGateLayers) const
-> std::vector<SingleQubitGateLayer> = 0;
};
} // namespace na::zoned
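To make the contract concrete, here is an illustrative fragment showing the mechanics a concrete decompose() would use to replace one gate by several owned operations — a Hadamard rewritten as Z followed by RY(pi/2), since H = RY(pi/2) * Z. This is a sketch only and not part of this PR: it assumes mqt-core's qc::StandardOperation(target, type, params) constructor, and a production decomposer for this architecture would instead target the gates the CodeGenerator emits natively (local RZ variants and global RY).

// Hypothetical fragment inside a concrete decompose(), handling one op of a layer.
SingleQubitGateLayer newLayer;
const qc::Qubit q = op.get().getTargets().front();
if (op.get().getType() == qc::H) {
  // Illustrative rewrite: H == RY(pi/2) * Z, i.e. apply Z first, then RY(pi/2).
  newLayer.emplace_back(std::make_unique<qc::StandardOperation>(q, qc::Z));
  newLayer.emplace_back(std::make_unique<qc::StandardOperation>(
      q, qc::RY, std::vector{qc::PI_2}));
} else {
  // Anything left untouched is simply cloned into owning storage.
  newLayer.emplace_back(op.get().clone());
}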
54 changes: 54 additions & 0 deletions include/na/zoned/decomposer/NoOpDecomposer.hpp
@@ -0,0 +1,54 @@
/*
* Copyright (c) 2023 - 2026 Chair for Design Automation, TUM
* Copyright (c) 2025 - 2026 Munich Quantum Software Company GmbH
* All rights reserved.
*
* SPDX-License-Identifier: MIT
*
* Licensed under the MIT License
*/

#pragma once

#include "na/zoned/Architecture.hpp"
#include "na/zoned/Types.hpp"
#include "na/zoned/decomposer/DecomposerBase.hpp"

#include <vector>

namespace na::zoned {
/**
 * The class NoOpDecomposer implements a no-op decomposer that simply clones
 * every operation unchanged.
*/
class NoOpDecomposer : public DecomposerBase {

public:
/// The configuration of the NoOpDecomposer
struct Config {
template <
typename BasicJsonType,
nlohmann::detail::enable_if_t<
nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0>
friend void to_json(BasicJsonType& /* unused */,
const Config& /* unused */) {}

template <
typename BasicJsonType,
nlohmann::detail::enable_if_t<
nlohmann::detail::is_basic_json<BasicJsonType>::value, int> = 0>
friend void from_json(const BasicJsonType& /* unused */,
Config& /* unused */) {}
};

/**
* Create a new NoOpDecomposer.
*/
NoOpDecomposer(const Architecture& /* unused */, const Config& /* unused */) {
}

[[nodiscard]] auto decompose(
const std::vector<SingleQubitGateRefLayer>& singleQubitGateLayers) const
-> std::vector<SingleQubitGateLayer> override;
};
} // namespace na::zoned
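The empty friend to_json/from_json above make the configuration a no-op for serialization — converting a Config to JSON writes nothing, and any JSON fed back is ignored — which is just enough for the NLOHMANN macro in Compiler::Config to keep working. A quick sketch of the expected round-trip, assuming nlohmann::json as used elsewhere in the codebase:

// Sketch: NoOpDecomposer::Config carries no data, so both conversions are no-ops.
NoOpDecomposer::Config cfg{};
nlohmann::json j = cfg;                           // to_json writes nothing, so j remains null
auto restored = j.get<NoOpDecomposer::Config>();  // from_json ignores j entirely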
2 changes: 1 addition & 1 deletion include/na/zoned/scheduler/ASAPScheduler.hpp
@@ -67,7 +67,7 @@ class ASAPScheduler : public SchedulerBase {
* operations. A pair of qubits represents every two-qubit operation.
*/
[[nodiscard]] auto schedule(const qc::QuantumComputation& qc) const
-> std::pair<std::vector<SingleQubitGateLayer>,
-> std::pair<std::vector<SingleQubitGateRefLayer>,
std::vector<TwoQubitGateLayer>>;
};
} // namespace na::zoned
2 changes: 1 addition & 1 deletion include/na/zoned/scheduler/SchedulerBase.hpp
@@ -32,7 +32,7 @@ class SchedulerBase {
* operations. A pair of qubits represents every two-qubit operation.
*/
[[nodiscard]] virtual auto schedule(const qc::QuantumComputation& qc) const
-> std::pair<std::vector<SingleQubitGateLayer>,
-> std::pair<std::vector<SingleQubitGateRefLayer>,
std::vector<TwoQubitGateLayer>> = 0;
};
} // namespace na::zoned
75 changes: 35 additions & 40 deletions src/na/zoned/code_generator/CodeGenerator.cpp
@@ -62,12 +62,11 @@ auto CodeGenerator::appendSingleQubitGates(
// This flag is used for circuit consisting of only one qubit since in this
// case, global and local gates are the same.
bool singleQubitGate = false;
if (op.get().isGlobal(nQubits)) {
if (op->isGlobal(nQubits)) {
// a global operation can be wrapped in a compound operation or a standard
// operation acting on all qubits
if (op.get().isCompoundOperation()) {
const auto& compOp =
dynamic_cast<const qc::CompoundOperation&>(op.get());
if (op->isCompoundOperation()) {
const auto& compOp = dynamic_cast<const qc::CompoundOperation&>(*op);
const auto opType = compOp.front()->getType();
if (opType == qc::RY) {
code.emplaceBack<GlobalRYOp>(globalZone,
@@ -80,9 +79,8 @@
assert(false);
}
} else {
if (const auto opType = op.get().getType(); opType == qc::RY) {
code.emplaceBack<GlobalRYOp>(globalZone,
op.get().getParameter().front());
if (const auto opType = op->getType(); opType == qc::RY) {
code.emplaceBack<GlobalRYOp>(globalZone, op->getParameter().front());
} else if (opType == qc::Y) {
code.emplaceBack<GlobalRYOp>(globalZone, qc::PI);
} else if (nQubits == 1) {
@@ -101,67 +99,64 @@
if (singleQubitGate) {
// one qubit gates act exactly on one qubit and are converted to local
// gates
assert(op.get().getNqubits() == 1);
const qc::Qubit qubit = op.get().getTargets().front();
assert(op->getNqubits() == 1);
const qc::Qubit qubit = op->getTargets().front();
// By default, all variants of rotational z-gates are supported
if (op.get().getType() == qc::RZ || op.get().getType() == qc::P) {
code.emplaceBack<LocalRZOp>(atoms[qubit],
op.get().getParameter().front());
} else if (op.get().getType() == qc::Z) {
if (op->getType() == qc::RZ || op->getType() == qc::P) {
code.emplaceBack<LocalRZOp>(atoms[qubit], op->getParameter().front());
} else if (op->getType() == qc::Z) {
code.emplaceBack<LocalRZOp>(atoms[qubit], qc::PI);
} else if (op.get().getType() == qc::S) {
} else if (op->getType() == qc::S) {
code.emplaceBack<LocalRZOp>(atoms[qubit], qc::PI_2);
} else if (op.get().getType() == qc::Sdg) {
} else if (op->getType() == qc::Sdg) {
code.emplaceBack<LocalRZOp>(atoms[qubit], -qc::PI_2);
} else if (op.get().getType() == qc::T) {
} else if (op->getType() == qc::T) {
code.emplaceBack<LocalRZOp>(atoms[qubit], qc::PI_4);
} else if (op.get().getType() == qc::Tdg) {
} else if (op->getType() == qc::Tdg) {
code.emplaceBack<LocalRZOp>(atoms[qubit], -qc::PI_4);
} else {
// in this case, the gate is not any variant of a rotational z-gate.
// depending on the settings, a warning is printed.
if (config_.warnUnsupportedGates) {
SPDLOG_WARN(
"Gate not part of basis gates will be inserted as U3 gate: {}",
qc::toString(op.get().getType()));
qc::toString(op->getType()));
}
if (op.get().getType() == qc::U) {
code.emplaceBack<LocalUOp>(
atoms[qubit], op.get().getParameter().front(),
op.get().getParameter().at(1), op.get().getParameter().at(2));
} else if (op.get().getType() == qc::U2) {
if (op->getType() == qc::U) {
code.emplaceBack<LocalUOp>(atoms[qubit], op->getParameter().front(),
op->getParameter().at(1),
op->getParameter().at(2));
} else if (op->getType() == qc::U2) {
code.emplaceBack<LocalUOp>(atoms[qubit], qc::PI_2,
op.get().getParameter().front(),
op.get().getParameter().at(1));
} else if (op.get().getType() == qc::RX) {
code.emplaceBack<LocalUOp>(atoms[qubit],
op.get().getParameter().front(), -qc::PI_2,
qc::PI_2);
} else if (op.get().getType() == qc::RY) {
code.emplaceBack<LocalUOp>(atoms[qubit],
op.get().getParameter().front(), 0, 0);
} else if (op.get().getType() == qc::H) {
op->getParameter().front(),
op->getParameter().at(1));
} else if (op->getType() == qc::RX) {
code.emplaceBack<LocalUOp>(atoms[qubit], op->getParameter().front(),
-qc::PI_2, qc::PI_2);
} else if (op->getType() == qc::RY) {
code.emplaceBack<LocalUOp>(atoms[qubit], op->getParameter().front(),
0, 0);
} else if (op->getType() == qc::H) {
code.emplaceBack<LocalUOp>(atoms[qubit], qc::PI_2, 0, qc::PI);
} else if (op.get().getType() == qc::X) {
} else if (op->getType() == qc::X) {
code.emplaceBack<LocalUOp>(atoms[qubit], qc::PI, 0, qc::PI);
} else if (op.get().getType() == qc::Y) {
} else if (op->getType() == qc::Y) {
code.emplaceBack<LocalUOp>(atoms[qubit], qc::PI, qc::PI_2, qc::PI_2);
} else if (op.get().getType() == qc::Vdg) {
} else if (op->getType() == qc::Vdg) {
code.emplaceBack<LocalUOp>(atoms[qubit], -qc::PI_2, qc::PI_2,
-qc::PI_2);
} else if (op.get().getType() == qc::SX) {
} else if (op->getType() == qc::SX) {
code.emplaceBack<LocalUOp>(atoms[qubit], qc::PI_2, -qc::PI_2,
qc::PI_2);
} else if (op.get().getType() == qc::SXdg ||
op.get().getType() == qc::V) {
} else if (op->getType() == qc::SXdg || op->getType() == qc::V) {
code.emplaceBack<LocalUOp>(atoms[qubit], -qc::PI_2, -qc::PI_2,
qc::PI_2);
} else {
// if the gate type is not recognized, an error is printed and the
// gate is not included in the output.
std::ostringstream oss;
oss << "\033[1;31m[ERROR]\033[0m Unsupported single-qubit gate: "
<< op.get().getType() << "\n";
<< op->getType() << "\n";
throw std::invalid_argument(oss.str());
}
}
35 changes: 35 additions & 0 deletions src/na/zoned/decomposer/NoOpDecomposer.cpp
@@ -0,0 +1,35 @@
/*
* Copyright (c) 2023 - 2026 Chair for Design Automation, TUM
* Copyright (c) 2025 - 2026 Munich Quantum Software Company GmbH
* All rights reserved.
*
* SPDX-License-Identifier: MIT
*
* Licensed under the MIT License
*/

#include "na/zoned/decomposer/NoOpDecomposer.hpp"

#include "ir/QuantumComputation.hpp"
#include "na/zoned/Architecture.hpp"

#include <utility>
#include <vector>

namespace na::zoned {
auto NoOpDecomposer::decompose(
const std::vector<SingleQubitGateRefLayer>& singleQubitGateLayers) const
-> std::vector<SingleQubitGateLayer> {
std::vector<SingleQubitGateLayer> result;
result.reserve(singleQubitGateLayers.size());
for (const auto& layer : singleQubitGateLayers) {
SingleQubitGateLayer newLayer;
newLayer.reserve(layer.size());
for (const auto& opRef : layer) {
newLayer.emplace_back(opRef.get().clone());
}
result.emplace_back(std::move(newLayer));
}
return result;
}
} // namespace na::zoned
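As the implementation shows, the pass only deep-copies: the layer count and per-layer sizes are preserved, and each operation is cloned into owning storage. A tiny usage sketch, assuming an Architecture instance named architecture and scheduler output named singleQubitGateLayers are in scope (both names are placeholders):

// Sketch: running the no-op decomposer preserves the layer structure.
const NoOpDecomposer decomposer{architecture, NoOpDecomposer::Config{}};
const auto decomposed = decomposer.decompose(singleQubitGateLayers);
assert(decomposed.size() == singleQubitGateLayers.size());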
6 changes: 3 additions & 3 deletions src/na/zoned/scheduler/ASAPScheduler.cpp
@@ -53,14 +53,14 @@ ASAPScheduler::ASAPScheduler(const Architecture& architecture,
}
}
auto ASAPScheduler::schedule(const qc::QuantumComputation& qc) const
-> std::pair<std::vector<SingleQubitGateLayer>,
-> std::pair<std::vector<SingleQubitGateRefLayer>,
std::vector<TwoQubitGateLayer>> {
if (qc.empty()) {
// early exit if there are no operations to schedule
return std::pair{std::vector<SingleQubitGateLayer>{},
return std::pair{std::vector<SingleQubitGateRefLayer>{},
std::vector<TwoQubitGateLayer>{}};
}
std::vector<SingleQubitGateLayer> singleQubitGateLayers(1);
std::vector<SingleQubitGateRefLayer> singleQubitGateLayers(1);
std::vector<TwoQubitGateLayer> twoQubitGateLayers(0);
// the following vector contains a mapping from qubits to the layer where
// the next two-qubit gate can be scheduled for that qubit, i.e., the layer