15 changes: 11 additions & 4 deletions docs/gdb-debugging.md
@@ -1,14 +1,16 @@
# GDB Debugging with Firecracker

**The GDB feature is not for production use.**

Firecracker supports debugging the guest kernel via GDB remote serial protocol.
This allows us to connect GDB to the firecracker process and step through and debug
the guest kernel. Currently only debugging on x86 is supported.
the guest kernel.

The GDB feature requires Firecracker to be booted with a config file.

## Prerequisites

Firstly, to enable GDB debugging we need to compile Firecracker with the `debug`
Firstly, to enable GDB debugging we need to compile Firecracker with the `gdb`
feature enabled; this will enable the necessary components for the debugging
process.

@@ -102,9 +104,14 @@ command in the GDB session which will terminate both.
mitigated by setting these kernel config values:

```
CONFIG_SCHED_MC=y
CONFIG_SCHED_MC_PRIO=y
CONFIG_SCHED_MC=n
CONFIG_SCHED_MC_PRIO=n
```

- Currently we support a limited subset of CPU registers for get and set
operations; if more are required, feel free to contribute.

- On ARM the guest virtual address translation will only work on guests with 4KB
pages and not all physical address sizes are supported. If the current
translation implementation doesn't cover a specific setup, feel free to
contribute.
10 changes: 10 additions & 0 deletions src/vmm/src/arch/aarch64/regs.rs
@@ -99,6 +99,16 @@ arm64_sys_reg!(SYS_CNTV_CVAL_EL0, 3, 3, 14, 3, 2);
// https://elixir.bootlin.com/linux/v6.8/source/arch/arm64/include/asm/sysreg.h#L459
arm64_sys_reg!(SYS_CNTPCT_EL0, 3, 3, 14, 0, 1);

// Translation Table Base Register
// https://developer.arm.com/documentation/ddi0595/2021-03/AArch64-Registers/TTBR1-EL1--Translation-Table-Base-Register-1--EL1-
arm64_sys_reg!(TTBR1_EL1, 3, 0, 2, 0, 1);
// Translation Control Register
// https://developer.arm.com/documentation/ddi0601/2024-09/AArch64-Registers/TCR-EL1--Translation-Control-Register--EL1-
arm64_sys_reg!(TCR_EL1, 3, 0, 2, 0, 2);
// AArch64 Memory Model Feature Register
// https://developer.arm.com/documentation/100798/0400/register-descriptions/aarch64-system-registers/id-aa64mmfr0-el1--aarch64-memory-model-feature-register-0--el1
arm64_sys_reg!(ID_AA64MMFR0_EL1, 3, 0, 0, 7, 0);

/// Vector lengths pseudo-register
/// TODO: this can be removed after https://github.com/rust-vmm/kvm-bindings/pull/89
/// is merged and new version is used in Firecracker.
3 changes: 0 additions & 3 deletions src/vmm/src/builder.rs
@@ -28,9 +28,6 @@ use vm_superio::Rtc;
use vm_superio::Serial;
use vmm_sys_util::eventfd::EventFd;

#[cfg(all(feature = "gdb", target_arch = "aarch64"))]
compile_error!("GDB feature not supported on ARM");

#[cfg(target_arch = "x86_64")]
use crate::acpi;
use crate::arch::InitrdConfig;
299 changes: 273 additions & 26 deletions src/vmm/src/gdb/arch/aarch64.rs
@@ -1,62 +1,309 @@
// Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

use std::mem::offset_of;

use gdbstub_arch::aarch64::reg::AArch64CoreRegs as CoreRegs;
use kvm_bindings::{
kvm_guest_debug, kvm_regs, user_pt_regs, KVM_GUESTDBG_ENABLE, KVM_GUESTDBG_SINGLESTEP,
KVM_GUESTDBG_USE_HW, KVM_GUESTDBG_USE_SW_BP, KVM_REG_ARM64, KVM_REG_ARM_CORE, KVM_REG_SIZE_U64,
};
use kvm_ioctls::VcpuFd;
use vm_memory::GuestAddress;
use vm_memory::{Bytes, GuestAddress};

use crate::arch::aarch64::regs::{
arm64_core_reg_id, Aarch64RegisterVec, ID_AA64MMFR0_EL1, TCR_EL1, TTBR1_EL1,
};
use crate::arch::aarch64::vcpu::get_registers;
use crate::gdb::target::GdbTargetError;
use crate::Vmm;

/// Configures the number of bytes required for a software breakpoint.
///
/// The breakpoint instruction operation also includes the immediate argument, which we set to 0,
/// hence the size.
pub const SW_BP_SIZE: usize = 4;

/// The bytes stored for a software breakpoint.
///
/// This is the BRK instruction with a 0 immediate argument.
/// https://developer.arm.com/documentation/ddi0602/2024-09/Base-Instructions/BRK--Breakpoint-instruction-
pub const SW_BP: [u8; SW_BP_SIZE] = [0, 0, 32, 212];
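As a quick sanity check on the bytes above (an illustrative sketch, not part of the patch): `BRK #0` encodes as `0xD420_0000`, whose little-endian byte order matches the `SW_BP` array.

```
fn main() {
    // BRK #imm16 is encoded as 0xD420_0000 | (imm16 << 5); with imm16 == 0 the
    // little-endian byte sequence is exactly the SW_BP constant above.
    let brk0: u32 = 0xD420_0000;
    assert_eq!(brk0.to_le_bytes(), [0, 0, 32, 212]);
}
```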

/// Register id for the program counter
const PC_REG_ID: u64 = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pc));

/// Retrieve a single register from a Vcpu
fn get_sys_reg(reg: u64, vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> {
let mut register_vec = Aarch64RegisterVec::default();
get_registers(vcpu_fd, &[reg], &mut register_vec)?;
let register = register_vec
.iter()
.next()
.ok_or(GdbTargetError::ReadRegisterVecError)?;

Ok(register.value())
}

/// Gets the PC value for a Vcpu
pub fn get_instruction_pointer(vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> {
get_sys_reg(PC_REG_ID, vcpu_fd)
}

/// Configures the number of bytes required for a software breakpoint
pub const SW_BP_SIZE: usize = 1;
/// Helper to extract a specific number of bits at an offset from a u64
macro_rules! extract_bits_64 {
($value: tt, $offset: tt, $length: tt) => {
($value >> $offset) & (!0u64 >> (64 - $length))
};
}
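For reference, a minimal sketch of what `extract_bits_64!` computes, written as a plain function and exercised with an illustrative TCR_EL1 value (the value is assumed for the example, not taken from the patch):

```
/// Shift right by `offset`, then keep the low `length` bits.
fn extract_bits_64(value: u64, offset: u32, length: u32) -> u64 {
    (value >> offset) & (!0u64 >> (64 - length))
}

fn main() {
    // TCR_EL1 with TG1 (bits 31:30) set to 0b10, i.e. a 4KB granule for TTBR1.
    let tcr_el1: u64 = 0b10 << 30;
    assert_eq!(extract_bits_64(tcr_el1, 30, 2), 2);
}
```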

/// Mask to clear the last 3 bits from the page table entry
const PTE_ADDRESS_MASK: u64 = !0b111u64;

/// The bytes stored for a software breakpoint
pub const SW_BP: [u8; SW_BP_SIZE] = [0];
/// Read a u64 value from a guest memory address
fn read_address(vmm: &Vmm, address: u64) -> Result<u64, GdbTargetError> {
let mut buf = [0; 8];
vmm.guest_memory().read(&mut buf, GuestAddress(address))?;

/// Gets the RIP value for a Vcpu
pub fn get_instruction_pointer(_vcpu_fd: &VcpuFd) -> Result<u64, GdbTargetError> {
unimplemented!()
Ok(u64::from_le_bytes(buf))
}

/// Translates a virtual address according to the vCPU's current address translation mode.
pub fn translate_gva(_vcpu_fd: &VcpuFd, _gva: u64) -> Result<u64, GdbTargetError> {
unimplemented!()
/// The grainsize used with 4KB paging
const GRAIN_SIZE: usize = 9;

/// Translates a virtual address according to the Vcpu's current address translation mode.
/// Returns the GPA (guest physical address)
///
/// To simplify the implementation we've made some assumptions about the paging setup.
/// Here we first assert that paging is set up and that these assumptions hold.
pub fn translate_gva(vcpu_fd: &VcpuFd, gva: u64, vmm: &Vmm) -> Result<u64, GdbTargetError> {
// Check this virtual address is in kernel space
if extract_bits_64!(gva, 55, 1) == 0 {
return Err(GdbTargetError::GvaTranslateError);
}

// Translation control register
let tcr_el1: u64 = get_sys_reg(TCR_EL1, vcpu_fd)?;

// If this is 0 then translation is not yet ready
if extract_bits_64!(tcr_el1, 16, 6) == 0 {
return Ok(gva);
}

// Check 4KB pages are being used
if extract_bits_64!(tcr_el1, 30, 2) != 2 {
return Err(GdbTargetError::GvaTranslateError);
}

// ID_AA64MMFR0_EL1 provides information about the implemented memory model and memory
// management. Check this is a physical address size we support
let pa_size = match get_sys_reg(ID_AA64MMFR0_EL1, vcpu_fd)? & 0b1111 {
0 => 32,
1 => 36,
2 => 40,
3 => 42,
4 => 44,
5 => 48,
_ => return Err(GdbTargetError::GvaTranslateError),
};

// A mask of the physical address size for a virtual address
let pa_address_mask: u64 = !0u64 >> (64 - pa_size);
// A mask used to take the bottom 12 bits of a value; this follows from the grain size of 9
// asserted with our 4KB page, plus the offset of 3
let lower_mask: u64 = 0xFFF;
// A mask for a physical address mask with the lower 12 bits cleared
let desc_mask: u64 = pa_address_mask & !lower_mask;

let page_indices = [
(gva >> (GRAIN_SIZE * 4)) & lower_mask,
(gva >> (GRAIN_SIZE * 3)) & lower_mask,
(gva >> (GRAIN_SIZE * 2)) & lower_mask,
(gva >> GRAIN_SIZE) & lower_mask,
];

// Translation table base register used for the initial table lookup.
// Take the bottom bits, up to the physical address size, from the register value.
let mut address: u64 = get_sys_reg(TTBR1_EL1, vcpu_fd)? & pa_address_mask;
let mut level = 0;

while level < 4 {
// Clear the bottom 3 bits from this address
let pte = read_address(vmm, (address + page_indices[level]) & PTE_ADDRESS_MASK)?;
address = pte & desc_mask;

// If this is a valid table entry and we aren't at the end of the page tables
// then loop again and check next level
if (pte & 2 != 0) && (level < 3) {
level += 1;
continue;
}
break;
}

// Generate a mask to split between the page table entry and the GVA. The split point
// depends on the level at which the walk terminated. It is calculated by multiplying the
// grain size by the number of remaining levels (4 - level) and adding the 3-bit offset
let page_size = 1u64 << ((GRAIN_SIZE * (4 - level)) + 3);
// Clear bottom bits of page size
address &= !(page_size - 1);
address |= gva & (page_size - 1);
Ok(address)
}
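As a worked check of the final mask arithmetic (illustrative only): with `GRAIN_SIZE = 9`, a walk that terminates at the last level (level 3) yields a 4 KiB page, while terminating one level earlier yields a 2 MiB block.

```
fn main() {
    const GRAIN_SIZE: usize = 9;
    // Level 3 (page descriptor): 1 << (9 * 1 + 3) == 4096 bytes.
    assert_eq!(1u64 << ((GRAIN_SIZE * (4 - 3)) + 3), 4096);
    // Level 2 (block descriptor): 1 << (9 * 2 + 3) == 2 MiB.
    assert_eq!(1u64 << ((GRAIN_SIZE * (4 - 2)) + 3), 2 * 1024 * 1024);
}
```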

/// Configures the kvm guest debug regs to register the hardware breakpoints
fn set_kvm_debug(
_control: u32,
_vcpu_fd: &VcpuFd,
_addrs: &[GuestAddress],
control: u32,
vcpu_fd: &VcpuFd,
addrs: &[GuestAddress],
) -> Result<(), GdbTargetError> {
unimplemented!()
let mut dbg = kvm_guest_debug {
control,
..Default::default()
};

for (i, addr) in addrs.iter().enumerate() {
// DBGBCR_EL1 (Debug Breakpoint Control Registers, D13.3.2):
// bit 0: 1 (Enabled)
// bit 1~2: 0b11 (PMC = EL1/EL0)
// bit 5~8: 0b1111 (BAS = AArch64)
// others: 0
dbg.arch.dbg_bcr[i] = 0b1 | (0b11 << 1) | (0b1111 << 5);
// DBGBVR_EL1 (Debug Breakpoint Value Registers, D13.3.3):
// bit 2~52: VA[2:52]
dbg.arch.dbg_bvr[i] = (!0u64 >> 11) & addr.0;
}

vcpu_fd.set_guest_debug(&dbg)?;

Ok(())
}
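A quick check of the breakpoint control value built in `set_kvm_debug` above (illustrative sketch): enabled (bit 0), PMC = 0b11 (bits 2:1) and BAS = 0b1111 (bits 8:5) combine to 0x1E7, and the DBGBVR mask keeps the low 53 bits of the breakpoint address.

```
fn main() {
    // Same expression as dbg.arch.dbg_bcr[i] in set_kvm_debug.
    let bcr: u64 = 0b1 | (0b11 << 1) | (0b1111 << 5);
    assert_eq!(bcr, 0x1E7);
    // Same mask applied to dbg.arch.dbg_bvr[i]: keep VA bits [52:0].
    assert_eq!(!0u64 >> 11, (1u64 << 53) - 1);
}
```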

/// Bits in a Vcpu pstate for IRQ
const IRQ_ENABLE_FLAGS: u64 = 0x80 | 0x40;
/// Register id for pstate
const PSTATE_ID: u64 = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pstate));

/// Disable IRQ interrupts to avoid getting stuck in a loop while single stepping
///
/// When GDB hits a single breakpoint and resumes it will follow the steps:
/// - Clear SW breakpoint we've hit
/// - Single step
/// - Re-insert the SW breakpoint
/// - Resume
/// However, with IRQs enabled the single step takes us into the IRQ handler, so when we resume we
/// immediately hit the SW breakpoint we just re-inserted, getting stuck in a loop.
fn toggle_interrupts(vcpu_fd: &VcpuFd, enable: bool) -> Result<(), GdbTargetError> {
let mut pstate = get_sys_reg(PSTATE_ID, vcpu_fd)?;

if enable {
pstate |= IRQ_ENABLE_FLAGS;
} else {
pstate &= !IRQ_ENABLE_FLAGS;
}

vcpu_fd.set_one_reg(PSTATE_ID, &pstate.to_le_bytes())?;

Ok(())
}

/// Configures the Vcpu for debugging and sets the hardware breakpoints on the Vcpu
pub fn vcpu_set_debug(
_vcpu_fd: &VcpuFd,
_addrs: &[GuestAddress],
_step: bool,
vcpu_fd: &VcpuFd,
addrs: &[GuestAddress],
step: bool,
) -> Result<(), GdbTargetError> {
unimplemented!()
let mut control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW | KVM_GUESTDBG_USE_SW_BP;
if step {
control |= KVM_GUESTDBG_SINGLESTEP;
}

toggle_interrupts(vcpu_fd, step)?;
set_kvm_debug(control, vcpu_fd, addrs)
}

/// Injects a BP back into the guest kernel for it to handle, this is particularly useful for the
/// kernels selftesting which can happen during boot.
/// KVM does not support injecting breakpoints on aarch64 so this is a no-op
pub fn vcpu_inject_bp(
_vcpu_fd: &VcpuFd,
_addrs: &[GuestAddress],
_step: bool,
) -> Result<(), GdbTargetError> {
unimplemented!()
Ok(())
}
/// The number of general purpose registers
const GENERAL_PURPOSE_REG_COUNT: usize = 31;
/// The number of core registers we read from the Vcpu
const CORE_REG_COUNT: usize = 33;
/// Stores the register ids of registers to be read from the Vcpu
const CORE_REG_IDS: [u64; CORE_REG_COUNT] = {
let mut regs = [0; CORE_REG_COUNT];
let mut idx = 0;

let reg_offset = offset_of!(kvm_regs, regs);
let mut off = reg_offset;
while idx < GENERAL_PURPOSE_REG_COUNT {
regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, off);
idx += 1;
off += std::mem::size_of::<u64>();
}

regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, sp));
idx += 1;

regs[idx] = arm64_core_reg_id!(KVM_REG_SIZE_U64, offset_of!(user_pt_regs, pc));
regs
};
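As a small consistency check on the constants above (illustrative): the 31 general purpose registers plus `sp` and `pc` account for the 33 core register ids.

```
fn main() {
    const GENERAL_PURPOSE_REG_COUNT: usize = 31;
    const CORE_REG_COUNT: usize = 33;
    // x0..x30, plus sp and pc.
    assert_eq!(GENERAL_PURPOSE_REG_COUNT + 2, CORE_REG_COUNT);
}
```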

/// Reads the registers for the Vcpu
pub fn read_registers(_vcpu_fd: &VcpuFd, _regs: &mut CoreRegs) -> Result<(), GdbTargetError> {
unimplemented!()
pub fn read_registers(vcpu_fd: &VcpuFd, regs: &mut CoreRegs) -> Result<(), GdbTargetError> {
let mut register_vec = Aarch64RegisterVec::default();
get_registers(vcpu_fd, &CORE_REG_IDS, &mut register_vec)?;

let mut registers = register_vec.iter();

for i in 0..GENERAL_PURPOSE_REG_COUNT {
regs.x[i] = registers
.next()
.ok_or(GdbTargetError::ReadRegisterVecError)?
.value();
}

regs.sp = registers
.next()
.ok_or(GdbTargetError::ReadRegisterVecError)?
.value();

regs.pc = registers
.next()
.ok_or(GdbTargetError::ReadRegisterVecError)?
.value();

Ok(())
}

/// Writes to the registers for the Vcpu
pub fn write_registers(_vcpu_fd: &VcpuFd, _regs: &CoreRegs) -> Result<(), GdbTargetError> {
unimplemented!()
pub fn write_registers(vcpu_fd: &VcpuFd, regs: &CoreRegs) -> Result<(), GdbTargetError> {
let kreg_off = offset_of!(kvm_regs, regs);
let mut off = kreg_off;
for i in 0..GENERAL_PURPOSE_REG_COUNT {
vcpu_fd.set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off),
&regs.x[i].to_le_bytes(),
)?;
off += std::mem::size_of::<u64>();
}

let off = offset_of!(user_pt_regs, sp);
vcpu_fd.set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off + kreg_off),
&regs.sp.to_le_bytes(),
)?;

let off = offset_of!(user_pt_regs, pc);
vcpu_fd.set_one_reg(
arm64_core_reg_id!(KVM_REG_SIZE_U64, off + kreg_off),
&regs.pc.to_le_bytes(),
)?;

Ok(())
}