Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
23 changes: 22 additions & 1 deletion src/bin/ch-remote.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ mod test_util;

use std::io::Read;
use std::marker::PhantomData;
use std::num::NonZeroU32;
use std::os::unix::net::UnixStream;
use std::process;

Expand Down Expand Up @@ -505,6 +506,11 @@ fn rest_api_do_command(matches: &ArgMatches, socket: &mut UnixStream) -> ApiResu
.unwrap()
.get_one::<u64>("migration-timeout-s")
.unwrap_or(&3600),
*matches
.subcommand_matches("send-migration")
.unwrap()
.get_one::<NonZeroU32>("connections")
.unwrap_or(&NonZeroU32::new(1).unwrap()),
);
simple_api_command(socket, "PUT", "send-migration", Some(&send_migration_data))
.map_err(Error::HttpApiClient)
Expand Down Expand Up @@ -935,12 +941,19 @@ fn receive_migration_data(url: &str) -> String {
serde_json::to_string(&receive_migration_data).unwrap()
}

fn send_migration_data(url: &str, local: bool, downtime: u64, migration_timeout: u64) -> String {
fn send_migration_data(
url: &str,
local: bool,
downtime: u64,
migration_timeout: u64,
connections: NonZeroU32,
) -> String {
let send_migration_data = vmm::api::VmSendMigrationData {
destination_url: url.to_owned(),
local,
downtime,
migration_timeout,
connections,
};

serde_json::to_string(&send_migration_data).unwrap()
Expand Down Expand Up @@ -1116,6 +1129,14 @@ fn get_cli_commands_sorted() -> Box<[Command]> {
Command::new("resume").about("Resume the VM"),
Command::new("send-migration")
.about("Initiate a VM migration")
.arg(
    Arg::new("connections")
        .long("connections")
        .help("The number of connections to use for the migration")
        .num_args(1)
        // Must parse as NonZeroU32: rest_api_do_command() reads this
        // value back with get_one::<NonZeroU32>(), and clap panics at
        // runtime ("Mismatch between definition and access") when the
        // configured parser type and the access type differ.
        .value_parser(clap::value_parser!(NonZeroU32))
        .default_value("1"),
)
.arg(
Arg::new("downtime-ms")
.long("downtime-ms")
Expand Down
140 changes: 138 additions & 2 deletions vm-migration/src/protocol.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,7 +211,7 @@ impl Response {
}

#[repr(C)]
#[derive(Clone, Default, Serialize, Deserialize)]
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq, Eq)]
pub struct MemoryRange {
pub gpa: u64,
pub length: u64,
Expand Down Expand Up @@ -244,12 +244,89 @@ impl MemoryRange {
}
}

#[derive(Clone, Default, Serialize, Deserialize)]
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct MemoryRangeTable {
data: Vec<MemoryRange>,
}

/// Iterator that splits a [`MemoryRangeTable`] into chunks whose summed
/// range length does not exceed a fixed byte budget
/// (see [`MemoryRangeTable::partition`]).
#[derive(Debug, Clone, Default)]
struct MemoryRangeTableIterator {
    // Upper bound, in bytes, on the total length of the ranges in each
    // table yielded by `next()`.
    chunk_size: u64,
    // Ranges not yet handed out; consumed from the back via `pop`, so
    // chunks come out in reverse of the original table order.
    data: Vec<MemoryRange>,
}

impl MemoryRangeTableIterator {
pub fn new(table: &MemoryRangeTable, chunk_size: u64) -> Self {
MemoryRangeTableIterator {
chunk_size,
data: table.data.clone(),
}
}
}

impl Iterator for MemoryRangeTableIterator {
    type Item = MemoryRangeTable;

    /// Return the next chunk of ranges as a [`MemoryRangeTable`] whose
    /// summed range length is at most `chunk_size`, or `None` once all
    /// ranges have been handed out.
    ///
    /// **Note**: Do not rely on the order of the ranges returned by this
    /// iterator. This allows for a more efficient implementation.
    ///
    /// NOTE(review): with `chunk_size == 0` this returns `None` on the first
    /// call, silently discarding any remaining ranges -- confirm callers
    /// never pass a zero chunk size.
    fn next(&mut self) -> Option<Self::Item> {
        // Ranges collected for the chunk built by this call, plus their
        // accumulated total length in bytes.
        let mut ranges: Vec<MemoryRange> = vec![];
        let mut ranges_size: u64 = 0;

        loop {
            // Invariant: we never collect more than `chunk_size` bytes.
            assert!(ranges_size <= self.chunk_size);

            // Stop once the chunk is full or nothing is left to take.
            if ranges_size == self.chunk_size || self.data.is_empty() {
                break;
            }

            if let Some(range) = self.data.pop() {
                let next_range: MemoryRange = if ranges_size + range.length > self.chunk_size {
                    // The popped range does not fit entirely: split it.
                    // How many bytes we need to put back into the table.
                    let leftover_bytes = ranges_size + range.length - self.chunk_size;
                    assert!(leftover_bytes <= range.length);
                    let returned_bytes = range.length - leftover_bytes;
                    assert!(returned_bytes <= range.length);
                    assert!(leftover_bytes + returned_bytes == range.length);

                    // Push the tail back so a later call hands it out; the
                    // head exactly fills the remainder of this chunk.
                    self.data.push(MemoryRange {
                        gpa: range.gpa + returned_bytes,
                        length: leftover_bytes,
                    });
                    MemoryRange {
                        gpa: range.gpa,
                        length: returned_bytes,
                    }
                } else {
                    // The whole range fits into the current chunk.
                    range
                };

                ranges_size += next_range.length;
                ranges.push(next_range);
            }
        }

        // An empty chunk means the table was exhausted: end the iteration.
        if ranges.is_empty() {
            None
        } else {
            Some(MemoryRangeTable { data: ranges })
        }
    }
}

impl MemoryRangeTable {
pub fn ranges(&self) -> &[MemoryRange] {
&self.data
}

    /// Partitions the table into chunks of at most `chunk_size` bytes.
    ///
    /// The order in which ranges appear across the produced chunks is
    /// unspecified; callers must not rely on it.
    pub fn partition(&self, chunk_size: u64) -> impl Iterator<Item = MemoryRangeTable> {
        MemoryRangeTableIterator::new(self, chunk_size)
    }

pub fn from_bitmap(
bitmap: impl IntoIterator<Item = u64>,
start_addr: u64,
Expand Down Expand Up @@ -316,3 +393,62 @@ impl MemoryRangeTable {
Self { data }
}
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Exercises `MemoryRangeTable::partition` with ranges shorter and
    /// longer than the chunk size, plus a chunk that spans two ranges.
    #[test]
    fn test_memory_range_table() {
        let mut table = MemoryRangeTable::default();
        // Test blocks that are shorter than the chunk size.
        table.push(MemoryRange {
            gpa: 0,
            length: 1 << 10,
        });
        // Test blocks that are longer than the chunk size.
        table.push(MemoryRange {
            gpa: 0x1000,
            length: 3 << 20,
        });
        // And add another block, so we get a chunk that spans two memory
        // ranges.
        table.push(MemoryRange {
            gpa: 4 << 20,
            length: 1 << 20,
        });

        let table = table; // drop mut

        let chunks = table
            .partition(2 << 20)
            .map(|table| table.data)
            .collect::<Vec<_>>();

        // The implementation currently returns the ranges in reverse order. If
        // this test becomes more complex, we can compare everything as sets.
        assert_eq!(
            chunks,
            vec![
                vec![
                    MemoryRange {
                        gpa: 4 << 20,
                        length: 1 << 20
                    },
                    MemoryRange {
                        gpa: 0x1000,
                        length: 1 << 20
                    }
                ],
                vec![MemoryRange {
                    gpa: 0x1000 + (1 << 20),
                    length: 2 << 20
                },],
                vec![MemoryRange {
                    gpa: 0,
                    length: 1 << 10
                }]
            ]
        );
    }
}
19 changes: 16 additions & 3 deletions vmm/src/api/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ pub mod dbus;
pub mod http;

use std::io;
use std::num::NonZeroU32;
use std::sync::mpsc::{RecvError, SendError, Sender, channel};

use micro_http::Body;
Expand Down Expand Up @@ -255,7 +256,7 @@ pub struct VmCoredumpData {
pub destination_url: String,
}

#[derive(Clone, Deserialize, Serialize, Default, Debug)]
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct VmReceiveMigrationData {
/// URL for the reception of migration state
pub receiver_url: String,
Expand All @@ -266,9 +267,13 @@ pub struct VmReceiveMigrationData {
pub net_fds: Option<Vec<RestoredNetConfig>>,
}

#[derive(Clone, Deserialize, Serialize, Default, Debug)]
#[derive(Clone, Deserialize, Serialize, Debug)]
pub struct VmSendMigrationData {
/// URL to migrate the VM to
/// URL to migrate the VM to.
///
/// This is not actually a URL, but we are stuck with the name, because it's
/// part of the HTTP API. The destination is a string, such as
/// tcp:<host>:<port> or unix:/path/to/socket.
pub destination_url: String,
/// Send memory across socket without copying
#[serde(default)]
Expand All @@ -279,13 +284,21 @@ pub struct VmSendMigrationData {
/// Second level migration timeout
#[serde(default)]
pub migration_timeout: u64,
/// The number of parallel connections for migration
#[serde(default = "default_connections")]
pub connections: NonZeroU32,
}

/// Default migration downtime limit, in milliseconds.
///
/// Chosen to match QEMU's default downtime value.
fn default_downtime() -> u64 {
    300
}

/// Default number of migration connections.
///
/// A single connection is used by default to stay backward compatible
/// with receivers that do not support parallel migration.
fn default_connections() -> NonZeroU32 {
    // `NonZeroU32::MIN` is 1; using the constant avoids the runtime
    // `NonZeroU32::new(1).unwrap()` panic path entirely.
    NonZeroU32::MIN
}

pub enum ApiResponsePayload {
/// No data is sent on the channel.
Empty,
Expand Down
4 changes: 4 additions & 0 deletions vmm/src/api/openapi/cloud-hypervisor.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1245,6 +1245,10 @@ components:
- destination_url
type: object
properties:
connections:
type: integer
format: int32
default: 1
destination_url:
type: string
local:
Expand Down
Loading
Loading