Skip to content

Commit 2c3c03d

Browse files
committed
fix(vsock): pass correct index when triggering interrupts
We were confusing queue indexes with event indexes when passing the index of the queue that needed to be triggered after handling events. Fix the logic to pass the correct index. This refactors the code a bit so that each event handler method signals its own queue(s). With MMIO we don't need to signal each queue independently (one signal causes the guest to scan all queues). With PCI, however, we are using MSI-X, so we need to signal each queue independently. Also, change the vsock functional integration tests to also run for PCI-enabled microVMs. Signed-off-by: Babis Chalios <[email protected]>
1 parent c6f69ee commit 2c3c03d

File tree

3 files changed

+53
-47
lines changed

3 files changed

+53
-47
lines changed

src/vmm/src/devices/virtio/vsock/device.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -378,7 +378,7 @@ where
378378
// `TRANSPORT_RESET_EVENT` event we sent during snapshot creation.
379379
if self.is_activated() {
380380
info!("kick vsock {}.", self.id());
381-
self.signal_used_queue(0).unwrap();
381+
self.signal_used_queue(EVQ_INDEX).unwrap();
382382
}
383383
}
384384
}

src/vmm/src/devices/virtio/vsock/event_handler.rs

Lines changed: 40 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -47,81 +47,82 @@ where
4747
const PROCESS_EVQ: u32 = 3;
4848
const PROCESS_NOTIFY_BACKEND: u32 = 4;
4949

50-
pub fn handle_rxq_event(&mut self, evset: EventSet) -> bool {
50+
pub fn handle_rxq_event(&mut self, evset: EventSet) {
5151
if evset != EventSet::IN {
5252
warn!("vsock: rxq unexpected event {:?}", evset);
5353
METRICS.rx_queue_event_fails.inc();
54-
return false;
54+
return;
5555
}
5656

57-
let mut raise_irq = false;
5857
if let Err(err) = self.queue_events[RXQ_INDEX].read() {
5958
error!("Failed to get vsock rx queue event: {:?}", err);
6059
METRICS.rx_queue_event_fails.inc();
6160
} else if self.backend.has_pending_rx() {
62-
// OK to unwrap: Only QueueError::InvalidAvailIdx is returned, and we explicitly
63-
// want to panic on that one.
64-
raise_irq |= self.process_rx().unwrap();
61+
if self.process_rx().unwrap() {
62+
self.signal_used_queue(RXQ_INDEX)
63+
.expect("vsock: Could not trigger device interrupt or RX queue");
64+
}
6565
METRICS.rx_queue_event_count.inc();
6666
}
67-
raise_irq
6867
}
6968

70-
pub fn handle_txq_event(&mut self, evset: EventSet) -> bool {
69+
pub fn handle_txq_event(&mut self, evset: EventSet) {
7170
if evset != EventSet::IN {
7271
warn!("vsock: txq unexpected event {:?}", evset);
7372
METRICS.tx_queue_event_fails.inc();
74-
return false;
73+
return;
7574
}
7675

77-
let mut raise_irq = false;
7876
if let Err(err) = self.queue_events[TXQ_INDEX].read() {
7977
error!("Failed to get vsock tx queue event: {:?}", err);
8078
METRICS.tx_queue_event_fails.inc();
8179
} else {
82-
// OK to unwrap: Only QueueError::InvalidAvailIdx is returned, and we explicitly
83-
// want to panic on that one.
84-
raise_irq |= self.process_tx().unwrap();
80+
if self.process_tx().unwrap() {
81+
self.signal_used_queue(TXQ_INDEX)
82+
.expect("vsock: Could not trigger device interrupt or TX queue");
83+
}
8584
METRICS.tx_queue_event_count.inc();
8685
// The backend may have queued up responses to the packets we sent during
8786
// TX queue processing. If that happened, we need to fetch those responses
8887
// and place them into RX buffers.
89-
if self.backend.has_pending_rx() {
90-
raise_irq |= self.process_rx().unwrap();
88+
if self.backend.has_pending_rx() && self.process_rx().unwrap() {
89+
self.signal_used_queue(RXQ_INDEX)
90+
.expect("vsock: Could not trigger device interrupt or RX queue");
9191
}
9292
}
93-
raise_irq
9493
}
9594

96-
pub fn handle_evq_event(&mut self, evset: EventSet) -> bool {
95+
pub fn handle_evq_event(&mut self, evset: EventSet) {
9796
if evset != EventSet::IN {
9897
warn!("vsock: evq unexpected event {:?}", evset);
9998
METRICS.ev_queue_event_fails.inc();
100-
return false;
99+
return;
101100
}
102101

103102
if let Err(err) = self.queue_events[EVQ_INDEX].read() {
104103
error!("Failed to consume vsock evq event: {:?}", err);
105104
METRICS.ev_queue_event_fails.inc();
106105
}
107-
false
108106
}
109107

110108
/// Notify backend of new events.
111-
pub fn notify_backend(&mut self, evset: EventSet) -> Result<bool, InvalidAvailIdx> {
109+
pub fn notify_backend(&mut self, evset: EventSet) -> Result<(), InvalidAvailIdx> {
112110
self.backend.notify(evset);
113111
// After the backend has been kicked, it might've freed up some resources, so we
114112
// can attempt to send it more data to process.
115113
// In particular, if `self.backend.send_pkt()` halted the TX queue processing (by
116114
// returning an error) at some point in the past, now is the time to try walking the
117115
// TX queue again.
118-
// OK to unwrap: Only QueueError::InvalidAvailIdx is returned, and we explicitly
119-
// want to panic on that one.
120-
let mut raise_irq = self.process_tx()?;
121-
if self.backend.has_pending_rx() {
122-
raise_irq |= self.process_rx()?;
116+
if self.process_tx()? {
117+
self.signal_used_queue(TXQ_INDEX)
118+
.expect("vsock: Could not trigger device interrupt or TX queue");
119+
}
120+
if self.backend.has_pending_rx() && self.process_rx()? {
121+
self.signal_used_queue(RXQ_INDEX)
122+
.expect("vsock: Could not trigger device interrupt or RX queue");
123123
}
124-
Ok(raise_irq)
124+
125+
Ok(())
125126
}
126127

127128
fn register_runtime_events(&self, ops: &mut EventOps) {
@@ -189,19 +190,14 @@ where
189190
let evset = event.event_set();
190191

191192
if self.is_activated() {
192-
let mut raise_irq = false;
193193
match source {
194194
Self::PROCESS_ACTIVATE => self.handle_activate_event(ops),
195-
Self::PROCESS_RXQ => raise_irq = self.handle_rxq_event(evset),
196-
Self::PROCESS_TXQ => raise_irq = self.handle_txq_event(evset),
197-
Self::PROCESS_EVQ => raise_irq = self.handle_evq_event(evset),
198-
Self::PROCESS_NOTIFY_BACKEND => raise_irq = self.notify_backend(evset).unwrap(),
195+
Self::PROCESS_RXQ => self.handle_rxq_event(evset),
196+
Self::PROCESS_TXQ => self.handle_txq_event(evset),
197+
Self::PROCESS_EVQ => self.handle_evq_event(evset),
198+
Self::PROCESS_NOTIFY_BACKEND => self.notify_backend(evset).unwrap(),
199199
_ => warn!("Unexpected vsock event received: {:?}", source),
200200
};
201-
if raise_irq {
202-
self.signal_used_queue(source as usize)
203-
.expect("vsock: Could not trigger device interrupt");
204-
}
205201
} else {
206202
warn!(
207203
"Vsock: The device is not yet activated. Spurious event received: {:?}",
@@ -309,7 +305,9 @@ mod tests {
309305
let mut ctx = test_ctx.create_event_handler_context();
310306
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
311307

312-
assert!(!ctx.device.handle_txq_event(EventSet::IN));
308+
let metric_before = METRICS.tx_queue_event_fails.count();
309+
ctx.device.handle_txq_event(EventSet::IN);
310+
assert_eq!(metric_before + 1, METRICS.tx_queue_event_fails.count());
313311
}
314312
}
315313

@@ -370,7 +368,9 @@ mod tests {
370368
let mut ctx = test_ctx.create_event_handler_context();
371369
ctx.mock_activate(test_ctx.mem.clone(), test_ctx.interrupt.clone());
372370
ctx.device.backend.set_pending_rx(false);
373-
assert!(!ctx.device.handle_rxq_event(EventSet::IN));
371+
let metric_before = METRICS.rx_queue_event_fails.count();
372+
ctx.device.handle_rxq_event(EventSet::IN);
373+
assert_eq!(metric_before + 1, METRICS.rx_queue_event_fails.count());
374374
}
375375
}
376376

@@ -381,7 +381,9 @@ mod tests {
381381
let test_ctx = TestContext::new();
382382
let mut ctx = test_ctx.create_event_handler_context();
383383
ctx.device.backend.set_pending_rx(false);
384-
assert!(!ctx.device.handle_evq_event(EventSet::IN));
384+
let metric_before = METRICS.ev_queue_event_fails.count();
385+
ctx.device.handle_evq_event(EventSet::IN);
386+
assert_eq!(metric_before + 1, METRICS.ev_queue_event_fails.count());
385387
}
386388
}
387389

tests/integration_tests/functional/test_vsock.py

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -37,15 +37,15 @@
3737
TEST_WORKER_COUNT = 10
3838

3939

40-
def test_vsock(uvm_plain_any, bin_vsock_path, test_fc_session_root_path):
40+
def test_vsock(uvm_plain_any, pci_enabled, bin_vsock_path, test_fc_session_root_path):
4141
"""
4242
Test guest and host vsock initiated connections.
4343
4444
Check the module docstring for details on the setup.
4545
"""
4646

4747
vm = uvm_plain_any
48-
vm.spawn()
48+
vm.spawn(pci=pci_enabled)
4949

5050
vm.basic_config()
5151
vm.add_net_iface()
@@ -102,12 +102,12 @@ def negative_test_host_connections(vm, blob_path, blob_hash):
102102
validate_fc_metrics(metrics)
103103

104104

105-
def test_vsock_epipe(uvm_plain, bin_vsock_path, test_fc_session_root_path):
105+
def test_vsock_epipe(uvm_plain, pci_enabled, bin_vsock_path, test_fc_session_root_path):
106106
"""
107107
Vsock negative test to validate SIGPIPE/EPIPE handling.
108108
"""
109109
vm = uvm_plain
110-
vm.spawn()
110+
vm.spawn(pci=pci_enabled)
111111
vm.basic_config()
112112
vm.add_net_iface()
113113
vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
@@ -129,7 +129,7 @@ def test_vsock_epipe(uvm_plain, bin_vsock_path, test_fc_session_root_path):
129129

130130

131131
def test_vsock_transport_reset_h2g(
132-
uvm_nano, microvm_factory, bin_vsock_path, test_fc_session_root_path
132+
uvm_plain, pci_enabled, microvm_factory, bin_vsock_path, test_fc_session_root_path
133133
):
134134
"""
135135
Vsock transport reset test.
@@ -146,7 +146,9 @@ def test_vsock_transport_reset_h2g(
146146
6. Close VM -> Load VM from Snapshot -> check that vsock
147147
device is still working.
148148
"""
149-
test_vm = uvm_nano
149+
test_vm = uvm_plain
150+
test_vm.spawn(pci=pci_enabled)
151+
test_vm.basic_config(vcpu_count=2, mem_size_mib=256)
150152
test_vm.add_net_iface()
151153
test_vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
152154
test_vm.start()
@@ -213,11 +215,13 @@ def test_vsock_transport_reset_h2g(
213215
validate_fc_metrics(metrics)
214216

215217

216-
def test_vsock_transport_reset_g2h(uvm_nano, microvm_factory):
218+
def test_vsock_transport_reset_g2h(uvm_plain, pci_enabled, microvm_factory):
217219
"""
218220
Vsock transport reset test.
219221
"""
220-
test_vm = uvm_nano
222+
test_vm = uvm_plain
223+
test_vm.spawn(pci=pci_enabled)
224+
test_vm.basic_config(vcpu_count=2, mem_size_mib=256)
221225
test_vm.add_net_iface()
222226
test_vm.api.vsock.put(vsock_id="vsock0", guest_cid=3, uds_path=f"/{VSOCK_UDS_PATH}")
223227
test_vm.start()

0 commit comments

Comments (0)