|
1 | | -use alloc::vec::Vec; |
2 | | -use easy_fs::BlockDevice; |
3 | | -use lazy_static::lazy_static; |
4 | | -use virtio_drivers::{Hal, VirtIOBlk, VirtIOHeader}; |
| 1 | +use super::BlockDevice; |
| 2 | +use crate::drivers::bus::virtio::VirtioHal; |
| 3 | +use crate::sync::{Condvar, UPIntrFreeCell}; |
| 4 | +use crate::task::schedule; |
| 5 | +use crate::DEV_NON_BLOCKING_ACCESS; |
| 6 | +use alloc::collections::BTreeMap; |
| 7 | +use virtio_drivers::{BlkResp, RespStatus, VirtIOBlk, VirtIOHeader}; |
5 | 8 |
|
6 | | -use crate::{ |
7 | | - mm::{ |
8 | | - frame_alloc, frame_dealloc, kernel_token, FrameTracker, PageTable, PhysAddr, PhysPageNum, |
9 | | - StepByOne, |
10 | | - }, |
11 | | - sync::UPSafeCell, |
12 | | -}; |
// Base physical address of the virtio-blk device's MMIO register window.
// NOTE(review): presumably the virtio slot assigned to the block device in
// this board's memory map / device tree — confirm against the platform config.
// `allow(unused)` is kept because some build configurations may not reference it.
#[allow(unused)]
const VIRTIO0: usize = 0x10008000;
13 | 11 |
|
14 | | -const VIRTIO0: usize = 0x10001000; |
15 | | - |
16 | | -pub struct VirtIOBlock(UPSafeCell<VirtIOBlk<'static, VirtioHal>>); |
17 | | - |
18 | | -impl VirtIOBlock { |
19 | | - pub fn new() -> Self { |
20 | | - Self(unsafe { |
21 | | - UPSafeCell::new(VirtIOBlk::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap()) |
22 | | - }) |
23 | | - } |
/// A virtio block device supporting both a synchronous (polled) access path
/// and a non-blocking, interrupt-driven path selected at runtime via
/// `DEV_NON_BLOCKING_ACCESS`.
pub struct VirtIOBlock {
    // Underlying virtio-blk driver state, wrapped in an interrupt-free cell
    // because it is touched from both task context and the IRQ handler.
    virtio_blk: UPIntrFreeCell<VirtIOBlk<'static, VirtioHal>>,
    // One condition variable per virtqueue request token: a task submitting a
    // non-blocking request sleeps on the condvar for its token, and the IRQ
    // handler signals that same token's condvar on completion.
    condvars: BTreeMap<u16, Condvar>,
}
| 16 | + |
impl BlockDevice for VirtIOBlock {
    /// Read one block (`block_id`) into `buf`.
    ///
    /// When `DEV_NON_BLOCKING_ACCESS` is set, the request is submitted
    /// asynchronously and the current task sleeps on the condvar keyed by the
    /// request's virtqueue token until `handle_irq` signals completion.
    /// Otherwise the driver's synchronous `read_block` is used, which
    /// completes the request before returning.
    ///
    /// Panics if the device reports a non-`Ok` status.
    fn read_block(&self, block_id: usize, buf: &mut [u8]) {
        let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
        if nb {
            let mut resp = BlkResp::default();
            // Submit the request AND enqueue this task on the token's condvar
            // while still inside the device session, so the completion IRQ
            // cannot race ahead of the wait registration. `wait_no_sched`
            // only queues the task and returns the context pointer; the
            // actual switch happens below, after the session (and,
            // presumably, the interrupt-free cell) is released.
            let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
                // SAFETY: `buf` and `resp` outlive the in-flight request —
                // this function does not return until the IRQ wakes us.
                let token = unsafe { blk.read_block_nb(block_id, buf, &mut resp).unwrap() };
                self.condvars.get(&token).unwrap().wait_no_sched()
            });
            // Give up the CPU; execution resumes here once signalled.
            schedule(task_cx_ptr);
            // By now the device has filled in `resp` for our token.
            assert_eq!(
                resp.status(),
                RespStatus::Ok,
                "Error when reading VirtIOBlk"
            );
        } else {
            // Synchronous path: the driver call drives the request to
            // completion before returning.
            self.virtio_blk
                .exclusive_access()
                .read_block(block_id, buf)
                .expect("Error when reading VirtIOBlk");
        }
    }

    /// Write one block (`block_id`) from `buf`.
    ///
    /// Mirrors `read_block`'s two access modes; see that method for the
    /// ordering rationale on the non-blocking path.
    ///
    /// Panics if the device reports a non-`Ok` status.
    fn write_block(&self, block_id: usize, buf: &[u8]) {
        let nb = *DEV_NON_BLOCKING_ACCESS.exclusive_access();
        if nb {
            let mut resp = BlkResp::default();
            // Same submit-then-wait-registration dance as read_block: both
            // happen under the device session so the IRQ cannot signal a
            // condvar nobody is waiting on yet.
            let task_cx_ptr = self.virtio_blk.exclusive_session(|blk| {
                // SAFETY: `buf` and `resp` outlive the in-flight request —
                // this function does not return until the IRQ wakes us.
                let token = unsafe { blk.write_block_nb(block_id, buf, &mut resp).unwrap() };
                self.condvars.get(&token).unwrap().wait_no_sched()
            });
            schedule(task_cx_ptr);
            assert_eq!(
                resp.status(),
                RespStatus::Ok,
                "Error when writing VirtIOBlk"
            );
        } else {
            // Synchronous path: the driver call drives the request to
            // completion before returning.
            self.virtio_blk
                .exclusive_access()
                .write_block(block_id, buf)
                .expect("Error when writing VirtIOBlk");
        }
    }

    /// Interrupt handler: drain every completed request token from the
    /// device's used ring and wake the task sleeping on that token's condvar.
    fn handle_irq(&self) {
        self.virtio_blk.exclusive_session(|blk| {
            // Loop until pop_used reports no more completed requests.
            while let Ok(token) = blk.pop_used() {
                self.condvars.get(&token).unwrap().signal();
            }
        });
    }
}
61 | 70 |
|
62 | | - fn dma_dealloc(paddr: virtio_drivers::PhysAddr, pages: usize) -> i32 { |
63 | | - let pa = PhysAddr::from(paddr); |
64 | | - let mut ppn_base: PhysPageNum = pa.into(); |
65 | | - for _ in 0..pages { |
66 | | - // make sure consecutive! |
67 | | - frame_dealloc(ppn_base); |
68 | | - ppn_base.step(); |
| 71 | +impl VirtIOBlock { |
| 72 | + pub fn new() -> Self { |
| 73 | + let virtio_blk = unsafe { |
| 74 | + UPIntrFreeCell::new( |
| 75 | + VirtIOBlk::<VirtioHal>::new(&mut *(VIRTIO0 as *mut VirtIOHeader)).unwrap(), |
| 76 | + ) |
| 77 | + }; |
| 78 | + let mut condvars = BTreeMap::new(); |
| 79 | + let channels = virtio_blk.exclusive_access().virt_queue_size(); |
| 80 | + for i in 0..channels { |
| 81 | + let condvar = Condvar::new(); |
| 82 | + condvars.insert(i, condvar); |
| 83 | + } |
| 84 | + Self { |
| 85 | + virtio_blk, |
| 86 | + condvars, |
69 | 87 | } |
70 | | - 0 |
71 | | - } |
72 | | - |
73 | | - fn phys_to_virt(paddr: virtio_drivers::PhysAddr) -> virtio_drivers::VirtAddr { |
74 | | - paddr |
75 | | - } |
76 | | - |
77 | | - fn virt_to_phys(vaddr: virtio_drivers::VirtAddr) -> virtio_drivers::PhysAddr { |
78 | | - PageTable::from_token(kernel_token()) |
79 | | - .translate_va(vaddr.into()) |
80 | | - .unwrap() |
81 | | - .0 |
82 | 88 | } |
83 | 89 | } |
0 commit comments