|
1 | 1 | use super::page::PhysicalPage; |
2 | 2 | use super::record::{Record, RecordAddress}; |
| 3 | +use serde::{Deserialize, Serialize}; |
| 4 | +use std::fs::File; |
| 5 | +use std::io::{BufReader, BufWriter, Write}; |
3 | 6 | use std::sync::{Arc, Mutex}; |
4 | 7 |
|
/// Serializable on-disk snapshot of a `BaseContainer`.
/// Written by `BaseContainer::save_state` and turned back into a live
/// container by `BaseContainerMetadata::load_state`.
#[derive(Clone, Default, Deserialize, Serialize, Debug)]
pub struct BaseContainerMetadata {
    // This takes the place of the actual pages in the disk version.
    // Pages are persisted separately (presumably one file per index —
    // see `PhysicalPage::load_state(i as i64)`), so the count alone is
    // enough to load all of the pages back.
    num_pages: usize,

    // Number of columns in the container.
    num_cols: i64,

    // Indices of the bookkeeping columns restored onto the container.
    rid_column: i64,
    schema_encoding_column: i64,
    indirection_column: i64,
}
| 20 | + |
| 21 | +impl BaseContainerMetadata { |
| 22 | + pub fn load_state(&self) -> BaseContainer { |
| 23 | + let mut base = BaseContainer::new(self.num_cols); |
| 24 | + |
| 25 | + for i in 0..self.num_pages { |
| 26 | + // Load the page |
| 27 | + let p = PhysicalPage::load_state(i as i64); |
| 28 | + // Put the page into an Arc Mutex |
| 29 | + let m = Arc::new(Mutex::new(p)); |
| 30 | + |
| 31 | + // Add the physical page |
| 32 | + base.physical_pages.push(m); |
| 33 | + } |
| 34 | + |
| 35 | + base.rid_column = self.rid_column; |
| 36 | + base.schema_encoding_column = self.schema_encoding_column; |
| 37 | + base.indirection_column = self.indirection_column; |
| 38 | + |
| 39 | + return base; |
| 40 | + } |
| 41 | +} |
| 42 | + |
5 | 43 | #[derive(Clone, Default)] |
6 | 44 | pub struct BaseContainer { |
7 | 45 | // pages |
@@ -181,6 +219,71 @@ impl BaseContainer { |
181 | 219 |
|
182 | 220 | values |
183 | 221 | } |
| 222 | + |
| 223 | + pub fn save_state(&self) { |
| 224 | + let base_meta = self.get_metadata(); |
| 225 | + let hardcoded_filename = "./base_container.data"; |
| 226 | + |
| 227 | + let mut index = 0; |
| 228 | + // The Rust compiler suggested that I clone here but it's definitely way better to not copy |
| 229 | + // all of the data and just use a reference |
| 230 | + for p in &self.physical_pages { |
| 231 | + // Save the page |
| 232 | + let m = p.lock().unwrap(); |
| 233 | + m.save_state(index); |
| 234 | + index += 1; |
| 235 | + } |
| 236 | + |
| 237 | + let base_bytes: Vec<u8> = bincode::serialize(&base_meta).expect("Should serialize."); |
| 238 | + |
| 239 | + let mut file = BufWriter::new(File::create(hardcoded_filename).expect("Should open file.")); |
| 240 | + file.write_all(&base_bytes).expect("Should serialize."); |
| 241 | + } |
| 242 | + |
| 243 | + pub fn get_metadata(&self) -> BaseContainerMetadata { |
| 244 | + BaseContainerMetadata { |
| 245 | + num_pages: self.physical_pages.len(), |
| 246 | + num_cols: self.num_cols, |
| 247 | + rid_column: self.rid_column, |
| 248 | + schema_encoding_column: self.schema_encoding_column, |
| 249 | + indirection_column: self.indirection_column, |
| 250 | + } |
| 251 | + } |
| 252 | +} |
| 253 | + |
/// Serializable on-disk snapshot of a `TailContainer`.
/// Written by `TailContainer::save_state` and turned back into a live
/// container by `TailContainerMetadata::load_state`.
#[derive(Clone, Default, Deserialize, Serialize, Debug)]
pub struct TailContainerMetadata {
    // This takes the place of the actual pages in the disk version.
    // Pages are persisted separately (presumably one file per index —
    // see `PhysicalPage::load_state(i as i64)`), so the count alone is
    // enough to load all of the pages back.
    num_pages: usize,

    // Number of columns in the container.
    num_cols: i64,

    // Indices of the bookkeeping columns restored onto the container.
    rid_column: i64,
    schema_encoding_column: i64,
    indirection_column: i64,
}
| 266 | + |
| 267 | +impl TailContainerMetadata { |
| 268 | + pub fn load_state(&self) -> TailContainer { |
| 269 | + let mut tail = TailContainer::new(self.num_cols); |
| 270 | + |
| 271 | + for i in 0..self.num_pages { |
| 272 | + // Load the page |
| 273 | + let p = PhysicalPage::load_state(i as i64); |
| 274 | + // Put the page into an Arc Mutex |
| 275 | + let m = Arc::new(Mutex::new(p)); |
| 276 | + |
| 277 | + // Add the physical page |
| 278 | + tail.physical_pages.push(m); |
| 279 | + } |
| 280 | + |
| 281 | + tail.rid_column = self.rid_column; |
| 282 | + tail.schema_encoding_column = self.schema_encoding_column; |
| 283 | + tail.indirection_column = self.indirection_column; |
| 284 | + |
| 285 | + return tail; |
| 286 | + } |
184 | 287 | } |
185 | 288 |
|
186 | 289 | #[derive(Clone, Default)] |
@@ -357,6 +460,36 @@ impl TailContainer { |
357 | 460 |
|
358 | 461 | values |
359 | 462 | } |
| 463 | + |
| 464 | + pub fn save_state(&self) { |
| 465 | + let tail_meta = self.get_metadata(); |
| 466 | + let hardcoded_filename = "./tail_container.data"; |
| 467 | + |
| 468 | + let mut index = 0; |
| 469 | + // The Rust compiler suggested that I clone here but it's definitely way better to not copy |
| 470 | + // all of the data and just use a reference |
| 471 | + for p in &self.physical_pages { |
| 472 | + // Save the page |
| 473 | + let m = p.lock().unwrap(); |
| 474 | + m.save_state(index); |
| 475 | + index += 1; |
| 476 | + } |
| 477 | + |
| 478 | + let tail_bytes: Vec<u8> = bincode::serialize(&tail_meta).expect("Should serialize."); |
| 479 | + |
| 480 | + let mut file = BufWriter::new(File::create(hardcoded_filename).expect("Should open file.")); |
| 481 | + file.write_all(&tail_bytes).expect("Should serialize."); |
| 482 | + } |
| 483 | + |
| 484 | + pub fn get_metadata(&self) -> TailContainerMetadata { |
| 485 | + TailContainerMetadata { |
| 486 | + num_pages: self.physical_pages.len(), |
| 487 | + num_cols: self.num_cols, |
| 488 | + rid_column: self.rid_column, |
| 489 | + schema_encoding_column: self.schema_encoding_column, |
| 490 | + indirection_column: self.indirection_column, |
| 491 | + } |
| 492 | + } |
360 | 493 | } |
361 | 494 |
|
362 | 495 | #[cfg(test)] |
|
0 commit comments