// cache.rs — hybrid in-memory/on-disk asset cache (forked from Y2Z/monolith)
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::Path;
use redb::{Database, Error, TableDefinition};
/// Per-asset bookkeeping entry: MIME metadata plus, for small assets (or when
/// no on-disk database is in use), the asset's bytes themselves.
pub struct CacheMetadataItem {
    data: Option<Vec<u8>>, // Asset's blob; used for caching small files or if on-disk database isn't utilized
    media_type: Option<String>, // MIME-type, things like "text/plain", "image/png"...
    charset: Option<String>, // "UTF-8", "UTF-16"...
}
// #[derive(Debug)] // NOTE(review): presumably disabled because `redb::Database` lacks a Debug impl — confirm
/// Asset cache that keeps small blobs in RAM and spills larger ones into an
/// optional on-disk redb database.
pub struct Cache {
    min_file_size: usize, // Only use database for assets larger than this size (in bytes), otherwise keep them in RAM
    metadata: HashMap<String, CacheMetadataItem>, // Dictionary of metadata (and occasionally data [mostly for very small files])
    db: Option<Database>, // Pointer to database instance; None if not yet initialized or if failed to initialize
    db_ok: Option<bool>, // None by default, Some(true) if was able to initialize database, Some(false) if an error occurred
    db_file_path: Option<String>, // Filesystem path to file used for storing database
}
const FILE_WRITE_BUF_LEN: usize = 1024 * 100; // On-disk cache file write buffer size (in bytes)
// Single key/value table (asset key -> blob bytes) inside the redb database
const TABLE: TableDefinition<&str, &[u8]> = TableDefinition::new("_");
impl Cache {
pub fn new(min_file_size: usize, db_file_path: Option<String>) -> Cache {
let mut cache = Cache {
min_file_size,
metadata: HashMap::new(),
db: None,
db_ok: None,
db_file_path: db_file_path.clone(),
};
if db_file_path.is_some() {
// Attempt to initialize on-disk database
match Database::create(Path::new(&db_file_path.unwrap())) {
Ok(db) => {
cache.db = Some(db);
cache.db_ok = Some(true);
cache
}
Err(..) => {
cache.db_ok = Some(false);
cache
}
}
} else {
cache.db_ok = Some(false);
cache
}
}
pub fn set(&mut self, key: &str, data: &Vec<u8>, media_type: String, charset: String) {
let mut cache_metadata_item: CacheMetadataItem = CacheMetadataItem {
data: if self.db_ok.is_some() && self.db_ok.unwrap() {
None
} else {
Some(data.to_owned().to_vec())
},
media_type: Some(media_type.to_owned()),
charset: Some(charset),
};
if (self.db_ok.is_none() || !self.db_ok.unwrap()) || data.len() <= self.min_file_size {
cache_metadata_item.data = Some(data.to_owned().to_vec());
} else {
match self.db.as_ref().unwrap().begin_write() {
Ok(write_txn) => {
{
let mut table = write_txn.open_table(TABLE).unwrap();
table.insert(key, &*data.to_owned()).unwrap();
}
write_txn.commit().unwrap();
}
Err(..) => {
// Fall back to caching everything in memory
cache_metadata_item.data = Some(data.to_owned().to_vec());
}
}
}
self.metadata
.insert((*key).to_string(), cache_metadata_item);
}
pub fn get(&self, key: &str) -> Result<(Vec<u8>, String, String), Error> {
if self.metadata.contains_key(key) {
let metadata_item = self.metadata.get(key).unwrap();
if metadata_item.data.is_some() {
return Ok((
metadata_item.data.as_ref().unwrap().to_vec(),
metadata_item.media_type.as_ref().expect("").to_string(),
metadata_item.charset.as_ref().expect("").to_string(),
));
} else if self.db_ok.is_some() && self.db_ok.unwrap() {
let read_txn = self.db.as_ref().unwrap().begin_read()?;
let table = read_txn.open_table(TABLE)?;
let data = table.get(key)?;
let bytes = data.unwrap();
return Ok((
bytes.value().to_vec(),
metadata_item.media_type.as_ref().expect("").to_string(),
metadata_item.charset.as_ref().expect("").to_string(),
));
}
}
Err(Error::TransactionInProgress) // XXX
}
pub fn contains_key(&self, key: &str) -> bool {
self.metadata.contains_key(key)
}
pub fn destroy_database_file(&mut self) {
if self.db_ok.is_none() || !self.db_ok.unwrap() {
return;
}
// Destroy database instance (prevents writes into file)
self.db = None;
self.db_ok = Some(false);
// Wipe database file
if let Some(db_file_path) = self.db_file_path.to_owned() {
// Overwrite file with zeroes
if let Ok(temp_file) = File::options()
.read(true)
.write(true)
.open(db_file_path.clone())
{
let mut buffer = [0; FILE_WRITE_BUF_LEN];
let mut remaining_size: usize = temp_file.metadata().unwrap().len() as usize;
let mut writer = BufWriter::new(temp_file);
while remaining_size > 0 {
let bytes_to_write: usize = if remaining_size < FILE_WRITE_BUF_LEN {
remaining_size
} else {
FILE_WRITE_BUF_LEN
};
let buffer = &mut buffer[..bytes_to_write];
writer.write(buffer).unwrap();
remaining_size -= bytes_to_write;
}
}
}
}
}