Skip to content

Commit 18f5229

Browse files
committed
more dbsanity
1 parent 3cf752f commit 18f5229

File tree

1 file changed

+14
-0
lines changed

1 file changed

+14
-0
lines changed

paranoia/dbsanity.go

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,9 @@ var queriesThatShouldHaveNoRows = []string{
3333
"SELECT blob_id FROM blob_entries WHERE final_size = 0 AND rowid != (SELECT rowid FROM blob_entries WHERE final_size = 0 LIMIT 1)",
3434
// ensure we only have one zero-byte sizes entry
3535
"SELECT hash FROM sizes WHERE size = 0 AND rowid != (SELECT rowid FROM sizes WHERE size = 0 LIMIT 1)",
36+
// zero-byte entries should have the expected hash (SHA-256 of the empty input)
37+
"SELECT hash FROM sizes WHERE size = 0 AND hash != X'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'",
38+
"SELECT blob_id FROM blob_entries WHERE final_size = 0 AND hash != X'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'",
3639

3740
// make sure that there are no two blob_entries at the same blob_id and offset, EXCEPT for the one entry that's allowed to be zero-byte
3841
`
@@ -93,6 +96,8 @@ var queriesThatShouldHaveNoRows = []string{
9396
"SELECT hash FROM files WHERE start > strftime('%s', 'now') + 60",
9497
"SELECT hash FROM files WHERE end > strftime('%s', 'now') + 60",
9598
"SELECT blob_id FROM blob_storage WHERE timestamp > strftime('%s', 'now') + 60",
99+
"SELECT password FROM shares WHERE shared_at > strftime('%s','now') + 60",
100+
"SELECT password FROM shares WHERE revoked_at > strftime('%s','now') + 60",
96101

97102
// prior to the gb epoch (first commit)
98103
"SELECT hash FROM files WHERE start < 1572924988",
@@ -165,6 +170,9 @@ var queriesThatShouldHaveNoRows = []string{
165170
// checksum is de facto required
166171
"SELECT blob_id FROM blob_storage WHERE checksum IS NULL",
167172

173+
// path ends with the hex blob ID (except for GDrive, where the path is an opaque ID)
174+
"SELECT blob_id FROM blob_storage INNER JOIN storage USING (storage_id) WHERE (LENGTH(path) < 64 OR LOWER(SUBSTR(path, -64)) != LOWER(HEX(blob_id))) AND type != 'GDrive'",
175+
168176
// if the same blob has been uploaded to two storages of the same type (such as S3), make sure that the path and checksum matches
169177
// this is a good sanity check after doing a `gb replicate`!
170178
`
@@ -222,6 +230,12 @@ var queriesThatShouldHaveNoRows = []string{
222230
// older blobs with 1 encryption key should not be shared
223231
"SELECT blob_id FROM blob_entries WHERE blob_id IN (SELECT blob_id FROM share_entries) GROUP BY blob_id HAVING COUNT(DISTINCT encryption_key) = 1 AND COUNT(*) > 1",
224232

233+
// ensure ordinals are contiguous, starting at 0
234+
"SELECT password FROM share_entries GROUP BY password HAVING MIN(ordinal) != 0 OR MAX(ordinal) != COUNT(*) - 1",
235+
236+
// all shares have at least one entry
237+
"SELECT password FROM shares LEFT OUTER JOIN share_entries USING (password) GROUP BY password HAVING COUNT(share_entries.rowid) = 0",
238+
225239
// these next two could totally be rewritten as one query with a WHERE giant_condition_1 OR giant_condition_2
226240
// but it's super slow since it can't efficiently use indexes then
227241
// these two are SUPER fast as-is, no need to combine

0 commit comments

Comments
 (0)