@@ -4,10 +4,12 @@ A subvolume
4
4
5
5
import { type Filesystem , DEFAULT_SUBVOLUME_SIZE } from "./filesystem" ;
6
6
import refCache from "@cocalc/util/refcache" ;
7
- import { exists , listdir , mkdirp , sudo } from "./util" ;
7
+ import { readFile , writeFile , unlink } from "node:fs/promises" ;
8
+ import { exists , isdir , listdir , mkdirp , sudo } from "./util" ;
8
9
import { join , normalize } from "path" ;
9
10
import { updateRollingSnapshots , type SnapshotCounts } from "./snapshots" ;
10
- //import { human_readable_size } from "@cocalc/util/misc";
11
+ import { DirectoryListingEntry } from "@cocalc/util/types" ;
12
+ import getListing from "@cocalc/backend/get-listing" ;
11
13
import getLogger from "@cocalc/backend/logger" ;
12
14
13
15
export const SNAPSHOTS = ".snapshots" ;
@@ -23,10 +25,11 @@ interface Options {
23
25
}
24
26
25
27
export class Subvolume {
26
- private filesystem : Filesystem ;
27
28
public readonly name : string ;
28
- public readonly path : string ;
29
- public readonly snapshotsDir : string ;
29
+
30
+ private filesystem : Filesystem ;
31
+ private readonly path : string ;
32
+ private readonly snapshotsDir : string ;
30
33
31
34
constructor ( { filesystem, name } : Options ) {
32
35
this . filesystem = filesystem ;
@@ -70,6 +73,72 @@ export class Subvolume {
70
73
} ) ;
71
74
} ;
72
75
76
+ // this should provide a path that is guaranteed to be
77
+ // inside this.path on the filesystem or throw error
78
+ // [ ] TODO: not sure if the code here is sufficient!!
79
+ private normalize = ( path : string ) => {
80
+ return join ( this . path , normalize ( path ) ) ;
81
+ } ;
82
+
83
+ /////////////
84
+ // Files
85
+ /////////////
86
+ ls = async (
87
+ path : string ,
88
+ { hidden, limit } : { hidden ?: boolean ; limit ?: number } = { } ,
89
+ ) : Promise < DirectoryListingEntry [ ] > => {
90
+ path = normalize ( path ) ;
91
+ return await getListing ( this . normalize ( path ) , hidden , {
92
+ limit,
93
+ home : "/" ,
94
+ } ) ;
95
+ } ;
96
+
97
+ readFile = async ( path : string , encoding ?: any ) : Promise < string | Buffer > => {
98
+ path = normalize ( path ) ;
99
+ return await readFile ( this . normalize ( path ) , encoding ) ;
100
+ } ;
101
+
102
+ writeFile = async ( path : string , data : string | Buffer ) => {
103
+ path = normalize ( path ) ;
104
+ return await writeFile ( this . normalize ( path ) , data ) ;
105
+ } ;
106
+
107
+ unlink = async ( path : string ) => {
108
+ await unlink ( this . normalize ( path ) ) ;
109
+ } ;
110
+
111
+ rsync = async ( {
112
+ src,
113
+ target,
114
+ args = [ "-axH" ] ,
115
+ timeout = 5 * 60 * 1000 ,
116
+ } : {
117
+ src : string ;
118
+ target : string ;
119
+ args ?: string [ ] ;
120
+ timeout ?: number ;
121
+ } ) : Promise < { stdout : string ; stderr : string ; exit_code : number } > => {
122
+ let srcPath = this . normalize ( src ) ;
123
+ let targetPath = this . normalize ( target ) ;
124
+ if ( ! srcPath . endsWith ( "/" ) && ( await isdir ( srcPath ) ) ) {
125
+ srcPath += "/" ;
126
+ if ( ! targetPath . endsWith ( "/" ) ) {
127
+ targetPath += "/" ;
128
+ }
129
+ }
130
+ return await sudo ( {
131
+ command : "rsync" ,
132
+ args : [ ...args , srcPath , targetPath ] ,
133
+ err_on_exit : false ,
134
+ timeout : timeout / 1000 ,
135
+ } ) ;
136
+ } ;
137
+
138
+ /////////////
139
+ // QUOTA
140
+ /////////////
141
+
73
142
private quotaInfo = async ( ) => {
74
143
const { stdout } = await sudo ( {
75
144
verbose : false ,
@@ -150,6 +219,13 @@ export class Subvolume {
150
219
return { used, free, size } ;
151
220
} ;
152
221
222
+ /////////////
223
+ // SNAPSHOTS
224
+ /////////////
225
+ snapshotPath = ( snapshot : string , ...segments ) => {
226
+ return join ( SNAPSHOTS , snapshot , ...segments ) ;
227
+ } ;
228
+
153
229
private makeSnapshotsDir = async ( ) => {
154
230
if ( await exists ( this . snapshotsDir ) ) {
155
231
return ;
@@ -233,6 +309,13 @@ export class Subvolume {
233
309
return snapGen < pathGen ;
234
310
} ;
235
311
312
+ /////////////
313
+ // BACKUPS
314
+ // There is a single global dedup'd backup archive stored in the btrfs filesystem.
315
+ // Obviously, admins should rsync this regularly to a separate location as a genuine
316
+ // backup strategy.
317
+ /////////////
318
+
236
319
// create a new bup backup
237
320
createBupBackup = async ( {
238
321
// timeout used for bup index and bup save commands
@@ -304,7 +387,7 @@ export class Subvolume {
304
387
const i = path . indexOf ( "/" ) ; // remove the commit name
305
388
await sudo ( {
306
389
command : "rm" ,
307
- args : [ "-rf" , join ( this . path , path . slice ( i + 1 ) ) ] ,
390
+ args : [ "-rf" , this . normalize ( path . slice ( i + 1 ) ) ] ,
308
391
} ) ;
309
392
await sudo ( {
310
393
command : "bup" ,
@@ -320,16 +403,7 @@ export class Subvolume {
320
403
} ) ;
321
404
} ;
322
405
323
- bupLs = async (
324
- path : string ,
325
- ) : Promise <
326
- {
327
- path : string ;
328
- size : number ;
329
- timestamp : number ;
330
- isdir : boolean ;
331
- } [ ]
332
- > => {
406
+ bupLs = async ( path : string ) : Promise < DirectoryListingEntry [ ] > => {
333
407
path = normalize ( path ) ;
334
408
const { stdout } = await sudo ( {
335
409
command : "bup" ,
@@ -343,30 +417,25 @@ export class Subvolume {
343
417
join ( `/${ this . name } ` , path ) ,
344
418
] ,
345
419
} ) ;
346
- const v : {
347
- path : string ;
348
- size : number ;
349
- timestamp : number ;
350
- isdir : boolean ;
351
- } [ ] = [ ] ;
420
+ const v : DirectoryListingEntry [ ] = [ ] ;
352
421
for ( const x of stdout . split ( "\n" ) ) {
353
422
// [-rw-------","6b851643360e435eb87ef9a6ab64a8b1/6b851643360e435eb87ef9a6ab64a8b1","5","2025-07-15","06:12","a.txt"]
354
423
const w = x . split ( / \s + / ) ;
355
424
if ( w . length >= 6 ) {
356
- let isdir , path ;
425
+ let isdir , name ;
357
426
if ( w [ 5 ] . endsWith ( "@" ) || w [ 5 ] . endsWith ( "=" ) || w [ 5 ] . endsWith ( "|" ) ) {
358
427
w [ 5 ] = w [ 5 ] . slice ( 0 , - 1 ) ;
359
428
}
360
429
if ( w [ 5 ] . endsWith ( "/" ) ) {
361
430
isdir = true ;
362
- path = w [ 5 ] . slice ( 0 , - 1 ) ;
431
+ name = w [ 5 ] . slice ( 0 , - 1 ) ;
363
432
} else {
364
- path = w [ 5 ] ;
433
+ name = w [ 5 ] ;
365
434
isdir = false ;
366
435
}
367
436
const size = parseInt ( w [ 2 ] ) ;
368
- const timestamp = new Date ( w [ 3 ] + "T" + w [ 4 ] ) . valueOf ( ) ;
369
- v . push ( { path , size, timestamp , isdir } ) ;
437
+ const mtime = new Date ( w [ 3 ] + "T" + w [ 4 ] ) . valueOf ( ) / 1000 ;
438
+ v . push ( { name , size, mtime , isdir } ) ;
370
439
}
371
440
}
372
441
return v ;
@@ -392,6 +461,18 @@ export class Subvolume {
392
461
} ) ;
393
462
} ;
394
463
464
+ /////////////
465
+ // BTRFS send/recv
466
+ // Not used. Instead we will rely on bup (and snapshots of the underlying disk) for backups, since:
467
+ // - much easier to check they are valid
468
+ // - decoupled from any btrfs issues
469
+ // - not tied to any specific filesystem at all
470
+ // - easier to ship offsite via incremental rsync
471
+ // - much more space efficient with *global* dedup and compression
472
+ // - bup is really just git, which is very proven
473
+ // The drawback is speed.
474
+ /////////////
475
+
395
476
// this was just a quick proof of concept -- I don't like it. Should switch to using
396
477
// timestamps and a lock.
397
478
// To recover these, doing recv for each in order does work. Then you have to
0 commit comments