Skip to content

Commit e29cfff

Browse files
committed
handle \n and \ properly in the journals
Signed-off-by: Utkarsh Srivastava <[email protected]>
1 parent b1910bd commit e29cfff

File tree

4 files changed

+207
-12
lines changed

4 files changed

+207
-12
lines changed

src/sdk/glacier.js

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,32 @@ class Glacier {
167167
return restore_status.state === Glacier.RESTORE_STATUS_CAN_RESTORE;
168168
}
169169

170+
/**
171+
* encode_log takes in data intended for the backend and encodes
172+
* it.
173+
*
174+
* This method must be overwritten for all the backends if they need
175+
* different encodings for their logs.
176+
* @param {string} data
177+
* @returns {string}
178+
*/
179+
encode_log(data) {
180+
return data;
181+
}
182+
183+
/**
184+
* decode_log takes in data intended for the backend and decodes
185+
* it.
186+
*
187+
* This method must be overwritten for all the backends if they need
188+
* different encodings for their logs.
189+
* @param {string} data
190+
* @returns {string}
191+
*/
192+
decode_log(data) {
193+
return data;
194+
}
195+
170196
/**
171197
* get_restore_status returns status of the object at the given
172198
* file_path

src/sdk/glacier_tapecloud.js

Lines changed: 45 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -190,13 +190,21 @@ class TapeCloudUtils {
190190
}
191191

192192
class TapeCloudGlacier extends Glacier {
193+
static LOG_DELIM = ' -- ';
194+
193195
async migrate(fs_context, log_file, failure_recorder) {
194196
dbg.log2('TapeCloudGlacier.migrate starting for', log_file);
195197

196-
const file = new LogFile(fs_context, log_file, '\n-- ');
198+
// Wrap failure recorder to make sure we correctly encode the entries
199+
// before appending them to the failure log
200+
const encoded_failure_recorder = async failure => failure_recorder(this.encode_log(failure));
201+
202+
const file = new LogFile(fs_context, log_file);
197203

198204
try {
199205
await file.collect_and_process(async (entry, batch_recorder) => {
206+
entry = this.decode_log(entry);
207+
200208
let should_migrate = true;
201209
try {
202210
should_migrate = await this.should_migrate(fs_context, entry);
@@ -214,7 +222,7 @@ class TapeCloudGlacier extends Glacier {
214222
// Can't really do anything if this fails - provider
215223
// needs to make sure that appropriate error handling
216224
// is being done there
217-
await failure_recorder(entry);
225+
await encoded_failure_recorder(entry);
218226
return;
219227
}
220228

@@ -224,13 +232,13 @@ class TapeCloudGlacier extends Glacier {
224232
// Can't really do anything if this fails - provider
225233
// needs to make sure that appropriate error handling
226234
// is being done there
227-
await batch_recorder(entry);
235+
await batch_recorder(this.encode_log(entry));
228236
},
229237
async batch => {
230238
// This will throw error only if our eeadm error handler
231239
// panics as well and at that point it's okay to
232240
// not handle the error and rather keep the log file around
233-
await this._migrate(batch, failure_recorder);
241+
await this._migrate(batch, encoded_failure_recorder);
234242
});
235243

236244
return true;
@@ -243,9 +251,15 @@ class TapeCloudGlacier extends Glacier {
243251
async restore(fs_context, log_file, failure_recorder) {
244252
dbg.log2('TapeCloudGlacier.restore starting for', log_file);
245253

246-
const file = new LogFile(fs_context, log_file, '\n-- ');
254+
// Wrap failure recorder to make sure we correctly encode the entries
255+
// before appending them to the failure log
256+
const encoded_failure_recorder = async failure => failure_recorder(this.encode_log(failure));
257+
258+
const file = new LogFile(fs_context, log_file);
247259
try {
248260
await file.collect_and_process(async (entry, batch_recorder) => {
261+
entry = this.decode_log(entry);
262+
249263
try {
250264
const should_restore = await Glacier.should_restore(fs_context, entry);
251265
if (!should_restore) {
@@ -254,7 +268,7 @@ class TapeCloudGlacier extends Glacier {
254268
}
255269

256270
// Add entry to the tempwal
257-
await batch_recorder(entry);
271+
await batch_recorder(this.encode_log(entry));
258272
} catch (error) {
259273
if (error.code === 'ENOENT') {
260274
// Skip this file
@@ -265,17 +279,19 @@ class TapeCloudGlacier extends Glacier {
265279
'adding log entry', entry,
266280
'to failure recorder due to error', error,
267281
);
268-
await failure_recorder(entry);
282+
await encoded_failure_recorder(entry);
269283
}
270284
},
271285
async batch => {
272286
const success = await this._recall(
273287
batch,
274288
async entry_path => {
289+
entry_path = this.decode_log(entry_path);
275290
dbg.log2('TapeCloudGlacier.restore.partial_failure - entry:', entry_path);
276-
await failure_recorder(entry_path);
291+
await encoded_failure_recorder(entry_path);
277292
},
278293
async entry_path => {
294+
entry_path = this.decode_log(entry_path);
279295
dbg.log2('TapeCloudGlacier.restore.partial_success - entry:', entry_path);
280296
await this._finalize_restore(fs_context, entry_path);
281297
}
@@ -286,6 +302,7 @@ class TapeCloudGlacier extends Glacier {
286302
if (success) {
287303
const batch_file = new LogFile(fs_context, batch);
288304
await batch_file.collect_and_process(async (entry_path, batch_recorder) => {
305+
entry_path = this.decode_log(entry_path);
289306
dbg.log2('TapeCloudGlacier.restore.batch - entry:', entry_path);
290307
await this._finalize_restore(fs_context, entry_path);
291308
});
@@ -311,6 +328,26 @@ class TapeCloudGlacier extends Glacier {
311328
return result.toLowerCase().trim() === 'true';
312329
}
313330

331+
/**
332+
*
333+
* @param {string} data
334+
* @returns
335+
*/
336+
encode_log(data) {
337+
const encoded = data.replace(/\\/g, '\\\\').replace(/\n/g, '\\n');
338+
return `${TapeCloudGlacier.LOG_DELIM}${encoded}`;
339+
}
340+
341+
/**
342+
*
343+
* @param {string} data
344+
* @returns
345+
*/
346+
decode_log(data) {
347+
return data.substring(TapeCloudGlacier.LOG_DELIM.length).replace(/\\n/g, '\n').replace(/\\\\/g, '\\');
348+
}
349+
350+
314351
// ============= PRIVATE FUNCTIONS =============
315352

316353
/**

src/sdk/namespace_fs.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3687,13 +3687,13 @@ class NamespaceFS {
36873687
async append_to_migrate_wal(entry) {
36883688
if (!config.NSFS_GLACIER_LOGS_ENABLED) return;
36893689

3690-
await NamespaceFS.migrate_wal.append(`-- ${entry}`);
3690+
await NamespaceFS.migrate_wal.append(Glacier.getBackend().encode_log(entry));
36913691
}
36923692

36933693
async append_to_restore_wal(entry) {
36943694
if (!config.NSFS_GLACIER_LOGS_ENABLED) return;
36953695

3696-
await NamespaceFS.restore_wal.append(`-- ${entry}`);
3696+
await NamespaceFS.restore_wal.append(Glacier.getBackend().encode_log(entry));
36973697
}
36983698

36993699
static get migrate_wal() {

src/test/unit_tests/nsfs/test_nsfs_glacier_backend.js

Lines changed: 134 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -151,6 +151,8 @@ mocha.describe('nsfs_glacier', function() {
151151
mocha.describe('nsfs_glacier_tapecloud', async function() {
152152
const upload_key = 'upload_key_1';
153153
const restore_key = 'restore_key_1';
154+
const restore_key_spcl_char_1 = 'restore_key_2_\n';
155+
const restore_key_spcl_char_2 = 'restore_key_2_\n_2';
154156
const xattr = { key: 'value', key2: 'value2' };
155157
xattr[s3_utils.XATTR_SORT_SYMBOL] = true;
156158

@@ -267,10 +269,140 @@ mocha.describe('nsfs_glacier', function() {
267269
failure_backend._process_expired = async () => { /**noop*/ };
268270
failure_backend._recall = async (_file, failure_recorder, success_recorder) => {
269271
// This unintentionally also replicates duplicate entries in WAL
270-
await failure_recorder(failed_file_path);
272+
await failure_recorder(failure_backend.encode_log(failed_file_path));
271273

272274
// This unintentionally also replicates duplicate entries in WAL
273-
await success_recorder(success_file_path);
275+
await success_recorder(failure_backend.encode_log(success_file_path));
276+
277+
return false;
278+
};
279+
280+
const upload_res_1 = await glacier_ns.upload_object(failed_params, dummy_object_sdk);
281+
console.log('upload_object response', inspect(upload_res_1));
282+
283+
const upload_res_2 = await glacier_ns.upload_object(success_params, dummy_object_sdk);
284+
console.log('upload_object response', inspect(upload_res_2));
285+
286+
const restore_res_1 = await glacier_ns.restore_object(failed_params, dummy_object_sdk);
287+
assert(restore_res_1);
288+
289+
const restore_res_2 = await glacier_ns.restore_object(success_params, dummy_object_sdk);
290+
assert(restore_res_2);
291+
292+
const fs_context = glacier_ns.prepare_fs_context(dummy_object_sdk);
293+
294+
// Issue restore
295+
await NamespaceFS.restore_wal._process(async file => {
296+
await failure_backend.restore(fs_context, file, async () => { /*noop*/ });
297+
298+
// Don't delete the file
299+
return false;
300+
});
301+
302+
// Ensure success object is restored
303+
const success_md = await glacier_ns.read_object_md(success_params, dummy_object_sdk);
304+
305+
assert(!success_md.restore_status.ongoing);
306+
307+
const expected_expiry = Glacier.generate_expiry(new Date(), success_params.days, '', config.NSFS_GLACIER_EXPIRY_TZ);
308+
assert(expected_expiry.getTime() >= success_md.restore_status.expiry_time.getTime());
309+
assert(now <= success_md.restore_status.expiry_time.getTime());
310+
311+
// Ensure failed object is NOT restored
312+
const failure_stats = await nb_native().fs.stat(
313+
fs_context,
314+
failed_file_path,
315+
);
316+
317+
assert(!failure_stats.xattr[Glacier.XATTR_RESTORE_EXPIRY] || failure_stats.xattr[Glacier.XATTR_RESTORE_EXPIRY] === '');
318+
assert(failure_stats.xattr[Glacier.XATTR_RESTORE_REQUEST]);
319+
});
320+
321+
mocha.it('restore-object should successfully restore objects with special characters', async function() {
322+
const now = Date.now();
323+
const data = crypto.randomBytes(100);
324+
const all_params = [
325+
{
326+
bucket: upload_bkt,
327+
key: restore_key_spcl_char_1,
328+
storage_class: s3_utils.STORAGE_CLASS_GLACIER,
329+
xattr,
330+
days: 1,
331+
source_stream: buffer_utils.buffer_to_read_stream(data)
332+
},
333+
{
334+
bucket: upload_bkt,
335+
key: restore_key_spcl_char_2,
336+
storage_class: s3_utils.STORAGE_CLASS_GLACIER,
337+
xattr,
338+
days: 1,
339+
source_stream: buffer_utils.buffer_to_read_stream(data)
340+
}
341+
];
342+
343+
for (const params of all_params) {
344+
const upload_res = await glacier_ns.upload_object(params, dummy_object_sdk);
345+
console.log('upload_object response', inspect(upload_res));
346+
347+
const restore_res = await glacier_ns.restore_object(params, dummy_object_sdk);
348+
assert(restore_res);
349+
350+
// Issue restore
351+
await NamespaceFS.restore_wal._process(async file => {
352+
const fs_context = glacier_ns.prepare_fs_context(dummy_object_sdk);
353+
await backend.restore(fs_context, file);
354+
355+
// Don't delete the file
356+
return false;
357+
});
358+
359+
// Ensure object is restored
360+
const md = await glacier_ns.read_object_md(params, dummy_object_sdk);
361+
362+
assert(!md.restore_status.ongoing);
363+
364+
const expected_expiry = Glacier.generate_expiry(new Date(), params.days, '', config.NSFS_GLACIER_EXPIRY_TZ);
365+
assert(expected_expiry.getTime() >= md.restore_status.expiry_time.getTime());
366+
assert(now <= md.restore_status.expiry_time.getTime());
367+
}
368+
});
369+
370+
mocha.it('restore-object should not restore failed item with special characters', async function() {
371+
const now = Date.now();
372+
const data = crypto.randomBytes(100);
373+
const failed_restore_key = `${restore_key_spcl_char_1}_failured`;
374+
const success_restore_key = `${restore_key_spcl_char_1}_success`;
375+
376+
const failed_params = {
377+
bucket: upload_bkt,
378+
key: failed_restore_key,
379+
storage_class: s3_utils.STORAGE_CLASS_GLACIER,
380+
xattr,
381+
days: 1,
382+
source_stream: buffer_utils.buffer_to_read_stream(data)
383+
};
384+
385+
const success_params = {
386+
bucket: upload_bkt,
387+
key: success_restore_key,
388+
storage_class: s3_utils.STORAGE_CLASS_GLACIER,
389+
xattr,
390+
days: 1,
391+
source_stream: buffer_utils.buffer_to_read_stream(data)
392+
};
393+
394+
const failed_file_path = glacier_ns._get_file_path(failed_params);
395+
const success_file_path = glacier_ns._get_file_path(success_params);
396+
397+
const failure_backend = new TapeCloudGlacier();
398+
failure_backend._migrate = async () => true;
399+
failure_backend._process_expired = async () => { /**noop*/ };
400+
failure_backend._recall = async (_file, failure_recorder, success_recorder) => {
401+
// This unintentionally also replicates duplicate entries in WAL
402+
await failure_recorder(failure_backend.encode_log(failed_file_path));
403+
404+
// This unintentionally also replicates duplicate entries in WAL
405+
await success_recorder(failure_backend.encode_log(success_file_path));
274406

275407
return false;
276408
};

0 commit comments

Comments (0)