Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion config.js
Original file line number Diff line number Diff line change
Expand Up @@ -985,7 +985,8 @@ config.NSFS_GLACIER_RESERVED_BUCKET_TAGS = {};
// anonymous account name
config.ANONYMOUS_ACCOUNT_NAME = 'anonymous';

// Memory thresholds (8 MiB) for NSFS upload/download streams.
// NOTE(review): the old misspelled key NFSF_UPLOAD_STREAM_MEM_THRESHOLD was
// renamed to NSFS_UPLOAD_STREAM_MEM_THRESHOLD — keep only the corrected name.
config.NSFS_UPLOAD_STREAM_MEM_THRESHOLD = 8 * 1024 * 1024;
config.NSFS_DOWNLOAD_STREAM_MEM_THRESHOLD = 8 * 1024 * 1024;

// we want to change our handling related to EACCES error
config.NSFS_LIST_IGNORE_ENTRY_ON_EACCES = true;
Expand Down
263 changes: 93 additions & 170 deletions src/sdk/namespace_fs.js

Large diffs are not rendered by default.

133 changes: 133 additions & 0 deletions src/test/unit_tests/internal/test_file_reader.test.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
/* Copyright (C) 2020 NooBaa */
'use strict';

const fs = require('fs');
const path = require('path');
const assert = require('assert');
const buffer_utils = require('../../../util/buffer_utils');
const native_fs_utils = require('../../../util/native_fs_utils');
const { FileReader } = require('../../../util/file_reader');
const { multi_buffer_pool } = require('../../../sdk/namespace_fs');

// Empty FS context object passed to the native FS calls in these tests.
const fs_context = {};

describe('FileReader', () => {

    // Every file in this test directory doubles as a read fixture.
    const test_files = fs.readdirSync(__dirname).map(file => path.join(__dirname, file));

    /**
     * Registers the shared set of read test cases.
     * Calls `tester` once per case; `tester` is expected to register an `it()`.
     * @param {(file_path: string, start?: number, end?: number) => void} tester
     */
    function describe_read_cases(tester) {
        describe('list files and read entire', () => {
            for (const file_path of test_files) {
                tester(file_path);
            }
        });
        describe('skip start cases', () => {
            // starts around the 1024-byte highWaterMark boundary
            tester(__filename, 1, Infinity);
            tester(__filename, 3, Infinity);
            tester(__filename, 11, Infinity);
            tester(__filename, 1023, Infinity);
            tester(__filename, 1024, Infinity);
            tester(__filename, 1025, Infinity);
        });
        describe('edge cases', () => {
            // tiny ranges at the start of the file
            tester(__filename, 0, 1);
            tester(__filename, 0, 2);
            tester(__filename, 0, 3);
            tester(__filename, 1, 2);
            tester(__filename, 1, 3);
            tester(__filename, 2, 3);
            // ranges around the 1024-byte highWaterMark boundary
            tester(__filename, 0, 1023);
            tester(__filename, 0, 1024);
            tester(__filename, 0, 1025);
            tester(__filename, 1, 1023);
            tester(__filename, 1, 1024);
            tester(__filename, 1, 1025);
            tester(__filename, 1023, 1024);
            tester(__filename, 1023, 1025);
            tester(__filename, 1024, 1025);
            tester(__filename, 123, 345);
            // start far beyond EOF — expect an empty read
            tester(__filename, 1000000000, Infinity);
        });
    }

    describe('as stream.Readable', () => {

        describe_read_cases(tester);

        /**
         * Reads [start, end) via FileReader's Readable interface and compares
         * the result to what fs.createReadStream produces for the same range.
         */
        function tester(file_path, start = 0, end = Infinity) {
            const basename = path.basename(file_path);
            it(`test read ${start}-${end} ${basename}`, async () => {
                await native_fs_utils.use_file({
                    fs_context,
                    bucket_path: file_path,
                    open_path: file_path,
                    scope: async file => {
                        const stat = await file.stat(fs_context);
                        const aborter = new AbortController();
                        const signal = aborter.signal;
                        const file_reader = new FileReader({
                            fs_context,
                            file,
                            file_path,
                            stat,
                            start,
                            end,
                            signal,
                            multi_buffer_pool,
                            highWaterMark: 1024, // bytes
                        });
                        const data = await buffer_utils.read_stream_join(file_reader);
                        // fs.createReadStream's `end` is inclusive while FileReader's is
                        // exclusive, hence the `end - 1` adjustment for the reference read.
                        const node_fs_stream = fs.createReadStream(file_path, { start, end: end > 0 ? end - 1 : 0 });
                        const node_fs_data = await buffer_utils.read_stream_join(node_fs_stream);
                        assert.strictEqual(data.length, node_fs_data.length);
                        assert.strictEqual(data.toString(), node_fs_data.toString());
                    }
                });
            });
        }
    });

    describe('read_into_stream with buffer pooling', () => {

        describe_read_cases(tester);

        /**
         * Reads [start, end) via FileReader.read_into_stream() (pooled-buffer
         * path) and compares the result to fs.createReadStream for the same range.
         */
        function tester(file_path, start = 0, end = Infinity) {
            const basename = path.basename(file_path);
            it(`test read ${start}-${end} ${basename}`, async () => {
                await native_fs_utils.use_file({
                    fs_context,
                    bucket_path: file_path,
                    open_path: file_path,
                    scope: async file => {
                        const stat = await file.stat(fs_context);
                        const aborter = new AbortController();
                        const signal = aborter.signal;
                        const file_reader = new FileReader({
                            fs_context,
                            file,
                            file_path,
                            stat,
                            start,
                            end,
                            signal,
                            multi_buffer_pool,
                            highWaterMark: 1024, // bytes
                        });
                        const writable = buffer_utils.write_stream();
                        await file_reader.read_into_stream(writable);
                        const data = writable.join();
                        // fs.createReadStream's `end` is inclusive — see note above.
                        const node_fs_stream = fs.createReadStream(file_path, { start, end: end > 0 ? end - 1 : 0 });
                        const node_fs_data = await buffer_utils.read_stream_join(node_fs_stream);
                        assert.strictEqual(data.length, node_fs_data.length);
                        assert.strictEqual(data.toString(), node_fs_data.toString());
                    }
                });
            });
        }

    });

});
9 changes: 3 additions & 6 deletions src/tools/file_writer_hashing.js
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ const assert = require('assert');
const FileWriter = require('../util/file_writer');
const config = require('../../config');
const nb_native = require('../util/nb_native');
const stream_utils = require('../util/stream_utils');
const P = require('../util/promise');
const stream = require('stream');
const fs = require('fs');
Expand Down Expand Up @@ -72,12 +71,11 @@ async function hash_target(chunk_size = CHUNK, parts = PARTS, iov_max = IOV_MAX)
}());
const target = new TargetHash();
const file_writer = new FileWriter({
target_file: target,
target_file: /**@type {any}*/ (target),
fs_context: DEFAULT_FS_CONFIG,
namespace_resource_id: 'MajesticSloth'
});
await stream_utils.pipeline([source_stream, file_writer]);
await stream_utils.wait_finished(file_writer);
await file_writer.write_entire_stream(source_stream);
const write_hash = target.digest();
console.log(
'Hash target',
Expand Down Expand Up @@ -114,8 +112,7 @@ async function file_target(chunk_size = CHUNK, parts = PARTS, iov_max = IOV_MAX)
fs_context: DEFAULT_FS_CONFIG,
namespace_resource_id: 'MajesticSloth'
});
await stream_utils.pipeline([source_stream, file_writer]);
await stream_utils.wait_finished(file_writer);
await file_writer.write_entire_stream(source_stream);
if (XATTR) {
await target_file.replacexattr(
DEFAULT_FS_CONFIG,
Expand Down
Loading
Loading