Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 27 additions & 0 deletions benchmark/fs/bench-mkdirpSync.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
'use strict';

// Benchmarks MKDirpSync (fs.mkdirSync with recursive: true), which iterates
// over a continuation_data path queue. Varying depth exercises the inner loop
// more, making the continuation_data pointer cache more impactful.

const common = require('../common');
const fs = require('fs');
const path = require('path');
const tmpdir = require('../../test/common/tmpdir');
tmpdir.refresh();

// n: outer iterations per run; depth: directory nesting level handed to a
// single recursive mkdirSync call (deeper => more inner-loop work in C++).
const bench = common.createBenchmark(main, {
  n: [1e3],
  depth: [4, 8, 16],
});

// Monotonic counter so every generated path segment is unique across
// iterations; tmpdir is refreshed only once, at module load.
let dirc = 0;

// Runs n recursive mkdirSync calls, each creating a chain of `depth`
// never-before-seen directories under the temp directory.
function main({ n, depth }) {
  bench.start();
  for (let iter = 0; iter < n; iter++) {
    const segments = [];
    for (let level = 0; level < depth; level++)
      segments.push(String(++dirc));
    fs.mkdirSync(path.join(tmpdir.path, ...segments), { recursive: true });
  }
  bench.end(n);
}
42 changes: 42 additions & 0 deletions benchmark/streams/writable-writev-string.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
'use strict';

const common = require('../common.js');
const { Writable } = require('stream');

// Benchmarks StreamBase::Writev with string chunks, exercising the chunk
// cache that avoids redundant V8 array accesses, ToString, and ParseEncoding
// calls between the sizing pass and the write pass.
const bench = common.createBenchmark(main, {
  n: [1e4], // corked write batches per run
  chunks: [4, 16, 64], // writes queued per corked batch
  encoding: ['utf8', 'latin1'],
  // 'buffer' keeps every chunk a Buffer; 'string'/'mixed' force the
  // string-handling (non-all_buffers) path described above.
  type: ['string', 'buffer', 'mixed'],
});

// Queues `chunks` writes per corked batch, n batches total. Chunk payloads
// are created once up front so the measured loop is write() dispatch only.
function main({ n, chunks, encoding, type }) {
  const str = 'Hello, benchmark! '.repeat(4);
  const buf = Buffer.from(str, encoding);

  // No-op sinks: we benchmark the write/writev plumbing, not a consumer.
  // Callback parameter renamed so it does not shadow the outer `chunks`.
  const wr = new Writable({
    writev(data, cb) { cb(); },
    write(chunk, enc, cb) { cb(); },
  });

  bench.start();
  for (let i = 0; i < n; i++) {
    wr.cork();
    for (let j = 0; j < chunks; j++) {
      if (type === 'buffer') {
        wr.write(buf);
      } else if (type === 'string') {
        wr.write(str, encoding);
      } else if (j % 2 === 0) {
        // 'mixed': alternate buffer and string chunks to hit the
        // non-all_buffers path.
        wr.write(buf);
      } else {
        wr.write(str, encoding);
      }
    }
    wr.uncork();
  }
  bench.end(n);
}
18 changes: 9 additions & 9 deletions src/node_file.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1866,16 +1866,17 @@ int MKDirpSync(uv_loop_t* loop,
req_wrap->continuation_data()->PushPath(std::move(path));
}

while (req_wrap->continuation_data()->paths().size() > 0) {
std::string next_path = req_wrap->continuation_data()->PopPath();
FSContinuationData* cont_data = req_wrap->continuation_data();
while (cont_data->paths().size() > 0) {
std::string next_path = cont_data->PopPath();
int err = uv_fs_mkdir(loop, req, next_path.c_str(), mode, nullptr);
while (true) {
switch (err) {
// Note: uv_fs_req_cleanup in terminal paths will be called by
// ~FSReqWrapSync():
case 0:
req_wrap->continuation_data()->MaybeSetFirstPath(next_path);
if (req_wrap->continuation_data()->paths().empty()) {
cont_data->MaybeSetFirstPath(next_path);
if (cont_data->paths().empty()) {
return 0;
}
break;
Expand All @@ -1889,9 +1890,9 @@ int MKDirpSync(uv_loop_t* loop,
std::string dirname =
next_path.substr(0, next_path.find_last_of(kPathSeparator));
if (dirname != next_path) {
req_wrap->continuation_data()->PushPath(std::move(next_path));
req_wrap->continuation_data()->PushPath(std::move(dirname));
} else if (req_wrap->continuation_data()->paths().empty()) {
cont_data->PushPath(std::move(next_path));
cont_data->PushPath(std::move(dirname));
} else if (cont_data->paths().empty()) {
err = UV_EEXIST;
continue;
}
Expand All @@ -1903,8 +1904,7 @@ int MKDirpSync(uv_loop_t* loop,
err = uv_fs_stat(loop, req, next_path.c_str(), nullptr);
if (err == 0 && !S_ISDIR(req->statbuf.st_mode)) {
uv_fs_req_cleanup(req);
if (orig_err == UV_EEXIST &&
req_wrap->continuation_data()->paths().size() > 0) {
if (orig_err == UV_EEXIST && cont_data->paths().size() > 0) {
return UV_ENOTDIR;
}
return UV_EEXIST;
Expand Down
76 changes: 41 additions & 35 deletions src/stream_base.cc
Original file line number Diff line number Diff line change
Expand Up @@ -201,12 +201,24 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
size_t offset;

if (!all_buffers) {
// Cache per-chunk data from the first pass so the second pass avoids
// redundant V8 array accesses, ToString conversions, and ParseEncoding
// calls. Local<> handles remain valid for the duration of this scope.
struct CachedChunk {
Local<Value> value;
Local<String> string; // empty for Buffer chunks
enum encoding enc;
};
MaybeStackBuffer<CachedChunk, 16> chunk_cache(count);

// Determine storage size first
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i * 2).ToLocal(&chunk))
return -1;

chunk_cache[i].value = chunk;

if (Buffer::HasInstance(chunk))
continue;
// Buffer chunk, no additional storage required
Expand All @@ -219,6 +231,8 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
if (!chunks->Get(context, i * 2 + 1).ToLocal(&next_chunk))
return -1;
enum encoding encoding = ParseEncoding(isolate, next_chunk);
chunk_cache[i].string = string;
chunk_cache[i].enc = encoding;
size_t chunk_size;
if ((encoding == UTF8 &&
string->Length() > 65535 &&
Expand All @@ -232,33 +246,20 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {

if (storage_size > INT_MAX)
return UV_ENOBUFS;
} else {
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i).ToLocal(&chunk))
return -1;
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
}
}

std::unique_ptr<BackingStore> bs;
if (storage_size > 0) {
bs = ArrayBuffer::NewBackingStore(
isolate, storage_size, BackingStoreInitializationMode::kUninitialized);
}
std::unique_ptr<BackingStore> bs;
if (storage_size > 0) {
bs = ArrayBuffer::NewBackingStore(
isolate, storage_size, BackingStoreInitializationMode::kUninitialized);
}

offset = 0;
if (!all_buffers) {
offset = 0;
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i * 2).ToLocal(&chunk))
return -1;

// Write buffer
if (Buffer::HasInstance(chunk)) {
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
// string.IsEmpty() signals a Buffer chunk; enc is uninitialised in
// that case so we must not read it.
if (chunk_cache[i].string.IsEmpty()) {
bufs[i].base = Buffer::Data(chunk_cache[i].value);
bufs[i].len = Buffer::Length(chunk_cache[i].value);
continue;
}

Expand All @@ -268,28 +269,33 @@ int StreamBase::Writev(const FunctionCallbackInfo<Value>& args) {
static_cast<char*>(bs ? bs->Data() : nullptr) + offset;
size_t str_size = (bs ? bs->ByteLength() : 0) - offset;

Local<String> string;
if (!chunk->ToString(context).ToLocal(&string))
return -1;
Local<Value> next_chunk;
if (!chunks->Get(context, i * 2 + 1).ToLocal(&next_chunk))
return -1;
enum encoding encoding = ParseEncoding(isolate, next_chunk);
str_size = StringBytes::Write(isolate,
str_storage,
str_size,
string,
encoding);
chunk_cache[i].string,
chunk_cache[i].enc);
bufs[i].base = str_storage;
bufs[i].len = str_size;
offset += str_size;
}

StreamWriteResult res = Write(*bufs, count, nullptr, req_wrap_obj);
SetWriteResult(res);
if (res.wrap != nullptr && storage_size > 0)
res.wrap->SetBackingStore(std::move(bs));
return res.err;
} else {
for (size_t i = 0; i < count; i++) {
Local<Value> chunk;
if (!chunks->Get(context, i).ToLocal(&chunk))
return -1;
bufs[i].base = Buffer::Data(chunk);
bufs[i].len = Buffer::Length(chunk);
}
}

StreamWriteResult res = Write(*bufs, count, nullptr, req_wrap_obj);
SetWriteResult(res);
if (res.wrap != nullptr && storage_size > 0)
res.wrap->SetBackingStore(std::move(bs));
return res.err;
}

Expand Down
Loading