diff --git a/src/stream_base.cc b/src/stream_base.cc
index bbb95bf86158ee..bb46ea1febbca0 100644
--- a/src/stream_base.cc
+++ b/src/stream_base.cc
@@ -374,8 +374,9 @@ void EmitToJSStreamListener::OnStreamRead(ssize_t nread, const uv_buf_t& buf) {
   }
 
   CHECK_LE(static_cast<size_t>(nread), buf.len);
+  char* base = Realloc(buf.base, nread);
 
-  Local<Object> obj = Buffer::New(env, buf.base, nread).ToLocalChecked();
+  Local<Object> obj = Buffer::New(env, base, nread).ToLocalChecked();
   stream->CallJSOnreadMethod(nread, obj);
 }
 
diff --git a/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js b/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js
new file mode 100644
index 00000000000000..8f766e8c7a4106
--- /dev/null
+++ b/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js
@@ -0,0 +1,41 @@
+// Flags: --expose-gc
+'use strict';
+
+const common = require('../common');
+const assert = require('assert');
+const net = require('net');
+
+// Tests that, when receiving small chunks, we do not keep the full length
+// of the original allocation for the libuv read call in memory.
+
+let client;
+let baseRSS;
+const receivedChunks = [];
+const N = 250000;
+
+const server = net.createServer(common.mustCall((socket) => {
+  baseRSS = process.memoryUsage().rss;
+
+  socket.setNoDelay(true);
+  socket.on('data', (chunk) => {
+    receivedChunks.push(chunk);
+    if (receivedChunks.length < N) {
+      client.write('a');
+    } else {
+      client.end();
+      server.close();
+    }
+  });
+})).listen(0, common.mustCall(() => {
+  client = net.connect(server.address().port);
+  client.setNoDelay(true);
+  client.write('hello!');
+}));
+
+process.on('exit', () => {
+  global.gc();
+  const bytesPerChunk =
+    (process.memoryUsage().rss - baseRSS) / receivedChunks.length;
+  // We should always have less than one page (usually ~ 4 kB) per chunk.
+  assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`);
+});
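
Note for reviewers: the idea behind the stream_base.cc hunk is that the stream layer allocates a large read buffer up front (typically 64 kB) before knowing how many bytes libuv will deliver. Previously, the Buffer object handed to JS wrapped that entire allocation, so retaining a tiny chunk pinned the whole block; the Realloc() call shrinks the allocation to the bytes actually read first. The standalone sketch below illustrates the pattern only and is not node code: it uses plain std::malloc()/std::realloc() in place of node's Malloc()/Realloc() helpers, and read_some() is a hypothetical stand-in for the libuv read callback.

#include <cassert>
#include <cstdlib>
#include <cstring>

// Hypothetical stand-in for the libuv read callback: fills at most `len`
// bytes of `dst` and returns how many bytes were actually written.
static std::size_t read_some(char* dst, std::size_t len) {
  static const char msg[] = "hello!";
  const std::size_t n = (sizeof(msg) - 1 < len) ? sizeof(msg) - 1 : len;
  std::memcpy(dst, msg, n);
  return n;
}

int main() {
  // Allocate a generous read buffer before the read, as the stream code does.
  const std::size_t capacity = 64 * 1024;
  char* buf = static_cast<char*>(std::malloc(capacity));
  assert(buf != nullptr);

  const std::size_t nread = read_some(buf, capacity);
  assert(nread <= capacity);

  // The fix: shrink the allocation to the bytes actually read *before*
  // handing it to a long-lived owner (Buffer::New in the real code).
  // Without this, retaining the chunk pins the entire 64 kB block.
  char* shrunk = static_cast<char*>(std::realloc(buf, nread));
  if (shrunk != nullptr)  // realloc may return nullptr when nread is 0
    buf = shrunk;

  // ... transfer ownership of `buf` (now nread bytes) to the consumer ...
  std::free(buf);
  return 0;
}

Shrinking with realloc() is cheap for downsizing and lets the allocator reclaim the tail of the block. The test above exercises exactly this path: it drives 250000 one-byte reads while retaining every chunk, then asserts that retained memory stays under 512 bytes per chunk, far below the full read-buffer size that would be pinned without the shrink.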