Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 41 additions & 16 deletions src/node_buffer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -587,9 +587,9 @@ void StringSlice(const FunctionCallbackInfo<Value>& args) {

void CopyImpl(Local<Value> source_obj,
Local<Value> target_obj,
const uint32_t target_start,
const uint32_t source_start,
const uint32_t to_copy) {
const size_t target_start,
const size_t source_start,
const size_t to_copy) {
ArrayBufferViewContents<char> source(source_obj);
SPREAD_BUFFER_ARG(target_obj, target);

Expand All @@ -598,15 +598,29 @@ void CopyImpl(Local<Value> source_obj,

// Copies `to_copy` bytes from `source_obj` into `target_obj`.
// Assume caller has properly validated args.
void SlowCopy(const FunctionCallbackInfo<Value>& args) {
  Environment* env = Environment::GetCurrent(args);
  Local<Value> source_obj = args[0];
  Local<Value> target_obj = args[1];

  // Use IntegerValue() rather than Uint32Value() so that values larger than
  // 2**32 are not silently truncated. The JS layer is responsible for
  // ensuring these are non-negative safe integers.
  const int64_t target_start =
      args[2]->IntegerValue(env->context()).ToChecked();
  const int64_t source_start =
      args[3]->IntegerValue(env->context()).ToChecked();
  const int64_t to_copy = args[4]->IntegerValue(env->context()).ToChecked();

  // IntegerValue() can yield negative values; reject them before the
  // unsigned casts below, which would otherwise wrap to huge sizes.
  CHECK_GE(target_start, 0);
  CHECK_GE(source_start, 0);
  CHECK_GE(to_copy, 0);

  CopyImpl(source_obj,
           target_obj,
           static_cast<size_t>(target_start),
           static_cast<size_t>(source_start),
           static_cast<size_t>(to_copy));

  // Return as double: to_copy may exceed the uint32 range, and doubles
  // represent all safe integers exactly.
  args.GetReturnValue().Set(static_cast<double>(to_copy));
}

// Assume caller has properly validated args.
Expand Down Expand Up @@ -1480,11 +1494,12 @@ void CopyArrayBuffer(const FunctionCallbackInfo<Value>& args) {
// args[3] == Source ArrayBuffer Offset
// args[4] == bytesToCopy

Environment* env = Environment::GetCurrent(args);
CHECK(args[0]->IsArrayBuffer() || args[0]->IsSharedArrayBuffer());
CHECK(args[1]->IsUint32());
CHECK(args[1]->IsNumber());
CHECK(args[2]->IsArrayBuffer() || args[2]->IsSharedArrayBuffer());
CHECK(args[3]->IsUint32());
CHECK(args[4]->IsUint32());
CHECK(args[3]->IsNumber());
CHECK(args[4]->IsNumber());

void* destination;
size_t destination_byte_length;
Expand All @@ -1495,9 +1510,19 @@ void CopyArrayBuffer(const FunctionCallbackInfo<Value>& args) {
size_t source_byte_length;
std::tie(source, source_byte_length) = DecomposeBufferToParts(args[2]);

uint32_t destination_offset = args[1].As<Uint32>()->Value();
uint32_t source_offset = args[3].As<Uint32>()->Value();
size_t bytes_to_copy = args[4].As<Uint32>()->Value();
// Use IntegerValue() so offsets larger than 2**32 are not truncated.
const int64_t destination_offset_signed =
args[1]->IntegerValue(env->context()).ToChecked();
const int64_t source_offset_signed =
args[3]->IntegerValue(env->context()).ToChecked();
const int64_t bytes_to_copy_signed =
args[4]->IntegerValue(env->context()).ToChecked();
CHECK_GE(destination_offset_signed, 0);
CHECK_GE(source_offset_signed, 0);
CHECK_GE(bytes_to_copy_signed, 0);
size_t destination_offset = static_cast<size_t>(destination_offset_signed);
size_t source_offset = static_cast<size_t>(source_offset_signed);
size_t bytes_to_copy = static_cast<size_t>(bytes_to_copy_signed);

CHECK_GE(destination_byte_length - destination_offset, bytes_to_copy);
CHECK_GE(source_byte_length - source_offset, bytes_to_copy);
Expand Down
76 changes: 76 additions & 0 deletions test/parallel/test-buffer-copy-4gb.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
'use strict';

// This tests that Buffer.prototype.copy correctly handles copies whose
// source/target offsets or byte counts exceed 2**32. Refs:
// https://github.com/nodejs/node/issues/55422
const common = require('../common');

// Cannot test on 32-bit machines because the test relies on creating
// buffers larger than 4 GiB.
common.skipIf32Bits();

// Allocating 4+ GiB buffers in CI environments is expensive and may also
// fail under sanitizer / memory-constrained builds. Gate the test behind
// an explicit opt-in env var to avoid timeouts/OOMs in normal CI runs.
if (!process.env.NODE_TEST_LARGE_BUFFER) {
  common.skip('Skipping: requires NODE_TEST_LARGE_BUFFER=1 (allocates >4GiB)');
}

const assert = require('assert');

const threshold = 0xFFFFFFFF; // 2**32 - 1
const overflow = threshold + 5; // 2**32 + 4 — exercises offsets > 2**32

// Allocate a buffer of `size` bytes, or skip the entire test when the host
// cannot satisfy the request. The test keeps two >4GiB buffers live at the
// same time, so every large allocation — not only the first — must be
// guarded against allocation failure.
function allocOrSkip(size) {
  try {
    return Buffer.alloc(size);
  } catch (e) {
    if (e.code === 'ERR_MEMORY_ALLOCATION_FAILED' ||
        /Array buffer allocation failed/.test(e.message)) {
      common.skip('insufficient memory for >4GiB Buffer.alloc');
    }
    throw e;
  }
}

// Allocate a buffer slightly larger than 2**32 so we can copy to and from
// offsets above the uint32 boundary.
const largeBuffer = allocOrSkip(overflow);

// Sentinel byte at an index above 2**32. Before the fix, copy() truncates the
// length to 32 bits and the sentinel never gets written, so reading it back
// would yield 0.
const sentinelIndex = threshold + 2;
largeBuffer[sentinelIndex] = 0xAB;

// Test 1: Buffer.prototype.copy with sourceEnd > 2**32 should copy the bytes
// past the 4 GiB boundary instead of silently truncating.
{
  const target = allocOrSkip(overflow);
  const copied = largeBuffer.copy(target, 0, 0, overflow);
  assert.strictEqual(copied, overflow,
                     'copy() should report copying the full byte range');
  assert.strictEqual(target[sentinelIndex], 0xAB,
                     'byte beyond 2**32 must be copied, not truncated');
}

// Test 2: Buffer.prototype.copy with targetStart > 2**32 should write at the
// large offset rather than wrapping back to a low address.
{
  const target = allocOrSkip(overflow);
  const src = Buffer.from([0xCD]);
  const copied = src.copy(target, threshold + 1, 0, 1);
  assert.strictEqual(copied, 1);
  assert.strictEqual(target[threshold + 1], 0xCD,
                     'targetStart > 2**32 must not wrap to a low offset');
  // The low offset that uint32 truncation would have written to must remain
  // untouched.
  assert.strictEqual(target[(threshold + 1) >>> 0], 0);
}

// Test 3: Buffer.concat with a single >4 GiB buffer should preserve the
// trailing bytes, exercising the original repro from the issue.
{
  largeBuffer.fill(0x6F); // 'o'
  const result = Buffer.concat([largeBuffer]);
  assert.strictEqual(result.length, overflow);
  assert.strictEqual(result[overflow - 1], 0x6F,
                     'final byte beyond 2**32 must survive concat');
}
Loading