diff --git a/.gitignore b/.gitignore index 0b8f1a405bda3a..69c1dd205316fa 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ /tags /tags.* /doc/api.xml +/docs/ /node /node_g /gon-config.json diff --git a/lib/internal/quic/quic.js b/lib/internal/quic/quic.js index 6ca59469faf2de..f07fd88c0bd321 100644 --- a/lib/internal/quic/quic.js +++ b/lib/internal/quic/quic.js @@ -1396,56 +1396,17 @@ function maybeGetCloseError(context, status, pendingError) { } class QuicStream { + // All mutable per-stream state is consolidated into a single object + // to minimize the number of V8 private field slots. Only #handle is + // kept as a private field for the brand check in #assertIsQuicStream. /** @type {object} */ #handle; - /** - * Flag set at the top of `destroy()` to make the method safely - * re-entrant. Distinct from `#handle === undefined` (which signals - * "fully destroyed" and is set inside `[kFinishClose]`) so that - * `[kFinishClose]`'s own destroyed-guard does not bail before the - * cleanup work runs. 
- * @type {boolean} - */ - #destroying = false; - /** @type {QuicSession} */ - #session; - /** @type {QuicStreamStats} */ - #stats; - /** @type {QuicStreamState} */ - #state; - /** @type {number} */ - #direction = undefined; - /** @type {Function|undefined} */ - #onerror = undefined; - /** @type {OnBlockedCallback|undefined} */ - #onblocked = undefined; - /** @type {OnStreamErrorCallback|undefined} */ - #onreset = undefined; - /** @type {Function|undefined} */ - #onheaders = undefined; - /** @type {Function|undefined} */ - #ontrailers = undefined; - /** @type {Function|undefined} */ - #oninfo = undefined; - /** @type {Function|undefined} */ - #onwanttrailers = undefined; - /** @type {object|undefined} */ - #headers = undefined; - /** @type {object|undefined} */ - #pendingTrailers = undefined; - /** @type {Promise} */ - #pendingClose = PromiseWithResolvers(); - #reader; - #iteratorLocked = false; - #writer = undefined; - #outboundSet = false; - /** @type {FileHandle|undefined} */ - #fileHandle = undefined; + #inner; static { getQuicStreamState = function(stream) { QuicStream.#assertIsQuicStream(stream); - return stream.#state; + return stream.#inner.state; }; } @@ -1456,7 +1417,7 @@ class QuicStream { } #assertHeadersSupported() { - if (getQuicSessionState(this.#session).headersSupported === 2) { + if (getQuicSessionState(this.#inner.session).headersSupported === 2) { throw new ERR_INVALID_STATE( 'The negotiated QUIC application protocol does not support headers'); } @@ -1467,29 +1428,47 @@ class QuicStream { * @param {object} handle * @param {QuicSession} session * @param {number} direction + * @param {boolean} [isLocal] True for locally-initiated streams */ - constructor(privateSymbol, handle, session, direction) { + constructor(privateSymbol, handle, session, direction, isLocal = false) { if (privateSymbol !== kPrivateConstructor) { throw new ERR_ILLEGAL_CONSTRUCTOR(); } this.#handle = handle; - this.#handle[kOwner] = this; - this.#session = session; - 
this.#direction = direction; - this.#stats = new QuicStreamStats(kPrivateConstructor, this.#handle.stats); - this.#state = new QuicStreamState(kPrivateConstructor, this.#handle.state); - this.#reader = this.#handle.getReader(); + handle[kOwner] = this; + this.#inner = { + __proto__: null, + session, + direction, + isLocal, + state: new QuicStreamState( + kPrivateConstructor, handle.state, handle.stateByteOffset), + stats: undefined, + pendingClose: undefined, + reader: undefined, + destroying: false, + iteratorLocked: false, + outboundSet: false, + writer: undefined, + fileHandle: undefined, + headers: undefined, + pendingTrailers: undefined, + // Callback slots + onerror: undefined, + onblocked: undefined, + onreset: undefined, + onheaders: undefined, + ontrailers: undefined, + oninfo: undefined, + onwanttrailers: undefined, + }; if (hasObserver('quic')) { startPerf(this, kPerfEntry, { type: 'quic', name: 'QuicStream' }); } - if (this.pending) { - debug(`pending ${this.direction} stream created`); - } else { - debug(`${this.direction} stream ${this.id} created`); - } + debug('stream created'); } get [kValidatedSource]() { return true; } @@ -1502,15 +1481,16 @@ class QuicStream { */ async *[SymbolAsyncIterator]() { QuicStream.#assertIsQuicStream(this); - if (this.#iteratorLocked) { + if (this.#inner.iteratorLocked) { throw new ERR_INVALID_STATE('Stream is already being read'); } - this.#iteratorLocked = true; + this.#inner.iteratorLocked = true; + this.#inner.reader ??= this.#handle?.getReader(); // Non-readable stream (outbound-only unidirectional, or closed) - if (!this.#reader) return; + if (!this.#inner.reader) return; - yield* createBlobReaderIterable(this.#reader, { + yield* createBlobReaderIterable(this.#inner.reader, { getReadError: () => { // The read side ends for one of three reasons: // * Clean FIN received from the peer (state.finReceived @@ -1524,8 +1504,8 @@ class QuicStream { // stream.stopSending(). 
Both paths run EndReadable in // C++, setting state.readEnded without setting // state.finReceived. There is no peer code to surface. - if (this.#state.readEnded && !this.#state.finReceived) { - const peerResetCode = this.#state.resetCode; + if (this.#inner.state.readEnded && !this.#inner.state.finReceived) { + const peerResetCode = this.#inner.state.resetCode; if (peerResetCode !== undefined && peerResetCode > 0n) { return new ERR_QUIC_STREAM_RESET(Number(peerResetCode)); } @@ -1544,7 +1524,7 @@ class QuicStream { */ get pending() { QuicStream.#assertIsQuicStream(this); - return this.#state.pending; + return this.#inner.state.pending; } /** @@ -1555,7 +1535,7 @@ class QuicStream { */ get early() { QuicStream.#assertIsQuicStream(this); - return this.#state.early; + return this.#inner.state.early; } /** @@ -1566,142 +1546,144 @@ class QuicStream { */ get highWaterMark() { QuicStream.#assertIsQuicStream(this); - return this.#state.highWaterMark; + return this.#inner.state.highWaterMark; } set highWaterMark(val) { QuicStream.#assertIsQuicStream(this); validateInteger(val, 'highWaterMark', 0, 0xFFFFFFFF); - this.#state.highWaterMark = val; + this.#inner.state.highWaterMark = val; // If writeDesiredSize hasn't been set yet (still 0 from initialization), // initialize it to the highWaterMark so the first write can proceed. 
- if (this.#state.writeDesiredSize === 0 && val > 0) { - this.#state.writeDesiredSize = val; + if (this.#inner.state.writeDesiredSize === 0 && val > 0) { + this.#inner.state.writeDesiredSize = val; } } /** @type {Function|undefined} */ get onerror() { QuicStream.#assertIsQuicStream(this); - return this.#onerror; + return this.#inner.onerror; } set onerror(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#onerror = undefined; + this.#inner.onerror = undefined; } else { validateFunction(fn, 'onerror'); - this.#onerror = FunctionPrototypeBind(fn, this); - markPromiseAsHandled(this.#pendingClose.promise); + this.#inner.onerror = FunctionPrototypeBind(fn, this); + // Lazily create the close promise so it can be marked handled. + this.#inner.pendingClose ??= PromiseWithResolvers(); + markPromiseAsHandled(this.#inner.pendingClose.promise); } } /** @type {OnBlockedCallback} */ get onblocked() { QuicStream.#assertIsQuicStream(this); - return this.#onblocked; + return this.#inner.onblocked; } set onblocked(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#onblocked = undefined; - this.#state.wantsBlock = false; + this.#inner.onblocked = undefined; + this.#inner.state.wantsBlock = false; } else { validateFunction(fn, 'onblocked'); - this.#onblocked = FunctionPrototypeBind(fn, this); - this.#state.wantsBlock = true; + this.#inner.onblocked = FunctionPrototypeBind(fn, this); + this.#inner.state.wantsBlock = true; } } /** @type {OnStreamErrorCallback} */ get onreset() { QuicStream.#assertIsQuicStream(this); - return this.#onreset; + return this.#inner.onreset; } set onreset(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#onreset = undefined; - this.#state.wantsReset = false; + this.#inner.onreset = undefined; + this.#inner.state.wantsReset = false; } else { validateFunction(fn, 'onreset'); - this.#onreset = FunctionPrototypeBind(fn, this); - this.#state.wantsReset = true; + this.#inner.onreset = 
FunctionPrototypeBind(fn, this); + this.#inner.state.wantsReset = true; } } /** @type {OnHeadersCallback} */ get onheaders() { QuicStream.#assertIsQuicStream(this); - return this.#onheaders; + return this.#inner.onheaders; } set onheaders(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#onheaders = undefined; - this.#state[kWantsHeaders] = false; + this.#inner.onheaders = undefined; + this.#inner.state[kWantsHeaders] = false; } else { this.#assertHeadersSupported(); validateFunction(fn, 'onheaders'); - this.#onheaders = FunctionPrototypeBind(fn, this); - this.#state[kWantsHeaders] = true; + this.#inner.onheaders = FunctionPrototypeBind(fn, this); + this.#inner.state[kWantsHeaders] = true; } } /** @type {Function|undefined} */ get oninfo() { QuicStream.#assertIsQuicStream(this); - return this.#oninfo; + return this.#inner.oninfo; } set oninfo(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#oninfo = undefined; + this.#inner.oninfo = undefined; } else { this.#assertHeadersSupported(); validateFunction(fn, 'oninfo'); - this.#oninfo = FunctionPrototypeBind(fn, this); + this.#inner.oninfo = FunctionPrototypeBind(fn, this); } } /** @type {Function|undefined} */ get ontrailers() { QuicStream.#assertIsQuicStream(this); - return this.#ontrailers; + return this.#inner.ontrailers; } set ontrailers(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#ontrailers = undefined; + this.#inner.ontrailers = undefined; } else { this.#assertHeadersSupported(); validateFunction(fn, 'ontrailers'); - this.#ontrailers = FunctionPrototypeBind(fn, this); + this.#inner.ontrailers = FunctionPrototypeBind(fn, this); } } /** @type {Function|undefined} */ get onwanttrailers() { QuicStream.#assertIsQuicStream(this); - return this.#onwanttrailers; + return this.#inner.onwanttrailers; } set onwanttrailers(fn) { QuicStream.#assertIsQuicStream(this); if (fn === undefined) { - this.#onwanttrailers = undefined; - 
this.#state[kWantsTrailers] = false; + this.#inner.onwanttrailers = undefined; + this.#inner.state[kWantsTrailers] = false; } else { this.#assertHeadersSupported(); validateFunction(fn, 'onwanttrailers'); - this.#onwanttrailers = FunctionPrototypeBind(fn, this); - this.#state[kWantsTrailers] = true; + this.#inner.onwanttrailers = FunctionPrototypeBind(fn, this); + this.#inner.state[kWantsTrailers] = true; } } @@ -1713,7 +1695,7 @@ class QuicStream { */ get headers() { QuicStream.#assertIsQuicStream(this); - return this.#headers; + return this.#inner.headers; } /** @@ -1722,21 +1704,21 @@ class QuicStream { */ get pendingTrailers() { QuicStream.#assertIsQuicStream(this); - return this.#pendingTrailers; + return this.#inner.pendingTrailers; } set pendingTrailers(headers) { QuicStream.#assertIsQuicStream(this); if (headers === undefined) { - this.#pendingTrailers = undefined; + this.#inner.pendingTrailers = undefined; return; } - if (getQuicSessionState(this.#session).headersSupported === 2) { + if (getQuicSessionState(this.#inner.session).headersSupported === 2) { throw new ERR_INVALID_STATE( 'The negotiated QUIC application protocol does not support headers'); } validateObject(headers, 'headers'); - this.#pendingTrailers = headers; + this.#inner.pendingTrailers = headers; } /** @@ -1745,7 +1727,11 @@ class QuicStream { */ get stats() { QuicStream.#assertIsQuicStream(this); - return this.#stats; + if (this.#inner.stats === undefined) { + this.#inner.stats = new QuicStreamStats( + kPrivateConstructor, this.#handle.stats, this.#handle.statsByteOffset); + } + return this.#inner.stats; } /** @@ -1756,7 +1742,7 @@ class QuicStream { get session() { QuicStream.#assertIsQuicStream(this); if (this.destroyed) return null; - return this.#session; + return this.#inner.session; } /** @@ -1767,7 +1753,7 @@ class QuicStream { get id() { QuicStream.#assertIsQuicStream(this); if (this.destroyed || this.pending) return null; - return this.#state.id; + return this.#inner.state.id; } 
/** @@ -1778,7 +1764,7 @@ class QuicStream { get direction() { QuicStream.#assertIsQuicStream(this); if (this.destroyed || this.pending) return null; - return this.#direction === kStreamDirectionBidirectional ? 'bidi' : 'uni'; + return this.#inner.direction === kStreamDirectionBidirectional ? 'bidi' : 'uni'; } /** @@ -1796,7 +1782,10 @@ class QuicStream { */ get closed() { QuicStream.#assertIsQuicStream(this); - return this.#pendingClose.promise; + if (this.#inner.pendingClose === undefined) { + this.#inner.pendingClose = PromiseWithResolvers(); + } + return this.#inner.pendingClose.promise; } /** @@ -1832,7 +1821,7 @@ class QuicStream { // `onStreamClose -> [kFinishClose]` path - which does NOT go // through `destroy()` and therefore never sets `#destroying`. // `[kFinishClose]` clears `#handle` at the end of its work. - if (this.#destroying || this.destroyed) return; + if (this.#inner.destroying || this.destroyed) return; // Validate options up front so a malformed `options` argument // throws before any side effects (mutating `#destroying`, // emitting wire frames, invoking `onerror`, settling the closed @@ -1848,7 +1837,7 @@ class QuicStream { if (reason !== undefined) { validateString(reason, 'options.reason'); } - this.#destroying = true; + this.#inner.destroying = true; // Resolve the wire error code for any RESET_STREAM / STOP_SENDING // frames emitted below. let abortCode; @@ -1857,21 +1846,16 @@ class QuicStream { } else if (error !== undefined) { abortCode = error instanceof QuicError ? error.errorCode : - getQuicSessionState(this.#session).internalErrorCode; + getQuicSessionState(this.#inner.session).internalErrorCode; } // When destroying with an error, ensure the peer stops sending // data we are about to discard by emitting STOP_SENDING. The // condition gates the emission to error-path destroys with a - // still-open readable side. Direction model for the readable - // side: - // * bidi: always has a readable side. 
- // * uni + #reader !== undefined: remote-initiated, read-only. - // * uni + #reader === undefined: locally-initiated, write-only; - // no readable side to stop. + // still-open readable side. The C++ state.readEnded flag is + // authoritative -- it is set for locally-initiated uni streams + // (which have no readable side) and when reading completes. if (abortCode !== undefined && - !this.#state.readEnded && - (this.#direction === kStreamDirectionBidirectional || - this.#reader !== undefined)) { + !this.#inner.state.readEnded) { this.#handle.stopSending(abortCode); } // When destroying with an error, ensure the peer learns about @@ -1880,22 +1864,14 @@ class QuicStream { // streams that destroy without ever accessing stream.writer // (e.g. used setBody or never wrote at all) need an explicit // RESET_STREAM here so the write side does not dangle on the - // wire. The condition gates the emission to error-path destroys - // with a still-open writable side. - // Direction model for the writable side: - // * bidi: always has a writable side. - // * uni + #reader === undefined: locally-initiated, write-only. - // * uni + #reader !== undefined: remote-initiated, read-only; - // no writable side to reset. + // wire. The C++ state.writeEnded flag is authoritative. 
if (abortCode !== undefined && - this.#writer === undefined && - !this.#state.writeEnded && - (this.#direction === kStreamDirectionBidirectional || - this.#reader === undefined)) { + this.#inner.writer === undefined && + !this.#inner.state.writeEnded) { this.#handle.resetStream(abortCode); } - if (error !== undefined && typeof this.#onerror === 'function') { - invokeOnerror(this.#onerror, error); + if (error !== undefined && typeof this.#inner.onerror === 'function') { + invokeOnerror(this.#inner.onerror, error); } const handle = this.#handle; this[kFinishClose](error); @@ -1915,7 +1891,7 @@ class QuicStream { if (this.destroyed) { throw new ERR_INVALID_STATE('Stream is destroyed'); } - if (this.#state.hasOutbound) { + if (this.#inner.state.hasOutbound) { throw new ERR_INVALID_STATE('Stream already has an outbound data source'); } this.#handle.attachSource(validateBody(outbound)); @@ -1931,7 +1907,7 @@ class QuicStream { sendHeaders(headers, options = kEmptyObject) { QuicStream.#assertIsQuicStream(this); if (this.destroyed) return false; - if (getQuicSessionState(this.#session).headersSupported === 2) { + if (getQuicSessionState(this.#inner.session).headersSupported === 2) { throw new ERR_INVALID_STATE( 'The negotiated QUIC application protocol does not support headers'); } @@ -1952,7 +1928,7 @@ class QuicStream { sendInformationalHeaders(headers) { QuicStream.#assertIsQuicStream(this); if (this.destroyed) return false; - if (getQuicSessionState(this.#session).headersSupported === 2) { + if (getQuicSessionState(this.#inner.session).headersSupported === 2) { throw new ERR_INVALID_STATE( 'The negotiated QUIC application protocol does not support headers'); } @@ -1973,7 +1949,7 @@ class QuicStream { sendTrailers(headers) { QuicStream.#assertIsQuicStream(this); if (this.destroyed) return false; - if (getQuicSessionState(this.#session).headersSupported === 2) { + if (getQuicSessionState(this.#inner.session).headersSupported === 2) { throw new ERR_INVALID_STATE( 'The 
negotiated QUIC application protocol does not support headers'); } @@ -1991,8 +1967,8 @@ class QuicStream { */ get writer() { QuicStream.#assertIsQuicStream(this); - if (this.#writer !== undefined) return this.#writer; - if (this.#outboundSet) { + if (this.#inner.writer !== undefined) return this.#inner.writer; + if (this.#inner.outboundSet) { throw new ERR_INVALID_STATE( 'Stream outbound already configured with a body source'); } @@ -2023,7 +1999,7 @@ class QuicStream { // more data. Refuse the sync write. // If a drain is already pending, another operation is waiting // for capacity. Refuse the sync write. - if (closed || errored || stream.#state.writeEnded || drainWakeup != null) { + if (closed || errored || stream.#inner.state.writeEnded || drainWakeup != null) { return false; } chunk = toUint8Array(chunk); @@ -2031,7 +2007,7 @@ class QuicStream { if (len === 0) return true; // Refuse the write if the chunk doesn't fit in the available // buffer capacity. The caller should wait for drain and retry. 
- if (len > stream.#state.writeDesiredSize) return false; + if (len > stream.#inner.state.writeDesiredSize) return false; const result = handle.write([chunk]); if (result === undefined) return false; totalBytesWritten += len; @@ -2046,7 +2022,7 @@ class QuicStream { signal.throwIfAborted(); } if (errored) throw error; - if (closed || stream.#state.writeEnded) { + if (closed || stream.#inner.state.writeEnded) { throw new ERR_INVALID_STATE('Writer is closed'); } // If a drain is already pending, another operation is waiting @@ -2063,14 +2039,14 @@ class QuicStream { } function writevSync(chunks) { - if (closed || errored || stream.#state.writeEnded || drainWakeup != null) { + if (closed || errored || stream.#inner.state.writeEnded || drainWakeup != null) { return false; } chunks = convertChunks(chunks); let len = 0; for (const c of chunks) len += TypedArrayPrototypeGetByteLength(c); if (len === 0) return true; - if (len > stream.#state.writeDesiredSize) return false; + if (len > stream.#inner.state.writeDesiredSize) return false; const result = handle.write(chunks); if (result === undefined) return false; totalBytesWritten += len; @@ -2086,7 +2062,7 @@ class QuicStream { } if (errored) throw error; - if (closed || stream.#state.writeEnded) { + if (closed || stream.#inner.state.writeEnded) { throw new ERR_INVALID_STATE('Writer is closed'); } @@ -2179,7 +2155,7 @@ class QuicStream { // `INTERNAL_ERROR` (0x1). const code = error instanceof QuicError ? 
error.errorCode : - getQuicSessionState(stream.#session).internalErrorCode; + getQuicSessionState(stream.#inner.session).internalErrorCode; handle.resetStream(code); if (drainWakeup != null) { drainWakeup.reject(error); @@ -2190,8 +2166,8 @@ class QuicStream { const writer = { __proto__: null, get desiredSize() { - if (closed || errored || stream.#state.writeEnded) return null; - return stream.#state.writeDesiredSize; + if (closed || errored || stream.#inner.state.writeEnded) return null; + return stream.#inner.state.writeDesiredSize; }, writeSync, write, @@ -2204,7 +2180,7 @@ class QuicStream { if (closed || errored) return null; // If a drain is already pending, return the existing promise. if (drainWakeup != null) return drainWakeup.promise; - if (stream.#state.writeDesiredSize > 0) return null; + if (stream.#inner.state.writeDesiredSize > 0) return null; drainWakeup = PromiseWithResolvers(); return drainWakeup.promise; }, @@ -2218,21 +2194,23 @@ class QuicStream { }; // Non-writable stream - return a pre-closed writer. - // A readable unidirectional stream is a remote uni (read-only). - if (!handle || this.destroyed || this.#state.writeEnded || - (this.#direction === kStreamDirectionUnidirectional && - this.#reader !== undefined)) { + // A remote unidirectional stream is read-only and has no writable + // side. isLocal distinguishes locally-initiated (writable) from + // remotely-initiated (read-only) uni streams. 
+ if (!handle || this.destroyed || this.#inner.state.writeEnded || + (this.#inner.direction === kStreamDirectionUnidirectional && + !this.#inner.isLocal)) { closed = true; - this.#writer = writer; - return this.#writer; + this.#inner.writer = writer; + return this.#inner.writer; } // Initialize the outbound DataQueue for streaming writes handle.initStreamingSource(); initStreamingBackpressure(this); - this.#writer = writer; - return this.#writer; + this.#inner.writer = writer; + return this.#inner.writer; } /** @@ -2246,17 +2224,17 @@ class QuicStream { if (this.destroyed) { throw new ERR_INVALID_STATE('Stream is destroyed'); } - if (this.#outboundSet) { + if (this.#inner.outboundSet) { throw new ERR_INVALID_STATE('Stream outbound already configured'); } - if (this.#writer !== undefined) { + if (this.#inner.writer !== undefined) { throw new ERR_INVALID_STATE('Stream writer already accessed'); } - this.#outboundSet = true; + this.#inner.outboundSet = true; // If the body is a FileHandle, store it so it is closed // automatically when the stream finishes. 
if (body instanceof FileHandle) { - this.#fileHandle = body; + this.#inner.fileHandle = body; } configureOutbound(this.#handle, this, body); } @@ -2268,7 +2246,7 @@ class QuicStream { * @param {FileHandle} fh */ [kAttachFileHandle](fh) { - this.#fileHandle = fh; + this.#inner.fileHandle = fh; } /** @@ -2306,7 +2284,7 @@ class QuicStream { get priority() { QuicStream.#assertIsQuicStream(this); if (this.destroyed || - !getQuicSessionState(this.#session).isPrioritySupported) return null; + !getQuicSessionState(this.#inner.session).isPrioritySupported) return null; const packed = this.#handle.getPriority(); const urgency = packed >> 1; const incremental = !!(packed & 1); @@ -2324,7 +2302,7 @@ class QuicStream { setPriority(options = kEmptyObject) { QuicStream.#assertIsQuicStream(this); if (this.destroyed) return; - if (!getQuicSessionState(this.#session).isPrioritySupported) { + if (!getQuicSessionState(this.#inner.session).isPrioritySupported) { throw new ERR_INVALID_STATE( 'The session does not support stream priority'); } @@ -2354,7 +2332,7 @@ class QuicStream { [kSendHeaders](headers, kind = kHeadersKindInitial, flags = kHeadersFlagsTerminal) { validateObject(headers, 'headers'); - if (getQuicSessionState(this.#session).headersSupported === 2) { + if (getQuicSessionState(this.#inner.session).headersSupported === 2) { throw new ERR_INVALID_STATE( 'The negotiated QUIC application protocol does not support headers'); } @@ -2372,27 +2350,21 @@ class QuicStream { } [kFinishClose](error) { - if (this.destroyed) return this.#pendingClose.promise; + this.#inner.pendingClose ??= PromiseWithResolvers(); + if (this.destroyed) { + return this.#inner.pendingClose.promise; + } if (error !== undefined) { - if (this.pending) { - debug(`destroying pending stream with error: ${error}`); - } else { - debug(`destroying stream ${this.id} with error: ${error}`); - } - this.#pendingClose.reject(error); + this.#inner.pendingClose.reject(error); } else { - if (this.pending) { - 
debug('destroying pending stream with no error'); - } else { - debug(`destroying stream ${this.id} with no error`); - } - this.#pendingClose.resolve(); + this.#inner.pendingClose.resolve(); } + debug('stream closed'); if (onStreamClosedChannel.hasSubscribers) { onStreamClosedChannel.publish({ __proto__: null, stream: this, - session: this.#session, + session: this.#inner.session, error, stats: this.stats, }); @@ -2405,46 +2377,44 @@ class QuicStream { }, }); } - this.#stats[kFinishClose](); - this.#state[kFinishClose](); - this.#session[kRemoveStream](this); - if (this.#writer !== undefined) { - this.#writer.fail(error); - } - this.#session = undefined; - this.#pendingClose.reject = undefined; - this.#pendingClose.resolve = undefined; - this.#onblocked = undefined; - this.#onreset = undefined; - this.#onheaders = undefined; - this.#onerror = undefined; - this.#ontrailers = undefined; - this.#oninfo = undefined; - this.#onwanttrailers = undefined; - this.#headers = undefined; - this.#pendingTrailers = undefined; + this.#inner.stats?.[kFinishClose](); + this.#inner.state?.[kFinishClose](); + this.#inner.session[kRemoveStream](this); + this.#inner.writer?.fail(error); + this.#inner.session = undefined; + this.#inner.pendingClose.reject = undefined; + this.#inner.pendingClose.resolve = undefined; + this.#inner.onblocked = undefined; + this.#inner.onreset = undefined; + this.#inner.onheaders = undefined; + this.#inner.onerror = undefined; + this.#inner.ontrailers = undefined; + this.#inner.oninfo = undefined; + this.#inner.onwanttrailers = undefined; + this.#inner.headers = undefined; + this.#inner.pendingTrailers = undefined; this.#handle = undefined; - if (this.#fileHandle !== undefined) { + if (this.#inner.fileHandle !== undefined) { // Close the FileHandle that was used as a body source. The close // may fail if the user already closed it -- that's expected and // harmless, so mark the promise as handled. 
- markPromiseAsHandled(this.#fileHandle.close()); - this.#fileHandle = undefined; + markPromiseAsHandled(this.#inner.fileHandle.close()); + this.#inner.fileHandle = undefined; } } [kBlocked]() { // The blocked event should only be called if the stream was created with // an onblocked callback. The callback should always exist here. - assert(this.#onblocked, 'Unexpected stream blocked event'); + assert(this.#inner.onblocked, 'Unexpected stream blocked event'); if (onStreamBlockedChannel.hasSubscribers) { onStreamBlockedChannel.publish({ __proto__: null, stream: this, - session: this.#session, + session: this.#inner.session, }); } - safeCallbackInvoke(this.#onblocked, this); + safeCallbackInvoke(this.#inner.onblocked, this); } [kDrain]() { @@ -2455,16 +2425,16 @@ class QuicStream { [kReset](error) { // The reset event should only be called if the stream was created with // an onreset callback. The callback should always exist here. - assert(this.#onreset, 'Unexpected stream reset event'); + assert(this.#inner.onreset, 'Unexpected stream reset event'); if (onStreamResetChannel.hasSubscribers) { onStreamResetChannel.publish({ __proto__: null, stream: this, - session: this.#session, + session: this.#inner.session, error, }); } - safeCallbackInvoke(this.#onreset, this, error); + safeCallbackInvoke(this.#inner.onreset, this, error); } [kHeaders](headers, kind) { @@ -2473,41 +2443,41 @@ class QuicStream { switch (kindName) { case 'initial': - assert(this.#onheaders, 'Unexpected stream headers event'); - if (this.#headers === undefined) this.#headers = block; + assert(this.#inner.onheaders, 'Unexpected stream headers event'); + this.#inner.headers ??= block; if (onStreamHeadersChannel.hasSubscribers) { onStreamHeadersChannel.publish({ __proto__: null, stream: this, - session: this.#session, + session: this.#inner.session, headers: block, }); } - safeCallbackInvoke(this.#onheaders, this, block); + safeCallbackInvoke(this.#inner.onheaders, this, block); break; case 
'trailing': if (onStreamTrailersChannel.hasSubscribers) { onStreamTrailersChannel.publish({ __proto__: null, stream: this, - session: this.#session, + session: this.#inner.session, trailers: block, }); } - if (this.#ontrailers) - safeCallbackInvoke(this.#ontrailers, this, block); + if (this.#inner.ontrailers) + safeCallbackInvoke(this.#inner.ontrailers, this, block); break; case 'hints': if (onStreamInfoChannel.hasSubscribers) { onStreamInfoChannel.publish({ __proto__: null, stream: this, - session: this.#session, + session: this.#inner.session, headers: block, }); } - if (this.#oninfo) - safeCallbackInvoke(this.#oninfo, this, block); + if (this.#inner.oninfo) + safeCallbackInvoke(this.#inner.oninfo, this, block); break; } } @@ -2517,11 +2487,11 @@ class QuicStream { // nghttp3 is asking us to provide trailers to send. // Check for pre-set pendingTrailers first, then the callback. - if (this.#pendingTrailers) { - this.sendTrailers(this.#pendingTrailers); - this.#pendingTrailers = undefined; - } else if (this.#onwanttrailers) { - safeCallbackInvoke(this.#onwanttrailers, this); + if (this.#inner.pendingTrailers) { + this.sendTrailers(this.#inner.pendingTrailers); + this.#inner.pendingTrailers = undefined; + } else if (this.#inner.onwanttrailers) { + safeCallbackInvoke(this.#inner.onwanttrailers, this); } } @@ -2541,7 +2511,7 @@ class QuicStream { direction: this.direction, pending: this.pending, stats: this.stats, - state: this.#state, + state: this.#inner.state, session: this.session, }, opts)}`; } @@ -2655,8 +2625,10 @@ class QuicSession { this.#pendingQlog = handle._pendingQlog; handle._pendingQlog = undefined; } - this.#stats = new QuicSessionStats(kPrivateConstructor, handle.stats); - this.#state = new QuicSessionState(kPrivateConstructor, handle.state); + this.#stats = new QuicSessionStats( + kPrivateConstructor, handle.stats, handle.statsByteOffset); + this.#state = new QuicSessionState( + kPrivateConstructor, handle.state, handle.stateByteOffset); if 
(hasObserver('quic')) { startPerf(this, kPerfEntry, { type: 'quic', name: 'QuicSession' }); @@ -3066,7 +3038,8 @@ class QuicSession { handle.setPriority((urgency << 1) | (incremental ? 1 : 0)); } - const stream = new QuicStream(kPrivateConstructor, handle, this, direction); + const stream = new QuicStream( + kPrivateConstructor, handle, this, direction, true /* isLocal */); this.#streams.add(stream); if (typeof this.#onerror === 'function') { markPromiseAsHandled(stream.closed); @@ -3942,8 +3915,8 @@ class QuicEndpoint { const { retryTokenExpiration, tokenExpiration, - maxConnectionsPerHost = 0, - maxConnectionsTotal = 0, + maxConnectionsPerHost = 100, + maxConnectionsTotal = 10_000, maxStatelessResetsPerHost, disableStatelessReset, addressLRUSize, diff --git a/lib/internal/quic/state.js b/lib/internal/quic/state.js index efaccb4aa00527..d32bbcce5c544a 100644 --- a/lib/internal/quic/state.js +++ b/lib/internal/quic/state.js @@ -294,19 +294,24 @@ class QuicEndpointState { class QuicSessionState { /** @type {DataView} */ #handle; + /** @type {number} */ + #offset = 0; /** * @param {symbol} privateSymbol - * @param {ArrayBuffer} buffer + * @param {DataView|ArrayBuffer} view + * @param {number} [byteOffset] */ - constructor(privateSymbol, buffer) { + constructor(privateSymbol, view, byteOffset = 0) { if (privateSymbol !== kPrivateConstructor) { throw new ERR_ILLEGAL_CONSTRUCTOR(); } - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + if (isArrayBuffer(view)) { + this.#handle = new DataView(view); + } else { + this.#handle = view; } - this.#handle = new DataView(buffer); + this.#offset = byteOffset; } // Listener flags are packed into a single uint32_t bitfield. 
The bit @@ -321,15 +326,15 @@ class QuicSessionState { #getListenerFlag(flag) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return !!(DataViewPrototypeGetUint32( - this.#handle, IDX_STATE_SESSION_LISTENER_FLAGS, kIsLittleEndian) & flag); + this.#handle, this.#offset + IDX_STATE_SESSION_LISTENER_FLAGS, kIsLittleEndian) & flag); } #setListenerFlag(flag, val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; const current = DataViewPrototypeGetUint32( - this.#handle, IDX_STATE_SESSION_LISTENER_FLAGS, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_SESSION_LISTENER_FLAGS, kIsLittleEndian); DataViewPrototypeSetUint32( - this.#handle, IDX_STATE_SESSION_LISTENER_FLAGS, + this.#handle, this.#offset + IDX_STATE_SESSION_LISTENER_FLAGS, val ? (current | flag) : (current & ~flag), kIsLittleEndian); } @@ -384,49 +389,49 @@ class QuicSessionState { /** @type {boolean} */ get isClosing() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_CLOSING); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_CLOSING); } /** @type {boolean} */ get isGracefulClose() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_GRACEFUL_CLOSE); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_GRACEFUL_CLOSE); } /** @type {boolean} */ get isSilentClose() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_SILENT_CLOSE); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_SILENT_CLOSE); } /** @type {boolean} */ get isStatelessReset() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, 
IDX_STATE_SESSION_STATELESS_RESET); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_STATELESS_RESET); } /** @type {boolean} */ get isHandshakeCompleted() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_COMPLETED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_HANDSHAKE_COMPLETED); } /** @type {boolean} */ get isHandshakeConfirmed() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HANDSHAKE_CONFIRMED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_HANDSHAKE_CONFIRMED); } /** @type {boolean} */ get isStreamOpenAllowed() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_STREAM_OPEN_ALLOWED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_STREAM_OPEN_ALLOWED); } /** @type {boolean} */ get isPrioritySupported() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_PRIORITY_SUPPORTED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_PRIORITY_SUPPORTED); } /** @@ -436,19 +441,19 @@ class QuicSessionState { */ get headersSupported() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_HEADERS_SUPPORTED); + return DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_HEADERS_SUPPORTED); } /** @type {boolean} */ get isWrapped() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_WRAPPED); + return 
!!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_WRAPPED); } /** @type {number} */ get applicationType() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return DataViewPrototypeGetUint8(this.#handle, IDX_STATE_SESSION_APPLICATION_TYPE); + return DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_SESSION_APPLICATION_TYPE); } /** @@ -461,7 +466,7 @@ class QuicSessionState { get noErrorCode() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return DataViewPrototypeGetBigUint64( - this.#handle, IDX_STATE_SESSION_NO_ERROR_CODE, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_SESSION_NO_ERROR_CODE, kIsLittleEndian); } /** @@ -476,32 +481,36 @@ class QuicSessionState { get internalErrorCode() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return DataViewPrototypeGetBigUint64( - this.#handle, IDX_STATE_SESSION_INTERNAL_ERROR_CODE, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_SESSION_INTERNAL_ERROR_CODE, kIsLittleEndian); } /** @type {number} */ get maxDatagramSize() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return DataViewPrototypeGetUint16(this.#handle, IDX_STATE_SESSION_MAX_DATAGRAM_SIZE, kIsLittleEndian); + return DataViewPrototypeGetUint16( + this.#handle, this.#offset + IDX_STATE_SESSION_MAX_DATAGRAM_SIZE, + kIsLittleEndian); } /** @type {bigint} */ get lastDatagramId() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return DataViewPrototypeGetBigUint64(this.#handle, IDX_STATE_SESSION_LAST_DATAGRAM_ID, kIsLittleEndian); + return DataViewPrototypeGetBigUint64( + this.#handle, this.#offset + IDX_STATE_SESSION_LAST_DATAGRAM_ID, + kIsLittleEndian); } /** @type {number} */ get maxPendingDatagrams() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return DataViewPrototypeGetUint16( - this.#handle, 
IDX_STATE_SESSION_MAX_PENDING_DATAGRAMS, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_SESSION_MAX_PENDING_DATAGRAMS, kIsLittleEndian); } set maxPendingDatagrams(val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; DataViewPrototypeSetUint16( - this.#handle, IDX_STATE_SESSION_MAX_PENDING_DATAGRAMS, val, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_SESSION_MAX_PENDING_DATAGRAMS, val, kIsLittleEndian); } toString() { @@ -587,161 +596,166 @@ class QuicSessionState { class QuicStreamState { /** @type {DataView} */ #handle; + /** @type {number} */ + #offset = 0; /** * @param {symbol} privateSymbol - * @param {ArrayBuffer} buffer + * @param {DataView|ArrayBuffer} view + * @param {number} [byteOffset] */ - constructor(privateSymbol, buffer) { + constructor(privateSymbol, view, byteOffset = 0) { if (privateSymbol !== kPrivateConstructor) { throw new ERR_ILLEGAL_CONSTRUCTOR(); } - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + if (isArrayBuffer(view)) { + this.#handle = new DataView(view); + } else { + this.#handle = view; } - this.#handle = new DataView(buffer); + this.#offset = byteOffset; } /** @type {bigint} */ get id() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return DataViewPrototypeGetBigInt64(this.#handle, IDX_STATE_STREAM_ID, kIsLittleEndian); + return DataViewPrototypeGetBigInt64(this.#handle, this.#offset + IDX_STATE_STREAM_ID, kIsLittleEndian); } /** @type {boolean} */ get pending() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_PENDING); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_PENDING); } /** @type {boolean} */ get finSent() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_SENT); + return 
!!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_FIN_SENT); } /** @type {boolean} */ get finReceived() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_FIN_RECEIVED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_FIN_RECEIVED); } /** @type {boolean} */ get readEnded() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_READ_ENDED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_READ_ENDED); } /** @type {boolean} */ get writeEnded() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WRITE_ENDED); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WRITE_ENDED); } /** @type {boolean} */ get reset() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_RESET); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_RESET); } /** @type {boolean} */ get hasOutbound() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_HAS_OUTBOUND); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_HAS_OUTBOUND); } /** @type {boolean} */ get hasReader() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_HAS_READER); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_HAS_READER); } /** @type {boolean} */ get wantsBlock() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return 
!!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_BLOCK); } /** @type {boolean} */ set wantsBlock(val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_BLOCK, val ? 1 : 0); + DataViewPrototypeSetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_BLOCK, val ? 1 : 0); } /** @type {boolean} */ get [kWantsHeaders]() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_HEADERS); } /** @type {boolean} */ set [kWantsHeaders](val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_HEADERS, val ? 1 : 0); + DataViewPrototypeSetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_HEADERS, val ? 1 : 0); } /** @type {boolean} */ get wantsReset() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_RESET); } /** @type {boolean} */ set wantsReset(val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_RESET, val ? 1 : 0); + DataViewPrototypeSetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_RESET, val ? 
1 : 0); } /** @type {boolean} */ get [kWantsTrailers]() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_TRAILERS); } /** @type {boolean} */ set [kWantsTrailers](val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; - DataViewPrototypeSetUint8(this.#handle, IDX_STATE_STREAM_WANTS_TRAILERS, val ? 1 : 0); + DataViewPrototypeSetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_WANTS_TRAILERS, val ? 1 : 0); } /** @type {boolean} */ get early() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; - return !!DataViewPrototypeGetUint8(this.#handle, IDX_STATE_STREAM_RECEIVED_EARLY_DATA); + return !!DataViewPrototypeGetUint8(this.#handle, this.#offset + IDX_STATE_STREAM_RECEIVED_EARLY_DATA); } /** @type {bigint} */ get resetCode() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return DataViewPrototypeGetBigUint64( - this.#handle, IDX_STATE_STREAM_RESET_CODE, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_STREAM_RESET_CODE, kIsLittleEndian); } /** @type {bigint} */ get writeDesiredSize() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return DataViewPrototypeGetUint32( - this.#handle, IDX_STATE_STREAM_WRITE_DESIRED_SIZE, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_STREAM_WRITE_DESIRED_SIZE, kIsLittleEndian); } set writeDesiredSize(val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; DataViewPrototypeSetUint32( - this.#handle, IDX_STATE_STREAM_WRITE_DESIRED_SIZE, val, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_STREAM_WRITE_DESIRED_SIZE, val, kIsLittleEndian); } /** @type {number} */ get highWaterMark() { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return undefined; return DataViewPrototypeGetUint32( - 
this.#handle, IDX_STATE_STREAM_HIGH_WATER_MARK, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_STREAM_HIGH_WATER_MARK, kIsLittleEndian); } set highWaterMark(val) { if (DataViewPrototypeGetByteLength(this.#handle) === 0) return; DataViewPrototypeSetUint32( - this.#handle, IDX_STATE_STREAM_HIGH_WATER_MARK, val, kIsLittleEndian); + this.#handle, this.#offset + IDX_STATE_STREAM_HIGH_WATER_MARK, val, kIsLittleEndian); } toString() { diff --git a/lib/internal/quic/stats.js b/lib/internal/quic/stats.js index 280cf5a26f419b..27a986f4d0549e 100644 --- a/lib/internal/quic/stats.js +++ b/lib/internal/quic/stats.js @@ -332,163 +332,168 @@ class QuicSessionStats { * @param {symbol} privateSymbol * @param {ArrayBuffer} buffer */ - constructor(privateSymbol, buffer) { + /** @type {number} */ + #offset = 0; + + constructor(privateSymbol, view, byteOffset = 0) { // We use the kPrivateConstructor symbol to restrict the ability to // create new instances of QuicSessionStats to internal code. if (privateSymbol !== kPrivateConstructor) { throw new ERR_ILLEGAL_CONSTRUCTOR(); } - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + if (isArrayBuffer(view)) { + this.#handle = new BigUint64Array(view); + } else { + this.#handle = view; } - this.#handle = new BigUint64Array(buffer); + this.#offset = byteOffset / 8; } /** @type {bigint} */ get createdAt() { - return this.#handle[IDX_STATS_SESSION_CREATED_AT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_CREATED_AT]; } /** @type {bigint} */ get destroyedAt() { - return this.#handle[IDX_STATS_SESSION_DESTROYED_AT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_DESTROYED_AT]; } /** @type {bigint} */ get closingAt() { - return this.#handle[IDX_STATS_SESSION_CLOSING_AT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_CLOSING_AT]; } /** @type {bigint} */ get handshakeCompletedAt() { - return this.#handle[IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT]; + return 
this.#handle[this.#offset + IDX_STATS_SESSION_HANDSHAKE_COMPLETED_AT]; } /** @type {bigint} */ get handshakeConfirmedAt() { - return this.#handle[IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_HANDSHAKE_CONFIRMED_AT]; } /** @type {bigint} */ get bytesReceived() { - return this.#handle[IDX_STATS_SESSION_BYTES_RECEIVED]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BYTES_RECEIVED]; } /** @type {bigint} */ get bidiInStreamCount() { - return this.#handle[IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BIDI_IN_STREAM_COUNT]; } /** @type {bigint} */ get bidiOutStreamCount() { - return this.#handle[IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BIDI_OUT_STREAM_COUNT]; } /** @type {bigint} */ get uniInStreamCount() { - return this.#handle[IDX_STATS_SESSION_UNI_IN_STREAM_COUNT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_UNI_IN_STREAM_COUNT]; } /** @type {bigint} */ get uniOutStreamCount() { - return this.#handle[IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_UNI_OUT_STREAM_COUNT]; } /** @type {bigint} */ get maxBytesInFlight() { - return this.#handle[IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_MAX_BYTES_IN_FLIGHT]; } /** @type {bigint} */ get bytesInFlight() { - return this.#handle[IDX_STATS_SESSION_BYTES_IN_FLIGHT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BYTES_IN_FLIGHT]; } /** @type {bigint} */ get blockCount() { - return this.#handle[IDX_STATS_SESSION_BLOCK_COUNT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BLOCK_COUNT]; } /** @type {bigint} */ get cwnd() { - return this.#handle[IDX_STATS_SESSION_CWND]; + return this.#handle[this.#offset + IDX_STATS_SESSION_CWND]; } /** @type {bigint} */ get latestRtt() { - return this.#handle[IDX_STATS_SESSION_LATEST_RTT]; + return 
this.#handle[this.#offset + IDX_STATS_SESSION_LATEST_RTT]; } /** @type {bigint} */ get minRtt() { - return this.#handle[IDX_STATS_SESSION_MIN_RTT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_MIN_RTT]; } /** @type {bigint} */ get rttVar() { - return this.#handle[IDX_STATS_SESSION_RTTVAR]; + return this.#handle[this.#offset + IDX_STATS_SESSION_RTTVAR]; } /** @type {bigint} */ get smoothedRtt() { - return this.#handle[IDX_STATS_SESSION_SMOOTHED_RTT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_SMOOTHED_RTT]; } /** @type {bigint} */ get ssthresh() { - return this.#handle[IDX_STATS_SESSION_SSTHRESH]; + return this.#handle[this.#offset + IDX_STATS_SESSION_SSTHRESH]; } get pktSent() { - return this.#handle[IDX_STATS_SESSION_PKT_SENT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_PKT_SENT]; } get bytesSent() { - return this.#handle[IDX_STATS_SESSION_BYTES_SENT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BYTES_SENT]; } get pktRecv() { - return this.#handle[IDX_STATS_SESSION_PKT_RECV]; + return this.#handle[this.#offset + IDX_STATS_SESSION_PKT_RECV]; } get bytesRecv() { - return this.#handle[IDX_STATS_SESSION_BYTES_RECV]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BYTES_RECV]; } get pktLost() { - return this.#handle[IDX_STATS_SESSION_PKT_LOST]; + return this.#handle[this.#offset + IDX_STATS_SESSION_PKT_LOST]; } get bytesLost() { - return this.#handle[IDX_STATS_SESSION_BYTES_LOST]; + return this.#handle[this.#offset + IDX_STATS_SESSION_BYTES_LOST]; } get pingRecv() { - return this.#handle[IDX_STATS_SESSION_PING_RECV]; + return this.#handle[this.#offset + IDX_STATS_SESSION_PING_RECV]; } get pktDiscarded() { - return this.#handle[IDX_STATS_SESSION_PKT_DISCARDED]; + return this.#handle[this.#offset + IDX_STATS_SESSION_PKT_DISCARDED]; } /** @type {bigint} */ get datagramsReceived() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_RECEIVED]; + return this.#handle[this.#offset + IDX_STATS_SESSION_DATAGRAMS_RECEIVED]; } 
/** @type {bigint} */ get datagramsSent() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_SENT]; + return this.#handle[this.#offset + IDX_STATS_SESSION_DATAGRAMS_SENT]; } /** @type {bigint} */ get datagramsAcknowledged() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED]; + return this.#handle[this.#offset + IDX_STATS_SESSION_DATAGRAMS_ACKNOWLEDGED]; } /** @type {bigint} */ get datagramsLost() { - return this.#handle[IDX_STATS_SESSION_DATAGRAMS_LOST]; + return this.#handle[this.#offset + IDX_STATS_SESSION_DATAGRAMS_LOST]; } toString() { @@ -590,9 +595,14 @@ class QuicSessionStats { } [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. - this.#handle = new BigUint64Array(this.#handle); + // Snapshot this session's stats slice into a standalone BigUint64Array. + const count = IDX_STATS_SESSION_DATAGRAMS_LOST + 1; + const snapshot = new BigUint64Array(count); + for (let i = 0; i < count; i++) { + snapshot[i] = this.#handle[this.#offset + i]; + } + this.#handle = snapshot; + this.#offset = 0; this.#disconnected = true; } } @@ -600,78 +610,84 @@ class QuicSessionStats { class QuicStreamStats { /** @type {BigUint64Array} */ #handle; + /** @type {number} */ + #offset = 0; /** type {boolean} */ #disconnected = false; /** * @param {symbol} privateSymbol - * @param {ArrayBuffer} buffer + * @param {BigUint64Array|ArrayBuffer} view + * @param {number} [byteOffset] - byte offset into the shared page view */ - constructor(privateSymbol, buffer) { + constructor(privateSymbol, view, byteOffset = 0) { // We use the kPrivateConstructor symbol to restrict the ability to // create new instances of QuicStreamStats to internal code. 
if (privateSymbol !== kPrivateConstructor) { throw new ERR_ILLEGAL_CONSTRUCTOR(); } - if (!isArrayBuffer(buffer)) { - throw new ERR_INVALID_ARG_TYPE('buffer', ['ArrayBuffer'], buffer); + if (isArrayBuffer(view)) { + this.#handle = new BigUint64Array(view); + } else { + this.#handle = view; } - this.#handle = new BigUint64Array(buffer); + // Convert byte offset to element offset (8 bytes per uint64). + this.#offset = byteOffset / 8; } /** @type {bigint} */ get createdAt() { - return this.#handle[IDX_STATS_STREAM_CREATED_AT]; + return this.#handle[this.#offset + IDX_STATS_STREAM_CREATED_AT]; } /** @type {bigint} */ get openedAt() { - return this.#handle[IDX_STATS_STREAM_OPENED_AT]; + return this.#handle[this.#offset + IDX_STATS_STREAM_OPENED_AT]; } /** @type {bigint} */ get receivedAt() { - return this.#handle[IDX_STATS_STREAM_RECEIVED_AT]; + return this.#handle[this.#offset + IDX_STATS_STREAM_RECEIVED_AT]; } /** @type {bigint} */ get ackedAt() { - return this.#handle[IDX_STATS_STREAM_ACKED_AT]; + return this.#handle[this.#offset + IDX_STATS_STREAM_ACKED_AT]; } /** @type {bigint} */ get destroyedAt() { - return this.#handle[IDX_STATS_STREAM_DESTROYED_AT]; + return this.#handle[this.#offset + IDX_STATS_STREAM_DESTROYED_AT]; } /** @type {bigint} */ get bytesReceived() { - return this.#handle[IDX_STATS_STREAM_BYTES_RECEIVED]; + return this.#handle[this.#offset + IDX_STATS_STREAM_BYTES_RECEIVED]; } /** @type {bigint} */ get bytesSent() { - return this.#handle[IDX_STATS_STREAM_BYTES_SENT]; + return this.#handle[this.#offset + IDX_STATS_STREAM_BYTES_SENT]; } /** @type {bigint} */ get maxOffset() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET]; + return this.#handle[this.#offset + IDX_STATS_STREAM_MAX_OFFSET]; } /** @type {bigint} */ get maxOffsetAcknowledged() { - return this.#handle[IDX_STATS_STREAM_MAX_OFFSET_ACK]; + return this.#handle[this.#offset + IDX_STATS_STREAM_MAX_OFFSET_ACK]; } /** @type {bigint} */ get maxOffsetReceived() { - return 
this.#handle[IDX_STATS_STREAM_MAX_OFFSET_RECV]; + return this.#handle[this.#offset + IDX_STATS_STREAM_MAX_OFFSET_RECV]; } /** @type {bigint} */ get finalSize() { - return this.#handle[IDX_STATS_STREAM_FINAL_SIZE]; + return this.#handle[this.#offset + IDX_STATS_STREAM_FINAL_SIZE]; } toString() { @@ -735,9 +751,14 @@ class QuicStreamStats { } [kFinishClose]() { - // Snapshot the stats into a new BigUint64Array since the underlying - // buffer will be destroyed. - this.#handle = new BigUint64Array(this.#handle); + // Snapshot this stream's stats slice into a standalone BigUint64Array. + const count = IDX_STATS_STREAM_FINAL_SIZE + 1; + const snapshot = new BigUint64Array(count); + for (let i = 0; i < count; i++) { + snapshot[i] = this.#handle[this.#offset + i]; + } + this.#handle = snapshot; + this.#offset = 0; this.#disconnected = true; } } diff --git a/src/aliased_struct-inl.h b/src/aliased_struct-inl.h index 17d5ff58097e22..ff70f423eb1bd1 100644 --- a/src/aliased_struct-inl.h +++ b/src/aliased_struct-inl.h @@ -47,6 +47,95 @@ AliasedStruct::~AliasedStruct() { if (ptr_ != nullptr) ptr_->~T(); } +// --------------------------------------------------------------------------- +// AliasedStructArena implementation +// --------------------------------------------------------------------------- + +template +typename AliasedStructArena::Page* +AliasedStructArena::FindOrCreatePage(v8::Isolate* isolate) { + for (auto& p : pages_) { + if (p->HasFreeSlots()) return p.get(); + } + auto p = std::make_unique(); + p->Init(isolate); + Page* raw = p.get(); + pages_.push_back(std::move(p)); + return raw; +} + +template +template +typename AliasedStructArena::Slot +AliasedStructArena::Allocate(v8::Isolate* isolate, + Args&&... args) { + Page* page = FindOrCreatePage(isolate); + DCHECK(page->HasFreeSlots()); + + uint32_t idx = page->free_head; + T* raw = &page->base[idx]; + + // Advance freelist before placement new overwrites the linkage. 
+ page->free_head = *reinterpret_cast(raw); + page->used_count++; + + // Placement-construct T in the slot. + T* ptr = new (raw) T(std::forward(args)...); + + Slot slot; + slot.page = static_cast(page); + slot.ptr = static_cast(ptr); + slot.index = idx; + slot.byte_offset = reinterpret_cast(ptr) - + static_cast(page->store->Data()); + return slot; +} + +template +void AliasedStructArena::Release( + typename AliasedStructArena::Slot&& slot) { + if (!slot) return; + auto* page = static_cast(slot.page); + auto* ptr = static_cast(slot.ptr); + uint32_t idx = slot.index; + + // Destruct and zero so JS views see clean data. + ptr->~T(); + memset(ptr, 0, sizeof(T)); + + // Push onto page freelist. + *reinterpret_cast(ptr) = page->free_head; + page->free_head = idx; + page->used_count--; + + slot.page = nullptr; + slot.ptr = nullptr; + + // Drop empty pages. The shared_ptr ensures the + // underlying memory stays alive until V8 GCs any remaining JS + // references to the page's ArrayBuffer/views. 
+ if (page->used_count == 0) { + for (auto it = pages_.begin(); it != pages_.end(); ++it) { + if (it->get() == page) { + pages_.erase(it); + break; + } + } + } +} + +template +void AliasedStructArena::ReleaseSlot(ArenaSlotBase& base) { + Slot slot; + slot.page = base.page; + slot.ptr = base.ptr; + slot.index = base.index; + slot.byte_offset = base.byte_offset; + Release(std::move(slot)); + base.page = nullptr; + base.ptr = nullptr; +} + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/aliased_struct.h b/src/aliased_struct.h index e4df393f4985a3..97753192723feb 100644 --- a/src/aliased_struct.h +++ b/src/aliased_struct.h @@ -3,9 +3,10 @@ #if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS +#include +#include #include "node_internals.h" #include "v8.h" -#include namespace node { @@ -56,6 +57,190 @@ class AliasedStruct final { v8::Global buffer_; }; +// --------------------------------------------------------------------------- +// ArenaSlot — type-erased handle to a slot in an AliasedStructArena page. +// This can be stored in headers where T is incomplete. The typed accessors +// are provided via a thin typed wrapper (AliasedStructArena::Slot). +struct ArenaSlotBase { + // Opaque page pointer — only the arena knows the concrete type. + void* page = nullptr; + void* ptr = nullptr; + uint32_t index = 0; + size_t byte_offset = 0; + + explicit operator bool() const { return ptr != nullptr; } + + // Returns the page's ArrayBuffer. Implemented below after ArenaPageHeader. + v8::Local GetArrayBuffer(v8::Isolate* isolate) const; + + size_t GetByteOffset() const { return byte_offset; } + + // Returns the page's cached DataView over the full page. + // Callers use byte_offset to index into the correct slot region. + v8::Local GetPageDataView(v8::Isolate* isolate) const; + + // Returns the page's cached BigUint64Array over the full page. 
+ // Callers use byte_offset / sizeof(uint64_t) to index into the + // correct slot region. + v8::Local GetPageBigUint64Array( + v8::Isolate* isolate) const; +}; + +// --------------------------------------------------------------------------- +// AliasedStructArena — pool allocator for AliasedStruct-style shared +// memory. Instead of creating a separate ArrayBuffer + BackingStore per +// instance, the arena pre-allocates pages of N slots backed by a single +// ArrayBuffer each. Callers receive a Slot handle that provides the same +// T*/operator-> interface as AliasedStruct, plus the ability to create a +// JS typed-array view over just that slot's region of the page buffer. +// +// Pages target kPageBytes (default 16 KB) for L1 cache residency during +// sequential access patterns. Slots are recycled via an intrusive +// freelist, and empty pages are dropped when their last slot is released. +// +// Usage: +// AliasedStructArena arena; +// auto slot = arena.Allocate(isolate); +// slot->some_field = 42; +// auto view = slot.GetArrayBuffer(isolate); // JS-visible view +// ... +// arena.Release(std::move(slot)); // return to freelist +// +template +class AliasedStructArena final { + public: + static constexpr size_t kSlotsPerPage = kPageBytes / sizeof(T); + static_assert(kSlotsPerPage >= 4, "Page too small for type T"); + static_assert(sizeof(T) >= sizeof(uint32_t), + "T must be at least 4 bytes for freelist linkage"); + + AliasedStructArena() = default; + ~AliasedStructArena() = default; + + AliasedStructArena(const AliasedStructArena&) = delete; + AliasedStructArena& operator=(const AliasedStructArena&) = delete; + + struct Page { + std::shared_ptr store; + v8::Global buffer; + // Lazily created full-page views shared by all slots in + // this page. Typically only one is used per arena. 
+ v8::Global data_view; + v8::Global big_uint64_array; + size_t page_byte_length = 0; + T* base = nullptr; + uint32_t free_head = 0; + uint32_t used_count = 0; + static constexpr uint32_t kNoFreeSlot = UINT32_MAX; + + void Init(v8::Isolate* isolate) { + const v8::HandleScope handle_scope(isolate); + const size_t total_bytes = kSlotsPerPage * sizeof(T); + store = v8::ArrayBuffer::NewBackingStore(isolate, total_bytes); + memset(store->Data(), 0, total_bytes); + base = static_cast(store->Data()); + page_byte_length = total_bytes; + v8::Local ab = v8::ArrayBuffer::New(isolate, store); + buffer = v8::Global(isolate, ab); + + // Build freelist: each slot points to the next. + for (uint32_t i = 0; i < kSlotsPerPage - 1; i++) { + *reinterpret_cast(&base[i]) = i + 1; + } + *reinterpret_cast(&base[kSlotsPerPage - 1]) = kNoFreeSlot; + free_head = 0; + used_count = 0; + } + + bool HasFreeSlots() const { return free_head != kNoFreeSlot; } + }; + + // Typed slot handle — wraps ArenaSlotBase with T* accessors. + class Slot : public ArenaSlotBase { + public: + Slot() = default; + + const T& operator*() const { return *static_cast(ptr); } + T& operator*() { return *static_cast(ptr); } + const T* operator->() const { return static_cast(ptr); } + T* operator->() { return static_cast(ptr); } + T* Data() { return static_cast(ptr); } + const T* Data() const { return static_cast(ptr); } + }; + + // Allocate a slot, placement-constructing T with the given args. + // Creates a new page if all existing pages are full. + template + Slot Allocate(v8::Isolate* isolate, Args&&... args); + + // Release a slot back to the arena freelist. Calls ~T() and zeros + // the memory so that any JS views see clean data. + void Release(Slot&& slot); + + // Release a slot given a type-erased ArenaSlotBase reference. + // Convenience for callers that store ArenaSlotBase in headers where + // T is incomplete. 
+ void ReleaseSlot(ArenaSlotBase& base); + + private: + Page* FindOrCreatePage(v8::Isolate* isolate); + + std::vector> pages_; +}; + +// ArenaSlotBase accessors need to reach the v8::Globals inside a Page. +// All AliasedStructArena::Page types share the same leading layout. +// The page_byte_length field allows lazy view creation without knowing T. +namespace detail { +struct ArenaPageHeader { + std::shared_ptr store; + v8::Global buffer; + v8::Global data_view; + v8::Global big_uint64_array; + size_t page_byte_length = 0; + + v8::Local GetDataView(v8::Isolate* isolate) { + if (data_view.IsEmpty()) { + const v8::HandleScope handle_scope(isolate); + auto dv = v8::DataView::New(buffer.Get(isolate), 0, page_byte_length); + data_view = v8::Global(isolate, dv); + } + return data_view.Get(isolate); + } + + v8::Local GetBigUint64Array(v8::Isolate* isolate) { + if (big_uint64_array.IsEmpty()) { + const v8::HandleScope handle_scope(isolate); + auto bu = v8::BigUint64Array::New( + buffer.Get(isolate), 0, page_byte_length / sizeof(uint64_t)); + big_uint64_array = v8::Global(isolate, bu); + } + return big_uint64_array.Get(isolate); + } +}; +} // namespace detail + +inline v8::Local ArenaSlotBase::GetArrayBuffer( + v8::Isolate* isolate) const { + DCHECK_NOT_NULL(page); + auto* header = static_cast(page); + return header->buffer.Get(isolate); +} + +inline v8::Local ArenaSlotBase::GetPageDataView( + v8::Isolate* isolate) const { + DCHECK_NOT_NULL(page); + auto* header = static_cast(page); + return header->GetDataView(isolate); +} + +inline v8::Local ArenaSlotBase::GetPageBigUint64Array( + v8::Isolate* isolate) const { + DCHECK_NOT_NULL(page); + auto* header = static_cast(page); + return header->GetBigUint64Array(isolate); +} + } // namespace node #endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS diff --git a/src/quic/application.cc b/src/quic/application.cc index b5d8c8609fa3dc..a44a60f4b48cc8 100644 --- a/src/quic/application.cc +++ b/src/quic/application.cc @@ 
-239,7 +239,8 @@ void Session::Application::ReceiveStreamReset(Stream* stream, // < 0 (other): fatal error, session already closed ssize_t Session::Application::TryWritePendingDatagram(PathStorage* path, uint8_t* dest, - size_t destlen) { + size_t destlen, + uint64_t ts) { CHECK(session_->HasPendingDatagrams()); auto max_attempts = session_->config().options.max_datagram_send_attempts; @@ -262,9 +263,12 @@ ssize_t Session::Application::TryWritePendingDatagram(PathStorage* path, int accepted = 0; int dg_flags = NGTCP2_WRITE_DATAGRAM_FLAG_MORE; + // PacketInfo for the datagram path. When libuv gains per-socket ECN + // marking, the value from ngtcp2 should be forwarded to the send path. + PacketInfo dg_pi; ssize_t dg_nwrite = ngtcp2_conn_writev_datagram(*session_, &path->path, - nullptr, + dg_pi, dest, destlen, &accepted, @@ -272,7 +276,7 @@ ssize_t Session::Application::TryWritePendingDatagram(PathStorage* path, dg.id, &dgvec, 1, - uv_hrtime()); + ts); if (accepted) { // Nice, the datagram was accepted! @@ -329,14 +333,42 @@ void Session::Application::SendPendingData() { if (!session().can_send_packets()) [[unlikely]] { return; } - static constexpr size_t kMaxPackets = 32; + // Upper bound on packets per SendPendingData call. ngtcp2's send quantum + // is typically 64 KB, which at 1200-byte minimum packet size is ~53 + // packets. 64 covers the worst case with headroom. The actual count per + // call is dynamically capped by ngtcp2_conn_get_send_quantum(). + static constexpr size_t kMaxPackets = 64; Debug(session_, "Application sending pending data"); + // Cache the timestamp once for the entire send loop. ngtcp2 does not + // require nanosecond-accurate monotonicity within a single burst — + // a single timestamp per SendPendingData call is what other QUIC + // implementations use (e.g., quiche, msquic). When kernel-level + // packet pacing becomes available via libuv, this timestamp becomes + // the base for computing per-packet transmit timestamps. 
+ const uint64_t ts = uv_hrtime(); PathStorage path; StreamData stream_data; bool closed = false; + + // Batch accumulation: packets are collected here and flushed via + // Session::SendBatch when the loop exits, the batch is full, or + // on early return. This enables synchronous batched delivery via + // uv_udp_try_send2 (sendmmsg) from the deferred flush path. + Packet::Ptr batch[kMaxPackets]; + PathStorage batch_paths[kMaxPackets]; + size_t batch_count = 0; + + auto flush_batch = [&] { + if (batch_count == 0) return; + session_->SendBatch(batch, batch_paths, batch_count); + batch_count = 0; + }; + auto update_stats = OnScopeLeave([&] { if (closed) return; + // Flush any remaining accumulated packets before updating stats. + flush_batch(); auto& s = session(); if (!s.is_destroyed()) [[likely]] { s.UpdatePacketTxTime(); @@ -353,7 +385,7 @@ void Session::Application::SendPendingData() { kMaxPackets, ngtcp2_conn_get_send_quantum(*session_) / max_packet_size); if (max_packet_count == 0) return; - // The number of packets that have been sent in this call to SendPendingData. + // The number of packets that have been prepared in this call. size_t packet_send_count = 0; Packet::Ptr packet; @@ -368,6 +400,16 @@ void Session::Application::SendPendingData() { return true; }; + // Accumulate a completed packet into the batch. + auto enqueue_packet = + [&](Packet::Ptr& pkt, size_t len, const PacketInfo& pi) { + Debug(session_, "Enqueuing packet with %zu bytes into batch", len); + pkt->Truncate(len); + pkt->set_pkt_info(pi); + path.CopyTo(&batch_paths[batch_count]); + batch[batch_count++] = std::move(pkt); + }; + // We're going to enter a loop here to prepare and send no more than // max_packet_count packets. for (;;) { @@ -405,8 +447,14 @@ void Session::Application::SendPendingData() { } // Awesome, let's write our packet! 
- ssize_t nwrite = WriteVStream( - &path, packet->data(), &ndatalen, packet->length(), stream_data); + PacketInfo pi; + ssize_t nwrite = WriteVStream(&path, + &pi, + packet->data(), + &ndatalen, + packet->length(), + stream_data, + ts); // When ndatalen is > 0, that's our indication that stream data was accepted // in to the packet. Yay! @@ -493,7 +541,7 @@ void Session::Application::SendPendingData() { // if there is one. Otherwise just loop around and keep going. if (session_->HasPendingDatagrams()) { auto result = TryWritePendingDatagram( - &path, packet->data(), packet->length()); + &path, packet->data(), packet->length(), ts); // When result is 0, either the datagram was congestion controlled, // didn't fit in the packet, or was abandoned. Skip and continue. @@ -502,8 +550,7 @@ void Session::Application::SendPendingData() { if (result > 0) { size_t len = result; Debug(session_, "Sending packet with %zu bytes", len); - packet->Truncate(len); - session_->Send(std::move(packet), path); + enqueue_packet(packet, len, pi); if (++packet_send_count == max_packet_count) return; } else if (result < 0) { // Any negative result other than NGTCP2_ERR_WRITE_MORE @@ -540,8 +587,7 @@ void Session::Application::SendPendingData() { // is the size of the packet we are sending. size_t len = nwrite; Debug(session_, "Sending packet with %zu bytes", len); - packet->Truncate(len); - session_->Send(std::move(packet), path); + enqueue_packet(packet, len, pi); if (++packet_send_count == max_packet_count) return; // If there are pending datagrams, try sending them in a fresh packet. 
@@ -557,11 +603,10 @@ void Session::Application::SendPendingData() { return session_->Close(CloseMethod::SILENT); } auto result = - TryWritePendingDatagram(&path, packet->data(), packet->length()); + TryWritePendingDatagram(&path, packet->data(), packet->length(), ts); if (result > 0) { Debug(session_, "Sending datagram packet with %zd bytes", result); - packet->Truncate(static_cast(result)); - session_->Send(std::move(packet), path); + enqueue_packet(packet, static_cast(result), PacketInfo()); if (++packet_send_count == max_packet_count) return; } else if (result < 0 && result != NGTCP2_ERR_WRITE_MORE) { // Fatal error — session already closed by TryWritePendingDatagram. @@ -574,17 +619,21 @@ void Session::Application::SendPendingData() { } ssize_t Session::Application::WriteVStream(PathStorage* path, + PacketInfo* pi, uint8_t* dest, ssize_t* ndatalen, size_t max_packet_size, - const StreamData& stream_data) { + const StreamData& stream_data, + uint64_t ts) { DCHECK_LE(stream_data.count, kMaxVectorCount); uint32_t flags = NGTCP2_WRITE_STREAM_FLAG_MORE; if (stream_data.fin) flags |= NGTCP2_WRITE_STREAM_FLAG_FIN; + // The PacketInfo out-param is populated by ngtcp2 with the ECN codepoint + // to apply when sending this packet. When libuv gains per-socket ECN + // marking, the value should be forwarded to the send path. 
return ngtcp2_conn_writev_stream(*session_, &path->path, - // TODO(@jasnell): ECN blocked on libuv - nullptr, + *pi, dest, max_packet_size, ndatalen, @@ -592,7 +641,7 @@ ssize_t Session::Application::WriteVStream(PathStorage* path, stream_data.id, stream_data, stream_data.count, - uv_hrtime()); + ts); } // ============================================================================ diff --git a/src/quic/application.h b/src/quic/application.h index 673a4000e4ba2d..59583b941b95b4 100644 --- a/src/quic/application.h +++ b/src/quic/application.h @@ -267,14 +267,19 @@ class Session::Application : public MemoryRetainer { // the datagram is either congestion limited or was abandoned ssize_t TryWritePendingDatagram(PathStorage* path, uint8_t* dest, - size_t destlen); + size_t destlen, + uint64_t ts); - // Write the given stream_data into the buffer. + // Write the given stream_data into the buffer. The PacketInfo out-param + // is populated by ngtcp2 with per-packet metadata (e.g., ECN codepoint) + // that should be applied when sending the packet. ssize_t WriteVStream(PathStorage* path, + PacketInfo* pi, uint8_t* buf, ssize_t* ndatalen, size_t max_packet_size, - const StreamData& stream_data); + const StreamData& stream_data, + uint64_t ts); Session* session_ = nullptr; }; diff --git a/src/quic/bindingdata.cc b/src/quic/bindingdata.cc index 4a3b3dba11f196..a2719d683c6e92 100644 --- a/src/quic/bindingdata.cc +++ b/src/quic/bindingdata.cc @@ -22,6 +22,7 @@ namespace node { using mem::kReserveSizeAndAlign; using v8::Function; using v8::FunctionTemplate; +using v8::HandleScope; using v8::Local; using v8::Object; using v8::String; @@ -154,6 +155,16 @@ BindingData& BindingData::Get(Environment* env) { BindingData::~BindingData() { quic_alloc_state.binding = nullptr; + if (flush_check_initialized_) { + uv_check_stop(&flush_check_); + flush_check_started_ = false; + // The check handle is closed inline here. 
Because BindingData destruction + // happens during Environment cleanup, the handle will be finalized by + // libuv's close phase. + uv_close(reinterpret_cast(&flush_check_), nullptr); + flush_check_initialized_ = false; + } + pending_flush_sessions_.clear(); } ngtcp2_mem* BindingData::ngtcp2_allocator() { @@ -221,6 +232,11 @@ void BindingData::RegisterExternalReferences( BindingData::BindingData(Realm* realm, Local object) : BaseObject(realm, object) { MakeWeak(); + CHECK_EQ(uv_check_init(env()->event_loop(), &flush_check_), 0); + flush_check_.data = this; + // Unref so the check handle doesn't keep the event loop alive on its own. + uv_unref(reinterpret_cast(&flush_check_)); + flush_check_initialized_ = true; } SessionManager& BindingData::session_manager() { @@ -230,6 +246,45 @@ SessionManager& BindingData::session_manager() { return *session_manager_; } +void BindingData::ScheduleSessionFlush(const BaseObjectPtr& session) { + pending_flush_sessions_.push_back(session); + if (!flush_check_started_) { + uv_check_start(&flush_check_, OnFlushCheck); + flush_check_started_ = true; + } +} + +void BindingData::OnFlushCheck(uv_check_t* handle) { + auto* binding = static_cast(handle->data); + if (binding->pending_flush_sessions_.empty()) { + uv_check_stop(&binding->flush_check_); + binding->flush_check_started_ = false; + return; + } + + HandleScope scope(binding->env()->isolate()); + + // Swap to a local vector before iterating. SendPendingData may trigger + // MakeCallback which runs JS that could cause more packet receives via + // re-entry (e.g., a stream data callback that synchronously writes to + // another session). Any sessions added during the flush remain in + // pending_flush_sessions_ and are picked up on the next check tick. 
+ auto sessions = std::move(binding->pending_flush_sessions_); + for (auto& session : sessions) { + session->pending_flush_ = false; + if (!session->is_destroyed()) { + session->FlushPendingData(); + } + } + + // If no new sessions were added during the flush, stop the check + // to avoid per-tick callback overhead when idle. + if (binding->pending_flush_sessions_.empty()) { + uv_check_stop(&binding->flush_check_); + binding->flush_check_started_ = false; + } +} + void BindingData::MemoryInfo(MemoryTracker* tracker) const { #define V(name, _) tracker->TrackField(#name, name##_callback()); diff --git a/src/quic/bindingdata.h b/src/quic/bindingdata.h index cc3c3a49f5647a..a83c2a55c19d38 100644 --- a/src/quic/bindingdata.h +++ b/src/quic/bindingdata.h @@ -10,9 +10,11 @@ #include #include #include +#include #include #include #include +#include #include "defs.h" namespace node::quic { @@ -201,6 +203,13 @@ class BindingData final // routing so that any endpoint can route packets to any session. SessionManager& session_manager(); + // Schedule a session for deferred SendPendingData. Sessions are accumulated + // during the I/O poll phase (via Endpoint::Receive -> Session::ReadPacket) + // and flushed in a uv_check callback immediately after poll completes. + // This batches multiple received packets before generating responses, + // allowing ngtcp2 to make better ACK coalescing decisions. + void ScheduleSessionFlush(const BaseObjectPtr& session); + std::unordered_map> listening_endpoints; size_t current_ngtcp2_memory_ = 0; @@ -247,6 +256,30 @@ class BindingData final #undef V std::unique_ptr session_manager_; + + // Type-erased arena storage. The concrete AliasedStructArena types + // are only complete in the .cc files where Stream::State etc. are defined. + // Each .cc file provides typed accessor methods. The deleters are set + // when the arenas are created so that ~BindingData destroys them correctly. 
+ using ArenaDeleter = void (*)(void*); + using ArenaPtr = std::unique_ptr<void, ArenaDeleter>; + ArenaPtr stream_state_arena_{nullptr, +[](void*) {}}; + ArenaPtr stream_stats_arena_{nullptr, +[](void*) {}}; + ArenaPtr session_state_arena_{nullptr, +[](void*) {}}; + ArenaPtr session_stats_arena_{nullptr, +[](void*) {}}; + ArenaPtr endpoint_state_arena_{nullptr, +[](void*) {}}; + ArenaPtr endpoint_stats_arena_{nullptr, +[](void*) {}}; + + // Deferred send flush state. The uv_check_t fires immediately after + // the I/O poll phase in the same event loop tick, allowing batched + // receive processing: all packets are read during poll, then + // SendPendingData is called once per dirty session in the check callback. + uv_check_t flush_check_; + std::vector<BaseObjectPtr<Session>> pending_flush_sessions_; + bool flush_check_started_ = false; + bool flush_check_initialized_ = false; + + static void OnFlushCheck(uv_check_t* handle); }; JS_METHOD_IMPL(IllegalConstructor); diff --git a/src/quic/data.h b/src/quic/data.h index 2b6d777caf7b81..ec8d40cbc4c7a0 100644 --- a/src/quic/data.h +++ b/src/quic/data.h @@ -19,6 +19,40 @@ namespace node::quic { template <typename T> concept OneByteType = sizeof(T) == 1; +// Lightweight wrapper around ngtcp2_pkt_info. Insulates the Node.js QUIC +// code from the ngtcp2 struct layout and provides a clean API boundary +// for per-packet metadata (currently ECN codepoint; may grow as ngtcp2 +// and libuv evolve). +// +// Default-constructed PacketInfo is zero-initialized, which ngtcp2 treats +// as ECN Not-ECT — identical to passing nullptr for the pkt_info parameter. +class PacketInfo final { + public: + // ECN codepoints as defined by RFC 3168. + enum class Ecn : uint32_t { + NOT_ECT = 0, // Not ECN-Capable Transport + ECT_1 = 1, // ECN-Capable Transport(1) + ECT_0 = 2, // ECN-Capable Transport(0) + CE = 3, // Congestion Experienced + }; + + PacketInfo() : info_{} {} + explicit PacketInfo(const ngtcp2_pkt_info& info) : info_(info) {} + + // ECN codepoint for this packet. 
When libuv gains per-packet ECN + // reporting, populate via set_ecn() from the receive metadata + // before passing to ReadPacket(). + Ecn ecn() const { return static_cast<Ecn>(info_.ecn); } + void set_ecn(Ecn ecn) { info_.ecn = static_cast<uint32_t>(ecn); } + + // Conversion operators for ngtcp2 API calls. + operator const ngtcp2_pkt_info*() const { return &info_; } + operator ngtcp2_pkt_info*() { return &info_; } + + private: + ngtcp2_pkt_info info_; +}; + struct Path final : public ngtcp2_path { explicit Path(const SocketAddress& local, const SocketAddress& remote); Path(Path&& other) noexcept = default; diff --git a/src/quic/endpoint.cc b/src/quic/endpoint.cc index 66413f66cafee2..8a61f51b088750 100644 --- a/src/quic/endpoint.cc +++ b/src/quic/endpoint.cc @@ -29,7 +29,6 @@ namespace node { using v8::Array; using v8::ArrayBufferView; -using v8::BackingStore; using v8::HandleScope; using v8::Integer; using v8::Just; @@ -312,10 +311,18 @@ class Endpoint::UDP::Impl final : public HandleWrap { SET_SELF_SIZE(Impl) private: + // Pre-allocated receive buffer. Reused across all datagrams because + // ngtcp2_conn_read_pkt is synchronous — it copies what it needs and + // does not retain a reference to the buffer after returning. This + // eliminates a malloc(64KB)/free(64KB) cycle per received datagram. + static constexpr size_t kRecvBufferSize = 65536; // UV__UDP_DGRAM_MAXSIZE + char recv_buf_[kRecvBufferSize]; + static void OnAlloc(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) { - *buf = From(handle)->env()->allocate_managed_buffer(suggested_size); + auto* impl = From(handle); + *buf = uv_buf_init(impl->recv_buf_, kRecvBufferSize); } static void OnReceive(uv_udp_t* handle, @@ -327,26 +334,22 @@ class Endpoint::UDP::Impl final : public HandleWrap { DCHECK_NOT_NULL(impl); DCHECK_NOT_NULL(impl->endpoint_); - auto release_buf = [&]() { - if (buf->base != nullptr) impl->env()->release_managed_buffer(*buf); - }; - // Nothing to do in these cases. 
Specifically, if the nread // is zero or we have received a partial packet, we are just - // going to ignore it. + // going to ignore it. No buffer release needed — recv_buf_ + // is pre-allocated and reused. if (nread == 0 || flags & UV_UDP_PARTIAL) { - release_buf(); return; } if (nread < 0) { - release_buf(); impl->endpoint_->Destroy(CloseContext::RECEIVE_FAILURE, static_cast(nread)); return; } - impl->endpoint_->Receive(uv_buf_init(buf->base, static_cast(nread)), + impl->endpoint_->Receive(reinterpret_cast(buf->base), + static_cast(nread), SocketAddress(addr)); } @@ -492,6 +495,24 @@ int Endpoint::UDP::Send(Packet::Ptr packet) { return err; } +int Endpoint::UDP::TrySend(const Packet::Ptr& packet) { + DCHECK(packet); + if (is_closed_or_closing()) return UV_EBADF; + uv_buf_t buf = *packet; + return uv_udp_try_send( + &impl_->handle_, &buf, 1, packet->destination().data()); +} + +int Endpoint::UDP::TrySendBatch(uv_buf_t* bufs[], + unsigned int nbufs[], + struct sockaddr* addrs[], + size_t count) { + DCHECK_GT(count, 0); + if (is_closed_or_closing()) return UV_EBADF; + return uv_udp_try_send2( + &impl_->handle_, static_cast(count), bufs, nbufs, addrs, 0); +} + void Endpoint::UDP::MemoryInfo(MemoryTracker* tracker) const { if (impl_) tracker->TrackField("impl", impl_); } @@ -812,6 +833,111 @@ void Endpoint::Send(Packet::Ptr packet) { STAT_INCREMENT(Stats, packets_sent); } +void Endpoint::SendOrTrySend(Packet::Ptr packet) { +#ifdef DEBUG + if (is_diagnostic_packet_loss(options_.tx_loss)) [[unlikely]] { + return; + } +#endif + + if (is_closed() || is_closing() || !packet || packet->length() == 0) { + return; + } + + Debug(this, "TrySend %s", packet->ToString()); + + // Attempt synchronous send. On success (returns number of bytes sent), + // the packet is delivered immediately — no callback overhead, no + // waiting for the next poll cycle. + int err = udp_.TrySend(packet); + if (err >= 0) { + // Synchronous send succeeded. 
+ STAT_INCREMENT_N(Stats, bytes_sent, packet->length()); + STAT_INCREMENT(Stats, packets_sent); + // Ptr destructor releases back to arena pool. + return; + } + + if (err == UV_EAGAIN) { + // Socket not writable or async sends are queued. Fall back to the + // async path — the packet will be queued and flushed on the next + // POLLOUT cycle. + Debug(this, "TrySend got EAGAIN, falling back to async Send"); + return Send(std::move(packet)); + } + + // Other errors are fatal. + Debug(this, "TrySend failed with error %d", err); + Destroy(CloseContext::SEND_FAILURE, err); +} + +void Endpoint::SendBatch(Packet::Ptr* packets, size_t count) { + if (count == 0) return; + +#ifdef DEBUG + if (is_diagnostic_packet_loss(options_.tx_loss)) [[unlikely]] { + for (size_t i = 0; i < count; i++) packets[i].reset(); + return; + } +#endif + + if (is_closed() || is_closing()) { + for (size_t i = 0; i < count; i++) packets[i].reset(); + return; + } + + static constexpr size_t kMaxBatch = 64; + DCHECK_LE(count, kMaxBatch); + + // Build libuv argument arrays directly from the Ptr array. + // Packets with zero length are released and skipped. + uv_buf_t bufs[kMaxBatch]; + uv_buf_t* buf_ptrs[kMaxBatch]; + unsigned int nbufs[kMaxBatch]; + struct sockaddr* addrs[kMaxBatch]; + // Map from valid-index back to the original packets[] index. + size_t index_map[kMaxBatch]; + size_t valid_count = 0; + + for (size_t i = 0; i < count; i++) { + if (!packets[i] || packets[i]->length() == 0) { + packets[i].reset(); + continue; + } + bufs[valid_count] = *packets[i]; + buf_ptrs[valid_count] = &bufs[valid_count]; + nbufs[valid_count] = 1; + addrs[valid_count] = + const_cast(packets[i]->destination().data()); + index_map[valid_count] = i; + valid_count++; + } + + if (valid_count == 0) return; + + // Attempt synchronous batched send via sendmmsg. + int sent = udp_.TrySendBatch(buf_ptrs, nbufs, addrs, valid_count); + + if (sent > 0) { + // Packets [0, sent) were delivered synchronously. 
+ // Release them immediately — no async callback needed. + for (size_t i = 0; i < static_cast(sent); i++) { + size_t idx = index_map[i]; + STAT_INCREMENT_N(Stats, bytes_sent, packets[idx]->length()); + STAT_INCREMENT(Stats, packets_sent); + packets[idx].reset(); + } + } + + // Any unsent packets (EAGAIN, partial send, or total failure) fall + // back to async uv_udp_send. + size_t start = (sent > 0) ? static_cast(sent) : 0; + for (size_t i = start; i < valid_count; i++) { + size_t idx = index_map[i]; + Send(std::move(packets[idx])); + } +} + void Endpoint::SendRetry(const PathDescriptor& options) { // Generating and sending retry packets does consume some system resources, // and it is possible for a malicious peer to trigger sending a large number @@ -840,22 +966,31 @@ void Endpoint::SendRetry(const PathDescriptor& options) { void Endpoint::SendVersionNegotiation(const PathDescriptor& options) { Debug(this, "Sending version negotiation on path %s", options); - // While creating and sending a version negotiation packet does consume a - // small amount of system resources, and while it is fairly trivial for a - // malicious peer to force a version negotiation to be sent, these are more - // trivial to create than the cryptographically generated retry and stateless - // reset packets. If the packet is sent, then we'll at least increment the - // version_negotiation_count statistic so that application code can keep an - // eye on it. + // A malicious peer can trivially force version negotiation packets by + // sending packets with unsupported QUIC versions, potentially from + // spoofed source addresses. Rate-limit per remote host to prevent + // amplification attacks. + const auto exceeds_limits = [&] { + SocketAddressInfoTraits::Type* counts = + addr_validation_lru_.Peek(options.remote_address); + auto count = counts != nullptr ? 
counts->version_negotiation_count : 0; + return count >= kMaxVersionNegotiations; + }; + + if (exceeds_limits()) { + Debug(this, + "Version negotiation rate limit exceeded for %s", + options.remote_address); + return; + } + auto packet = Packet::CreateVersionNegotiationPacket(*this, options); if (packet) { + addr_validation_lru_.Upsert(options.remote_address) + ->version_negotiation_count++; STAT_INCREMENT(Stats, version_negotiation_count); Send(std::move(packet)); } - - // If creating the packet is unsuccessful, we just drop things on the floor. - // It's not worth committing any further resources to this one packet. We - // might want to log the failure at some point tho. } bool Endpoint::SendStatelessReset(const PathDescriptor& options, @@ -902,11 +1037,28 @@ void Endpoint::SendImmediateConnectionClose(const PathDescriptor& options, "Sending immediate connection close on path %s with reason %s", options, reason); - // While it is possible for a malicious peer to cause us to create a large - // number of these, generating them is fairly trivial. + // A malicious peer can trigger immediate connection close packets by + // sending Initial packets with invalid tokens or when the server is + // busy. Rate-limit per remote host to prevent amplification attacks. + const auto exceeds_limits = [&] { + SocketAddressInfoTraits::Type* counts = + addr_validation_lru_.Peek(options.remote_address); + auto count = counts != nullptr ? 
counts->immediate_close_count : 0; + return count >= kMaxImmediateCloses; + }; + + if (exceeds_limits()) { + Debug(this, + "Immediate connection close rate limit exceeded for %s", + options.remote_address); + return; + } + auto packet = Packet::CreateImmediateConnectionClosePacket(*this, options, reason); if (packet) { + addr_validation_lru_.Upsert(options.remote_address) + ->immediate_close_count++; STAT_INCREMENT(Stats, immediate_close_count); Send(std::move(packet)); } @@ -1117,24 +1269,39 @@ void Endpoint::CloseGracefully() { MaybeDestroy(); } -void Endpoint::Receive(const uv_buf_t& buf, +void Endpoint::Receive(const uint8_t* data, + size_t len, const SocketAddress& remote_address) { const auto receive = [&](Session* session, - Store&& store, + const uint8_t* pkt_data, + size_t pkt_len, const SocketAddress& local_address, const SocketAddress& remote_address, const CID& dcid, const CID& scid) { DCHECK_NOT_NULL(session); if (session->is_destroyed()) return; - size_t len = store.length(); - if (session->Receive(std::move(store), local_address, remote_address)) { - STAT_INCREMENT_N(Stats, bytes_received, len); + // Use ReadPacket (no SendPendingDataScope) so that multiple packets + // received in the same I/O burst are processed before any responses + // are generated. The deferred flush via BindingData's uv_check + // callback calls SendPendingData once per dirty session after all + // packets in the burst have been read. + if (session->ReadPacket(pkt_data, pkt_len, local_address, remote_address)) { + STAT_INCREMENT_N(Stats, bytes_received, pkt_len); STAT_INCREMENT(Stats, packets_received); } + // Schedule the session for deferred SendPendingData if it hasn't + // been scheduled already in this burst. 
+ if (!session->is_destroyed() && !session->pending_flush_) { + session->pending_flush_ = true; + BindingData::Get(env()).ScheduleSessionFlush( + BaseObjectPtr(session)); + } }; - const auto accept = [&](const Session::Config& config, Store&& store) { + const auto accept = [&](const Session::Config& config, + const uint8_t* pkt_data, + size_t pkt_len) { // One final check. If the endpoint is closed, closing, or is not listening // as a server, then we cannot accept the initial packet. if (is_closed() || is_closing() || !is_listening()) return; @@ -1164,7 +1331,8 @@ void Endpoint::Receive(const uv_buf_t& buf, return; receive(session.get(), - std::move(store), + pkt_data, + pkt_len, config.local_address, config.remote_address, config.dcid, @@ -1174,7 +1342,8 @@ void Endpoint::Receive(const uv_buf_t& buf, const auto acceptInitialPacket = [&](const uint32_t version, const CID& dcid, const CID& scid, - Store&& store, + const uint8_t* pkt_data, + size_t pkt_len, const SocketAddress& local_address, const SocketAddress& remote_address) { // If we're not listening as a server, do not accept an initial packet. @@ -1184,8 +1353,7 @@ void Endpoint::Receive(const uv_buf_t& buf, // This is our first condition check... A minimal check to see if ngtcp2 can // even recognize this packet as a quic packet. - ngtcp2_vec vec = store; - if (ngtcp2_accept(&hd, vec.base, vec.len) != NGTCP2_SUCCESS) { + if (ngtcp2_accept(&hd, pkt_data, pkt_len) != NGTCP2_SUCCESS) { // Per the ngtcp2 docs, ngtcp2_accept returns 0 if the check was // successful, or an error code if it was not. 
Currently there's only one // documented error code (NGTCP2_ERR_INVALID_ARGUMENT) but we'll handle @@ -1423,7 +1591,7 @@ void Endpoint::Receive(const uv_buf_t& buf, } } - accept(config, std::move(store)); + accept(config, pkt_data, pkt_len); }; // When a received packet contains a QUIC short header but cannot be matched @@ -1439,14 +1607,15 @@ void Endpoint::Receive(const uv_buf_t& buf, // possible to avoid a DOS vector. const auto maybeStatelessReset = [&](const CID& dcid, const CID& scid, - Store& store, + const uint8_t* pkt_data, + size_t pkt_len, const SocketAddress& local_address, const SocketAddress& remote_address) { // Support for stateless resets can be disabled by the application. If that // case, or if the packet is too short to contain a reset token, then we // skip the remaining checks. if (options_.disable_stateless_reset || - store.length() < NGTCP2_STATELESS_RESET_TOKENLEN) { + pkt_len < NGTCP2_STATELESS_RESET_TOKENLEN) { return false; } @@ -1454,20 +1623,21 @@ void Endpoint::Receive(const uv_buf_t& buf, // NGTCP2_STATELESS_RESET_TOKENLEN bytes in the received packet. If it is a // stateless reset then then rest of the bytes in the packet are garbage // that we'll ignore. - ngtcp2_vec vec = store; - vec.base += (vec.len - NGTCP2_STATELESS_RESET_TOKENLEN); + const uint8_t* token_pos = + pkt_data + (pkt_len - NGTCP2_STATELESS_RESET_TOKENLEN); // If a Session has been associated with the token, then it is a valid // stateless reset token. We need to dispatch it to the session to be // processed. auto* session = session_manager().FindSessionByStatelessResetToken( - StatelessResetToken(vec.base)); + StatelessResetToken(token_pos)); if (session != nullptr) { // If the session happens to have been destroyed already, we'll // just ignore the packet. 
if (!session->is_destroyed()) [[likely]] { receive(session, - std::move(store), + pkt_data, + pkt_len, local_address, remote_address, dcid, @@ -1495,22 +1665,8 @@ void Endpoint::Receive(const uv_buf_t& buf, // return; // } - Debug(this, "Received %zu-byte packet from %s", buf.len, remote_address); - - // The managed buffer here contains the received packet. We do not yet know - // at this point if it is a valid QUIC packet. We need to do some basic - // checks. It is critical at this point that we do as little work as possible - // to avoid a DOS vector. - std::shared_ptr backing = env()->release_managed_buffer(buf); - if (!backing) [[unlikely]] { - // At this point something bad happened and we need to treat this as a fatal - // case. There's likely no way to test this specific condition reliably. - return Destroy(CloseContext::RECEIVE_FAILURE, UV_ENOMEM); - } - - Store store(std::move(backing), buf.len, 0); + Debug(this, "Received %zu-byte packet from %s", len, remote_address); - ngtcp2_vec vec = store; ngtcp2_version_cid pversion_cid; // This is our first check to see if the received data can be processed as a @@ -1519,7 +1675,7 @@ void Endpoint::Receive(const uv_buf_t& buf, // valid QUIC header but there is still no guarantee that the packet can be // successfully processed. switch (ngtcp2_pkt_decode_version_cid( - &pversion_cid, vec.base, vec.len, NGTCP2_MAX_CIDLEN)) { + &pversion_cid, data, len, NGTCP2_MAX_CIDLEN)) { case 0: break; // Supported version, continue processing. case NGTCP2_ERR_VERSION_NEGOTIATION: { @@ -1597,7 +1753,7 @@ void Endpoint::Receive(const uv_buf_t& buf, // necessary here. We want to return immediately without committing any // further resources. if (pversion_cid.version == 0 && - maybeStatelessReset(dcid, scid, store, addr, remote_address)) { + maybeStatelessReset(dcid, scid, data, len, addr, remote_address)) { Debug(this, "Packet was a stateless reset"); return; // Stateless reset! Don't do any further processing. 
} @@ -1612,17 +1768,13 @@ void Endpoint::Receive(const uv_buf_t& buf, SendStatelessReset( PathDescriptor{ pversion_cid.version, dcid, scid, addr, remote_address}, - store.length()); + len); return; } // Process the packet as an initial packet... - return acceptInitialPacket(pversion_cid.version, - dcid, - scid, - std::move(store), - addr, - remote_address); + return acceptInitialPacket( + pversion_cid.version, dcid, scid, data, len, addr, remote_address); } if (session->is_destroyed()) [[unlikely]] { @@ -1634,7 +1786,7 @@ void Endpoint::Receive(const uv_buf_t& buf, // If we got here, the dcid matched the scid of a known local session. Yay! // The session will take over any further processing of the packet. Debug(this, "Dispatching packet to known session"); - receive(session.get(), std::move(store), addr, remote_address, dcid, scid); + receive(session.get(), data, len, addr, remote_address, dcid, scid); // It is important to note that the session may have been destroyed during // the call to receive(...). If that's the case, the session object still diff --git a/src/quic/endpoint.h b/src/quic/endpoint.h index b9f20f8659dfa6..a9f020e0328eff 100644 --- a/src/quic/endpoint.h +++ b/src/quic/endpoint.h @@ -47,6 +47,20 @@ class Endpoint final : public AsyncWrap, public Packet::Listener { // intentionally triggering generation of a large number of retries. static constexpr uint64_t DEFAULT_MAX_RETRY_LIMIT = 10; + // Maximum number of version negotiation packets that will be sent to a + // given remote host within the LRU tracking window. Version negotiation + // packets are cheap to generate but can be used as an amplification + // vector with spoofed source addresses. + // TODO(@jasnell): Consider making this configurable via Endpoint::Options. + static constexpr uint64_t kMaxVersionNegotiations = 10; + + // Maximum number of immediate connection close packets that will be sent + // to a given remote host within the LRU tracking window. 
These are sent + // when the server is busy or a token is invalid — a malicious peer could + // trigger a large number of them. + // TODO(@jasnell): Consider making this configurable via Endpoint::Options. + static constexpr uint64_t kMaxImmediateCloses = 10; + // Endpoint configuration options struct Options final : public MemoryRetainer { // The local socket address to which the UDP port will be bound. The port @@ -208,6 +222,20 @@ class Endpoint final : public AsyncWrap, public Packet::Listener { void Send(Packet::Ptr packet); + // Attempt synchronous send via uv_udp_try_send. If the socket is + // writable, the packet is sent immediately and the Ptr is released. + // If the socket is not writable (UV_EAGAIN), falls back to the + // async Send path. Used by the deferred flush callback to avoid + // the one-tick latency of async uv_udp_send. + void SendOrTrySend(Packet::Ptr packet); + + // Send a batch of packets using uv_udp_try_send2 (sendmmsg) for + // synchronous batched delivery. Packets successfully sent are released + // immediately. On EAGAIN or partial send, remaining packets fall back + // to async uv_udp_send. The Packet::Ptr array is consumed: all entries + // will be empty (released or moved) on return. + void SendBatch(Packet::Ptr* packets, size_t count); + // Acquire a Packet from the pool. length sets the initial working // size (must be <= pool capacity). The slot is always allocated at // full capacity to avoid fragmentation. @@ -281,6 +309,20 @@ class Endpoint final : public AsyncWrap, public Packet::Listener { void Close(); int Send(Packet::Ptr packet); + // Synchronous send using uv_udp_try_send. Returns the number of + // bytes sent on success, UV_EAGAIN if the socket is not writable + // or the send queue is non-empty, or another negative error code. + // The Ptr is not consumed — the caller manages the lifecycle. + int TrySend(const Packet::Ptr& packet); + + // Synchronous batched send using uv_udp_try_send2 (sendmmsg). 
+ // Takes pre-built libuv argument arrays. Returns the number of + // messages successfully sent (>= 0), or a negative error code. + int TrySendBatch(uv_buf_t* bufs[], + unsigned int nbufs[], + struct sockaddr* addrs[], + size_t count); + // Returns the local UDP socket address to which we are bound, // or fail with an assert if we are not bound. SocketAddress local_address() const; @@ -381,7 +423,7 @@ class Endpoint final : public AsyncWrap, public Packet::Listener { // Ref() causes a listening Endpoint to keep the event loop active. JS_METHOD(Ref); - void Receive(const uv_buf_t& buf, const SocketAddress& from); + void Receive(const uint8_t* data, size_t len, const SocketAddress& from); AliasedStruct stats_; AliasedStruct state_; @@ -426,6 +468,8 @@ class Endpoint final : public AsyncWrap, public Packet::Listener { struct Type final { size_t reset_count; size_t retry_count; + size_t version_negotiation_count; + size_t immediate_close_count; uint64_t timestamp; bool validated; }; diff --git a/src/quic/http3.cc b/src/quic/http3.cc index ea07c0a5a596fb..6717ac064801cb 100644 --- a/src/quic/http3.cc +++ b/src/quic/http3.cc @@ -262,11 +262,17 @@ class Http3ApplicationImpl final : public Session::Application { } void BeginShutdown() override { - if (conn_) nghttp3_conn_submit_shutdown_notice(*this); + // Only submit a shutdown notice if the H3 connection was fully + // started (control streams bound). If the TLS handshake failed + // before Start() was called, conn_ exists but its control streams + // are unbound, and nghttp3_conn_submit_shutdown_notice would crash. + if (conn_ && started_) nghttp3_conn_submit_shutdown_notice(*this); } void CompleteShutdown() override { - if (conn_) nghttp3_conn_shutdown(*this); + // Same guard as BeginShutdown — nghttp3_conn_shutdown asserts + // that the control stream is bound (conn->tx.ctrl != NULL). 
+ if (conn_ && started_) nghttp3_conn_shutdown(*this); } bool ReceiveStreamData(stream_id id, diff --git a/src/quic/packet.h b/src/quic/packet.h index ffeb582471333f..a94ee1264c2a6a 100644 --- a/src/quic/packet.h +++ b/src/quic/packet.h @@ -68,6 +68,8 @@ class Packet final { size_t length() const { return length_; } size_t capacity() const { return capacity_; } const SocketAddress& destination() const { return destination_; } + const PacketInfo& pkt_info() const { return pkt_info_; } + void set_pkt_info(const PacketInfo& pi) { pkt_info_ = pi; } Listener* listener() const { return listener_; } // Redirect the packet to a different endpoint for cross-endpoint sends @@ -148,6 +150,7 @@ class Packet final { Listener* listener_; // Touched at send time. + PacketInfo pkt_info_; SocketAddress destination_; // Only touched by libuv during uv_udp_send and in the send callback. diff --git a/src/quic/session.cc b/src/quic/session.cc index 4af903e0c2a0af..1cc4b8b23b510b 100644 --- a/src/quic/session.cc +++ b/src/quic/session.cc @@ -197,6 +197,46 @@ struct Session::State final { STAT_STRUCT(Session, SESSION) +using SessionStateArena = AliasedStructArena; +using SessionStatsArena = AliasedStructArena; + +// Session uses arena-allocated stats, not AliasedStruct, so override the +// STAT_* macros to use impl_->stats() instead of stats_.Data(). 
+#undef STAT_INCREMENT +#undef STAT_INCREMENT_N +#undef STAT_RECORD_TIMESTAMP +#undef STAT_SET +#undef STAT_GET +#define STAT_INCREMENT(Type, name) \ + IncrementStat(impl_->stats()); +#define STAT_INCREMENT_N(Type, name, amt) \ + IncrementStat(impl_->stats(), amt); +#define STAT_RECORD_TIMESTAMP(Type, name) \ + RecordTimestampStat(impl_->stats()); +#define STAT_SET(Type, name, val) \ + SetStat(impl_->stats(), val) +#define STAT_GET(Type, name) GetStat(impl_->stats()) + +namespace { +SessionStateArena& GetSessionStateArena(BindingData& binding) { + if (!binding.session_state_arena_) { + auto* arena = new SessionStateArena(); + binding.session_state_arena_ = BindingData::ArenaPtr( + arena, +[](void* p) { delete static_cast(p); }); + } + return *static_cast(binding.session_state_arena_.get()); +} + +SessionStatsArena& GetSessionStatsArena(BindingData& binding) { + if (!binding.session_stats_arena_) { + auto* arena = new SessionStatsArena(); + binding.session_stats_arena_ = BindingData::ArenaPtr( + arena, +[](void* p) { delete static_cast(p); }); + } + return *static_cast(binding.session_stats_arena_.get()); +} +} // namespace + // ============================================================================ class Http3Application; @@ -464,7 +504,12 @@ Session::Config::Config(Environment* env, settings.log_printf = ngtcp2_debug_log; } - settings.handshake_timeout = options.handshake_timeout; + // The handshake_timeout option is in milliseconds; ngtcp2 expects + // nanoseconds (ngtcp2_duration). UINT64_MAX means no timeout. + settings.handshake_timeout = + options.handshake_timeout == UINT64_MAX + ? UINT64_MAX + : options.handshake_timeout * NGTCP2_MILLISECONDS; settings.max_stream_window = options.max_stream_window; settings.max_window = options.max_window; settings.ack_thresh = options.unacknowledged_packet_threshold; @@ -707,8 +752,8 @@ std::string Session::Options::ToString() const { // Session::Impl maintains most of the internal state of an active Session. 
struct Session::Impl final : public MemoryRetainer { Session* session_; - AliasedStruct stats_; - AliasedStruct state_; + ArenaSlotBase stats_slot_; + ArenaSlotBase state_slot_; BaseObjectWeakPtr endpoint_; Config config_; SocketAddress local_address_; @@ -736,20 +781,30 @@ struct Session::Impl final : public MemoryRetainer { // and the stream/datagram data is included in the 0-RTT flight. bool handshake_deferred_ = false; + Stats* stats() { return static_cast(stats_slot_.ptr); } + const Stats* stats() const { + return static_cast(stats_slot_.ptr); + } + State* state() { return static_cast(state_slot_.ptr); } + const State* state() const { + return static_cast(state_slot_.ptr); + } + Impl(Session* session, Endpoint* endpoint, const Config& config) : session_(session), - stats_(env()->isolate()), - state_(env()->isolate()), endpoint_(endpoint), config_(config), local_address_(config.local_address), remote_address_(config.remote_address), timer_(session_->env(), [this] { session_->OnTimeout(); }) { + auto& binding = BindingData::Get(env()); + stats_slot_ = GetSessionStatsArena(binding).Allocate(env()->isolate()); + state_slot_ = GetSessionStateArena(binding).Allocate(env()->isolate()); timer_.Unref(); } DISALLOW_COPY_AND_MOVE(Impl) - inline bool is_closing() const { return state_->closing; } + inline bool is_closing() const { return state()->closing; } ~Impl() { // Ensure that Close() was called before dropping @@ -785,6 +840,10 @@ struct Session::Impl final : public MemoryRetainer { } endpoint->RemoveSession(config_.scid, remote_address_); + + auto& binding = BindingData::Get(env()); + if (stats_slot_) GetSessionStatsArena(binding).ReleaseSlot(stats_slot_); + if (state_slot_) GetSessionStateArena(binding).ReleaseSlot(state_slot_); } void MemoryInfo(MemoryTracker* tracker) const override { @@ -1304,7 +1363,7 @@ struct Session::Impl final : public MemoryRetainer { // NGTCP2_ERR_DRAINING. 
The actual close handling happens in // Session::Receive when it processes that return value and // checks this flag. - session->impl_->state_->stateless_reset = 1; + session->impl_->state()->stateless_reset = 1; return NGTCP2_SUCCESS; } @@ -1629,10 +1688,7 @@ Session::Session(Endpoint* endpoint, connection_(InitConnection()), tls_session_(tls_context->NewSession(this, session_ticket)) { DCHECK(impl_); - { - auto& stats_ = impl_->stats_; - STAT_RECORD_TIMESTAMP(Stats, created_at); - } + STAT_RECORD_TIMESTAMP(Stats, created_at); // For clients, select the Application immediately — the ALPN is // known upfront from the options. For servers, application_ stays @@ -1661,10 +1717,33 @@ Session::Session(Endpoint* endpoint, MakeWeak(); Debug(this, "Session created."); - JS_DEFINE_READONLY_PROPERTY( - env(), object, env()->stats_string(), impl_->stats_.GetArrayBuffer()); - JS_DEFINE_READONLY_PROPERTY( - env(), object, env()->state_string(), impl_->state_.GetArrayBuffer()); + { + const v8::HandleScope handle_scope(env()->isolate()); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + env()->state_string(), + impl_->state_slot_.GetPageDataView(env()->isolate())); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + FIXED_ONE_BYTE_STRING(env()->isolate(), "stateByteOffset"), + v8::Integer::NewFromUnsigned( + env()->isolate(), + static_cast(impl_->state_slot_.GetByteOffset()))); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + env()->stats_string(), + impl_->stats_slot_.GetPageBigUint64Array(env()->isolate())); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + FIXED_ONE_BYTE_STRING(env()->isolate(), "statsByteOffset"), + v8::Integer::NewFromUnsigned( + env()->isolate(), + static_cast(impl_->stats_slot_.GetByteOffset()))); + } UpdateDataStats(); } @@ -1732,12 +1811,11 @@ bool Session::is_destroyed() const { } bool Session::is_destroyed_or_closing() const { - return !impl_ || impl_->state_->closing; + return !impl_ || impl_->state()->closing; } void 
Session::Close(CloseMethod method) { if (is_destroyed()) return; - auto& stats_ = impl_->stats_; // If the handshake was deferred (0-RTT client that never sent), // no packets were ever transmitted. Close silently since there is @@ -1752,7 +1830,7 @@ void Session::Close(CloseMethod method) { } STAT_RECORD_TIMESTAMP(Stats, closing_at); - impl_->state_->closing = 1; + impl_->state()->closing = 1; // With both the DEFAULT and SILENT options, we will proceed to closing // the session immediately. All open streams will be immediately destroyed @@ -1768,27 +1846,27 @@ void Session::Close(CloseMethod method) { switch (method) { case CloseMethod::DEFAULT: { Debug(this, "Immediately closing session"); - impl_->state_->silent_close = 0; + impl_->state()->silent_close = 0; return FinishClose(); } case CloseMethod::SILENT: { Debug(this, "Immediately closing session silently"); - impl_->state_->silent_close = 1; + impl_->state()->silent_close = 1; return FinishClose(); } case CloseMethod::GRACEFUL: { // If we are already closing gracefully, do nothing. - if (impl_->state_->graceful_close) [[unlikely]] { + if (impl_->state()->graceful_close) [[unlikely]] { return; } - impl_->state_->graceful_close = 1; + impl_->state()->graceful_close = 1; // application_ may be null for server sessions if close() is called // before the TLS handshake selects the ALPN. Without an application // we cannot do a graceful shutdown (GOAWAY, CONNECTION_CLOSE etc.), // so fall through to a silent close. if (!impl_->application_) { - impl_->state_->silent_close = 1; + impl_->state()->silent_close = 1; return FinishClose(); } @@ -1806,7 +1884,7 @@ void Session::Close(CloseMethod method) { // If there are no open streams, then we can close immediately and // not worry about waiting around. 
if (impl_->streams_.empty()) { - impl_->state_->silent_close = 0; + impl_->state()->silent_close = 0; return FinishClose(); } @@ -1850,11 +1928,11 @@ void Session::FinishClose() { // trigger MakeCallback (stream destruction, pending queue rejection, // SendConnectionClose, EmitClose). if (is_destroyed()) return; - DCHECK(impl_->state_->closing); + DCHECK(impl_->state()->closing); // Clear the graceful_close flag to prevent RemoveStream() from // re-entering FinishClose() when we destroy streams below. - impl_->state_->graceful_close = 0; + impl_->state()->graceful_close = 0; // Destroy all open streams immediately. We copy the map because // streams remove themselves during destruction. Each Destroy() call @@ -1876,7 +1954,7 @@ void Session::FinishClose() { // Send final application-level shutdown and CONNECTION_CLOSE // unless this is a silent close. - if (!impl_->state_->silent_close) { + if (!impl_->state()->silent_close) { if (impl_->application_) { application().CompleteShutdown(); } @@ -1889,7 +1967,7 @@ void Session::FinishClose() { // If the session was passed to JavaScript, we need to round-trip // through JS so it can clean up before we destroy. The JS side // will synchronously call destroy(), which calls Session::Destroy(). - if (impl_->state_->wrapped) { + if (impl_->state()->wrapped) { EmitClose(impl_->last_error_); } else { Destroy(); @@ -1901,7 +1979,7 @@ void Session::Destroy() { // Ensure the closing flag is set for the ~Impl() DCHECK. Normally // this is set by Session::Close(), but JS destroy() can be called // directly without going through Close() first. 
- impl_->state_->closing = 1; + impl_->state()->closing = 1; // If we're inside a ngtcp2 or nghttp3 callback scope, we cannot // destroy impl_ now because the callback is executing methods on @@ -1914,10 +1992,7 @@ void Session::Destroy() { } Debug(this, "Session destroyed"); - { - auto& stats_ = impl_->stats_; - STAT_RECORD_TIMESTAMP(Stats, destroyed_at); - } + STAT_RECORD_TIMESTAMP(Stats, destroyed_at); impl_.reset(); } @@ -1993,16 +2068,16 @@ void Session::SetApplication(std::unique_ptr app) { return; } } - impl_->state_->application_type = static_cast(app->type()); - impl_->state_->headers_supported = static_cast( + impl_->state()->application_type = static_cast(app->type()); + impl_->state()->headers_supported = static_cast( app->SupportsHeaders() ? HeadersSupportState::SUPPORTED : HeadersSupportState::UNSUPPORTED); // Surface the application's "no error" and "internal error" codes via // session state so that JS-side code (e.g. the stream writer's fail() // path) can resolve the right wire code for the negotiated ALPN // without duplicating the per-application table. - impl_->state_->no_error_code = app->GetNoErrorCode(); - impl_->state_->internal_error_code = app->GetInternalErrorCode(); + impl_->state()->no_error_code = app->GetNoErrorCode(); + impl_->state()->internal_error_code = app->GetInternalErrorCode(); impl_->application_ = std::move(app); } @@ -2101,22 +2176,35 @@ void Session::SetLastError(QuicError&& error) { impl_->last_error_ = std::move(error); } -bool Session::Receive(Store&& store, +bool Session::Receive(const uint8_t* data, + size_t len, const SocketAddress& local_address, - const SocketAddress& remote_address) { + const SocketAddress& remote_address, + const PacketInfo& pkt_info, + uint64_t ts) { + // Convenience wrapper: reads the packet and immediately triggers + // SendPendingData. Used by paths that need an immediate response + // (e.g., Endpoint::Connect for client Initial packets). 
+ // The hot receive path uses ReadPacket() directly with deferred + // flush via BindingData's uv_check callback. + SendPendingDataScope send_scope(this); + return ReadPacket(data, len, local_address, remote_address, pkt_info, ts); +} + +bool Session::ReadPacket(const uint8_t* data, + size_t len, + const SocketAddress& local_address, + const SocketAddress& remote_address, + const PacketInfo& pkt_info, + uint64_t ts) { DCHECK(!is_destroyed()); impl_->remote_address_ = remote_address; - // When we are done processing this packet, we arrange to send any - // pending data for this session. - SendPendingDataScope send_scope(this); - - ngtcp2_vec vec = store; Path path(local_address, remote_address); Debug(this, "Session is receiving %zu-byte packet received along path %s", - vec.len, + len, path); // It is important to understand that reading the packet will cause @@ -2125,29 +2213,29 @@ bool Session::Receive(Store&& store, // ensures that any deferred destroy waits until all callbacks for this // packet have completed. After calling ngtcp2_conn_read_pkt here, we // will need to double check that the session is not destroyed before - // we try doing anything with it (like updating stats, sending pending - // data, etc). + // we try doing anything with it (like updating stats, etc). int err; { NgTcp2CallbackScope callback_scope(this); - err = ngtcp2_conn_read_pkt(*this, - &path, - // TODO(@jasnell): ECN pkt_info blocked on libuv - nullptr, - vec.base, - vec.len, - uv_hrtime()); + // The PacketInfo carries per-packet metadata (currently ECN codepoint). + // When libuv gains per-packet ECN reporting, the caller should + // populate pkt_info from the receive metadata before calling + // ReadPacket(). + // When ts is 0 (the default), call uv_hrtime() here. The batched + // receive path caches a timestamp and passes it to all ReadPacket() + // calls in the same I/O burst. 
+ if (ts == 0) ts = uv_hrtime(); + err = ngtcp2_conn_read_pkt(*this, &path, pkt_info, data, len, ts); } if (is_destroyed()) return false; - Debug(this, "Session receiving %zu-byte packet with result %d", vec.len, err); + Debug(this, "Session receiving %zu-byte packet with result %d", len, err); switch (err) { case 0: { - Debug(this, "Session successfully received %zu-byte packet", vec.len); + Debug(this, "Session successfully received %zu-byte packet", len); if (!is_destroyed()) [[likely]] { - auto& stats_ = impl_->stats_; - STAT_INCREMENT_N(Stats, bytes_received, vec.len); + STAT_INCREMENT_N(Stats, bytes_received, len); // Process deferred operations that couldn't run inside callback // scopes (e.g., HTTP/3 GOAWAY handling that calls into JS). application().PostReceive(); @@ -2178,7 +2266,7 @@ bool Session::Receive(Store&& store, // There is no point in waiting for a draining period — the // peer has no state. Close immediately with an error. if (!is_destroyed()) [[likely]] { - if (impl_->state_->stateless_reset) { + if (impl_->state()->stateless_reset) { Debug(this, "Session received stateless reset, closing"); SetLastError(QuicError::ForNgtcp2Error(NGTCP2_ERR_DRAINING)); Close(CloseMethod::SILENT); @@ -2245,6 +2333,72 @@ bool Session::Receive(Store&& store, return false; } +void Session::SendBatch(Packet::Ptr* packets, + PathStorage* paths, + size_t count) { + DCHECK(!is_destroyed()); + if (count == 0) return; + + // Separate packets into those going to the primary endpoint and those + // redirected to other endpoints (rare: path validation, preferred address). + // Redirected packets are sent individually via the target endpoint. + static constexpr size_t kMaxBatch = 64; + DCHECK_LE(count, kMaxBatch); + Packet::Ptr primary_packets[kMaxBatch]; + size_t primary_count = 0; + + for (size_t i = 0; i < count; i++) { + if (!packets[i] || !can_send_packets()) { + packets[i].reset(); + continue; + } + + UpdatePath(paths[i]); + + // Check for cross-endpoint redirect. 
+ bool redirected = false; + if (paths[i].path.local.addrlen > 0) { + SocketAddress local_addr(paths[i].path.local.addr); + auto& mgr = BindingData::Get(env()).session_manager(); + Endpoint* target = mgr.FindEndpointForAddress(local_addr); + if (target != nullptr && target != &endpoint()) { + SocketAddress remote_addr(paths[i].path.remote.addr); + packets[i]->Redirect(static_cast(target), + remote_addr); + target->Send(std::move(packets[i])); + redirected = true; + } + } + + if (!redirected) { + primary_packets[primary_count++] = std::move(packets[i]); + } + } + + if (primary_count == 0) return; + + // Use batched send for the primary endpoint. + if (prefer_try_send_) { + endpoint().SendBatch(primary_packets, primary_count); + } else { + // Non-flush path: send individually via async uv_udp_send. + for (size_t i = 0; i < primary_count; i++) { + Send(std::move(primary_packets[i])); + } + } +} + +void Session::FlushPendingData() { + DCHECK(!is_destroyed()); + if (impl_->application_) { + // Prefer synchronous sends during the deferred flush to avoid the + // one-tick latency of async uv_udp_send from the uv_check callback. + prefer_try_send_ = true; + application().SendPendingData(); + prefer_try_send_ = false; + } +} + void Session::Send(Packet::Ptr packet) { // Sending a Packet is generally best effort. If we're not in a state // where we can send a packet, it's ok to drop it on the floor. The @@ -2261,6 +2415,16 @@ void Session::Send(Packet::Ptr packet) { return; } + // When called from the deferred flush path (uv_check callback), + // prefer synchronous send to avoid the one-tick latency of async + // uv_udp_send. SendOrTrySend uses uv_udp_try_send first, falling + // back to uv_udp_send on EAGAIN. 
+ if (prefer_try_send_) { + Debug(this, "Session is sending (try_send) %s", packet->ToString()); + endpoint().SendOrTrySend(std::move(packet)); + return; + } + Debug(this, "Session is sending %s", packet->ToString()); endpoint().Send(std::move(packet)); } @@ -2333,10 +2497,10 @@ datagram_id Session::SendDatagram(Store&& data) { } // Assign the datagram ID. - datagram_id did = ++impl_->state_->last_datagram_id; + datagram_id did = ++impl_->state()->last_datagram_id; // Check queue capacity. Apply the drop policy when full. - auto max_pending = impl_->state_->max_pending_datagrams; + auto max_pending = impl_->state()->max_pending_datagrams; if (max_pending > 0 && impl_->pending_datagrams_.size() >= max_pending) { auto drop_policy = impl_->config_.options.datagram_drop_policy; if (drop_policy == DatagramDropPolicy::DROP_OLDEST) { @@ -2507,7 +2671,6 @@ void Session::AddStream(BaseObjectPtr stream, // Update tracking statistics for the number of streams associated with this // session. - auto& stats_ = impl_->stats_; if (ngtcp2_conn_is_local_stream(*this, id)) { switch (direction) { case Direction::BIDIRECTIONAL: { @@ -2559,7 +2722,7 @@ void Session::RemoveStream(stream_id id) { // then we can proceed to finishing the close now. Note that the // expectation is that the session will be destroyed once FinishClose // returns. 
- if (impl_->state_->closing && impl_->state_->graceful_close) { + if (impl_->state()->closing && impl_->state()->graceful_close) { FinishClose(); CHECK(is_destroyed()); } @@ -2597,7 +2760,6 @@ void Session::ShutdownStreamWrite(stream_id id, QuicError code) { void Session::StreamDataBlocked(stream_id id) { DCHECK(!is_destroyed()); - auto& stats_ = impl_->stats_; STAT_INCREMENT(Stats, block_count); application().BlockStream(id); } @@ -2668,20 +2830,20 @@ bool Session::is_in_draining_period() const { bool Session::wants_session_ticket() const { return !is_destroyed() && - HasListenerFlag(impl_->state_->listener_flags, + HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::SESSION_TICKET); } void Session::SetStreamOpenAllowed() { DCHECK(!is_destroyed()); - impl_->state_->stream_open_allowed = 1; + impl_->state()->stream_open_allowed = 1; } void Session::PopulateEarlyTransportParamsState() { DCHECK(!is_destroyed()); const ngtcp2_transport_params* tp = remote_transport_params(); if (tp != nullptr) { - impl_->state_->max_datagram_size = + impl_->state()->max_datagram_size = MaxDatagramPayload(tp->max_datagram_frame_size); } } @@ -2702,7 +2864,7 @@ bool Session::can_create_streams() const { } bool Session::can_open_streams() const { - return !is_destroyed() && impl_->state_->stream_open_allowed; + return !is_destroyed() && impl_->state()->stream_open_allowed; } uint64_t Session::max_data_left() const { @@ -2723,12 +2885,12 @@ uint64_t Session::max_local_streams_bidi() const { void Session::set_wrapped() { DCHECK(!is_destroyed()); - impl_->state_->wrapped = 1; + impl_->state()->wrapped = 1; } void Session::set_priority_supported(bool on) { DCHECK(!is_destroyed()); - impl_->state_->priority_supported = on ? 1 : 0; + impl_->state()->priority_supported = on ? 
1 : 0; } void Session::ExtendStreamOffset(stream_id id, size_t amount) { @@ -2761,14 +2923,13 @@ size_t Session::PendingDatagramCount() const { void Session::DatagramSent(datagram_id id) { Debug(this, "Datagram %" PRIu64 " sent", id); - auto& stats_ = impl_->stats_; STAT_INCREMENT(Stats, datagrams_sent); } void Session::UpdateDataStats() { if (is_destroyed()) return; Debug(this, "Updating data stats"); - auto& stats_ = impl_->stats_; + ngtcp2_conn_info info; ngtcp2_conn_get_conn_info(*this, &info); STAT_SET(Stats, bytes_in_flight, info.bytes_in_flight); @@ -2922,7 +3083,7 @@ void Session::UpdateTimer() { void Session::DatagramStatus(datagram_id datagramId, quic::DatagramStatus status) { DCHECK(!is_destroyed()); - auto& stats_ = impl_->stats_; + switch (status) { case DatagramStatus::ACKNOWLEDGED: { Debug(this, "Datagram %" PRIu64 " was acknowledged", datagramId); @@ -2940,7 +3101,7 @@ void Session::DatagramStatus(datagram_id datagramId, break; } } - if (HasListenerFlag(impl_->state_->listener_flags, + if (HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::DATAGRAM_STATUS)) { EmitDatagramStatus(datagramId, status); } @@ -2952,13 +3113,13 @@ void Session::DatagramReceived(const uint8_t* data, DCHECK(!is_destroyed()); // If there is nothing watching for the datagram on the JavaScript side, // or if the datagram is zero-length, we just drop it on the floor. 
- if (!HasListenerFlag(impl_->state_->listener_flags, + if (!HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::DATAGRAM) || datalen == 0) return; Debug(this, "Session is receiving datagram of size %zu", datalen); - auto& stats_ = impl_->stats_; + STAT_INCREMENT(Stats, datagrams_received); JS_TRY_ALLOCATE_BACKING(env(), backing, datalen) memcpy(backing->Data(), data, datalen); @@ -2979,18 +3140,18 @@ void Session::GenerateNewConnectionId(ngtcp2_cid* cid, bool Session::HandshakeCompleted() { DCHECK(!is_destroyed()); - DCHECK(!impl_->state_->handshake_completed); + DCHECK(!impl_->state()->handshake_completed); Debug(this, "Session handshake completed"); - impl_->state_->handshake_completed = 1; - auto& stats_ = impl_->stats_; + impl_->state()->handshake_completed = 1; + STAT_RECORD_TIMESTAMP(Stats, handshake_completed_at); SetStreamOpenAllowed(); // Capture the peer's max datagram frame size from the remote transport // parameters so JavaScript can check it without a C++ round-trip. 
const ngtcp2_transport_params* tp = remote_transport_params(); - impl_->state_->max_datagram_size = + impl_->state()->max_datagram_size = MaxDatagramPayload(tp->max_datagram_frame_size); // If early data was attempted but rejected by the server, @@ -3025,10 +3186,10 @@ bool Session::HandshakeCompleted() { void Session::HandshakeConfirmed() { DCHECK(!is_destroyed()); - DCHECK(!impl_->state_->handshake_confirmed); + DCHECK(!impl_->state()->handshake_confirmed); Debug(this, "Session handshake confirmed"); - impl_->state_->handshake_confirmed = 1; - auto& stats_ = impl_->stats_; + impl_->state()->handshake_confirmed = 1; + STAT_RECORD_TIMESTAMP(Stats, handshake_confirmed_at); } @@ -3166,7 +3327,7 @@ void Session::EmitClose(const QuicError& error) { void Session::set_max_datagram_size(uint16_t size) { if (!is_destroyed()) { - impl_->state_->max_datagram_size = size; + impl_->state()->max_datagram_size = size; } } @@ -3281,7 +3442,7 @@ void Session::EmitPathValidation(PathValidationResult result, if (!env()->can_call_into_js()) return; - if (!HasListenerFlag(impl_->state_->listener_flags, + if (!HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::PATH_VALIDATION)) [[likely]] { return; } @@ -3327,7 +3488,7 @@ void Session::EmitSessionTicket(Store&& ticket) { // If there is nothing listening for the session ticket, don't bother // emitting. 
- if (!HasListenerFlag(impl_->state_->listener_flags, + if (!HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::SESSION_TICKET)) [[likely]] { Debug(this, "Session ticket was discarded"); return; @@ -3384,7 +3545,7 @@ void Session::EmitEarlyDataRejected() { void Session::EmitNewToken(const uint8_t* token, size_t len) { DCHECK(!is_destroyed()); - if (!HasListenerFlag(impl_->state_->listener_flags, + if (!HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::NEW_TOKEN)) return; if (!env()->can_call_into_js()) return; @@ -3460,7 +3621,7 @@ void Session::EmitVersionNegotiation(const ngtcp2_pkt_hd& hd, void Session::EmitOrigins(std::vector&& origins) { DCHECK(!is_destroyed()); - if (!HasListenerFlag(impl_->state_->listener_flags, + if (!HasListenerFlag(impl_->state()->listener_flags, SessionListenerFlags::ORIGIN)) return; if (!env()->can_call_into_js()) return; @@ -3550,6 +3711,10 @@ void Session::InitPerContext(Realm* realm, Local target) { NODE_DEFINE_CONSTANT(target, QUIC_PROTO_MAX); NODE_DEFINE_CONSTANT(target, QUIC_PROTO_MIN); + static constexpr auto DEFAULT_HANDSHAKE_TIMEOUT = + Session::Options::DEFAULT_HANDSHAKE_TIMEOUT; + NODE_DEFINE_CONSTANT(target, DEFAULT_HANDSHAKE_TIMEOUT); + NODE_DEFINE_STRING_CONSTANT( target, "DEFAULT_CIPHERS", TLSContext::DEFAULT_CIPHERS); NODE_DEFINE_STRING_CONSTANT( diff --git a/src/quic/session.h b/src/quic/session.h index 650e8f79ba1428..472079984f313a 100644 --- a/src/quic/session.h +++ b/src/quic/session.h @@ -153,8 +153,15 @@ class Session final : public AsyncWrap, private SessionTicket::AppData::Source { bool qlog = false; // The amount of time (in milliseconds) that the endpoint will wait for the - // completion of the tls handshake. - uint64_t handshake_timeout = UINT64_MAX; + // completion of the TLS handshake. If the handshake does not complete + // within this time, the session is closed. 
This prevents a peer from + // holding a session open indefinitely in the handshake state, consuming + // server resources (ngtcp2 connection, TLS state, JS objects) without + // ever completing the connection. The default of 10 seconds is generous + // enough to accommodate slow networks with retransmissions while still + // bounding resource exposure. Set to UINT64_MAX to disable. + static constexpr uint64_t DEFAULT_HANDSHAKE_TIMEOUT = 10'000; + uint64_t handshake_timeout = DEFAULT_HANDSHAKE_TIMEOUT; // The keep-alive timeout in milliseconds. When set to a non-zero value, // ngtcp2 will automatically send PING frames to keep the connection alive @@ -353,9 +360,45 @@ class Session final : public AsyncWrap, private SessionTicket::AppData::Source { bool early = false; }; - bool Receive(Store&& store, + bool Receive(const uint8_t* data, + size_t len, const SocketAddress& local_address, - const SocketAddress& remote_address); + const SocketAddress& remote_address, + const PacketInfo& pkt_info = PacketInfo(), + uint64_t ts = 0); + + // ReadPacket processes a single inbound packet through ngtcp2 without + // triggering SendPendingData. This is the building block for batched + // receive processing: the caller (Endpoint::Receive) accumulates + // dirty sessions and a uv_check callback flushes them after all + // packets in the I/O burst have been read. + // Receive() is kept as a convenience wrapper that calls ReadPacket() + // then triggers SendPendingData (for paths like Connect that need + // immediate response). + // The data pointer is used synchronously — ngtcp2_conn_read_pkt does + // not retain a reference after returning, so the caller's buffer can + // be reused immediately. + // When ts is 0 (the default), uv_hrtime() is called internally. + // The batched receive path caches a timestamp and passes it to all + // ReadPacket() calls in the same I/O burst. 
+ bool ReadPacket(const uint8_t* data, + size_t len, + const SocketAddress& local_address, + const SocketAddress& remote_address, + const PacketInfo& pkt_info = PacketInfo(), + uint64_t ts = 0); + + // Called by BindingData's flush callback to trigger SendPendingData + // on this session. Encapsulates the application() access so that + // bindingdata.cc doesn't need the full Application type definition. + void FlushPendingData(); + + // Send a batch of packets accumulated by SendPendingData. Uses + // Endpoint::SendBatch (uv_udp_try_send2 / sendmmsg) for synchronous + // batched delivery when called from the deferred flush path. + // Handles per-packet path updates and cross-endpoint redirects. + // All Ptr entries are consumed (released or moved) on return. + void SendBatch(Packet::Ptr* packets, PathStorage* paths, size_t count); void Send(Packet::Ptr packet); void Send(Packet::Ptr packet, const PathStorage& path); @@ -572,11 +615,22 @@ class Session final : public AsyncWrap, private SessionTicket::AppData::Source { bool in_ngtcp2_callback_scope_ = false; bool in_nghttp3_callback_scope_ = false; bool destroy_deferred_ = false; + // Set when this session is in BindingData's pending_flush_sessions_ vector. + // Cleared by the flush callback before calling SendPendingData. + // Provides O(1) dedup so a session receiving multiple packets in one I/O + // burst is only scheduled for flush once. + bool pending_flush_ = false; + // When true, Session::Send prefers synchronous delivery via + // Endpoint::SendOrTrySend (uv_udp_try_send with async fallback). + // Set during FlushPendingData to avoid the one-tick latency of + // async-only sends from the uv_check callback. 
+ bool prefer_try_send_ = false; QuicConnectionPointer connection_; std::unique_ptr tls_session_; friend struct NgTcp2CallbackScope; friend struct NgHttp3CallbackScope; friend class Application; + friend class BindingData; friend class DefaultApplication; friend class Http3ApplicationImpl; friend class Endpoint; diff --git a/src/quic/streams.cc b/src/quic/streams.cc index dd7f7ecbb3880e..965232d44aec7c 100644 --- a/src/quic/streams.cc +++ b/src/quic/streams.cc @@ -151,6 +151,44 @@ struct Stream::State { STAT_STRUCT(Stream, STREAM) +// Stream uses arena-allocated stats, not AliasedStruct, so override the +// STAT_* macros to use the stats() accessor instead of stats_.Data(). +#undef STAT_INCREMENT +#undef STAT_INCREMENT_N +#undef STAT_RECORD_TIMESTAMP +#undef STAT_SET +#undef STAT_GET +#define STAT_INCREMENT(Type, name) IncrementStat(stats()); +#define STAT_INCREMENT_N(Type, name, amt) \ + IncrementStat(stats(), amt); +#define STAT_RECORD_TIMESTAMP(Type, name) \ + RecordTimestampStat(stats()); +#define STAT_SET(Type, name, val) SetStat(stats(), val) +#define STAT_GET(Type, name) GetStat(stats()) + +using StreamStateArena = AliasedStructArena; +using StreamStatsArena = AliasedStructArena; + +namespace { +StreamStateArena& GetStreamStateArena(BindingData& binding) { + if (!binding.stream_state_arena_) { + auto* arena = new StreamStateArena(); + binding.stream_state_arena_ = BindingData::ArenaPtr( + arena, +[](void* p) { delete static_cast(p); }); + } + return *static_cast(binding.stream_state_arena_.get()); +} + +StreamStatsArena& GetStreamStatsArena(BindingData& binding) { + if (!binding.stream_stats_arena_) { + auto* arena = new StreamStatsArena(); + binding.stream_stats_arena_ = BindingData::ArenaPtr( + arena, +[](void* p) { delete static_cast(p); }); + } + return *static_cast(binding.stream_stats_arena_.get()); +} +} // namespace + // ============================================================================ namespace { @@ -382,14 +420,14 @@ struct Stream::Impl 
{ code = args[0].As()->Uint64Value(&lossless); } - if (stream->state_->reset == 1) return; + if (stream->state()->reset == 1) return; stream->EndWritable(); // We can release our outbound here now. Since the stream is being reset // on the ngtcp2 side, we do not need to keep any of the data around // waiting for acknowledgement that will never come. stream->outbound_.reset(); - stream->state_->reset = 1; + stream->state()->reset = 1; if (!stream->is_pending()) { if (stream->is_remote_unidirectional()) return; @@ -993,30 +1031,56 @@ Stream::Stream(BaseObjectWeakPtr session, stream_id id, std::shared_ptr source) : AsyncWrap(session->env(), object, PROVIDER_QUIC_STREAM), - stats_(env()->isolate()), - state_(env()->isolate()), session_(std::move(session)), inbound_(DataQueue::Create()), headers_(env()->isolate()) { + auto& binding = BindingData::Get(env()); + stats_slot_ = GetStreamStatsArena(binding).Allocate(env()->isolate()); + state_slot_ = GetStreamStateArena(binding).Allocate(env()->isolate()); MakeWeak(); DCHECK(id < kMaxStreamId); - state_->id = id; - state_->pending = 0; + state()->id = id; + state()->pending = 0; // Allows us to be notified when data is actually read from the // inbound queue so that we can update the stream flow control. inbound_->addBackpressureListener(this); - JS_DEFINE_READONLY_PROPERTY( - env(), object, env()->state_string(), state_.GetArrayBuffer()); - JS_DEFINE_READONLY_PROPERTY( - env(), object, env()->stats_string(), stats_.GetArrayBuffer()); + { + const v8::HandleScope handle_scope(env()->isolate()); + // Pass the page's shared views and this slot's byte offset. JS uses + // the offset to index into the shared view — no per-stream V8 object + // creation. 
+ JS_DEFINE_READONLY_PROPERTY(env(), + object, + env()->state_string(), + state_slot_.GetPageDataView(env()->isolate())); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + FIXED_ONE_BYTE_STRING(env()->isolate(), "stateByteOffset"), + v8::Integer::NewFromUnsigned( + env()->isolate(), + static_cast(state_slot_.GetByteOffset()))); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + env()->stats_string(), + stats_slot_.GetPageBigUint64Array(env()->isolate())); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + FIXED_ONE_BYTE_STRING(env()->isolate(), "statsByteOffset"), + v8::Integer::NewFromUnsigned( + env()->isolate(), + static_cast(stats_slot_.GetByteOffset()))); + } set_outbound(std::move(source)); STAT_RECORD_TIMESTAMP(Stats, created_at); auto params = ngtcp2_conn_get_local_transport_params(this->session()); STAT_SET(Stats, max_offset, params->initial_max_data); - STAT_SET(Stats, opened_at, stats_->created_at); + STAT_SET(Stats, opened_at, stats()->created_at); } Stream::Stream(BaseObjectWeakPtr session, @@ -1024,25 +1088,48 @@ Stream::Stream(BaseObjectWeakPtr session, Direction direction, std::shared_ptr source) : AsyncWrap(session->env(), object, PROVIDER_QUIC_STREAM), - stats_(env()->isolate()), - state_(env()->isolate()), session_(std::move(session)), inbound_(DataQueue::Create()), maybe_pending_stream_( std::make_unique(direction, this, session_)), headers_(env()->isolate()) { + auto& binding = BindingData::Get(env()); + stats_slot_ = GetStreamStatsArena(binding).Allocate(env()->isolate()); + state_slot_ = GetStreamStateArena(binding).Allocate(env()->isolate()); MakeWeak(); - state_->id = kMaxStreamId; - state_->pending = 1; + state()->id = kMaxStreamId; + state()->pending = 1; // Allows us to be notified when data is actually read from the // inbound queue so that we can update the stream flow control. 
inbound_->addBackpressureListener(this); - JS_DEFINE_READONLY_PROPERTY( - env(), object, env()->state_string(), state_.GetArrayBuffer()); - JS_DEFINE_READONLY_PROPERTY( - env(), object, env()->stats_string(), stats_.GetArrayBuffer()); + { + const v8::HandleScope handle_scope(env()->isolate()); + JS_DEFINE_READONLY_PROPERTY(env(), + object, + env()->state_string(), + state_slot_.GetPageDataView(env()->isolate())); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + FIXED_ONE_BYTE_STRING(env()->isolate(), "stateByteOffset"), + v8::Integer::NewFromUnsigned( + env()->isolate(), + static_cast(state_slot_.GetByteOffset()))); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + env()->stats_string(), + stats_slot_.GetPageBigUint64Array(env()->isolate())); + JS_DEFINE_READONLY_PROPERTY( + env(), + object, + FIXED_ONE_BYTE_STRING(env()->isolate(), "statsByteOffset"), + v8::Integer::NewFromUnsigned( + env()->isolate(), + static_cast(stats_slot_.GetByteOffset()))); + } set_outbound(std::move(source)); @@ -1053,15 +1140,24 @@ Stream::Stream(BaseObjectWeakPtr session, Stream::~Stream() { // Make sure that Destroy() was called before Stream is actually destructed. - DCHECK_NE(stats_->destroyed_at, 0); + DCHECK_NE(stats()->destroyed_at, 0); + + // Release arena slots back to the freelist. + auto& binding = BindingData::Get(env()); + if (stats_slot_) { + GetStreamStatsArena(binding).ReleaseSlot(stats_slot_); + } + if (state_slot_) { + GetStreamStateArena(binding).ReleaseSlot(state_slot_); + } } void Stream::NotifyStreamOpened(stream_id id) { CHECK(is_pending()); DCHECK(id < kMaxStreamId); Debug(this, "Pending stream opened with id %" PRIi64, id); - state_->pending = 0; - state_->id = id; + state()->pending = 0; + state()->id = id; STAT_RECORD_TIMESTAMP(Stats, opened_at); // Now that the stream is actually opened, add it to the sessions // list of known open streams. 
@@ -1132,26 +1228,26 @@ void Stream::EnqueuePendingHeaders(HeadersKind kind, } bool Stream::is_pending() const { - return state_->pending; + return state()->pending; } stream_id Stream::id() const { - return state_->id; + return state()->id; } Side Stream::origin() const { CHECK(!is_pending()); - return (state_->id & 0b01) ? Side::SERVER : Side::CLIENT; + return (state()->id & 0b01) ? Side::SERVER : Side::CLIENT; } Direction Stream::direction() const { - if (state_->pending) { + if (state()->pending) { CHECK(maybe_pending_stream_.has_value()); auto& val = maybe_pending_stream_.value(); return val->direction(); } - return (state_->id & 0b10) ? Direction::UNIDIRECTIONAL - : Direction::BIDIRECTIONAL; + return (state()->id & 0b10) ? Direction::UNIDIRECTIONAL + : Direction::BIDIRECTIONAL; } Session& Stream::session() const { @@ -1169,15 +1265,15 @@ bool Stream::is_remote_unidirectional() const { } bool Stream::is_eos() const { - return state_->fin_sent; + return state()->fin_sent; } bool Stream::wants_trailers() const { - return state_->wants_trailers; + return state()->wants_trailers; } void Stream::set_early() { - state_->received_early_data = 1; + state()->received_early_data = 1; } bool Stream::is_writable() const { @@ -1187,7 +1283,7 @@ bool Stream::is_writable() const { !ngtcp2_conn_is_local_stream(session(), id())) { return false; } - return state_->write_ended == 0; + return state()->write_ended == 0; } bool Stream::has_outbound() const { @@ -1209,21 +1305,21 @@ bool Stream::is_readable() const { ngtcp2_conn_is_local_stream(session(), id())) { return false; } - return state_->read_ended == 0; + return state()->read_ended == 0; } BaseObjectPtr Stream::get_reader() { - if (!is_readable() || state_->has_reader) return {}; - state_->has_reader = 1; + if (!is_readable() || state()->has_reader) return {}; + state()->has_reader = 1; auto reader = Blob::Reader::Create(env(), Blob::Create(env(), inbound_)); reader_ = reader; return reader; } void 
Stream::set_final_size(uint64_t final_size) { - DCHECK_IMPLIES(state_->fin_received == 1, + DCHECK_IMPLIES(state()->fin_received == 1, final_size <= STAT_GET(Stats, final_size)); - state_->fin_received = 1; + state()->fin_received = 1; STAT_SET(Stats, final_size, final_size); } @@ -1232,7 +1328,7 @@ void Stream::set_outbound(std::shared_ptr source) { Debug(this, "Setting the outbound data source"); DCHECK_NULL(outbound_); outbound_ = std::make_unique(this, std::move(source)); - state_->has_outbound = 1; + state()->has_outbound = 1; // Note: We intentionally do NOT call ResumeStream here. During // construction, the stream has not yet been added to the session's // streams map, so FindStream would fail. The caller (CreateStream / @@ -1251,7 +1347,7 @@ void Stream::InitStreaming() { } Debug(this, "Initializing streaming outbound source"); outbound_ = std::make_unique(this); - state_->has_outbound = 1; + state()->has_outbound = 1; if (!is_pending()) session_->ResumeStream(id()); } @@ -1397,7 +1493,7 @@ void Stream::Commit(size_t datalen, bool fin) { Debug(this, "Committing %zu bytes", datalen); STAT_INCREMENT_N(Stats, bytes_sent, datalen); if (outbound_) outbound_->Commit(datalen); - if (fin) state_->fin_sent = 1; + if (fin) state()->fin_sent = 1; } void Stream::EndWritable() { @@ -1407,12 +1503,12 @@ void Stream::EndWritable() { // will be a non-op since we're not going to be writing any more data // into it anyway. if (outbound_) outbound_->Cap(); - state_->write_ended = 1; + state()->write_ended = 1; } void Stream::EndReadable(std::optional maybe_final_size) { if (!is_readable()) return; - state_->read_ended = 1; + state()->read_ended = 1; set_final_size(maybe_final_size.value_or(STAT_GET(Stats, bytes_received))); inbound_->cap(STAT_GET(Stats, final_size)); // Notify the JS reader so it can see EOS. 
Pass fin=true so the @@ -1422,20 +1518,20 @@ void Stream::EndReadable(std::optional maybe_final_size) { } void Stream::Destroy(QuicError error) { - if (stats_->destroyed_at != 0) return; + if (stats()->destroyed_at != 0) return; // Record the destroyed at timestamp before notifying the JavaScript side // that the stream is being destroyed. STAT_RECORD_TIMESTAMP(Stats, destroyed_at); DCHECK_NOT_NULL(session_.get()); - if (!state_->pending) { + if (!state()->pending) { Debug( this, "Stream %" PRIi64 " being destroyed with error %s", id(), error); } else { Debug(this, "Pending stream being destroyed with error %s", error); } - state_->pending = 0; + state()->pending = 0; maybe_pending_stream_.reset(); @@ -1482,12 +1578,12 @@ void Stream::ReceiveData(const uint8_t* data, // If reading has ended, or there is no data, there's nothing to do but maybe // end the readable side if this is the last bit of data we've received. Debug(this, "Receiving %zu bytes of data", len); - if (state_->read_ended == 1 || len == 0) { + if (state()->read_ended == 1 || len == 0) { if (flags.fin) EndReadable(); return; } - if (flags.early) state_->received_early_data = 1; + if (flags.early) state()->received_early_data = 1; STAT_INCREMENT_N(Stats, bytes_received, len); STAT_SET(Stats, max_offset_received, STAT_GET(Stats, bytes_received)); STAT_RECORD_TIMESTAMP(Stats, received_at); @@ -1509,10 +1605,10 @@ void Stream::ReceiveStopSending(QuicError error) { // writable side has already been shut down (e.g. we already sent // RESET_STREAM ourselves or finished sending with FIN) there is // nothing more to do here. The previous guard checked - // `state_->read_ended` which is unrelated to the writable side and + // `state()->read_ended` which is unrelated to the writable side and // suppressed STOP_SENDING handling whenever a sibling RESET_STREAM // frame had been processed first within the same packet. 
- if (state_->write_ended) return; + if (state()->write_ended) return; Debug(this, "Received stop sending with error %s", error); ngtcp2_conn_shutdown_stream_write(session(), 0, id(), error.code()); EndWritable(); @@ -1528,7 +1624,7 @@ void Stream::ReceiveStreamReset(uint64_t final_size, QuicError error) { "Received stream reset with final size %" PRIu64 " and error %s", final_size, error); - state_->reset_code = error.code(); + state()->reset_code = error.code(); EndReadable(final_size); EmitReset(error); } @@ -1536,10 +1632,10 @@ void Stream::ReceiveStreamReset(uint64_t final_size, QuicError error) { // ============================================================================ void Stream::EmitBlocked() { - // state_->wants_block will be set from the javascript side if the + // state()->wants_block will be set from the javascript side if the // stream object has a handler for the blocked event. Debug(this, "Blocked"); - if (!env()->can_call_into_js() || !state_->wants_block) { + if (!env()->can_call_into_js() || !state()->wants_block) { return; } CallbackScope cb_scope(this); @@ -1556,7 +1652,7 @@ void Stream::UpdateWriteDesiredSize() { if (!outbound_ || !outbound_->is_streaming()) return; uint64_t available; - uint64_t hwm = state_->high_water_mark; + uint64_t hwm = state()->high_water_mark; if (is_pending()) { // Pending streams don't have a stream ID yet, so ngtcp2 can't @@ -1589,8 +1685,8 @@ void Stream::UpdateWriteDesiredSize() { uint32_t clamped = static_cast( std::min(desired, std::numeric_limits::max())); - uint32_t old_size = state_->write_desired_size; - state_->write_desired_size = clamped; + uint32_t old_size = state()->write_desired_size; + state()->write_desired_size = clamped; // Fire drain when transitioning from 0 to non-zero if (old_size == 0 && desired > 0) { @@ -1607,9 +1703,9 @@ void Stream::EmitClose(const QuicError& error) { } void Stream::EmitHeaders() { - // state_->wants_headers will be set from the javascript side if the + // 
state()->wants_headers will be set from the javascript side if the // stream object has a handler for the headers event. - if (!env()->can_call_into_js() || !state_->wants_headers) { + if (!env()->can_call_into_js() || !state()->wants_headers) { return; } CallbackScope cb_scope(this); @@ -1626,9 +1722,9 @@ void Stream::EmitHeaders() { } void Stream::EmitReset(const QuicError& error) { - // state_->wants_reset will be set from the javascript side if the + // state()->wants_reset will be set from the javascript side if the // stream object has a handler for the reset event. - if (!env()->can_call_into_js() || !state_->wants_reset) { + if (!env()->can_call_into_js() || !state()->wants_reset) { return; } CallbackScope cb_scope(this); @@ -1639,9 +1735,9 @@ void Stream::EmitReset(const QuicError& error) { } void Stream::EmitWantTrailers() { - // state_->wants_trailers will be set from the javascript side if the + // state()->wants_trailers will be set from the javascript side if the // stream object has a handler for the trailers event. - if (!env()->can_call_into_js() || !state_->wants_trailers) { + if (!env()->can_call_into_js() || !state()->wants_trailers) { return; } CallbackScope cb_scope(this); diff --git a/src/quic/streams.h b/src/quic/streams.h index 0edeeed7a9209e..b72298f16636ae 100644 --- a/src/quic/streams.h +++ b/src/quic/streams.h @@ -304,6 +304,17 @@ class Stream final : public AsyncWrap, struct State; struct Stats; + // Typed accessors for arena-allocated state/stats. These are defined + // in streams.cc where State and Stats are complete types. 
+ inline State* state() { return static_cast(state_slot_.ptr); } + inline const State* state() const { + return static_cast(state_slot_.ptr); + } + inline Stats* stats() { return static_cast(stats_slot_.ptr); } + inline const Stats* stats() const { + return static_cast(stats_slot_.ptr); + } + private: struct Impl; struct PendingHeaders; @@ -362,8 +373,8 @@ class Stream final : public AsyncWrap, v8::Local headers, HeadersFlags flags); - AliasedStruct stats_; - AliasedStruct state_; + ArenaSlotBase stats_slot_; + ArenaSlotBase state_slot_; BaseObjectWeakPtr session_; std::unique_ptr outbound_; std::shared_ptr inbound_; diff --git a/test/parallel/test-quic-callback-error-ondatagram-async.mjs b/test/parallel/test-quic-callback-error-ondatagram-async.mjs index 4e6f814906fb40..eebe2e0629522c 100644 --- a/test/parallel/test-quic-callback-error-ondatagram-async.mjs +++ b/test/parallel/test-quic-callback-error-ondatagram-async.mjs @@ -38,9 +38,6 @@ await clientSession.opened; await clientSession.sendDatagram(new Uint8Array([1, 2, 3])); await serverDone.promise; -// The server session was destroyed abruptly (no CONNECTION_CLOSE sent). -// The client may receive a stateless reset if it sends any packet -// before its idle timeout fires, so closed may reject. -await assert.rejects(clientSession.closed, { code: 'ERR_QUIC_TRANSPORT_ERROR' }); +await clientSession.closed; serverEndpoint.close(); await serverEndpoint.closed; diff --git a/test/parallel/test-quic-callback-error-ondatagram.mjs b/test/parallel/test-quic-callback-error-ondatagram.mjs index f0253f22768380..69d1440ed49da6 100644 --- a/test/parallel/test-quic-callback-error-ondatagram.mjs +++ b/test/parallel/test-quic-callback-error-ondatagram.mjs @@ -41,8 +41,5 @@ await clientSession.opened; await clientSession.sendDatagram(new Uint8Array([1, 2, 3])); await serverDone.promise; -// The server session was destroyed abruptly (no CONNECTION_CLOSE sent). 
-// The client may receive a stateless reset if it sends any packet -// before its idle timeout fires, so closed may reject. -await rejects(clientSession.closed, { code: 'ERR_QUIC_TRANSPORT_ERROR' }); +await clientSession.closed; await serverEndpoint.close(); diff --git a/test/parallel/test-quic-connection-limits.mjs b/test/parallel/test-quic-connection-limits.mjs index acb0f8065d4c78..2f41c388805dc4 100644 --- a/test/parallel/test-quic-connection-limits.mjs +++ b/test/parallel/test-quic-connection-limits.mjs @@ -27,9 +27,11 @@ const endpoint = new QuicEndpoint({ maxConnectionsTotal: 1 }); // Verify the limits are readable and mutable. strictEqual(endpoint.maxConnectionsTotal, 1); -strictEqual(endpoint.maxConnectionsPerHost, 0); -endpoint.maxConnectionsPerHost = 100; +// The default maxConnectionsPerHost is 100 — a non-zero default that +// prevents a single host from exhausting server resources. strictEqual(endpoint.maxConnectionsPerHost, 100); +endpoint.maxConnectionsPerHost = 50; +strictEqual(endpoint.maxConnectionsPerHost, 50); endpoint.maxConnectionsPerHost = 0; let sessionCount = 0; diff --git a/test/parallel/test-quic-h3-handshake-failure.mjs b/test/parallel/test-quic-h3-handshake-failure.mjs new file mode 100644 index 00000000000000..128acab8fffe3d --- /dev/null +++ b/test/parallel/test-quic-h3-handshake-failure.mjs @@ -0,0 +1,56 @@ +// Flags: --experimental-quic --no-warnings + +// Regression test: HTTP/3 server must not crash when a session is closed +// before the H3 application is fully started (control streams bound). +// Previously, closing such a session would call nghttp3_conn_shutdown on +// an H3 connection whose control streams were never bound, causing an +// assertion failure in nghttp3 (conn->tx.ctrl != NULL). +// +// The test creates an H3 server and a client that immediately closes the +// session before the handshake completes. 
The server creates the H3 +// application during ALPN negotiation, but Start() (which binds control +// streams) hasn't been called yet when the session is torn down. +// The server must handle this gracefully without crashing. + +import { hasQuic, skip, mustNotCall } from '../common/index.mjs'; +import { setTimeout } from 'node:timers/promises'; +import * as fixtures from '../common/fixtures.mjs'; + +const { readKey } = fixtures; + +if (!hasQuic) { + skip('QUIC is not enabled'); +} + +const { listen, connect } = await import('node:quic'); +const { createPrivateKey } = await import('node:crypto'); + +const key = createPrivateKey(readKey('agent1-key.pem')); +const cert = readKey('agent1-cert.pem'); + +const serverEndpoint = await listen(async (serverSession) => { + await serverSession.closed; +}, { + sni: { '*': { keys: [key], certs: [cert] } }, + onheaders: mustNotCall(), +}); + +// Connect then immediately close the session before the handshake completes. +// This exercises the H3 shutdown path on the server while the H3 application +// exists but hasn't started (control streams not yet bound). +const clientSession = await connect(serverEndpoint.address, { + servername: 'localhost', + // h3 ALPN — must match the server so the H3 application is selected + // on the server side before we tear it down. +}); + +// Close immediately — don't wait for handshake. +await clientSession.close(); + +// Give the server time to process the close and tear down the session. +await setTimeout(500); + +// The critical assertion: reaching this point without a crash means the +// server correctly handled the H3 shutdown before control streams were +// bound. Verify the endpoint is still alive by closing it gracefully. 
+await serverEndpoint.close(); diff --git a/test/parallel/test-quic-internal-endpoint-stats-state.mjs b/test/parallel/test-quic-internal-endpoint-stats-state.mjs index 015155344fde42..57044a773eb2d6 100644 --- a/test/parallel/test-quic-internal-endpoint-stats-state.mjs +++ b/test/parallel/test-quic-internal-endpoint-stats-state.mjs @@ -43,8 +43,8 @@ const { isListening: false, isClosing: false, isBusy: false, - maxConnectionsPerHost: 0, - maxConnectionsTotal: 0, + maxConnectionsPerHost: 100, + maxConnectionsTotal: 10_000, pendingCallbacks: '0', });