From c76e8a151a088163f8c5da2abfb79d105054847f Mon Sep 17 00:00:00 2001 From: Max Inden Date: Mon, 16 Sep 2024 11:54:07 +0200 Subject: [PATCH 01/13] fix: typos (#2113) * fix(qpack): typo * typo --- neqo-http3/src/connection_client.rs | 2 +- neqo-qpack/src/decoder_instructions.rs | 2 +- neqo-qpack/src/encoder.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 9a1a15b4f5..25840e91a6 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -4351,7 +4351,7 @@ mod tests { assert_eq!(*server.conn.state(), State::Init); let out = server.conn.process(out.as_dgram_ref(), now()); - // Check that control and qpack streams anda SETTINGS frame are received. + // Check that control and qpack streams and a SETTINGS frame are received. // Also qpack encoder stream will send "change capacity" instruction because it has // the peer settings already. server.check_control_qpack_request_streams_resumption( diff --git a/neqo-qpack/src/decoder_instructions.rs b/neqo-qpack/src/decoder_instructions.rs index 6c4cb654b0..f69f9da3ac 100644 --- a/neqo-qpack/src/decoder_instructions.rs +++ b/neqo-qpack/src/decoder_instructions.rs @@ -89,7 +89,7 @@ impl DecoderInstructionReader { /// 2) `ClosedCriticalStream` /// 3) other errors will be translated to `DecoderStream` by the caller of this function. 
pub fn read_instructions(&mut self, recv: &mut R) -> Res { - qdebug!([self], "read a new instraction"); + qdebug!([self], "read a new instruction"); loop { match &mut self.state { DecoderInstructionReaderState::ReadInstruction => { diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index f9ff8bfd1b..bf142c3e9f 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -129,7 +129,7 @@ impl QPackEncoder { } fn read_instructions(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { - qdebug!([self], "read a new instraction"); + qdebug!([self], "read a new instruction"); loop { let mut recv = ReceiverConnWrapper::new(conn, stream_id); match self.instruction_reader.read_instructions(&mut recv) { From 1841054e5238bf37aa84544544a727b2c21d68d4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 16 Sep 2024 04:21:14 -0700 Subject: [PATCH 02/13] ci: Unpin Rust nightly (#2112) --- .github/workflows/bench.yml | 2 +- .github/workflows/check.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 44d2860da0..d758aff65b 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -10,7 +10,7 @@ env: CARGO_PROFILE_RELEASE_DEBUG: true CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 - TOOLCHAIN: nightly-2024-09-01 + TOOLCHAIN: nightly RUSTFLAGS: -C link-arg=-fuse-ld=lld -C link-arg=-Wl,--no-rosegment, -C force-frame-pointers=yes PERF_OPT: record -F997 --call-graph fp -g diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index cdf3ed0d58..e2861e3400 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -28,7 +28,7 @@ jobs: # Don't increase beyond what Firefox is currently using: # https://searchfox.org/mozilla-central/search?q=MINIMUM_RUST_VERSION&path=python/mozboot/mozboot/util.py # Keep in sync with Cargo.toml - rust-toolchain: [1.76.0, stable, nightly-2024-09-01] + rust-toolchain: [1.76.0, stable, 
nightly] type: [debug] include: - os: ubuntu-latest From 75372c214c5a5d1d220e5cc502afffad8d820e21 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 16 Sep 2024 04:22:01 -0700 Subject: [PATCH 03/13] fix: Don't pace during QNS `zerortt` test (#2115) If we pace, we might get the initial server flight before sending sufficient 0-RTT data to pass the QNS check. Broken out of #1998 --- neqo-bin/src/client/mod.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/neqo-bin/src/client/mod.rs b/neqo-bin/src/client/mod.rs index ebbab98286..863b34dced 100644 --- a/neqo-bin/src/client/mod.rs +++ b/neqo-bin/src/client/mod.rs @@ -245,15 +245,26 @@ impl Args { "handshake" | "transfer" | "retry" | "ecn" => { self.shared.use_old_http = true; } - "zerortt" | "resumption" => { + "resumption" => { if self.urls.len() < 2 { - qerror!("Warning: resumption tests won't work without >1 URL"); + qerror!("Warning: resumption test won't work without >1 URL"); + exit(127); + } + self.shared.use_old_http = true; + self.resume = true; + } + "zerortt" => { + if self.urls.len() < 2 { + qerror!("Warning: zerortt test won't work without >1 URL"); exit(127); } self.shared.use_old_http = true; self.resume = true; // PMTUD probes inflate what we sent in 1-RTT, causing QNS to fail the test. self.shared.quic_parameters.no_pmtud = true; + // If we pace, we might get the initial server flight before sending sufficient + // 0-RTT data to pass the QNS check. So let's burst. + self.shared.quic_parameters.no_pacing = true; } "multiconnect" => { self.shared.use_old_http = true; From b70e5cfc4c5e98cfbd2bc9d1e95238139260c2e1 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 17 Sep 2024 03:39:14 -0700 Subject: [PATCH 04/13] fix: Remove unnecessary `space` function (#2122) Funtion is only used internally. 
Broken out of #1998 --- neqo-transport/src/recovery/mod.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/neqo-transport/src/recovery/mod.rs b/neqo-transport/src/recovery/mod.rs index f74aa11337..5820dc07a0 100644 --- a/neqo-transport/src/recovery/mod.rs +++ b/neqo-transport/src/recovery/mod.rs @@ -153,11 +153,6 @@ impl LossRecoverySpace { } } - #[must_use] - pub const fn space(&self) -> PacketNumberSpace { - self.space - } - /// Find the time we sent the first packet that is lower than the /// largest acknowledged and that isn't yet declared lost. /// Use the value we prepared earlier in `detect_lost_packets`. @@ -867,14 +862,14 @@ impl LossRecovery { let pto = Self::pto_period_inner( primary_path.borrow().rtt(), self.pto_state.as_ref(), - space.space(), + space.space, self.fast_pto, ); space.detect_lost_packets(now, loss_delay, pto, &mut lost_packets); primary_path.borrow_mut().on_packets_lost( space.largest_acked_sent_time, - space.space(), + space.space, &lost_packets[first..], &mut self.stats.borrow_mut(), now, From 17f57103682cf2d782875555b54b3e127db7c89a Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 17 Sep 2024 04:13:36 -0700 Subject: [PATCH 05/13] ci: Turn on sccache for Rust (#2121) --- .github/actions/rust/action.yml | 39 ++++++++++++--------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index efdc7eb8fa..780236b212 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -33,6 +33,20 @@ runs: components: ${{ inputs.components }} targets: ${{ inputs.targets }} + - name: Use sccache + uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 + + - name: Enable sscache + shell: bash + run: | + echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" + echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" + if [ "$GITHUB_WORKFLOW" ]; then + echo 
"SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + fi + echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" + - name: Set up MSVC (Windows) if: runner.os == 'Windows' uses: ilammy/msvc-dev-cmd@v1 @@ -58,28 +72,3 @@ runs: env: GITHUB_TOKEN: ${{ inputs.token }} run: cargo +${{ inputs.version }} quickinstall $(echo ${{ inputs.tools }} | tr -d ",") - - # sccache slows CI down, so we leave it disabled. - # Leaving the steps below commented out, so we can re-evaluate enabling it later. - # - name: Use sccache - # uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 - - # - name: Enable sscache - # shell: bash - # run: | - # if [ "${{ runner.os }}" = "Windows" ]; then - # echo "CC=sccache cl" >> "$GITHUB_ENV" - # echo "CXX=sccache cl" >> "$GITHUB_ENV" - # else - # echo "CC=sccache cc" >> "$GITHUB_ENV" - # echo "CXX=sccache c++" >> "$GITHUB_ENV" - # fi - # echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" - # echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" - # echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" - - # Ditto for rust-cache. - # - name: Use Rust cache - # uses: Swatinem/rust-cache@23bce251a8cd2ffc3c1075eaa2367cf899916d84 # v2.7.3 - # with: - # cache-all-crates: "true" From cca3db2aea9d65757a75deacd8852238d148c2d6 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 17 Sep 2024 04:13:53 -0700 Subject: [PATCH 06/13] ci: Speed up NSS build with `sccache` (#2091) * ci: Speed up NSS build with `sccache` Maybe? 
* Again * No Windows * Fix cmake * Fixes * Fixes * echo * Again * exe * No Windows * No Windows * Finalize * Only set compiler env for the NSS build --------- Signed-off-by: Lars Eggert --- .github/actions/nss/action.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index f698ac8932..591f0a2a7d 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -59,6 +59,25 @@ runs: echo "System NSS is suitable: $NSS_VERSION" echo "BUILD_NSS=0" >> "$GITHUB_ENV" + - name: Use sccache + uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 + + - name: Enable sscache + shell: bash + run: | + if [ "${{ runner.os }}" != "Windows" ]; then + # TODO: Figure out how to make this work on Windows + echo "SCCACHE_CC=sccache cc" >> "$GITHUB_ENV" + echo "SCCACHE_CXX=sccache c++" >> "$GITHUB_ENV" + fi + echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" + echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" + if [ "$GITHUB_WORKFLOW" ]; then + echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + fi + echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" + echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" + - name: Checkout NSS if: env.BUILD_NSS == '1' uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 @@ -138,6 +157,7 @@ runs: echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" echo "$NSS_OUT/lib" >> "$GITHUB_PATH" echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" + [ "$SCCACHE_CC" ] && [ "$SCCACHE_CXX" ] && export CC="$SCCACHE_CC" CXX="$SCCACHE_CXX" $NSS_DIR/build.sh -g -Ddisable_tests=1 $OPT --static env: NSS_DIR: ${{ github.workspace }}/nss From 0cb89a994c98579813571bf9f8aef724c06a68a7 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 17 Sep 2024 05:16:01 -0700 Subject: [PATCH 07/13] ci: Only install sccache action once per workflow (#2123) * ci: Disable sccache on Windows again Because for some reason today, the 
action doesn't install: ``` Error: Error: File was unable to be removed Error: EBUSY: resource busy or locked, rmdir 'C:\hostedtoolcache\windows\sccache\0.8.1\x64' Error: File was unable to be removed Error: EBUSY: resource busy or locked, rmdir 'C:\hostedtoolcache\windows\sccache\0.8.1\x64' ``` * Check if it's installed already * Fix --- .github/actions/nss/action.yml | 4 ++++ .github/actions/rust/action.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index 591f0a2a7d..b5989476f0 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -60,6 +60,10 @@ runs: echo "BUILD_NSS=0" >> "$GITHUB_ENV" - name: Use sccache + # Apparently the action can't be installed twice in the same workflow, so check if + # it's already installed by checking if the RUSTC_WRAPPER environment variable is set + # (which every "use" of this action needs to therefore set) + if: env.RUSTC_WRAPPER != 'sccache' uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 - name: Enable sscache diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index 780236b212..c96ec7b269 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -34,6 +34,10 @@ runs: targets: ${{ inputs.targets }} - name: Use sccache + # Apparently the action can't be installed twice in the same workflow, so check if + # it's already installed by checking if the RUSTC_WRAPPER environment variable is set + # (which every "use" of this action needs to therefore set) + if: env.RUSTC_WRAPPER != 'sccache' uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 - name: Enable sscache From b72b3bae2983b19d19c7b483f6d08db45ef4a1ed Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 17 Sep 2024 06:03:18 -0700 Subject: [PATCH 08/13] fix: Don't encode large RTT guesses in tickets (#2114) * fix: Don't encode large RTT guesses in 
tickets Because under lossy conditions (e.g., QNS `handshakeloss` test), the guess can be multiple times the actual RTT, which when encoded in the resumption ticket will cause an extremely slow second handshake, often causing the test to time out. Broken out of #1998 Fixes #2088 * Fixes & tests * Suggestion from @mxinden * Fix --- neqo-transport/src/connection/mod.rs | 22 +++- .../src/connection/tests/resumption.rs | 124 +++++++++++++++++- neqo-transport/src/path.rs | 4 +- neqo-transport/src/recovery/mod.rs | 10 +- neqo-transport/src/rtt.rs | 25 +++- 5 files changed, 173 insertions(+), 12 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 29bc85ff73..577a93277c 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -48,7 +48,7 @@ use crate::{ quic_datagrams::{DatagramTracking, QuicDatagrams}, recovery::{LossRecovery, RecoveryToken, SendProfile, SentPacket}, recv_stream::RecvStreamStats, - rtt::{RttEstimate, GRANULARITY}, + rtt::{RttEstimate, GRANULARITY, INITIAL_RTT}, send_stream::SendStream, stats::{Stats, StatsCell}, stream_id::StreamType, @@ -594,9 +594,25 @@ impl Connection { fn make_resumption_token(&mut self) -> ResumptionToken { debug_assert_eq!(self.role, Role::Client); debug_assert!(self.crypto.has_resumption_token()); + // Values less than GRANULARITY are ignored when using the token, so use 0 where needed. let rtt = self.paths.primary().map_or_else( - || RttEstimate::default().estimate(), - |p| p.borrow().rtt().estimate(), + // If we don't have a path, we don't have an RTT. + || Duration::from_millis(0), + |p| { + let rtt = p.borrow().rtt().estimate(); + if p.borrow().rtt().is_guesstimate() { + // When we have no actual RTT sample, do not encode a guestimated RTT larger + // than the default initial RTT. (The guess can be very large under lossy + // conditions.) 
+ if rtt < INITIAL_RTT { + rtt + } else { + Duration::from_millis(0) + } + } else { + rtt + } + }, ); self.crypto diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index 7410e76ef8..01e977e506 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -6,7 +6,16 @@ use std::{cell::RefCell, mem, rc::Rc, time::Duration}; -use test_fixture::{assertions, now}; +use neqo_common::{Datagram, Decoder, Role}; +use neqo_crypto::AuthenticationStatus; +use test_fixture::{ + assertions, + header_protection::{ + apply_header_protection, decode_initial_header, initial_aead_and_hp, + remove_header_protection, + }, + now, split_datagram, +}; use super::{ connect, connect_with_rtt, default_client, default_server, exchange_ticket, get_tokens, @@ -14,7 +23,9 @@ use super::{ }; use crate::{ addr_valid::{AddressValidation, ValidateAddress}, - ConnectionParameters, Error, Version, + frame::FRAME_TYPE_PADDING, + rtt::INITIAL_RTT, + ConnectionParameters, Error, State, Version, MIN_INITIAL_PACKET_SIZE, }; #[test] @@ -75,6 +86,115 @@ fn remember_smoothed_rtt() { ); } +fn ticket_rtt(rtt: Duration) -> Duration { + // A simple ACK_ECN frame for a single packet with packet number 0 with a single ECT(0) mark. 
+ const ACK_FRAME_1: &[u8] = &[0x03, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00]; + + let mut client = new_client( + ConnectionParameters::default().versions(Version::Version1, vec![Version::Version1]), + ); + let mut server = default_server(); + let mut now = now(); + + let client_initial = client.process_output(now); + let (_, client_dcid, _, _) = + decode_initial_header(client_initial.as_dgram_ref().unwrap(), Role::Client).unwrap(); + let client_dcid = client_dcid.to_owned(); + + now += rtt / 2; + let server_packet = server.process(client_initial.as_dgram_ref(), now).dgram(); + let (server_initial, server_hs) = split_datagram(server_packet.as_ref().unwrap()); + let (protected_header, _, _, payload) = + decode_initial_header(&server_initial, Role::Server).unwrap(); + + // Now decrypt the packet. + let (aead, hp) = initial_aead_and_hp(&client_dcid, Role::Server); + let (header, pn) = remove_header_protection(&hp, protected_header, payload); + assert_eq!(pn, 0); + let pn_len = header.len() - protected_header.len(); + let mut buf = vec![0; payload.len()]; + let mut plaintext = aead + .decrypt(pn, &header, &payload[pn_len..], &mut buf) + .unwrap() + .to_owned(); + + // Now we need to find the frames. Make some really strong assumptions. + let mut dec = Decoder::new(&plaintext[..]); + assert_eq!(dec.decode(ACK_FRAME_1.len()), Some(ACK_FRAME_1)); + assert_eq!(dec.decode_varint(), Some(0x06)); // CRYPTO + assert_eq!(dec.decode_varint(), Some(0x00)); // offset + dec.skip_vvec(); // Skip over the payload. + + // Replace the ACK frame with PADDING. + plaintext[..ACK_FRAME_1.len()].fill(FRAME_TYPE_PADDING.try_into().unwrap()); + + // And rebuild a packet. 
+ let mut packet = header.clone(); + packet.resize(MIN_INITIAL_PACKET_SIZE, 0); + aead.encrypt(pn, &header, &plaintext, &mut packet[header.len()..]) + .unwrap(); + apply_header_protection(&hp, &mut packet, protected_header.len()..header.len()); + let si = Datagram::new( + server_initial.source(), + server_initial.destination(), + server_initial.tos(), + packet, + ); + + // Now a connection can be made successfully. + now += rtt / 2; + client.process_input(&si, now); + client.process_input(&server_hs.unwrap(), now); + client.authenticated(AuthenticationStatus::Ok, now); + let finished = client.process_output(now); + + assert_eq!(*client.state(), State::Connected); + + now += rtt / 2; + _ = server.process(finished.as_dgram_ref(), now); + assert_eq!(*server.state(), State::Confirmed); + + // Don't deliver the server's handshake finished, it has ACKs. + // Now get a ticket. + let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap(); + let validation = Rc::new(RefCell::new(validation)); + server.set_validation(&validation); + send_something(&mut server, now); + server.send_ticket(now, &[]).expect("can send ticket"); + let ticket = server.process_output(now).dgram(); + assert!(ticket.is_some()); + now += rtt / 2; + client.process_input(&ticket.unwrap(), now); + let token = get_tokens(&mut client).pop().unwrap(); + + // And connect again. 
+ let mut client = default_client(); + client.enable_resumption(now, token).unwrap(); + let ticket_rtt = client.paths.rtt(); + let mut server = resumed_server(&client); + + connect_with_rtt(&mut client, &mut server, now, Duration::from_millis(50)); + assert_eq!( + client.paths.rtt(), + Duration::from_millis(50), + "previous RTT should be completely erased" + ); + ticket_rtt +} + +#[test] +fn ticket_rtt_less_than_default() { + assert_eq!( + ticket_rtt(Duration::from_millis(10)), + Duration::from_millis(10) + ); +} + +#[test] +fn ticket_rtt_larger_than_default() { + assert_eq!(ticket_rtt(Duration::from_millis(500)), INITIAL_RTT); +} + /// Check that a resumed connection uses a token on Initial packets. #[test] fn address_validation_token_resume() { diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 35d29f0253..49c289f60b 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -27,7 +27,7 @@ use crate::{ packet::PacketBuilder, pmtud::Pmtud, recovery::{RecoveryToken, SentPacket}, - rtt::RttEstimate, + rtt::{RttEstimate, RttSource}, sender::PacketSender, stats::FrameStats, tracking::PacketNumberSpace, @@ -984,7 +984,7 @@ impl Path { &self.qlog, now - sent.time_sent(), Duration::new(0, 0), - false, + RttSource::Guesstimate, now, ); } diff --git a/neqo-transport/src/recovery/mod.rs b/neqo-transport/src/recovery/mod.rs index 5820dc07a0..f2c3e8e298 100644 --- a/neqo-transport/src/recovery/mod.rs +++ b/neqo-transport/src/recovery/mod.rs @@ -27,7 +27,7 @@ use crate::{ packet::PacketNumber, path::{Path, PathRef}, qlog::{self, QlogMetric}, - rtt::RttEstimate, + rtt::{RttEstimate, RttSource}, stats::{Stats, StatsCell}, tracking::{PacketNumberSpace, PacketNumberSpaceSet}, }; @@ -558,9 +558,13 @@ impl LossRecovery { now: Instant, ack_delay: Duration, ) { - let confirmed = self.confirmed_time.map_or(false, |t| t < send_time); + let source = if self.confirmed_time.map_or(false, |t| t < send_time) { + RttSource::AckConfirmed + } else { + 
RttSource::Ack + }; if let Some(sample) = now.checked_duration_since(send_time) { - rtt.update(&self.qlog, sample, ack_delay, confirmed, now); + rtt.update(&self.qlog, sample, ack_delay, source, now); } } diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index 7bfbbe4aaa..027b574aad 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -28,6 +28,17 @@ pub const GRANULARITY: Duration = Duration::from_millis(1); // Defined in -recovery 6.2 as 333ms but using lower value. pub const INITIAL_RTT: Duration = Duration::from_millis(100); +/// The source of the RTT measurement. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, Copy)] +pub enum RttSource { + /// RTT guess from a retry or dropping a packet number space. + Guesstimate, + /// Ack on an unconfirmed connection. + Ack, + /// Ack on a confirmed connection. + AckConfirmed, +} + #[derive(Debug)] #[allow(clippy::module_name_repetitions)] pub struct RttEstimate { @@ -37,6 +48,7 @@ pub struct RttEstimate { rttvar: Duration, min_rtt: Duration, ack_delay: PeerAckDelay, + best_source: RttSource, } impl RttEstimate { @@ -58,6 +70,7 @@ impl RttEstimate { rttvar: Duration::from_millis(0), min_rtt: rtt, ack_delay: PeerAckDelay::Fixed(Duration::from_millis(25)), + best_source: RttSource::Ack, } } @@ -83,17 +96,24 @@ impl RttEstimate { self.ack_delay.update(cwnd, mtu, self.smoothed_rtt); } + pub fn is_guesstimate(&self) -> bool { + self.best_source == RttSource::Guesstimate + } + pub fn update( &mut self, qlog: &NeqoQlog, mut rtt_sample: Duration, ack_delay: Duration, - confirmed: bool, + source: RttSource, now: Instant, ) { + debug_assert!(source >= self.best_source); + self.best_source = max(self.best_source, source); + // Limit ack delay by max_ack_delay if confirmed. 
let mad = self.ack_delay.max(); - let ack_delay = if confirmed && ack_delay > mad { + let ack_delay = if self.best_source == RttSource::AckConfirmed && ack_delay > mad { mad } else { ack_delay @@ -205,6 +225,7 @@ impl Default for RttEstimate { rttvar: INITIAL_RTT / 2, min_rtt: INITIAL_RTT, ack_delay: PeerAckDelay::default(), + best_source: RttSource::Guesstimate, } } } From 0e40d362a94a106d60c43e08d7d9eef5ce2838d2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 17 Sep 2024 23:28:13 -0700 Subject: [PATCH 09/13] ci: Run more things in parallel (#2125) * `cargo machete` * `cargo fmt` * `cargo clippy` --- .github/workflows/check.yml | 59 +++++++---------------------------- .github/workflows/clippy.yml | 47 ++++++++++++++++++++++++++++ .github/workflows/machete.yml | 34 ++++++++++++++++++++ .github/workflows/mutants.yml | 19 +++++------ .github/workflows/rustfmt.yml | 32 +++++++++++++++++++ 5 files changed, 133 insertions(+), 58 deletions(-) create mode 100644 .github/workflows/clippy.yml create mode 100644 .github/workflows/machete.yml create mode 100644 .github/workflows/rustfmt.yml diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index e2861e3400..49dfb8ec80 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -7,6 +7,7 @@ on: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] merge_group: + workflow_dispatch: env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 @@ -20,7 +21,6 @@ permissions: jobs: check: - name: Build & test strategy: fail-fast: false matrix: @@ -42,23 +42,19 @@ jobs: shell: bash steps: - - name: Checkout - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - name: Install Rust - uses: ./.github/actions/rust + - uses: ./.github/actions/rust with: version: ${{ matrix.rust-toolchain }} - components: rustfmt, clippy, llvm-tools-preview - tools: cargo-llvm-cov, cargo-nextest, 
cargo-hack, cargo-fuzz, cargo-machete + components: clippy, llvm-tools-preview + tools: cargo-llvm-cov, cargo-nextest, cargo-hack, cargo-fuzz token: ${{ secrets.GITHUB_TOKEN }} - - name: Get minimum NSS version - id: nss-version + - id: nss-version run: echo "minimum=$(cat neqo-crypto/min_version.txt)" >> "$GITHUB_OUTPUT" - - name: Install NSS - uses: ./.github/actions/nss + - uses: ./.github/actions/nss with: minimum-version: ${{ steps.nss-version.outputs.minimum }} @@ -66,6 +62,10 @@ jobs: run: | # shellcheck disable=SC2086 cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci + # Check that the fuzz targets also build + if [ ${{ startsWith(matrix.rust-toolchain, 'nightly') && 'nightly' }} == 'nightly' ]; then + cargo +${{ matrix.rust-toolchain }} fuzz check + fi - name: Run tests and determine coverage run: | @@ -90,41 +90,7 @@ jobs: RUST_LOG: warn BUILD_DIR: ${{ matrix.type == 'release' && 'release' || 'debug' }} - - name: Check formatting - run: | - if [ "${{ startsWith(matrix.rust-toolchain, 'nightly') && 'nightly' }}" != "nightly" ]; then - CONFIG_PATH="--config-path=$(mktemp)" - fi - # shellcheck disable=SC2086 - cargo +${{ matrix.rust-toolchain }} fmt --all -- --check $CONFIG_PATH - if: success() || failure() - - - name: Check for unused dependencies - run: | - # --with-metadata has false positives, see https://github.com/bnjbvr/cargo-machete/issues/127 - cargo +${{ matrix.rust-toolchain }} machete - - - name: Clippy - run: | - # Use cargo-hack to run clippy on each crate individually with its - # respective default features only. Can reveal warnings otherwise - # hidden given that a plain cargo clippy combines all features of the - # workspace. See e.g. https://github.com/mozilla/neqo/pull/1695. 
- cargo +${{ matrix.rust-toolchain }} hack clippy --all-targets --feature-powerset --exclude-features gecko -- -D warnings || ${{ matrix.rust-toolchain == 'nightly' }} - # Check that the fuzz targets also build - if [ ${{ startsWith(matrix.rust-toolchain, 'nightly') && 'nightly' }} == 'nightly' ]; then - cargo +${{ matrix.rust-toolchain }} fuzz check - fi - if: success() || failure() - - - name: Check rustdoc links - run: cargo +${{ matrix.rust-toolchain }} doc --workspace --no-deps --document-private-items - env: - RUSTDOCFLAGS: "--deny rustdoc::broken_intra_doc_links --deny warnings" - if: success() || failure() - - - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 + - uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673 # v4.5.0 with: file: lcov.info fail_ci_if_error: false @@ -135,6 +101,5 @@ jobs: if: matrix.type == 'debug' && matrix.rust-toolchain == 'stable' bench: - name: "Benchmark" needs: [check] uses: ./.github/workflows/bench.yml diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml new file mode 100644 index 0000000000..a1ef1ed6ba --- /dev/null +++ b/.github/workflows/clippy.yml @@ -0,0 +1,47 @@ +name: Clippy +on: + push: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: + workflow_dispatch: +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + clippy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/rust + with: + components: clippy + tools: cargo-hack, cargo-fuzz + token: ${{ secrets.GITHUB_TOKEN }} + + - id: nss-version + run: echo "minimum=$(cat neqo-crypto/min_version.txt)" >> 
"$GITHUB_OUTPUT" + + - uses: ./.github/actions/nss + with: + minimum-version: ${{ steps.nss-version.outputs.minimum }} + + # Use cargo-hack to run clippy on each crate individually with its + # respective default features only. Can reveal warnings otherwise + # hidden given that a plain cargo clippy combines all features of the + # workspace. See e.g. https://github.com/mozilla/neqo/pull/1695. + - run: cargo hack clippy --all-targets --feature-powerset --exclude-features gecko -- -D warnings + - run: cargo doc --workspace --no-deps --document-private-items + env: + RUSTDOCFLAGS: "--deny rustdoc::broken_intra_doc_links --deny warnings" diff --git a/.github/workflows/machete.yml b/.github/workflows/machete.yml new file mode 100644 index 0000000000..3e7404bba1 --- /dev/null +++ b/.github/workflows/machete.yml @@ -0,0 +1,34 @@ +name: Machete +on: + workflow_dispatch: + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + machete: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Install Rust + uses: ./.github/actions/rust + with: + tools: cargo-machete + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Check for unused dependencies + run: | + # --with-metadata has false positives, see https://github.com/bnjbvr/cargo-machete/issues/127 + cargo machete diff --git a/.github/workflows/mutants.yml b/.github/workflows/mutants.yml index 202f53d652..e1e0314ea3 100644 --- a/.github/workflows/mutants.yml +++ b/.github/workflows/mutants.yml @@ -1,11 +1,12 @@ name: Find mutants on: - schedule: - - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. 
- workflow_dispatch: + push: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] pull_request: branches: ["main"] paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + workflow_dispatch: concurrency: group: ${{ github.workflow }}-${{ github.ref_name }} @@ -22,17 +23,14 @@ jobs: with: fetch-depth: 0 - - name: Get minimum NSS version - id: nss-version + - id: nss-version run: echo "minimum=$(cat neqo-crypto/min_version.txt)" >> "$GITHUB_OUTPUT" - - name: Install NSS - uses: ./.github/actions/nss + - uses: ./.github/actions/nss with: minimum-version: ${{ steps.nss-version.outputs.minimum }} - - name: Install Rust - uses: ./.github/actions/rust + - uses: ./.github/actions/rust with: tools: cargo-mutants token: ${{ secrets.GITHUB_TOKEN }} @@ -63,8 +61,7 @@ jobs: echo '```' } > "$GITHUB_STEP_SUMMARY" - - name: Archive mutants.out - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 if: always() with: name: mutants.out diff --git a/.github/workflows/rustfmt.yml b/.github/workflows/rustfmt.yml new file mode 100644 index 0000000000..0f48b029a0 --- /dev/null +++ b/.github/workflows/rustfmt.yml @@ -0,0 +1,32 @@ +name: Format +on: + push: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: + workflow_dispatch: +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + format: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/rust + with: + version: nightly + components: rustfmt + token: ${{ secrets.GITHUB_TOKEN }} + - run: cargo fmt --all -- --check From 
b780e5331e6e61a87b05d342128fdf4d6f7e06b4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 18 Sep 2024 03:00:05 -0700 Subject: [PATCH 10/13] ci: More parallelization and caching (#2124) * ci: Try and use artifacts to cache prebuilt NSS * needs * actions/cache * Fix * Fix * no sccache * sccache is killing our cache * Set env * Env * NSS_PREBUILT * Check if set * Cache on self-hosted runner * Fixes * Fixes * Fixes * Run fuzz in parallel * Invert * fuzz-bench * SCCACHE_INSTALLED && build -> check * Fixes * Fixes * Don't update rustup * Compile less --- .github/actions/nss/action.yml | 66 ++++++++++++++++++++++++-------- .github/actions/rust/action.yml | 22 +++++------ .github/workflows/bench.yml | 4 +- .github/workflows/check.yml | 19 +++++---- .github/workflows/clippy.yml | 2 +- .github/workflows/fuzz-bench.yml | 39 +++++++++++++++++++ Cargo.toml | 5 +++ neqo-crypto/build.rs | 13 ++++--- 8 files changed, 125 insertions(+), 45 deletions(-) create mode 100644 .github/workflows/fuzz-bench.yml diff --git a/.github/actions/nss/action.yml b/.github/actions/nss/action.yml index b5989476f0..b8f7470f38 100644 --- a/.github/actions/nss/action.yml +++ b/.github/actions/nss/action.yml @@ -61,14 +61,19 @@ runs: - name: Use sccache # Apparently the action can't be installed twice in the same workflow, so check if - # it's already installed by checking if the RUSTC_WRAPPER environment variable is set + # it's already installed by checking if the SCCACHE_ENABLED environment variable is set # (which every "use" of this action needs to therefore set) - if: env.RUSTC_WRAPPER != 'sccache' + # + # Also, only enable sscache on our self-hosted runner, because the GitHub cache limit + # is too small for this to be effective there. 
+ if: env.SCCACHE_ENABLED != '1' && env.BUILD_NSS == '1' && runner.environment != 'github-hosted' uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 - name: Enable sscache + if: env.BUILD_NSS == '1' && runner.environment != 'github-hosted' shell: bash run: | + echo "SCCACHE_ENABLED=1" >> "$GITHUB_ENV" if [ "${{ runner.os }}" != "Windows" ]; then # TODO: Figure out how to make this work on Windows echo "SCCACHE_CC=sccache cc" >> "$GITHUB_ENV" @@ -76,11 +81,9 @@ runs: fi echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" - if [ "$GITHUB_WORKFLOW" ]; then + if [ "${{ runner.environment }}" == "github-hosted" ]; then echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" fi - echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" - echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" - name: Checkout NSS if: env.BUILD_NSS == '1' @@ -96,6 +99,34 @@ runs: repository: nss-dev/nspr path: nspr + - name: Get head revisions + if: env.BUILD_NSS == '1' + shell: bash + run: | + NSS_HEAD=$(git -C nss rev-parse HEAD) + NSPR_HEAD=$(git -C nspr rev-parse HEAD) + echo "NSS_HEAD=$NSS_HEAD" >> "$GITHUB_ENV" + echo "NSPR_HEAD=$NSPR_HEAD" >> "$GITHUB_ENV" + + - name: Cache NSS + id: cache + if: env.BUILD_NSS == '1' && runner.environment == 'github-hosted' + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: dist + key: nss-${{ runner.os }}-${{ inputs.type }}-${{ env.NSS_HEAD }}-${{ env.NSPR_HEAD }} + + - name: Check if build is needed + if: env.BUILD_NSS == '1' && runner.environment == 'github-hosted' + shell: bash + run: | + if [ "${{ steps.cache.outputs.cache-hit }}" == "true" ]; then + echo "Using cached prebuilt NSS" + echo "BUILD_NSS=0" >> "$GITHUB_ENV" + else + echo "Building NSS from source" + fi + - name: Install build dependencies (Linux) shell: bash if: runner.os == 'Linux' && env.BUILD_NSS == '1' && runner.environment == 'github-hosted' @@ -143,6 +174,21 @@ runs: # 
See https://github.com/ilammy/msvc-dev-cmd#name-conflicts-with-shell-bash rm /usr/bin/link.exe || true + - name: Set up environment + shell: bash + run: | + NSS_TARGET="${{ inputs.type }}" + echo "NSS_TARGET=$NSS_TARGET" >> "$GITHUB_ENV" + NSS_OUT="$NSS_DIR/../dist/$NSS_TARGET" + echo "LD_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" + echo "$NSS_OUT/lib" >> "$GITHUB_PATH" + echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" + echo "NSS_PREBUILT=1" >> "$GITHUB_ENV" + env: + NSS_DIR: ${{ github.workspace }}/nss + NSPR_DIR: ${{ github.workspace }}/nspr + - name: Build shell: bash if: env.BUILD_NSS == '1' @@ -154,15 +200,5 @@ runs: OPT="-o" [ "${{ runner.os }}" != "Windows" ] && export CFLAGS="-ggdb3 -fno-omit-frame-pointer" fi - NSS_TARGET="${{ inputs.type }}" - echo "NSS_TARGET=$NSS_TARGET" >> "$GITHUB_ENV" - NSS_OUT="$NSS_DIR/../dist/$NSS_TARGET" - echo "LD_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" - echo "DYLD_FALLBACK_LIBRARY_PATH=$NSS_OUT/lib" >> "$GITHUB_ENV" - echo "$NSS_OUT/lib" >> "$GITHUB_PATH" - echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" [ "$SCCACHE_CC" ] && [ "$SCCACHE_CXX" ] && export CC="$SCCACHE_CC" CXX="$SCCACHE_CXX" $NSS_DIR/build.sh -g -Ddisable_tests=1 $OPT --static - env: - NSS_DIR: ${{ github.workspace }}/nss - NSPR_DIR: ${{ github.workspace }}/nspr diff --git a/.github/actions/rust/action.yml b/.github/actions/rust/action.yml index c96ec7b269..0f47e8fb2b 100644 --- a/.github/actions/rust/action.yml +++ b/.github/actions/rust/action.yml @@ -21,11 +21,6 @@ inputs: runs: using: composite steps: - - name: Upgrade rustup (MacOS) - shell: bash - if: runner.os == 'MacOS' - run: brew update && brew upgrade rustup - - name: Install Rust uses: dtolnay/rust-toolchain@21dc36fb71dd22e3317045c0c31a3f4249868b17 # master with: @@ -35,21 +30,24 @@ runs: - name: Use sccache # Apparently the action can't be installed twice in the same workflow, so check if - # it's already installed by checking if the 
RUSTC_WRAPPER environment variable is set + # it's already installed by checking if the SCCACHE_ENABLED environment variable is set # (which every "use" of this action needs to therefore set) - if: env.RUSTC_WRAPPER != 'sccache' + # + # Also, only enable sscache on our self-hosted runner, because the GitHub cache limit + # is too small for this to be effective there. + if: env.SCCACHE_ENABLED != '1' && runner.environment != 'github-hosted' uses: mozilla-actions/sccache-action@2e7f9ec7921547d4b46598398ca573513895d0bd # v0.0.4 - name: Enable sscache + if: runner.environment != 'github-hosted' shell: bash run: | - echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" - echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> "$GITHUB_ENV" - if [ "$GITHUB_WORKFLOW" ]; then - echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" - fi + echo "SCCACHE_ENABLED=1" >> "$GITHUB_ENV" echo "RUSTC_WRAPPER=sccache" >> "$GITHUB_ENV" echo "CARGO_INCREMENTAL=0" >> "$GITHUB_ENV" + if [ "${{ runner.environment }}" == "github-hosted" ]; then + echo "SCCACHE_GHA_ENABLED=true" >> "$GITHUB_ENV" + fi - name: Set up MSVC (Windows) if: runner.os == 'Windows' diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index d758aff65b..8b959fb2f2 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -58,8 +58,8 @@ jobs: - name: Build neqo run: | - cargo "+$TOOLCHAIN" bench --features bench --no-run - cargo "+$TOOLCHAIN" build --release + cargo "+$TOOLCHAIN" bench --workspace --features bench --no-run + cargo "+$TOOLCHAIN" build --release --bin neqo-client --bin neqo-server - name: Build msquic run: | diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 49dfb8ec80..fb7877b9ee 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -47,8 +47,8 @@ jobs: - uses: ./.github/actions/rust with: version: ${{ matrix.rust-toolchain }} - components: clippy, llvm-tools-preview - tools: cargo-llvm-cov, cargo-nextest, cargo-hack, 
cargo-fuzz + components: ${{ matrix.rust-toolchain == 'stable' && 'llvm-tools-preview' || '' }} + tools: ${{ matrix.rust-toolchain == 'stable' && 'cargo-llvm-cov, ' || '' }} cargo-nextest token: ${{ secrets.GITHUB_TOKEN }} - id: nss-version @@ -58,20 +58,19 @@ jobs: with: minimum-version: ${{ steps.nss-version.outputs.minimum }} - - name: Build + - name: Check run: | # shellcheck disable=SC2086 - cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci - # Check that the fuzz targets also build - if [ ${{ startsWith(matrix.rust-toolchain, 'nightly') && 'nightly' }} == 'nightly' ]; then - cargo +${{ matrix.rust-toolchain }} fuzz check - fi + cargo +${{ matrix.rust-toolchain }} check $BUILD_TYPE --all-targets --features ci - name: Run tests and determine coverage run: | # shellcheck disable=SC2086 - RUST_LOG=trace cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --features ci --no-fail-fast --lcov --output-path lcov.info - cargo +${{ matrix.rust-toolchain }} bench --features bench --no-run + if [ "${{ matrix.rust-toolchain }}" == "stable" ]; then + RUST_LOG=trace cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --features ci --no-fail-fast --lcov --output-path lcov.info + else + RUST_LOG=trace cargo +${{ matrix.rust-toolchain }} nextest run $BUILD_TYPE --features ci --no-fail-fast + fi - name: Run client/server transfer run: | diff --git a/.github/workflows/clippy.yml b/.github/workflows/clippy.yml index a1ef1ed6ba..c323f79048 100644 --- a/.github/workflows/clippy.yml +++ b/.github/workflows/clippy.yml @@ -27,7 +27,7 @@ jobs: - uses: ./.github/actions/rust with: components: clippy - tools: cargo-hack, cargo-fuzz + tools: cargo-hack token: ${{ secrets.GITHUB_TOKEN }} - id: nss-version diff --git a/.github/workflows/fuzz-bench.yml b/.github/workflows/fuzz-bench.yml new file mode 100644 index 0000000000..6ffb3d1cbb --- /dev/null +++ b/.github/workflows/fuzz-bench.yml @@ -0,0 +1,39 @@ +name: Fuzz & Bench +on: + 
workflow_dispatch: + pull_request: + branches: ["main"] + paths-ignore: ["*.md", "*.png", "*.svg", "LICENSE-*"] + merge_group: +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: 1 + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }} + cancel-in-progress: true + +permissions: + contents: read + +jobs: + fuzz-bench: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: ./.github/actions/rust + with: + version: nightly + tools: cargo-fuzz + token: ${{ secrets.GITHUB_TOKEN }} + + - id: nss-version + run: echo "minimum=$(cat neqo-crypto/min_version.txt)" >> "$GITHUB_OUTPUT" + + - uses: ./.github/actions/nss + with: + minimum-version: ${{ steps.nss-version.outputs.minimum }} + + # Check that the fuzz and bench targets build + - run: cargo fuzz check + - run: cargo bench --features bench --no-run diff --git a/Cargo.toml b/Cargo.toml index 815470bddd..8c7f0e1f6a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,11 @@ nursery = { level = "warn", priority = -1 } pedantic = { level = "warn", priority = -1 } multiple_crate_versions = "allow" +# Optimize build dependencies, because bindgen and proc macros / style +# compilation take more to run than to build otherwise. +[profile.dev.build-override] +opt-level = 1 + [profile.release] lto = "fat" diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index 34cc842b5e..b61a8e92af 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -109,14 +109,14 @@ fn get_bash() -> PathBuf { ) } -fn build_nss(dir: PathBuf) { +fn build_nss(dir: PathBuf, nsstarget: &str) { let mut build_nss = vec![ String::from("./build.sh"), String::from("-Ddisable_tests=1"), // Generate static libraries in addition to shared libraries. 
String::from("--static"), ]; - if !is_debug() { + if nsstarget == "Release" { build_nss.push(String::from("-o")); } if let Ok(d) = env::var("NSS_JOBS") { @@ -317,15 +317,18 @@ fn setup_standalone(nss: &str) -> Vec { "The NSS_DIR environment variable is expected to be an absolute path." ); - build_nss(nss.clone()); - // $NSS_DIR/../dist/ let nssdist = nss.parent().unwrap().join("dist"); println!("cargo:rerun-if-env-changed=NSS_TARGET"); let nsstarget = env::var("NSS_TARGET") .unwrap_or_else(|_| fs::read_to_string(nssdist.join("latest")).unwrap()); - let nsstarget = nssdist.join(nsstarget.trim()); + // If NSS_PREBUILT is set, we assume that the NSS libraries are already built. + if env::var("NSS_PREBUILT").is_err() { + build_nss(nss, &nsstarget); + } + + let nsstarget = nssdist.join(nsstarget.trim()); let includes = get_includes(&nsstarget, &nssdist); let nsslibdir = nsstarget.join("lib"); From 899736f063966b4347a60113b63776cefe1734b8 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 18 Sep 2024 03:25:49 -0700 Subject: [PATCH 11/13] fix: Resend more data during handshake (#2119) We don't track which packets are coalesced with others, so when we detect a loss in one packet number space, we cannot resend any coalesced packets in other packet number space. This PR tries to approximate this behavior by scheduling un-ACKed Handshake and 0-RTT packets for RTX when packets are lost in the Initial space. Broken out of #1998 --- neqo-transport/src/connection/mod.rs | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 577a93277c..8dc44803ea 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1412,12 +1412,14 @@ impl Connection { // that Initial packets were lost. // Resend Initial CRYPTO frames immediately a few times just // in case. 
As we don't have an RTT estimate yet, this helps - // when there is a short RTT and losses. + // when there is a short RTT and losses. Also mark all 0-RTT + // data as lost. if dcid.is_none() && self.cid_manager.is_valid(packet.dcid()) && self.stats.borrow().saved_datagrams <= EXTRA_INITIALS { self.crypto.resend_unacked(PacketNumberSpace::Initial); + self.resend_0rtt(now); } } (PacketType::VersionNegotiation | PacketType::Retry | PacketType::OtherVersion, ..) => { @@ -2865,8 +2867,13 @@ impl Connection { self.handshake(now, packet_version, space, Some(&buf))?; self.create_resumption_token(now); } else { - // If we get a useless CRYPTO frame send outstanding CRYPTO frames again. + // If we get a useless CRYPTO frame send outstanding CRYPTO frames and 0-RTT + // data again. self.crypto.resend_unacked(space); + if space == PacketNumberSpace::Initial { + self.crypto.resend_unacked(PacketNumberSpace::Handshake); + self.resend_0rtt(now); + } } } Frame::NewToken { token } => { @@ -3078,21 +3085,22 @@ impl Connection { stats.largest_acknowledged = max(stats.largest_acknowledged, largest_acknowledged); } + /// Tell 0-RTT packets that they were "lost". + fn resend_0rtt(&mut self, now: Instant) { + if let Some(path) = self.paths.primary() { + let dropped = self.loss_recovery.drop_0rtt(&path, now); + self.handle_lost_packets(&dropped); + } + } + /// When the server rejects 0-RTT we need to drop a bunch of stuff. fn client_0rtt_rejected(&mut self, now: Instant) { if !matches!(self.zero_rtt_state, ZeroRttState::Sending) { return; } qdebug!([self], "0-RTT rejected"); - - // Tell 0-RTT packets that they were "lost". 
- if let Some(path) = self.paths.primary() { - let dropped = self.loss_recovery.drop_0rtt(&path, now); - self.handle_lost_packets(&dropped); - } - + self.resend_0rtt(now); self.streams.zero_rtt_rejected(); - self.crypto.states.discard_0rtt_keys(); self.events.client_0rtt_rejected(); } From eb3e83506cdb408d365cedef8803452a064c3dfa Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 18 Sep 2024 12:26:31 +0200 Subject: [PATCH 12/13] perf(transport): have add_datagram take Vec (#2120) `add_datagram` takes a Quic datagram and adds it to the queue of datagrams to be sent out. Previously it would take a reference (i.e. `&[u8]`) and would allocate it into a new `Vec` before enqueuing. At the call-site the original allocation (referenced by the `&[u8]`) would go out-of-scope and thus be de-allocated. This is a wasted allocation for each Quic datagram to be sent. This commit has the call-site pass the owned `Vec` down right away. Co-authored-by: Lars Eggert --- .../extended_connect/webtransport_session.rs | 2 +- neqo-transport/src/connection/mod.rs | 2 +- .../src/connection/tests/datagram.rs | 98 ++++++++++++------- neqo-transport/src/quic_datagrams.rs | 9 +- 4 files changed, 69 insertions(+), 42 deletions(-) diff --git a/neqo-http3/src/features/extended_connect/webtransport_session.rs b/neqo-http3/src/features/extended_connect/webtransport_session.rs index 79becfaec2..ea8a87659c 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_session.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_session.rs @@ -415,7 +415,7 @@ impl WebTransportSession { let mut dgram_data = Encoder::default(); dgram_data.encode_varint(self.session_id.as_u64() / 4); dgram_data.encode(buf); - conn.send_datagram(dgram_data.as_ref(), id)?; + conn.send_datagram(dgram_data.into(), id)?; } else { debug_assert!(false); return Err(Error::Unavailable); diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 8dc44803ea..d8d9db2422 100--- 
a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -3460,7 +3460,7 @@ impl Connection { /// to check the estimated max datagram size and to use smaller datagrams. /// `max_datagram_size` is just a current estimate and will change over /// time depending on the encoded size of the packet number, ack frames, etc. - pub fn send_datagram(&mut self, buf: &[u8], id: impl Into) -> Res<()> { + pub fn send_datagram(&mut self, buf: Vec, id: impl Into) -> Res<()> { self.quic_datagrams .add_datagram(buf, id.into(), &mut self.stats.borrow_mut()) } diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index ec2795a232..73ea72d4b4 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -58,12 +58,12 @@ fn datagram_disabled_both() { assert_eq!(client.max_datagram_size(), Err(Error::NotAvailable)); assert_eq!(server.max_datagram_size(), Err(Error::NotAvailable)); assert_eq!( - client.send_datagram(DATA_SMALLER_THAN_MTU, None), + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), None), Err(Error::TooMuchData) ); assert_eq!(server.stats().frame_tx.datagram, 0); assert_eq!( - server.send_datagram(DATA_SMALLER_THAN_MTU, None), + server.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), None), Err(Error::TooMuchData) ); assert_eq!(server.stats().frame_tx.datagram, 0); @@ -82,11 +82,14 @@ fn datagram_enabled_on_client() { Ok(DATAGRAM_LEN_SMALLER_THAN_MTU) ); assert_eq!( - client.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), Err(Error::TooMuchData) ); let dgram_sent = server.stats().frame_tx.datagram; - assert_eq!(server.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + server.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let out = server.process_output(now()).dgram().unwrap(); assert_eq!(server.stats().frame_tx.datagram, 
dgram_sent + 1); @@ -110,11 +113,14 @@ fn datagram_enabled_on_server() { ); assert_eq!(server.max_datagram_size(), Err(Error::NotAvailable)); assert_eq!( - server.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), + server.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), Err(Error::TooMuchData) ); let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let out = client.process_output(now()).dgram().unwrap(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); @@ -156,7 +162,10 @@ fn limit_data_size() { // Datagram can be queued because they are smaller than allowed by the peer, // but they cannot be sent. - assert_eq!(server.send_datagram(DATA_BIGGER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + server.send_datagram(DATA_BIGGER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let dgram_dropped_s = server.stats().datagram_tx.dropped_too_big; let dgram_sent_s = server.stats().frame_tx.datagram; @@ -172,7 +181,10 @@ fn limit_data_size() { )); // The same test for the client side. - assert_eq!(client.send_datagram(DATA_BIGGER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + client.send_datagram(DATA_BIGGER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let dgram_sent_c = client.stats().frame_tx.datagram; assert!(client.process_output(now()).dgram().is_none()); assert_eq!(client.stats().frame_tx.datagram, dgram_sent_c); @@ -188,8 +200,14 @@ fn after_dgram_dropped_continue_writing_frames() { // Datagram can be queued because they are smaller than allowed by the peer, // but they cannot be sent. 
- assert_eq!(client.send_datagram(DATA_BIGGER_THAN_MTU, Some(1)), Ok(())); - assert_eq!(client.send_datagram(DATA_SMALLER_THAN_MTU, Some(2)), Ok(())); + assert_eq!( + client.send_datagram(DATA_BIGGER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); + assert_eq!( + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(2)), + Ok(()) + ); let datagram_dropped = |e| { matches!( @@ -214,7 +232,10 @@ fn datagram_acked() { let (mut client, mut server) = connect_datagram(); let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); @@ -267,7 +288,7 @@ fn datagram_after_stream_data() { // Write a datagram first. let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_MTU, Some(1)), Ok(())); + assert_eq!(client.send_datagram(DATA_MTU.to_vec(), Some(1)), Ok(())); // Create a stream with normal priority and send some data. let stream_id = client.stream_create(StreamType::BiDi).unwrap(); @@ -309,7 +330,7 @@ fn datagram_before_stream_data() { // Write a datagram. let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_MTU, Some(1)), Ok(())); + assert_eq!(client.send_datagram(DATA_MTU.to_vec(), Some(1)), Ok(())); if let ConnectionEvent::Datagram(data) = &send_packet_and_get_server_event(&mut client, &mut server) @@ -331,7 +352,10 @@ fn datagram_lost() { let (mut client, _) = connect_datagram(); let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let _out = client.process_output(now()).dgram(); // This packet will be lost. 
assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); @@ -358,7 +382,10 @@ fn datagram_sent_once() { let (mut client, _) = connect_datagram(); let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), Ok(())); + assert_eq!( + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); let _out = client.process_output(now()).dgram(); assert_eq!(client.stats().frame_tx.datagram, dgram_sent + 1); @@ -402,16 +429,19 @@ fn outgoing_datagram_queue_full() { let (mut client, mut server) = connect_datagram(); let dgram_sent = client.stats().frame_tx.datagram; - assert_eq!(client.send_datagram(DATA_SMALLER_THAN_MTU, Some(1)), Ok(())); assert_eq!( - client.send_datagram(DATA_SMALLER_THAN_MTU_2, Some(2)), + client.send_datagram(DATA_SMALLER_THAN_MTU.to_vec(), Some(1)), + Ok(()) + ); + assert_eq!( + client.send_datagram(DATA_SMALLER_THAN_MTU_2.to_vec(), Some(2)), Ok(()) ); // The outgoing datagram queue limit is 2, therefore the datagram with id 1 // will be dropped after adding one more datagram. 
let dgram_dropped = client.stats().datagram_tx.dropped_queue_full; - assert_eq!(client.send_datagram(DATA_MTU, Some(3)), Ok(())); + assert_eq!(client.send_datagram(DATA_MTU.to_vec(), Some(3)), Ok(())); assert!(matches!( client.next_event().unwrap(), ConnectionEvent::OutgoingDatagramOutcome { id, outcome } if id == 1 && outcome == OutgoingDatagramOutcome::DroppedQueueFull @@ -441,7 +471,7 @@ fn outgoing_datagram_queue_full() { )); } -fn send_datagram(sender: &mut Connection, receiver: &mut Connection, data: &[u8]) { +fn send_datagram(sender: &mut Connection, receiver: &mut Connection, data: Vec) { let dgram_sent = sender.stats().frame_tx.datagram; assert_eq!(sender.send_datagram(data, Some(1)), Ok(())); let out = sender.process_output(now()).dgram().unwrap(); @@ -469,9 +499,9 @@ fn multiple_datagram_events() { let mut server = default_server(); connect_force_idle(&mut client, &mut server); - send_datagram(&mut server, &mut client, FIRST_DATAGRAM); - send_datagram(&mut server, &mut client, SECOND_DATAGRAM); - send_datagram(&mut server, &mut client, THIRD_DATAGRAM); + send_datagram(&mut server, &mut client, FIRST_DATAGRAM.to_vec()); + send_datagram(&mut server, &mut client, SECOND_DATAGRAM.to_vec()); + send_datagram(&mut server, &mut client, THIRD_DATAGRAM.to_vec()); let mut datagrams = client.events().filter_map(|evt| { if let ConnectionEvent::Datagram(d) = evt { @@ -486,7 +516,7 @@ fn multiple_datagram_events() { assert!(datagrams.next().is_none()); // New events can be queued. 
- send_datagram(&mut server, &mut client, FOURTH_DATAGRAM); + send_datagram(&mut server, &mut client, FOURTH_DATAGRAM.to_vec()); let mut datagrams = client.events().filter_map(|evt| { if let ConnectionEvent::Datagram(d) = evt { Some(d) @@ -515,9 +545,9 @@ fn too_many_datagram_events() { let mut server = default_server(); connect_force_idle(&mut client, &mut server); - send_datagram(&mut server, &mut client, FIRST_DATAGRAM); - send_datagram(&mut server, &mut client, SECOND_DATAGRAM); - send_datagram(&mut server, &mut client, THIRD_DATAGRAM); + send_datagram(&mut server, &mut client, FIRST_DATAGRAM.to_vec()); + send_datagram(&mut server, &mut client, SECOND_DATAGRAM.to_vec()); + send_datagram(&mut server, &mut client, THIRD_DATAGRAM.to_vec()); // Datagram with FIRST_DATAGRAM data will be dropped. assert!(matches!( @@ -536,7 +566,7 @@ fn too_many_datagram_events() { assert_eq!(client.stats().incoming_datagram_dropped, 1); // New events can be queued. - send_datagram(&mut server, &mut client, FOURTH_DATAGRAM); + send_datagram(&mut server, &mut client, FOURTH_DATAGRAM.to_vec()); assert!(matches!( client.next_event().unwrap(), ConnectionEvent::Datagram(data) if data == FOURTH_DATAGRAM @@ -552,11 +582,11 @@ fn multiple_quic_datagrams_in_one_packet() { let dgram_sent = client.stats().frame_tx.datagram; // Enqueue 2 datagrams that can fit in a single packet. assert_eq!( - client.send_datagram(DATA_SMALLER_THAN_MTU_2, Some(1)), + client.send_datagram(DATA_SMALLER_THAN_MTU_2.to_vec(), Some(1)), Ok(()) ); assert_eq!( - client.send_datagram(DATA_SMALLER_THAN_MTU_2, Some(2)), + client.send_datagram(DATA_SMALLER_THAN_MTU_2.to_vec(), Some(2)), Ok(()) ); @@ -608,21 +638,21 @@ fn datagram_fill() { let buf = vec![9; space]; // This will completely fill available space. - send_datagram(&mut client, &mut server, &buf); + send_datagram(&mut client, &mut server, buf.clone()); // This will leave 1 byte free, but more frames won't be added in this space. 
- send_datagram(&mut client, &mut server, &buf[..buf.len() - 1]); + send_datagram(&mut client, &mut server, buf[..buf.len() - 1].to_vec()); // This will leave 2 bytes free, which is enough space for a length field, // but not enough space for another frame after that. - send_datagram(&mut client, &mut server, &buf[..buf.len() - 2]); + send_datagram(&mut client, &mut server, buf[..buf.len() - 2].to_vec()); // Three bytes free will be space enough for a length frame, but not enough // space left over for another frame (we need 2 bytes). - send_datagram(&mut client, &mut server, &buf[..buf.len() - 3]); + send_datagram(&mut client, &mut server, buf[..buf.len() - 3].to_vec()); // Four bytes free is enough space for another frame. let called = Rc::new(RefCell::new(false)); client.test_frame_writer = Some(Box::new(TrackingFrameWriter { called: Rc::clone(&called), })); - send_datagram(&mut client, &mut server, &buf[..buf.len() - 4]); + send_datagram(&mut client, &mut server, buf[..buf.len() - 4].to_vec()); assert!(*called.borrow()); } diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index 241ce30389..0d1a7dd843 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -150,11 +150,11 @@ impl QuicDatagrams { /// not fit into the packet. pub fn add_datagram( &mut self, - buf: &[u8], + data: Vec, tracking: DatagramTracking, stats: &mut Stats, ) -> Res<()> { - if u64::try_from(buf.len())? > self.remote_datagram_size { + if u64::try_from(data.len())? 
> self.remote_datagram_size { return Err(Error::TooMuchData); } if self.datagrams.len() == self.max_queued_outgoing_datagrams { @@ -167,10 +167,7 @@ impl QuicDatagrams { ); stats.datagram_tx.dropped_queue_full += 1; } - self.datagrams.push_back(QuicDatagram { - data: buf.to_vec(), - tracking, - }); + self.datagrams.push_back(QuicDatagram { data, tracking }); Ok(()) } From d6279bf40c91c4cb45c358d5f614dc6190e4c9eb Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 18 Sep 2024 04:12:44 -0700 Subject: [PATCH 13/13] fix: Make anti-replay window 1s for tests (#2116) This caused some test failures, and @martinthomson discovered this as the reason at the IETF 120 hackathon. Broken out of #1998 --- test-fixture/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index a4ab021289..d582946681 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -53,7 +53,8 @@ pub fn fixture_init() { // This needs to be > 2ms to avoid it being rounded to zero. // NSS operates in milliseconds and halves any value it is provided. -pub const ANTI_REPLAY_WINDOW: Duration = Duration::from_millis(10); +// But make it a second, so that tests with reasonable RTTs don't fail. +pub const ANTI_REPLAY_WINDOW: Duration = Duration::from_millis(1000); /// A baseline time for all tests. This needs to be earlier than what `now()` produces /// because of the need to have a span of time elapse for anti-replay purposes.