From c460c6e5811e019fb0410bd5ec8ec9cc10b7ad09 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 6 May 2020 12:37:58 -0700 Subject: [PATCH 001/178] Change store debug_assert to only run in tests --- src/proto/streams/store.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index ebb1cd712..09d8a64a6 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -204,6 +204,12 @@ impl Store { } } +// While running h2 unit/integration tests, enable this debug assertion. +// +// In practice, we don't need to ensure this. But the integration tests +// help to make sure we've cleaned up in cases where we could (like, the +// runtime isn't suddenly dropping the task for unknown reasons). +#[cfg(feature = "unstable")] impl Drop for Store { fn drop(&mut self) { use std::thread; From ecb31135cb5dd3475fb09fde735fe916b52b2697 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 6 May 2020 13:10:27 -0700 Subject: [PATCH 002/178] v0.2.5 --- CHANGELOG.md | 4 ++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 66a88460e..162e47541 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.2.5 (May 6, 2020) + +* Fix rare debug assert failure in store shutdown. + # 0.2.4 (March 30, 2020) * Fix when receiving `SETTINGS_HEADER_TABLE_SIZE` setting. diff --git a/Cargo.toml b/Cargo.toml index ede6b9143..ffd14438d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.2.4" +version = "0.2.5" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.2.4/h2/" +documentation = "https://docs.rs/h2/0.2.5/h2/" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index f0bd67d63..8fd77b39b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.2.4")] +#![doc(html_root_url = "https://docs.rs/h2/0.2.5")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 96570bb564432934ec5e6ec6f1cb203caf809570 Mon Sep 17 00:00:00 2001 From: Han Xu Date: Thu, 14 May 2020 10:47:42 -0700 Subject: [PATCH 003/178] fix documentation for server::Connection --- src/server.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/server.rs b/src/server.rs index 59247b596..b6e739492 100644 --- a/src/server.rs +++ b/src/server.rs @@ -23,10 +23,10 @@ //! //! The [`Connection`] instance is used to accept inbound HTTP/2.0 streams. It //! does this by implementing [`futures::Stream`]. When a new stream is -//! received, a call to [`Connection::poll`] will return `(request, response)`. +//! received, a call to [`Connection::accept`] will return `(request, response)`. //! The `request` handle (of type [`http::Request`]) contains the //! HTTP request head as well as provides a way to receive the inbound data -//! stream and the trailers. The `response` handle (of type [`SendStream`]) +//! stream and the trailers. The `response` handle (of type [`SendResponse`]) //! allows responding to the request, stream the response payload, send //! trailers, and send push promises. //! 
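The corrected documentation above says each accepted stream yields a `(request, response)` pair, where
the response half is a `SendResponse`. As a minimal sketch of that accept loop (assuming a tokio 0.2
runtime and an already-accepted `TcpStream`; the empty-200 handler is illustrative, not part of this patch):

    use h2::server;
    use http::{Response, StatusCode};
    use tokio::net::TcpStream;

    // Serve one connection, answering every request with an empty 200 response.
    async fn serve(socket: TcpStream) -> Result<(), h2::Error> {
        let mut connection = server::handshake(socket).await?;

        // Each inbound stream yields the request plus a SendResponse handle.
        while let Some(result) = connection.accept().await {
            let (request, mut respond) = result?;
            tracing::info!("received request: {:?}", request.uri());

            let response = Response::builder()
                .status(StatusCode::OK)
                .body(())
                .unwrap();

            // `true` ends the stream here: a headers-only response.
            let _stream = respond.send_response(response, true)?;
        }

        Ok(())
    }

Streaming a body or trailers would instead go through the `SendStream` returned by `send_response`
rather than ending the stream immediately.
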
@@ -36,19 +36,19 @@ //! # Managing the connection //! //! The [`Connection`] instance is used to manage connection state. The caller -//! is required to call either [`Connection::poll`] or +//! is required to call either [`Connection::accept`] or //! [`Connection::poll_close`] in order to advance the connection state. Simply //! operating on [`SendStream`] or [`RecvStream`] will have no effect unless the //! connection state is advanced. //! -//! It is not required to call **both** [`Connection::poll`] and +//! It is not required to call **both** [`Connection::accept`] and //! [`Connection::poll_close`]. If the caller is ready to accept a new stream, -//! then only [`Connection::poll`] should be called. When the caller **does +//! then only [`Connection::accept`] should be called. When the caller **does //! not** want to accept a new stream, [`Connection::poll_close`] should be //! called. //! //! The [`Connection`] instance should only be dropped once -//! [`Connection::poll_close`] returns `Ready`. Once [`Connection::poll`] +//! [`Connection::poll_close`] returns `Ready`. Once [`Connection::accept`] //! returns `Ready(None)`, there will no longer be any more inbound streams. At //! this point, only [`Connection::poll_close`] should be called. //! From ceae69b4687b446666b4491771fe94a08a90cc82 Mon Sep 17 00:00:00 2001 From: cssivision Date: Thu, 21 May 2020 21:56:35 +0800 Subject: [PATCH 004/178] Update Readme (#468) --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 21f00a500..79072f39a 100644 --- a/README.md +++ b/README.md @@ -28,8 +28,7 @@ specification. It does not handle: * TLS * Any feature not described by the HTTP/2.0 specification. -The intent is that this crate will eventually be used by -[hyper](https://github.com/hyperium/hyper), which will provide all of these features. +This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features. 
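For readers who reach the crate through hyper rather than directly, the client side that the Usage
section below covers looks roughly like this (a sketch only: the address, the `tokio::spawn` setup,
and the error handling are illustrative assumptions, not taken from this repository):

    use h2::client;
    use http::Request;
    use tokio::net::TcpStream;

    async fn fetch() -> Result<(), Box<dyn std::error::Error>> {
        let tcp = TcpStream::connect("127.0.0.1:5928").await?;
        let (mut client, connection) = client::handshake(tcp).await?;

        // The connection future drives the protocol and must be polled.
        tokio::spawn(async move {
            if let Err(e) = connection.await {
                tracing::error!("connection error: {}", e);
            }
        });

        let request = Request::builder().uri("https://example.com/").body(())?;

        // `true`: no request body follows.
        let (response, _stream) = client.send_request(request, true)?;
        let response = response.await?;
        tracing::info!("response status: {}", response.status());

        Ok(())
    }
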
## Usage From d6fa8386c4a49aeaf434eefc018d1e7e673ca8bf Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 7 Jul 2020 09:48:14 -0700 Subject: [PATCH 005/178] Fix test not checking should_recv_frames --- tests/h2-tests/tests/flow_control.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index f03404130..3ca65ac73 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -350,7 +350,7 @@ async fn stream_error_release_connection_capacity() { should_recv_bytes -= bytes.len(); should_recv_frames -= 1; if should_recv_bytes == 0 { - assert_eq!(should_recv_bytes, 0); + assert_eq!(should_recv_frames, 0); } Ok(()) }) From d3b9f1e36aadc1a7a6804e2f8e86d3fe4a244b4f Mon Sep 17 00:00:00 2001 From: David Barsky Date: Tue, 7 Jul 2020 18:55:24 -0400 Subject: [PATCH 006/178] feat(lib): switch from log to tracing (#475) --- Cargo.toml | 2 +- src/client.rs | 4 +- src/codec/framed_read.rs | 12 +++--- src/codec/framed_write.rs | 22 +++++----- src/frame/go_away.rs | 2 +- src/frame/headers.rs | 21 +++++---- src/frame/ping.rs | 2 +- src/frame/reset.rs | 2 +- src/frame/settings.rs | 6 +-- src/frame/window_update.rs | 2 +- src/hpack/decoder.rs | 16 +++---- src/lib.rs | 4 +- src/proto/connection.rs | 30 ++++++------- src/proto/ping_pong.rs | 6 +-- src/proto/settings.rs | 8 ++-- src/proto/streams/counts.rs | 4 +- src/proto/streams/flow_control.rs | 8 ++-- src/proto/streams/prioritize.rs | 60 +++++++++++++------------- src/proto/streams/recv.rs | 34 +++++++-------- src/proto/streams/send.rs | 24 +++++------ src/proto/streams/state.rs | 14 +++--- src/proto/streams/store.rs | 8 ++-- src/proto/streams/stream.rs | 4 +- src/proto/streams/streams.rs | 26 +++++------ src/server.rs | 18 ++++---- tests/h2-tests/Cargo.toml | 2 +- tests/h2-tests/tests/client_request.rs | 4 +- tests/h2-tests/tests/stream_states.rs | 6 +-- tests/h2-tests/tests/trailers.rs | 4 +- 29 files changed, 179 insertions(+), 176 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ffd14438d..e8e04ee9c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ tokio-util = { version = "0.3.1", features = ["codec"] } tokio = { version = "0.2", features = ["io-util"] } bytes = "0.5.2" http = "0.2" -log = "0.4.1" +tracing = { version = "0.1.13", default-features = false, features = ["std", "log"] } fnv = "1.0.5" slab = "0.4.0" indexmap = "1.0" diff --git a/src/client.rs b/src/client.rs index 63514e322..597eb3dfc 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1129,12 +1129,12 @@ where mut io: T, builder: Builder, ) -> Result<(SendRequest, Connection), crate::Error> { - log::debug!("binding client connection"); + tracing::debug!("binding client connection"); let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; io.write_all(msg).await.map_err(crate::Error::from_io)?; - log::debug!("client connection bound"); + tracing::debug!("client connection bound"); // Create the codec let mut codec = Codec::new(io); diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 76a236ed2..2674e390a 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -62,7 +62,7 @@ impl FramedRead { fn decode_frame(&mut self, mut bytes: BytesMut) -> Result, RecvError> { use self::RecvError::*; - log::trace!("decoding frame from {}B", bytes.len()); + tracing::trace!("decoding frame from {}B", bytes.len()); // Parse the head let head = frame::Head::parse(&bytes); @@ -74,7 +74,7 @@ impl FramedRead { let kind = 
head.kind(); - log::trace!(" -> kind={:?}", kind); + tracing::trace!(" -> kind={:?}", kind); macro_rules! header_block { ($frame:ident, $head:ident, $bytes:ident) => ({ @@ -124,7 +124,7 @@ impl FramedRead { if is_end_headers { frame.into() } else { - log::trace!("loaded partial header block"); + tracing::trace!("loaded partial header block"); // Defer returning the frame self.partial = Some(Partial { frame: Continuable::$frame(frame), @@ -339,16 +339,16 @@ where fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { loop { - log::trace!("poll"); + tracing::trace!("poll"); let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) { Some(Ok(bytes)) => bytes, Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))), None => return Poll::Ready(None), }; - log::trace!("poll; bytes={}B", bytes.len()); + tracing::trace!("poll; bytes={}B", bytes.len()); if let Some(frame) = self.decode_frame(bytes)? { - log::debug!("received; frame={:?}", frame); + tracing::debug!("received; frame={:?}", frame); return Poll::Ready(Some(Ok(frame))); } } diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index c63f12228..47ee592d3 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -106,7 +106,7 @@ where // Ensure that we have enough capacity to accept the write. assert!(self.has_capacity()); - log::debug!("send; frame={:?}", item); + tracing::debug!("send; frame={:?}", item); match item { Frame::Data(mut v) => { @@ -150,31 +150,31 @@ where } Frame::Settings(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded settings; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded settings; rem={:?}", self.buf.remaining()); } Frame::GoAway(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded go_away; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded go_away; rem={:?}", self.buf.remaining()); } Frame::Ping(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded ping; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded ping; rem={:?}", self.buf.remaining()); } Frame::WindowUpdate(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded window_update; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded window_update; rem={:?}", self.buf.remaining()); } Frame::Priority(_) => { /* v.encode(self.buf.get_mut()); - log::trace!("encoded priority; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded priority; rem={:?}", self.buf.remaining()); */ unimplemented!(); } Frame::Reset(v) => { v.encode(self.buf.get_mut()); - log::trace!("encoded reset; rem={:?}", self.buf.remaining()); + tracing::trace!("encoded reset; rem={:?}", self.buf.remaining()); } } @@ -183,18 +183,18 @@ where /// Flush buffered data to the wire pub fn flush(&mut self, cx: &mut Context) -> Poll> { - log::trace!("flush"); + tracing::trace!("flush"); loop { while !self.is_empty() { match self.next { Some(Next::Data(ref mut frame)) => { - log::trace!(" -> queued data frame"); + tracing::trace!(" -> queued data frame"); let mut buf = (&mut self.buf).chain(frame.payload_mut()); ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?; } _ => { - log::trace!(" -> not a queued data frame"); + tracing::trace!(" -> not a queued data frame"); ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?; } } @@ -234,7 +234,7 @@ where } } - log::trace!("flushing buffer"); + tracing::trace!("flushing buffer"); // Flush the upstream ready!(Pin::new(&mut self.inner).poll_flush(cx))?; diff --git a/src/frame/go_away.rs 
b/src/frame/go_away.rs index a46ba7a37..52dd91d4c 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -51,7 +51,7 @@ impl GoAway { } pub fn encode(&self, dst: &mut B) { - log::trace!("encoding GO_AWAY; code={:?}", self.error_code); + tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code); let head = Head::new(Kind::GoAway, 0, StreamId::zero()); head.encode(8, dst); dst.put_u32(self.last_stream_id.into()); diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 2491d8da0..0719f140c 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -153,7 +153,7 @@ impl Headers { let flags = HeadersFlag(head.flag()); let mut pad = 0; - log::trace!("loading headers; flags={:?}", flags); + tracing::trace!("loading headers; flags={:?}", flags); // Read the padding length if flags.is_padded() { @@ -817,10 +817,10 @@ impl HeaderBlock { macro_rules! set_pseudo { ($field:ident, $val:expr) => {{ if reg { - log::trace!("load_hpack; header malformed -- pseudo not at head of block"); + tracing::trace!("load_hpack; header malformed -- pseudo not at head of block"); malformed = true; } else if self.pseudo.$field.is_some() { - log::trace!("load_hpack; header malformed -- repeated pseudo"); + tracing::trace!("load_hpack; header malformed -- repeated pseudo"); malformed = true; } else { let __val = $val; @@ -829,7 +829,7 @@ impl HeaderBlock { if headers_size < max_header_list_size { self.pseudo.$field = Some(__val); } else if !self.is_over_size { - log::trace!("load_hpack; header list size over max"); + tracing::trace!("load_hpack; header list size over max"); self.is_over_size = true; } } @@ -856,10 +856,13 @@ impl HeaderBlock { || name == "keep-alive" || name == "proxy-connection" { - log::trace!("load_hpack; connection level header"); + tracing::trace!("load_hpack; connection level header"); malformed = true; } else if name == header::TE && value != "trailers" { - log::trace!("load_hpack; TE header not set to trailers; val={:?}", value); + tracing::trace!( + "load_hpack; TE header not set to trailers; val={:?}", + value + ); malformed = true; } else { reg = true; @@ -868,7 +871,7 @@ impl HeaderBlock { if headers_size < max_header_list_size { self.fields.append(name, value); } else if !self.is_over_size { - log::trace!("load_hpack; header list size over max"); + tracing::trace!("load_hpack; header list size over max"); self.is_over_size = true; } } @@ -882,12 +885,12 @@ impl HeaderBlock { }); if let Err(e) = res { - log::trace!("hpack decoding error; err={:?}", e); + tracing::trace!("hpack decoding error; err={:?}", e); return Err(e.into()); } if malformed { - log::trace!("malformed message"); + tracing::trace!("malformed message"); return Err(Error::MalformedMessage); } diff --git a/src/frame/ping.rs b/src/frame/ping.rs index 1802ec185..241d06ea1 100644 --- a/src/frame/ping.rs +++ b/src/frame/ping.rs @@ -85,7 +85,7 @@ impl Ping { pub fn encode(&self, dst: &mut B) { let sz = self.payload.len(); - log::trace!("encoding PING; ack={} len={}", self.ack, sz); + tracing::trace!("encoding PING; ack={} len={}", self.ack, sz); let flags = if self.ack { ACK_FLAG } else { 0 }; let head = Head::new(Kind::Ping, flags, StreamId::zero()); diff --git a/src/frame/reset.rs b/src/frame/reset.rs index 6edecf1a3..b2613028d 100644 --- a/src/frame/reset.rs +++ b/src/frame/reset.rs @@ -38,7 +38,7 @@ impl Reset { } pub fn encode(&self, dst: &mut B) { - log::trace!( + tracing::trace!( "encoding RESET; id={:?} code={:?}", self.stream_id, self.error_code diff --git a/src/frame/settings.rs 
b/src/frame/settings.rs index c70938144..06de9cf12 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -141,7 +141,7 @@ impl Settings { // Ensure the payload length is correct, each setting is 6 bytes long. if payload.len() % 6 != 0 { - log::debug!("invalid settings payload length; len={:?}", payload.len()); + tracing::debug!("invalid settings payload length; len={:?}", payload.len()); return Err(Error::InvalidPayloadAckSettings); } @@ -199,13 +199,13 @@ impl Settings { let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); let payload_len = self.payload_len(); - log::trace!("encoding SETTINGS; len={}", payload_len); + tracing::trace!("encoding SETTINGS; len={}", payload_len); head.encode(payload_len, dst); // Encode the settings self.for_each(|setting| { - log::trace!("encoding setting; val={:?}", setting); + tracing::trace!("encoding setting; val={:?}", setting); setting.encode(dst) }); } diff --git a/src/frame/window_update.rs b/src/frame/window_update.rs index 72c1c2581..eed2ce17e 100644 --- a/src/frame/window_update.rs +++ b/src/frame/window_update.rs @@ -48,7 +48,7 @@ impl WindowUpdate { } pub fn encode(&self, dst: &mut B) { - log::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); + tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); head.encode(4, dst); dst.put_u32(self.size_increment); diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index 4befa8702..3009e30e6 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -183,7 +183,7 @@ impl Decoder { self.last_max_update = size; } - log::trace!("decode"); + tracing::trace!("decode"); while let Some(ty) = peek_u8(src) { // At this point we are always at the beginning of the next block @@ -191,14 +191,14 @@ impl Decoder { // determined from the first byte. match Representation::load(ty)? 
{ Indexed => { - log::trace!(" Indexed; rem={:?}", src.remaining()); + tracing::trace!(" Indexed; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_indexed(src)?; consume(src); f(entry); } LiteralWithIndexing => { - log::trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); + tracing::trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_literal(src, true)?; @@ -209,14 +209,14 @@ impl Decoder { f(entry); } LiteralWithoutIndexing => { - log::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); + tracing::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); f(entry); } LiteralNeverIndexed => { - log::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); + tracing::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); @@ -226,7 +226,7 @@ impl Decoder { f(entry); } SizeUpdate => { - log::trace!(" SizeUpdate; rem={:?}", src.remaining()); + tracing::trace!(" SizeUpdate; rem={:?}", src.remaining()); if !can_resize { return Err(DecoderError::InvalidMaxDynamicSize); } @@ -248,7 +248,7 @@ impl Decoder { return Err(DecoderError::InvalidMaxDynamicSize); } - log::debug!( + tracing::debug!( "Decoder changed max table size from {} to {}", self.table.size(), new_size @@ -302,7 +302,7 @@ impl Decoder { let len = decode_int(buf, 7)?; if len > buf.remaining() { - log::trace!( + tracing::trace!( "decode_string underflow; len={}; remaining={}", len, buf.remaining() diff --git a/src/lib.rs b/src/lib.rs index 8fd77b39b..e5e1f3c52 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -84,10 +84,10 @@ macro_rules! proto_err { (conn: $($msg:tt)+) => { - log::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) + tracing::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; (stream: $($msg:tt)+) => { - log::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) + tracing::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) }; } diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 49c123efa..c9e33f4a0 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -230,13 +230,13 @@ where // error. This is handled by setting a GOAWAY frame followed by // terminating the connection. Poll::Ready(Err(Connection(e))) => { - log::debug!("Connection::poll; connection error={:?}", e); + tracing::debug!("Connection::poll; connection error={:?}", e); // We may have already sent a GOAWAY for this error, // if so, don't send another, just flush and close up. if let Some(reason) = self.go_away.going_away_reason() { if reason == e { - log::trace!(" -> already going away"); + tracing::trace!(" -> already going away"); self.state = State::Closing(e); continue; } @@ -250,7 +250,7 @@ where // This is handled by resetting the frame then trying to read // another frame. Poll::Ready(Err(Stream { id, reason })) => { - log::trace!("stream error; id={:?}; reason={:?}", id, reason); + tracing::trace!("stream error; id={:?}; reason={:?}", id, reason); self.streams.send_reset(id, reason); } // Attempting to read a frame resulted in an I/O error. All @@ -258,7 +258,7 @@ where // // TODO: Are I/O errors recoverable? 
Poll::Ready(Err(Io(e))) => { - log::debug!("Connection::poll; IO error={:?}", e); + tracing::debug!("Connection::poll; IO error={:?}", e); let e = e.into(); // Reset all active streams @@ -270,7 +270,7 @@ where } } State::Closing(reason) => { - log::trace!("connection closing after flush"); + tracing::trace!("connection closing after flush"); // Flush/shutdown the codec ready!(self.codec.shutdown(cx))?; @@ -317,28 +317,28 @@ where match ready!(Pin::new(&mut self.codec).poll_next(cx)?) { Some(Headers(frame)) => { - log::trace!("recv HEADERS; frame={:?}", frame); + tracing::trace!("recv HEADERS; frame={:?}", frame); self.streams.recv_headers(frame)?; } Some(Data(frame)) => { - log::trace!("recv DATA; frame={:?}", frame); + tracing::trace!("recv DATA; frame={:?}", frame); self.streams.recv_data(frame)?; } Some(Reset(frame)) => { - log::trace!("recv RST_STREAM; frame={:?}", frame); + tracing::trace!("recv RST_STREAM; frame={:?}", frame); self.streams.recv_reset(frame)?; } Some(PushPromise(frame)) => { - log::trace!("recv PUSH_PROMISE; frame={:?}", frame); + tracing::trace!("recv PUSH_PROMISE; frame={:?}", frame); self.streams.recv_push_promise(frame)?; } Some(Settings(frame)) => { - log::trace!("recv SETTINGS; frame={:?}", frame); + tracing::trace!("recv SETTINGS; frame={:?}", frame); self.settings .recv_settings(frame, &mut self.codec, &mut self.streams)?; } Some(GoAway(frame)) => { - log::trace!("recv GOAWAY; frame={:?}", frame); + tracing::trace!("recv GOAWAY; frame={:?}", frame); // This should prevent starting new streams, // but should allow continuing to process current streams // until they are all EOS. Once they are, State should @@ -347,7 +347,7 @@ where self.error = Some(frame.reason()); } Some(Ping(frame)) => { - log::trace!("recv PING; frame={:?}", frame); + tracing::trace!("recv PING; frame={:?}", frame); let status = self.ping_pong.recv_ping(frame); if status.is_shutdown() { assert!( @@ -360,15 +360,15 @@ where } } Some(WindowUpdate(frame)) => { - log::trace!("recv WINDOW_UPDATE; frame={:?}", frame); + tracing::trace!("recv WINDOW_UPDATE; frame={:?}", frame); self.streams.recv_window_update(frame)?; } Some(Priority(frame)) => { - log::trace!("recv PRIORITY; frame={:?}", frame); + tracing::trace!("recv PRIORITY; frame={:?}", frame); // TODO: handle } None => { - log::trace!("codec closed"); + tracing::trace!("codec closed"); self.streams.recv_eof(false).expect("mutex poisoned"); return Poll::Ready(Ok(())); } diff --git a/src/proto/ping_pong.rs b/src/proto/ping_pong.rs index 0022d4a5b..e0442c838 100644 --- a/src/proto/ping_pong.rs +++ b/src/proto/ping_pong.rs @@ -107,7 +107,7 @@ impl PingPong { &Ping::SHUTDOWN, "pending_ping should be for shutdown", ); - log::trace!("recv PING SHUTDOWN ack"); + tracing::trace!("recv PING SHUTDOWN ack"); return ReceivedPing::Shutdown; } @@ -117,7 +117,7 @@ impl PingPong { if let Some(ref users) = self.user_pings { if ping.payload() == &Ping::USER && users.receive_pong() { - log::trace!("recv PING USER ack"); + tracing::trace!("recv PING USER ack"); return ReceivedPing::Unknown; } } @@ -125,7 +125,7 @@ impl PingPong { // else we were acked a ping we didn't send? // The spec doesn't require us to do anything about this, // so for resiliency, just ignore it for now. - log::warn!("recv PING ack that we never sent: {:?}", ping); + tracing::warn!("recv PING ack that we never sent: {:?}", ping); ReceivedPing::Unknown } else { // Save the ping's payload to be sent as an acknowledgement. 
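The user-ping branch above (the "recv PING USER ack" trace) is fed by the public `PingPong` handle.
A sketch of driving it from the client side (timing and error handling are illustrative; the handle
can be taken only once per connection):

    use h2::{client, Ping};
    use std::time::Instant;
    use tokio::net::TcpStream;

    async fn ping_once(tcp: TcpStream) -> Result<(), h2::Error> {
        let (_client, mut connection) = client::handshake(tcp).await?;

        let mut ping_pong = connection
            .ping_pong()
            .expect("ping_pong handle already taken");

        tokio::spawn(async move {
            let start = Instant::now();
            // Resolves once the peer acknowledges the PING.
            if ping_pong.ping(Ping::opaque()).await.is_ok() {
                tracing::info!("PING round trip: {:?}", start.elapsed());
            }
        });

        // The connection future still has to run for the PING/ack to be exchanged.
        connection.await
    }
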
diff --git a/src/proto/settings.rs b/src/proto/settings.rs index b1d91e652..453292324 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -50,7 +50,7 @@ impl Settings { if frame.is_ack() { match &self.local { Local::WaitingAck(local) => { - log::debug!("received settings ACK; applying {:?}", local); + tracing::debug!("received settings ACK; applying {:?}", local); if let Some(max) = local.max_frame_size() { codec.set_max_recv_frame_size(max as usize); @@ -85,7 +85,7 @@ impl Settings { match &self.local { Local::ToSend(..) | Local::WaitingAck(..) => Err(UserError::SendSettingsWhilePending), Local::Synced => { - log::trace!("queue to send local settings: {:?}", frame); + tracing::trace!("queue to send local settings: {:?}", frame); self.local = Local::ToSend(frame); Ok(()) } @@ -115,7 +115,7 @@ impl Settings { // Buffer the settings frame dst.buffer(frame.into()).expect("invalid settings frame"); - log::trace!("ACK sent; applying settings"); + tracing::trace!("ACK sent; applying settings"); if let Some(val) = settings.header_table_size() { dst.set_send_header_table_size(val as usize); @@ -139,7 +139,7 @@ impl Settings { // Buffer the settings frame dst.buffer(settings.clone().into()) .expect("invalid settings frame"); - log::trace!("local settings sent; waiting for ack: {:?}", settings); + tracing::trace!("local settings sent; waiting for ack: {:?}", settings); self.local = Local::WaitingAck(settings.clone()); } diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index bcd07e814..a1b7c1df3 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -133,7 +133,7 @@ impl Counts { // TODO: move this to macro? pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { - log::trace!( + tracing::trace!( "transition_after; stream={:?}; state={:?}; is_closed={:?}; \ pending_send_empty={:?}; buffered_send_data={}; \ num_recv={}; num_send={}", @@ -155,7 +155,7 @@ impl Counts { } if stream.is_counted { - log::trace!("dec_num_streams; stream={:?}", stream.id); + tracing::trace!("dec_num_streams; stream={:?}", stream.id); // Decrement the number of active streams. self.dec_num_streams(&mut stream); } diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index f3cea1699..bd0aadc09 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -120,7 +120,7 @@ impl FlowControl { return Err(Reason::FLOW_CONTROL_ERROR); } - log::trace!( + tracing::trace!( "inc_window; sz={}; old={}; new={}", sz, self.window_size, @@ -136,7 +136,7 @@ impl FlowControl { /// This is called after receiving a SETTINGS frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_send_window(&mut self, sz: WindowSize) { - log::trace!( + tracing::trace!( "dec_window; sz={}; window={}, available={}", sz, self.window_size, @@ -151,7 +151,7 @@ impl FlowControl { /// This is called after receiving a SETTINGS ACK frame with a lower /// INITIAL_WINDOW_SIZE value. pub fn dec_recv_window(&mut self, sz: WindowSize) { - log::trace!( + tracing::trace!( "dec_recv_window; sz={}; window={}, available={}", sz, self.window_size, @@ -165,7 +165,7 @@ impl FlowControl { /// Decrements the window reflecting data has actually been sent. The caller /// must ensure that the window has capacity. 
pub fn send_data(&mut self, sz: WindowSize) { - log::trace!( + tracing::trace!( "send_data; sz={}; window={}; available={}", sz, self.window_size, diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index a13393282..180d9365a 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -84,7 +84,7 @@ impl Prioritize { flow.assign_capacity(config.remote_init_window_sz); - log::trace!("Prioritize::new; flow={:?}", flow); + tracing::trace!("Prioritize::new; flow={:?}", flow); Prioritize { pending_send: store::Queue::new(), @@ -112,7 +112,7 @@ impl Prioritize { pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { // If the stream is waiting to be opened, nothing more to do. if stream.is_send_ready() { - log::trace!("schedule_send; {:?}", stream.id); + tracing::trace!("schedule_send; {:?}", stream.id); // Queue the stream self.pending_send.push(stream); @@ -158,7 +158,7 @@ impl Prioritize { // Update the buffered data counter stream.buffered_send_data += sz; - log::trace!( + tracing::trace!( "send_data; sz={}; buffered={}; requested={}", sz, stream.buffered_send_data, @@ -179,7 +179,7 @@ impl Prioritize { self.reserve_capacity(0, stream, counts); } - log::trace!( + tracing::trace!( "send_data (2); available={}; buffered={}", stream.send_flow.available(), stream.buffered_send_data @@ -214,7 +214,7 @@ impl Prioritize { stream: &mut store::Ptr, counts: &mut Counts, ) { - log::trace!( + tracing::trace!( "reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}", stream.id, capacity, @@ -266,7 +266,7 @@ impl Prioritize { inc: WindowSize, stream: &mut store::Ptr, ) -> Result<(), Reason> { - log::trace!( + tracing::trace!( "recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}", stream.id, stream.state, @@ -326,7 +326,7 @@ impl Prioritize { pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_capacity.pop(store) { counts.transition(stream, |_, stream| { - log::trace!("clear_pending_capacity; stream={:?}", stream.id); + tracing::trace!("clear_pending_capacity; stream={:?}", stream.id); }) } } @@ -339,7 +339,7 @@ impl Prioritize { ) where R: Resolve, { - log::trace!("assign_connection_capacity; inc={}", inc); + tracing::trace!("assign_connection_capacity; inc={}", inc); self.flow.assign_capacity(inc); @@ -383,7 +383,7 @@ impl Prioritize { stream.send_flow.window_size() - stream.send_flow.available().as_size(), ); - log::trace!( + tracing::trace!( "try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}", stream.id, total_requested, @@ -416,7 +416,7 @@ impl Prioritize { // TODO: Should prioritization factor into this? 
let assign = cmp::min(conn_available, additional); - log::trace!(" assigning; stream={:?}, capacity={}", stream.id, assign,); + tracing::trace!(" assigning; stream={:?}, capacity={}", stream.id, assign,); // Assign the capacity to the stream stream.assign_capacity(assign); @@ -425,7 +425,7 @@ impl Prioritize { self.flow.claim_capacity(assign); } - log::trace!( + tracing::trace!( "try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}", stream.send_flow.available(), stream.requested_send_capacity, @@ -485,14 +485,14 @@ impl Prioritize { // The max frame length let max_frame_len = dst.max_send_frame_size(); - log::trace!("poll_complete"); + tracing::trace!("poll_complete"); loop { self.schedule_pending_open(store, counts); match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { - log::trace!("writing frame={:?}", frame); + tracing::trace!("writing frame={:?}", frame); debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); if let Frame::Data(ref frame) = frame { @@ -538,11 +538,11 @@ impl Prioritize { where B: Buf, { - log::trace!("try reclaim frame"); + tracing::trace!("try reclaim frame"); // First check if there are any data chunks to take back if let Some(frame) = dst.take_last_data_frame() { - log::trace!( + tracing::trace!( " -> reclaimed; frame={:?}; sz={}", frame, frame.payload().inner.get_ref().remaining() @@ -554,7 +554,7 @@ impl Prioritize { match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), InFlightData::Drop => { - log::trace!("not reclaiming frame for cancelled stream"); + tracing::trace!("not reclaiming frame for cancelled stream"); return false; } InFlightData::DataFrame(k) => { @@ -603,11 +603,11 @@ impl Prioritize { } pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { - log::trace!("clear_queue; stream={:?}", stream.id); + tracing::trace!("clear_queue; stream={:?}", stream.id); // TODO: make this more efficient? while let Some(frame) = stream.pending_send.pop_front(buffer) { - log::trace!("dropping; frame={:?}", frame); + tracing::trace!("dropping; frame={:?}", frame); } stream.buffered_send_data = 0; @@ -644,12 +644,12 @@ impl Prioritize { where B: Buf, { - log::trace!("pop_frame"); + tracing::trace!("pop_frame"); loop { match self.pending_send.pop(store) { Some(mut stream) => { - log::trace!( + tracing::trace!( "pop_frame; stream={:?}; stream.state={:?}", stream.id, stream.state @@ -662,7 +662,7 @@ impl Prioritize { // To be safe, we just always ask the stream. let is_pending_reset = stream.is_pending_reset_expiration(); - log::trace!( + tracing::trace!( " --> stream={:?}; is_pending_reset={:?};", stream.id, is_pending_reset @@ -675,7 +675,7 @@ impl Prioritize { let stream_capacity = stream.send_flow.available(); let sz = frame.payload().remaining(); - log::trace!( + tracing::trace!( " --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \ available={}; requested={}; buffered={};", frame.stream_id(), @@ -690,7 +690,7 @@ impl Prioritize { // Zero length data frames always have capacity to // be sent. if sz > 0 && stream_capacity == 0 { - log::trace!( + tracing::trace!( " --> stream capacity is 0; requested={}", stream.requested_send_capacity ); @@ -721,10 +721,10 @@ impl Prioritize { // capacity at this point. 
debug_assert!(len <= self.flow.window_size()); - log::trace!(" --> sending data frame; len={}", len); + tracing::trace!(" --> sending data frame; len={}", len); // Update the flow control - log::trace!(" -- updating stream flow --"); + tracing::trace!(" -- updating stream flow --"); stream.send_flow.send_data(len); // Decrement the stream's buffered data counter @@ -737,7 +737,7 @@ impl Prioritize { // line. self.flow.assign_capacity(len); - log::trace!(" -- updating connection flow --"); + tracing::trace!(" -- updating connection flow --"); self.flow.send_data(len); // Wrap the frame's data payload to ensure that the @@ -789,7 +789,7 @@ impl Prioritize { // had data buffered to be sent, but all the frames are cleared // in clear_queue(). Instead of doing O(N) traversal through queue // to remove, lets just ignore the stream here. - log::trace!("removing dangling stream from pending_send"); + tracing::trace!("removing dangling stream from pending_send"); // Since this should only happen as a consequence of `clear_queue`, // we must be in a closed state of some kind. debug_assert!(stream.state.is_closed()); @@ -799,7 +799,7 @@ impl Prioritize { } }; - log::trace!("pop_frame; frame={:?}", frame); + tracing::trace!("pop_frame; frame={:?}", frame); if cfg!(debug_assertions) && stream.state.is_idle() { debug_assert!(stream.id > self.last_opened_id); @@ -824,11 +824,11 @@ impl Prioritize { } fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { - log::trace!("schedule_pending_open"); + tracing::trace!("schedule_pending_open"); // check for any pending open streams while counts.can_inc_num_send_streams() { if let Some(mut stream) = self.pending_open.pop(store) { - log::trace!("schedule_pending_open; stream={:?}", stream.id); + tracing::trace!("schedule_pending_open; stream={:?}", stream.id); counts.inc_num_send_streams(&mut stream); self.pending_send.push(&mut stream); diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index f0e23a4ad..682200d45 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -160,7 +160,7 @@ impl Recv { stream: &mut store::Ptr, counts: &mut Counts, ) -> Result<(), RecvHeaderBlockError>> { - log::trace!("opening stream; init_window={}", self.init_window_sz); + tracing::trace!("opening stream; init_window={}", self.init_window_sz); let is_initial = stream.state.recv_open(frame.is_end_stream())?; if is_initial { @@ -206,7 +206,7 @@ impl Recv { // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. 
- log::debug!( + tracing::debug!( "stream error REQUEST_HEADER_FIELDS_TOO_LARGE -- \ recv_headers: frame is over size; stream={:?}", stream.id @@ -341,7 +341,7 @@ impl Recv { /// Releases capacity of the connection pub fn release_connection_capacity(&mut self, capacity: WindowSize, task: &mut Option) { - log::trace!( + tracing::trace!( "release_connection_capacity; size={}, connection in_flight_data={}", capacity, self.in_flight_data, @@ -367,7 +367,7 @@ impl Recv { stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { - log::trace!("release_capacity; size={}", capacity); + tracing::trace!("release_capacity; size={}", capacity); if capacity > stream.in_flight_recv_data { return Err(UserError::ReleaseCapacityTooBig); @@ -401,7 +401,7 @@ impl Recv { return; } - log::trace!( + tracing::trace!( "auto-release closed stream ({:?}) capacity: {:?}", stream.id, stream.in_flight_recv_data, @@ -426,7 +426,7 @@ impl Recv { /// The `task` is an optional parked task for the `Connection` that might /// be blocked on needing more window capacity. pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option) { - log::trace!( + tracing::trace!( "set_target_connection_window; target={}; available={}, reserved={}", target, self.flow.available(), @@ -469,7 +469,7 @@ impl Recv { let old_sz = self.init_window_sz; self.init_window_sz = target; - log::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); + tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); // Per RFC 7540 ยง6.9.2: // @@ -490,7 +490,7 @@ impl Recv { if target < old_sz { // We must decrease the (local) window on every open stream. let dec = old_sz - target; - log::trace!("decrementing all windows; dec={}", dec); + tracing::trace!("decrementing all windows; dec={}", dec); store.for_each(|mut stream| { stream.recv_flow.dec_recv_window(dec); @@ -499,7 +499,7 @@ impl Recv { } else if target > old_sz { // We must increase the (local) window on every open stream. let inc = target - old_sz; - log::trace!("incrementing all windows; inc={}", inc); + tracing::trace!("incrementing all windows; inc={}", inc); store.for_each(|mut stream| { // XXX: Shouldn't the peer have already noticed our // overflow and sent us a GOAWAY? @@ -549,7 +549,7 @@ impl Recv { return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } - log::trace!( + tracing::trace!( "recv_data; size={}; connection={}; stream={}", sz, self.flow.window_size(), @@ -557,7 +557,7 @@ impl Recv { ); if is_ignoring_frame { - log::trace!( + tracing::trace!( "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); @@ -647,7 +647,7 @@ impl Recv { pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), RecvError> { if self.flow.window_size() < sz { - log::debug!( + tracing::debug!( "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});", self.flow.window_size(), sz, @@ -681,7 +681,7 @@ impl Recv { // So, if peer is a server, we'll send a 431. In either case, // an error is recorded, which will send a REFUSED_STREAM, // since we don't want any of the data frames either. 
- log::debug!( + tracing::debug!( "stream error REFUSED_STREAM -- recv_push_promise: \ headers frame is over size; promised_id={:?};", frame.promised_id(), @@ -730,7 +730,7 @@ impl Recv { pub fn ensure_not_idle(&self, id: StreamId) -> Result<(), Reason> { if let Ok(next) = self.next_stream_id { if id >= next { - log::debug!( + tracing::debug!( "stream ID implicitly closed, PROTOCOL_ERROR; stream={:?}", id ); @@ -821,7 +821,7 @@ impl Recv { return; } - log::trace!("enqueue_reset_expiration; {:?}", stream.id); + tracing::trace!("enqueue_reset_expiration; {:?}", stream.id); if !counts.can_inc_num_reset_streams() { // try to evict 1 stream if possible @@ -891,7 +891,7 @@ impl Recv { fn clear_stream_window_update_queue(&mut self, store: &mut Store, counts: &mut Counts) { while let Some(stream) = self.pending_window_updates.pop(store) { counts.transition(stream, |_, stream| { - log::trace!("clear_stream_window_update_queue; stream={:?}", stream.id); + tracing::trace!("clear_stream_window_update_queue; stream={:?}", stream.id); }) } } @@ -981,7 +981,7 @@ impl Recv { }; counts.transition(stream, |_, stream| { - log::trace!("pending_window_updates -- pop; stream={:?}", stream.id); + tracing::trace!("pending_window_updates -- pop; stream={:?}", stream.id); debug_assert!(!stream.is_pending_window_update); if !stream.state.is_recv_streaming() { diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 4d38593ec..220a8b461 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -77,11 +77,11 @@ impl Send { || fields.contains_key("keep-alive") || fields.contains_key("proxy-connection") { - log::debug!("illegal connection-specific headers found"); + tracing::debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } else if let Some(te) = fields.get(http::header::TE) { if te != "trailers" { - log::debug!("illegal connection-specific headers found"); + tracing::debug!("illegal connection-specific headers found"); return Err(UserError::MalformedHeaders); } } @@ -95,7 +95,7 @@ impl Send { stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { - log::trace!( + tracing::trace!( "send_push_promise; frame={:?}; init_window={:?}", frame, self.init_window_sz @@ -118,7 +118,7 @@ impl Send { counts: &mut Counts, task: &mut Option, ) -> Result<(), UserError> { - log::trace!( + tracing::trace!( "send_headers; frame={:?}; init_window={:?}", frame, self.init_window_sz @@ -167,7 +167,7 @@ impl Send { let is_closed = stream.state.is_closed(); let is_empty = stream.pending_send.is_empty(); - log::trace!( + tracing::trace!( "send_reset(..., reason={:?}, stream={:?}, ..., \ is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ state={:?} \ @@ -182,7 +182,7 @@ impl Send { if is_reset { // Don't double reset - log::trace!( + tracing::trace!( " -> not sending RST_STREAM ({:?} is already reset)", stream.id ); @@ -195,7 +195,7 @@ impl Send { // If closed AND the send queue is flushed, then the stream cannot be // reset explicitly, either. Implicit resets can still be queued. 
if is_closed && is_empty { - log::trace!( + tracing::trace!( " -> not sending explicit RST_STREAM ({:?} was closed \ and send queue was flushed)", stream.id @@ -211,7 +211,7 @@ impl Send { let frame = frame::Reset::new(stream.id, reason); - log::trace!("send_reset -- queueing; frame={:?}", frame); + tracing::trace!("send_reset -- queueing; frame={:?}", frame); self.prioritize .queue_frame(frame.into(), buffer, stream, task); self.prioritize.reclaim_all_capacity(stream, counts); @@ -269,7 +269,7 @@ impl Send { stream.state.send_close(); - log::trace!("send_trailers -- queuing; frame={:?}", frame); + tracing::trace!("send_trailers -- queuing; frame={:?}", frame); self.prioritize .queue_frame(frame.into(), buffer, stream, task); @@ -370,7 +370,7 @@ impl Send { task: &mut Option, ) -> Result<(), Reason> { if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { - log::debug!("recv_stream_window_update !!; err={:?}", e); + tracing::debug!("recv_stream_window_update !!; err={:?}", e); self.send_reset(Reason::FLOW_CONTROL_ERROR, buffer, stream, counts, task); @@ -443,7 +443,7 @@ impl Send { if val < old_val { // We must decrease the (remote) window on every open stream. let dec = old_val - val; - log::trace!("decrementing all windows; dec={}", dec); + tracing::trace!("decrementing all windows; dec={}", dec); let mut total_reclaimed = 0; store.for_each(|mut stream| { @@ -469,7 +469,7 @@ impl Send { 0 }; - log::trace!( + tracing::trace!( "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", stream.id, dec, diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 26323124d..45ec82f90 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -216,12 +216,12 @@ impl State { match self.inner { Open { local, .. } => { // The remote side will continue to receive data. - log::trace!("recv_close: Open => HalfClosedRemote({:?})", local); + tracing::trace!("recv_close: Open => HalfClosedRemote({:?})", local); self.inner = HalfClosedRemote(local); Ok(()) } HalfClosedLocal(..) => { - log::trace!("recv_close: HalfClosedLocal => Closed"); + tracing::trace!("recv_close: HalfClosedLocal => Closed"); self.inner = Closed(Cause::EndStream); Ok(()) } @@ -257,7 +257,7 @@ impl State { // previous state with the received RST_STREAM, so that the queue // will be cleared by `Prioritize::pop_frame`. state => { - log::trace!( + tracing::trace!( "recv_reset; reason={:?}; state={:?}; queued={:?}", reason, state, @@ -275,7 +275,7 @@ impl State { match self.inner { Closed(..) => {} _ => { - log::trace!("recv_err; err={:?}", err); + tracing::trace!("recv_err; err={:?}", err); self.inner = Closed(match *err { Proto(reason) => Cause::LocallyReset(reason), Io(..) => Cause::Io, @@ -288,7 +288,7 @@ impl State { match self.inner { Closed(..) => {} s => { - log::trace!("recv_eof; state={:?}", s); + tracing::trace!("recv_eof; state={:?}", s); self.inner = Closed(Cause::Io); } } @@ -299,11 +299,11 @@ impl State { match self.inner { Open { remote, .. } => { // The remote side will continue to receive data. - log::trace!("send_close: Open => HalfClosedLocal({:?})", remote); + tracing::trace!("send_close: Open => HalfClosedLocal({:?})", remote); self.inner = HalfClosedLocal(remote); } HalfClosedRemote(..) 
=> { - log::trace!("send_close: HalfClosedRemote => Closed"); + tracing::trace!("send_close: HalfClosedRemote => Closed"); self.inner = Closed(Cause::EndStream); } state => panic!("send_close: unexpected state {:?}", state), diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index 09d8a64a6..9b66cf904 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -244,10 +244,10 @@ where /// /// If the stream is already contained by the list, return `false`. pub fn push(&mut self, stream: &mut store::Ptr) -> bool { - log::trace!("Queue::push"); + tracing::trace!("Queue::push"); if N::is_queued(stream) { - log::trace!(" -> already queued"); + tracing::trace!(" -> already queued"); return false; } @@ -259,7 +259,7 @@ where // Queue the stream match self.indices { Some(ref mut idxs) => { - log::trace!(" -> existing entries"); + tracing::trace!(" -> existing entries"); // Update the current tail node to point to `stream` let key = stream.key(); @@ -269,7 +269,7 @@ where idxs.tail = stream.key(); } None => { - log::trace!(" -> first entry"); + tracing::trace!(" -> first entry"); self.indices = Some(store::Indices { head: stream.key(), tail: stream.key(), diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 398672049..c2b647c93 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -265,7 +265,7 @@ impl Stream { self.send_capacity_inc = true; self.send_flow.assign_capacity(capacity); - log::trace!( + tracing::trace!( " assigned capacity to stream; available={}; buffered={}; id={:?}", self.send_flow.available(), self.buffered_send_data, @@ -274,7 +274,7 @@ impl Stream { // Only notify if the capacity exceeds the amount of buffered data if self.send_flow.available() > self.buffered_send_data { - log::trace!(" notifying task"); + tracing::trace!(" notifying task"); self.notify_send(); } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 8f6186194..26eabbef9 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -135,7 +135,7 @@ where // The GOAWAY process has begun. All streams with a greater ID than // specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { - log::trace!( + tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, me.actions.recv.max_stream_id() @@ -155,7 +155,7 @@ where // This may be response headers for a stream we've already // forgotten about... if me.actions.may_have_forgotten_stream::
<P>
(id) { - log::debug!( + tracing::debug!( "recv_headers for old stream={:?}, sending STREAM_CLOSED", id, ); @@ -187,7 +187,7 @@ where // Locally reset streams must ignore frames "for some time". // This is because the remote may have sent trailers before // receiving the RST_STREAM frame. - log::trace!("recv_headers; ignoring trailers on {:?}", stream.id); + tracing::trace!("recv_headers; ignoring trailers on {:?}", stream.id); return Ok(()); } @@ -196,7 +196,7 @@ where let send_buffer = &mut *send_buffer; me.counts.transition(stream, |counts, stream| { - log::trace!( + tracing::trace!( "recv_headers; stream={:?}; state={:?}", stream.id, stream.state @@ -259,7 +259,7 @@ where // The GOAWAY process has begun. All streams with a greater ID // than specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { - log::trace!( + tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, me.actions.recv.max_stream_id() @@ -268,7 +268,7 @@ where } if me.actions.may_have_forgotten_stream::
<P>
(id) { - log::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,); + tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,); let sz = frame.payload().len(); // This should have been enforced at the codec::FramedRead layer, so @@ -322,7 +322,7 @@ where // The GOAWAY process has begun. All streams with a greater ID than // specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { - log::trace!( + tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, me.actions.recv.max_stream_id() @@ -470,7 +470,7 @@ where // The GOAWAY process has begun. All streams with a greater ID // than specified as part of GOAWAY should be ignored. if id > me.actions.recv.max_stream_id() { - log::trace!( + tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, me.actions.recv.max_stream_id() @@ -563,7 +563,7 @@ where me.refs += 1; key.map(|key| { let stream = &mut me.store.resolve(key); - log::trace!( + tracing::trace!( "next_incoming; id={:?}, state={:?}", stream.id, stream.state @@ -788,7 +788,7 @@ where if let Some(pending) = pending { let mut stream = me.store.resolve(pending.key); - log::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); + tracing::trace!("poll_pending_open; stream = {:?}", stream.is_pending_open); if stream.is_pending_open { stream.wait_send(cx); return Poll::Pending; @@ -818,7 +818,7 @@ where actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); } - log::trace!("Streams::recv_eof"); + tracing::trace!("Streams::recv_eof"); me.store .for_each(|stream| { @@ -1265,7 +1265,7 @@ fn drop_stream_ref(inner: &Mutex, key: store::Key) { Ok(inner) => inner, Err(_) => { if ::std::thread::panicking() { - log::trace!("StreamRef::drop; mutex poisoned"); + tracing::trace!("StreamRef::drop; mutex poisoned"); return; } else { panic!("StreamRef::drop; mutex poisoned"); @@ -1277,7 +1277,7 @@ fn drop_stream_ref(inner: &Mutex, key: store::Key) { me.refs -= 1; let mut stream = me.store.resolve(key); - log::trace!("drop_stream_ref; stream={:?}", stream); + tracing::trace!("drop_stream_ref; stream={:?}", stream); // decrement the stream's ref count by 1. stream.ref_dec(); diff --git a/src/server.rs b/src/server.rs index b6e739492..69ba16a7f 100644 --- a/src/server.rs +++ b/src/server.rs @@ -402,7 +402,7 @@ where } if let Some(inner) = self.connection.next_incoming() { - log::trace!("received incoming"); + tracing::trace!("received incoming"); let (head, _) = inner.take_request().into_parts(); let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); @@ -1179,7 +1179,7 @@ where type Output = Result, crate::Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - log::trace!("Handshake::poll(); state={:?};", self.state); + tracing::trace!("Handshake::poll(); state={:?};", self.state); use crate::server::Handshaking::*; self.state = if let Flushing(ref mut flush) = self.state { @@ -1188,11 +1188,11 @@ where // for the client preface. let codec = match Pin::new(flush).poll(cx)? 
{ Poll::Pending => { - log::trace!("Handshake::poll(); flush.poll()=Pending"); + tracing::trace!("Handshake::poll(); flush.poll()=Pending"); return Poll::Pending; } Poll::Ready(flushed) => { - log::trace!("Handshake::poll(); flush.poll()=Ready"); + tracing::trace!("Handshake::poll(); flush.poll()=Ready"); flushed } }; @@ -1229,7 +1229,7 @@ where }, ); - log::trace!("Handshake::poll(); connection established!"); + tracing::trace!("Handshake::poll(); connection established!"); let mut c = Connection { connection }; if let Some(sz) = self.builder.initial_target_connection_window_size { c.set_target_window_size(sz); @@ -1289,12 +1289,12 @@ impl Peer { if let Err(e) = frame::PushPromise::validate_request(&request) { use PushPromiseHeaderError::*; match e { - NotSafeAndCacheable => log::debug!( + NotSafeAndCacheable => tracing::debug!( "convert_push_message: method {} is not safe and cacheable; promised_id={:?}", request.method(), promised_id, ), - InvalidContentLength(e) => log::debug!( + InvalidContentLength(e) => tracing::debug!( "convert_push_message; promised request has invalid content-length {:?}; promised_id={:?}", e, promised_id, @@ -1347,7 +1347,7 @@ impl proto::Peer for Peer { macro_rules! malformed { ($($arg:tt)*) => {{ - log::debug!($($arg)*); + tracing::debug!($($arg)*); return Err(RecvError::Stream { id: stream_id, reason: Reason::PROTOCOL_ERROR, @@ -1367,7 +1367,7 @@ impl proto::Peer for Peer { // Specifying :status for a request is a protocol error if pseudo.status.is_some() { - log::trace!("malformed headers: :status field on request; PROTOCOL_ERROR"); + tracing::trace!("malformed headers: :status field on request; PROTOCOL_ERROR"); return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); } diff --git a/tests/h2-tests/Cargo.toml b/tests/h2-tests/Cargo.toml index 3e9d130f3..4c711fe24 100644 --- a/tests/h2-tests/Cargo.toml +++ b/tests/h2-tests/Cargo.toml @@ -9,6 +9,6 @@ edition = "2018" [dev-dependencies] h2-support = { path = "../h2-support" } -log = "0.4.1" +tracing = "0.1.13" futures = { version = "0.3", default-features = false, features = ["alloc"] } tokio = { version = "0.2", features = ["macros", "tcp"] } diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index b156d97c4..8175ce4af 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -16,7 +16,7 @@ async fn handshake() { let (_client, h2) = client::handshake(mock).await.unwrap(); - log::trace!("hands have been shook"); + tracing::trace!("hands have been shook"); // At this point, the connection should be closed h2.await.unwrap(); @@ -84,7 +84,7 @@ async fn recv_invalid_server_stream_id() { .body(()) .unwrap(); - log::info!("sending request"); + tracing::info!("sending request"); let (response, _) = client.send_request(request, true).unwrap(); // The connection errors diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index dd0316ca0..2082d5f3e 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -31,7 +31,7 @@ async fn send_recv_headers_only() { .body(()) .unwrap(); - log::info!("sending request"); + tracing::info!("sending request"); let (response, _) = client.send_request(request, true).unwrap(); let resp = h2.run(response).await.unwrap(); @@ -72,7 +72,7 @@ async fn send_recv_data() { .body(()) .unwrap(); - log::info!("sending request"); + tracing::info!("sending request"); let (response, mut stream) = client.send_request(request, false).unwrap(); 
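Just below, the test reserves send capacity before writing the request body. As a standalone sketch
of that send-side flow-control pattern using the public `SendStream` API (not code from this patch;
error handling is minimal):

    use bytes::Bytes;
    use h2::SendStream;

    // Wait until the peer's flow-control window covers `data`, then send it.
    async fn send_when_ready(
        stream: &mut SendStream<Bytes>,
        data: Bytes,
    ) -> Result<(), h2::Error> {
        // reserve_capacity only expresses intent; the grant arrives asynchronously.
        stream.reserve_capacity(data.len());

        while stream.capacity() < data.len() {
            match futures::future::poll_fn(|cx| stream.poll_capacity(cx)).await {
                Some(Ok(_assigned)) => continue,
                Some(Err(e)) => return Err(e),
                None => break, // stream closed; send_data below will report the error
            }
        }

        stream.send_data(data, true)
    }
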
// Reserve send capacity @@ -129,7 +129,7 @@ async fn send_headers_recv_data_single_frame() { .body(()) .unwrap(); - log::info!("sending request"); + tracing::info!("sending request"); let (response, _) = client.send_request(request, true).unwrap(); let resp = h2.run(response).await.unwrap(); diff --git a/tests/h2-tests/tests/trailers.rs b/tests/h2-tests/tests/trailers.rs index 513b65d82..078665551 100644 --- a/tests/h2-tests/tests/trailers.rs +++ b/tests/h2-tests/tests/trailers.rs @@ -28,7 +28,7 @@ async fn recv_trailers_only() { .body(()) .unwrap(); - log::info!("sending request"); + tracing::info!("sending request"); let (response, _) = client.send_request(request, true).unwrap(); let response = h2.run(response).await.unwrap(); @@ -79,7 +79,7 @@ async fn send_trailers_immediately() { .body(()) .unwrap(); - log::info!("sending request"); + tracing::info!("sending request"); let (response, mut stream) = client.send_request(request, false).unwrap(); let mut trailers = HeaderMap::new(); From e3a358d69634ea3cf094e2b7b590b540dc141111 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 13 Jul 2020 16:37:10 -0700 Subject: [PATCH 007/178] v0.2.6 --- CHANGELOG.md | 4 ++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 162e47541..ef7b487cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.2.6 (July 13, 2020) + +* Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.) + # 0.2.5 (May 6, 2020) * Fix rare debug assert failure in store shutdown. diff --git a/Cargo.toml b/Cargo.toml index e8e04ee9c..b9e24884f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.2.5" +version = "0.2.6" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.2.5/h2/" +documentation = "https://docs.rs/h2/0.2.6/h2/" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index e5e1f3c52..484052e96 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.2.5")] +#![doc(html_root_url = "https://docs.rs/h2/0.2.6")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 6d80bd454ed4ec69b17e6a467f50be480ec13bb2 Mon Sep 17 00:00:00 2001 From: Michael Beaumont Date: Mon, 17 Aug 2020 21:19:44 +0200 Subject: [PATCH 008/178] Fix handling Streams.refs in `next_incoming` `Streams.inner.ref` doesn't need to be incremented if we don't return an `OpaqueStreamRef`. This prevented the bug in `send_push_promise` from appearing in the tests. --- src/proto/streams/streams.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 26eabbef9..b97035a7d 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -557,17 +557,16 @@ where pub fn next_incoming(&mut self) -> Option> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; - let key = me.actions.recv.next_incoming(&mut me.store); - // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding - // the lock, so it can't. 
- me.refs += 1; - key.map(|key| { + me.actions.recv.next_incoming(&mut me.store).map(|key| { let stream = &mut me.store.resolve(key); tracing::trace!( "next_incoming; id={:?}, state={:?}", stream.id, stream.state ); + // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding + // the lock, so it can't. + me.refs += 1; StreamRef { opaque: OpaqueStreamRef::new(self.inner.clone(), stream), send_buffer: self.send_buffer.clone(), From d3c2bba18b0e609cb734e3e47cf840cd7e332277 Mon Sep 17 00:00:00 2001 From: Michael Beaumont Date: Mon, 17 Aug 2020 21:20:17 +0200 Subject: [PATCH 009/178] Increment Stream.refs when sending a push promise --- src/proto/streams/streams.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index b97035a7d..79a28e3bd 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1005,6 +1005,7 @@ impl StreamRef { return Err(err.into()); } + me.refs += 1; let opaque = OpaqueStreamRef::new(self.opaque.inner.clone(), &mut me.store.resolve(child_key)); From fc7f63f641210c52f8e742c1538c4aa455c795ab Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Mon, 17 Aug 2020 17:29:22 -0700 Subject: [PATCH 010/178] start adding `tracing` spans to internals (#478) We've adopted `tracing` for diagnostics, but currently, it is just being used as a drop-in replacement for the `log` crate. Ideally, we would want to start emitting more structured diagnostics, using `tracing`'s `Span`s and structured key-value fields. A lot of the logging in `h2` is already written in a style that imitates the formatting of structured key-value logs, but as textual log messages. Migrating the logs to structured `tracing` events therefore is pretty easy to do. I've also started adding spans, mostly in the read path. Finally, I've updated the tests to use `tracing` rather than `env_logger`. The tracing setup happens in a macro, so that a span for each test with the test's name can be generated and entered. This will make the test output easier to read if multiple tests are run concurrently with `--nocapture`. 
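As a rough sketch of the mechanical change (both forms are taken from hunks in
the diff below, so nothing here is new API), a textual message such as

    tracing::trace!("recv DATA; frame={:?}", frame);

becomes a structured event that records `frame` as a typed field via the `?`
(Debug) sigil:

    tracing::trace!(?frame, "recv DATA");

Likewise, each test now begins with

    h2_support::trace_init!();

in place of the old `let _ = env_logger::try_init();`, so every test runs
inside a span named after its test thread.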
Signed-off-by: Eliza Weisman --- Cargo.toml | 1 + src/client.rs | 8 +- src/codec/framed_read.rs | 10 +- src/codec/framed_write.rs | 21 +-- src/hpack/decoder.rs | 25 ++-- src/hpack/encoder.rs | 4 + src/hpack/table.rs | 2 +- src/proto/connection.rs | 40 ++++-- src/proto/peer.rs | 1 + src/proto/streams/prioritize.rs | 181 ++++++++++++------------- src/server.rs | 42 ++++-- tests/h2-support/Cargo.toml | 3 +- tests/h2-support/src/lib.rs | 17 +++ tests/h2-support/src/prelude.rs | 2 +- tests/h2-support/src/trace.rs | 41 ++++++ tests/h2-tests/tests/client_request.rs | 44 +++--- tests/h2-tests/tests/codec_read.rs | 4 +- tests/h2-tests/tests/codec_write.rs | 6 +- tests/h2-tests/tests/flow_control.rs | 52 +++---- tests/h2-tests/tests/ping_pong.rs | 10 +- tests/h2-tests/tests/prioritization.rs | 12 +- tests/h2-tests/tests/push_promise.rs | 16 +-- tests/h2-tests/tests/server.rs | 44 +++--- tests/h2-tests/tests/stream_states.rs | 42 +++--- tests/h2-tests/tests/trailers.rs | 4 +- 25 files changed, 369 insertions(+), 263 deletions(-) create mode 100644 tests/h2-support/src/trace.rs diff --git a/Cargo.toml b/Cargo.toml index b9e24884f..a6e820047 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ tokio = { version = "0.2", features = ["io-util"] } bytes = "0.5.2" http = "0.2" tracing = { version = "0.1.13", default-features = false, features = ["std", "log"] } +tracing-futures = { version = "0.2", default-features = false, features = ["std-future"]} fnv = "1.0.5" slab = "0.4.0" indexmap = "1.0" diff --git a/src/client.rs b/src/client.rs index 597eb3dfc..1233f468f 100644 --- a/src/client.rs +++ b/src/client.rs @@ -149,6 +149,7 @@ use std::task::{Context, Poll}; use std::time::Duration; use std::usize; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; +use tracing_futures::Instrument; /// Initializes new HTTP/2.0 streams on a connection by sending a request. /// @@ -1115,7 +1116,10 @@ where T: AsyncRead + AsyncWrite + Unpin, { let builder = Builder::new(); - builder.handshake(io).await + builder + .handshake(io) + .instrument(tracing::trace_span!("client_handshake", io = %std::any::type_name::())) + .await } // ===== impl Connection ===== @@ -1438,6 +1442,8 @@ impl Peer { impl proto::Peer for Peer { type Poll = Response<()>; + const NAME: &'static str = "Client"; + fn r#dyn() -> proto::DynPeer { proto::DynPeer::Client } diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 2674e390a..8bba12545 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -61,6 +61,8 @@ impl FramedRead { fn decode_frame(&mut self, mut bytes: BytesMut) -> Result, RecvError> { use self::RecvError::*; + let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); + let _e = span.enter(); tracing::trace!("decoding frame from {}B", bytes.len()); @@ -74,7 +76,7 @@ impl FramedRead { let kind = head.kind(); - tracing::trace!(" -> kind={:?}", kind); + tracing::trace!(frame.kind = ?kind); macro_rules! header_block { ($frame:ident, $head:ident, $bytes:ident) => ({ @@ -338,6 +340,8 @@ where type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let span = tracing::trace_span!("FramedRead::poll_next"); + let _e = span.enter(); loop { tracing::trace!("poll"); let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) { @@ -346,9 +350,9 @@ where None => return Poll::Ready(None), }; - tracing::trace!("poll; bytes={}B", bytes.len()); + tracing::trace!(read.bytes = bytes.len()); if let Some(frame) = self.decode_frame(bytes)? 
{ - tracing::debug!("received; frame={:?}", frame); + tracing::debug!(?frame, "received"); return Poll::Ready(Some(Ok(frame))); } } diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 47ee592d3..201bba26c 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -105,8 +105,10 @@ where pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { // Ensure that we have enough capacity to accept the write. assert!(self.has_capacity()); + let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item); + let _e = span.enter(); - tracing::debug!("send; frame={:?}", item); + tracing::debug!(frame = ?item, "send"); match item { Frame::Data(mut v) => { @@ -150,19 +152,19 @@ where } Frame::Settings(v) => { v.encode(self.buf.get_mut()); - tracing::trace!("encoded settings; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded settings"); } Frame::GoAway(v) => { v.encode(self.buf.get_mut()); - tracing::trace!("encoded go_away; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded go_away"); } Frame::Ping(v) => { v.encode(self.buf.get_mut()); - tracing::trace!("encoded ping; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded ping"); } Frame::WindowUpdate(v) => { v.encode(self.buf.get_mut()); - tracing::trace!("encoded window_update; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded window_update"); } Frame::Priority(_) => { @@ -174,7 +176,7 @@ where } Frame::Reset(v) => { v.encode(self.buf.get_mut()); - tracing::trace!("encoded reset; rem={:?}", self.buf.remaining()); + tracing::trace!(rem = self.buf.remaining(), "encoded reset"); } } @@ -183,18 +185,19 @@ where /// Flush buffered data to the wire pub fn flush(&mut self, cx: &mut Context) -> Poll> { - tracing::trace!("flush"); + let span = tracing::trace_span!("FramedWrite::flush"); + let _e = span.enter(); loop { while !self.is_empty() { match self.next { Some(Next::Data(ref mut frame)) => { - tracing::trace!(" -> queued data frame"); + tracing::trace!(queued_data_frame = true); let mut buf = (&mut self.buf).chain(frame.payload_mut()); ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?; } _ => { - tracing::trace!(" -> not a queued data frame"); + tracing::trace!(queued_data_frame = false); ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?; } } diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index 3009e30e6..aba673d37 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -183,6 +183,9 @@ impl Decoder { self.last_max_update = size; } + let span = tracing::trace_span!("hpack::decode"); + let _e = span.enter(); + tracing::trace!("decode"); while let Some(ty) = peek_u8(src) { @@ -191,14 +194,14 @@ impl Decoder { // determined from the first byte. match Representation::load(ty)? 
{ Indexed => { - tracing::trace!(" Indexed; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"Indexed"); can_resize = false; let entry = self.decode_indexed(src)?; consume(src); f(entry); } LiteralWithIndexing => { - tracing::trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing"); can_resize = false; let entry = self.decode_literal(src, true)?; @@ -209,14 +212,14 @@ impl Decoder { f(entry); } LiteralWithoutIndexing => { - tracing::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing"); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); f(entry); } LiteralNeverIndexed => { - tracing::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed"); can_resize = false; let entry = self.decode_literal(src, false)?; consume(src); @@ -226,7 +229,7 @@ impl Decoder { f(entry); } SizeUpdate => { - tracing::trace!(" SizeUpdate; rem={:?}", src.remaining()); + tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate"); if !can_resize { return Err(DecoderError::InvalidMaxDynamicSize); } @@ -249,9 +252,9 @@ impl Decoder { } tracing::debug!( - "Decoder changed max table size from {} to {}", - self.table.size(), - new_size + from = self.table.size(), + to = new_size, + "Decoder changed max table size" ); self.table.set_max_size(new_size); @@ -302,11 +305,7 @@ impl Decoder { let len = decode_int(buf, 7)?; if len > buf.remaining() { - tracing::trace!( - "decode_string underflow; len={}; remaining={}", - len, - buf.remaining() - ); + tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",); return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); } diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index ef177485f..e6881dd05 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -86,7 +86,11 @@ impl Encoder { where I: Iterator>>, { + let span = tracing::trace_span!("hpack::encode"); + let _e = span.enter(); + let pos = position(dst); + tracing::trace!(pos, "encoding at"); if let Err(e) = self.encode_size_updates(dst) { if e == EncoderError::BufferOverflow { diff --git a/src/hpack/table.rs b/src/hpack/table.rs index e7c8ce760..2328743a8 100644 --- a/src/hpack/table.rs +++ b/src/hpack/table.rs @@ -597,7 +597,7 @@ impl Table { } assert!(dist <= their_dist, - "could not find entry; actual={}; desired={};" + + "could not find entry; actual={}; desired={}" + "probe={}, dist={}; their_dist={}; index={}; msg={}", actual, desired, probe, dist, their_dist, index.wrapping_sub(self.inserted), msg); diff --git a/src/proto/connection.rs b/src/proto/connection.rs index c9e33f4a0..ffa2945c6 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -44,6 +44,9 @@ where /// Stream state handler streams: Streams, + /// A `tracing` span tracking the lifetime of the connection. + span: tracing::Span, + /// Client or server _phantom: PhantomData
<P>
, } @@ -100,6 +103,7 @@ where ping_pong: PingPong::new(), settings: Settings::new(config.settings), streams, + span: tracing::debug_span!("Connection", peer = %P::NAME), _phantom: PhantomData, } } @@ -121,6 +125,9 @@ where /// Returns `RecvError` as this may raise errors that are caused by delayed /// processing of received frames. fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + let _e = self.span.enter(); + let span = tracing::trace_span!("poll_ready"); + let _e = span.enter(); // The order of these calls don't really matter too much ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?; ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?; @@ -200,9 +207,18 @@ where /// Advances the internal state of the connection. pub fn poll(&mut self, cx: &mut Context) -> Poll> { + // XXX(eliza): cloning the span is unfortunately necessary here in + // order to placate the borrow checker โ€” `self` is mutably borrowed by + // `poll2`, which means that we can't borrow `self.span` to enter it. + // The clone is just an atomic ref bump. + let span = self.span.clone(); + let _e = span.enter(); + let span = tracing::trace_span!("poll"); + let _e = span.enter(); use crate::codec::RecvError::*; loop { + tracing::trace!(connection.state = ?self.state); // TODO: probably clean up this glob of code match self.state { // When open, continue to poll a frame @@ -230,7 +246,7 @@ where // error. This is handled by setting a GOAWAY frame followed by // terminating the connection. Poll::Ready(Err(Connection(e))) => { - tracing::debug!("Connection::poll; connection error={:?}", e); + tracing::debug!(error = ?e, "Connection::poll; connection error"); // We may have already sent a GOAWAY for this error, // if so, don't send another, just flush and close up. @@ -250,7 +266,7 @@ where // This is handled by resetting the frame then trying to read // another frame. Poll::Ready(Err(Stream { id, reason })) => { - tracing::trace!("stream error; id={:?}; reason={:?}", id, reason); + tracing::trace!(?id, ?reason, "stream error"); self.streams.send_reset(id, reason); } // Attempting to read a frame resulted in an I/O error. All @@ -258,7 +274,7 @@ where // // TODO: Are I/O errors recoverable? Poll::Ready(Err(Io(e))) => { - tracing::debug!("Connection::poll; IO error={:?}", e); + tracing::debug!(error = ?e, "Connection::poll; IO error"); let e = e.into(); // Reset all active streams @@ -317,28 +333,28 @@ where match ready!(Pin::new(&mut self.codec).poll_next(cx)?) 
{ Some(Headers(frame)) => { - tracing::trace!("recv HEADERS; frame={:?}", frame); + tracing::trace!(?frame, "recv HEADERS"); self.streams.recv_headers(frame)?; } Some(Data(frame)) => { - tracing::trace!("recv DATA; frame={:?}", frame); + tracing::trace!(?frame, "recv DATA"); self.streams.recv_data(frame)?; } Some(Reset(frame)) => { - tracing::trace!("recv RST_STREAM; frame={:?}", frame); + tracing::trace!(?frame, "recv RST_STREAM"); self.streams.recv_reset(frame)?; } Some(PushPromise(frame)) => { - tracing::trace!("recv PUSH_PROMISE; frame={:?}", frame); + tracing::trace!(?frame, "recv PUSH_PROMISE"); self.streams.recv_push_promise(frame)?; } Some(Settings(frame)) => { - tracing::trace!("recv SETTINGS; frame={:?}", frame); + tracing::trace!(?frame, "recv SETTINGS"); self.settings .recv_settings(frame, &mut self.codec, &mut self.streams)?; } Some(GoAway(frame)) => { - tracing::trace!("recv GOAWAY; frame={:?}", frame); + tracing::trace!(?frame, "recv GOAWAY"); // This should prevent starting new streams, // but should allow continuing to process current streams // until they are all EOS. Once they are, State should @@ -347,7 +363,7 @@ where self.error = Some(frame.reason()); } Some(Ping(frame)) => { - tracing::trace!("recv PING; frame={:?}", frame); + tracing::trace!(?frame, "recv PING"); let status = self.ping_pong.recv_ping(frame); if status.is_shutdown() { assert!( @@ -360,11 +376,11 @@ where } } Some(WindowUpdate(frame)) => { - tracing::trace!("recv WINDOW_UPDATE; frame={:?}", frame); + tracing::trace!(?frame, "recv WINDOW_UPDATE"); self.streams.recv_window_update(frame)?; } Some(Priority(frame)) => { - tracing::trace!("recv PRIORITY; frame={:?}", frame); + tracing::trace!(?frame, "recv PRIORITY"); // TODO: handle } None => { diff --git a/src/proto/peer.rs b/src/proto/peer.rs index 8d327fbfc..3bcc77224 100644 --- a/src/proto/peer.rs +++ b/src/proto/peer.rs @@ -11,6 +11,7 @@ use std::fmt; pub(crate) trait Peer { /// Message type polled from the transport type Poll: fmt::Debug; + const NAME: &'static str; fn r#dyn() -> Dyn; diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 180d9365a..937982086 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -104,6 +104,8 @@ impl Prioritize { stream: &mut store::Ptr, task: &mut Option, ) { + let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id); + let _e = span.enter(); // Queue the frame in the buffer stream.pending_send.push_back(buffer, frame); self.schedule_send(stream, task); @@ -112,7 +114,7 @@ impl Prioritize { pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option) { // If the stream is waiting to be opened, nothing more to do. if stream.is_send_ready() { - tracing::trace!("schedule_send; {:?}", stream.id); + tracing::trace!(?stream.id, "schedule_send"); // Queue the stream self.pending_send.push(stream); @@ -158,12 +160,10 @@ impl Prioritize { // Update the buffered data counter stream.buffered_send_data += sz; - tracing::trace!( - "send_data; sz={}; buffered={}; requested={}", - sz, - stream.buffered_send_data, - stream.requested_send_capacity - ); + let span = + tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity); + let _e = span.enter(); + tracing::trace!(buffered = stream.buffered_send_data); // Implicitly request more send capacity if not enough has been // requested yet. 
@@ -180,9 +180,8 @@ impl Prioritize { } tracing::trace!( - "send_data (2); available={}; buffered={}", - stream.send_flow.available(), - stream.buffered_send_data + available = %stream.send_flow.available(), + buffered = stream.buffered_send_data, ); // The `stream.buffered_send_data == 0` check is here so that, if a zero @@ -214,13 +213,14 @@ impl Prioritize { stream: &mut store::Ptr, counts: &mut Counts, ) { - tracing::trace!( - "reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}", - stream.id, - capacity, - capacity + stream.buffered_send_data, - stream.requested_send_capacity + let span = tracing::trace_span!( + "reserve_capacity", + ?stream.id, + requested = capacity, + effective = capacity + stream.buffered_send_data, + curr = stream.requested_send_capacity ); + let _e = span.enter(); // Actual capacity is `capacity` + the current amount of buffered data. // If it were less, then we could never send out the buffered data. @@ -266,13 +266,14 @@ impl Prioritize { inc: WindowSize, stream: &mut store::Ptr, ) -> Result<(), Reason> { - tracing::trace!( - "recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}", - stream.id, - stream.state, + let span = tracing::trace_span!( + "recv_stream_window_update", + ?stream.id, + ?stream.state, inc, - stream.send_flow + flow = ?stream.send_flow ); + let _e = span.enter(); if stream.state.is_send_closed() && stream.buffered_send_data == 0 { // We can't send any data, so don't bother doing anything else. @@ -324,9 +325,11 @@ impl Prioritize { } pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { + let span = tracing::trace_span!("clear_pending_capacity"); + let _e = span.enter(); while let Some(stream) = self.pending_capacity.pop(store) { counts.transition(stream, |_, stream| { - tracing::trace!("clear_pending_capacity; stream={:?}", stream.id); + tracing::trace!(?stream.id, "clear_pending_capacity"); }) } } @@ -339,7 +342,8 @@ impl Prioritize { ) where R: Resolve, { - tracing::trace!("assign_connection_capacity; inc={}", inc); + let span = tracing::trace_span!("assign_connection_capacity", inc); + let _e = span.enter(); self.flow.assign_capacity(inc); @@ -382,15 +386,14 @@ impl Prioritize { // Can't assign more than what is available stream.send_flow.window_size() - stream.send_flow.available().as_size(), ); - + let span = tracing::trace_span!("try_assign_capacity", ?stream.id); + let _e = span.enter(); tracing::trace!( - "try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}", - stream.id, - total_requested, + requested = total_requested, additional, - stream.buffered_send_data, - stream.send_flow.window_size(), - self.flow.available() + buffered = stream.buffered_send_data, + window = stream.send_flow.window_size(), + conn = %self.flow.available() ); if additional == 0 { @@ -416,7 +419,7 @@ impl Prioritize { // TODO: Should prioritization factor into this? 
let assign = cmp::min(conn_available, additional); - tracing::trace!(" assigning; stream={:?}, capacity={}", stream.id, assign,); + tracing::trace!(capacity = assign, "assigning"); // Assign the capacity to the stream stream.assign_capacity(assign); @@ -426,11 +429,10 @@ impl Prioritize { } tracing::trace!( - "try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}", - stream.send_flow.available(), - stream.requested_send_capacity, - stream.buffered_send_data, - stream.send_flow.has_unavailable() + available = %stream.send_flow.available(), + requested = stream.requested_send_capacity, + buffered = stream.buffered_send_data, + has_unavailable = %stream.send_flow.has_unavailable() ); if stream.send_flow.available() < stream.requested_send_capacity @@ -492,7 +494,7 @@ impl Prioritize { match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { - tracing::trace!("writing frame={:?}", frame); + tracing::trace!(?frame, "writing"); debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); if let Frame::Data(ref frame) = frame { @@ -538,14 +540,15 @@ impl Prioritize { where B: Buf, { - tracing::trace!("try reclaim frame"); + let span = tracing::trace_span!("try_reclaim_frame"); + let _e = span.enter(); // First check if there are any data chunks to take back if let Some(frame) = dst.take_last_data_frame() { tracing::trace!( - " -> reclaimed; frame={:?}; sz={}", - frame, - frame.payload().inner.get_ref().remaining() + ?frame, + sz = frame.payload().inner.get_ref().remaining(), + "reclaimed" ); let mut eos = false; @@ -603,11 +606,12 @@ impl Prioritize { } pub fn clear_queue(&mut self, buffer: &mut Buffer>, stream: &mut store::Ptr) { - tracing::trace!("clear_queue; stream={:?}", stream.id); + let span = tracing::trace_span!("clear_queue", ?stream.id); + let _e = span.enter(); // TODO: make this more efficient? while let Some(frame) = stream.pending_send.pop_front(buffer) { - tracing::trace!("dropping; frame={:?}", frame); + tracing::trace!(?frame, "dropping"); } stream.buffered_send_data = 0; @@ -644,16 +648,14 @@ impl Prioritize { where B: Buf, { - tracing::trace!("pop_frame"); + let span = tracing::trace_span!("pop_frame"); + let _e = span.enter(); loop { match self.pending_send.pop(store) { Some(mut stream) => { - tracing::trace!( - "pop_frame; stream={:?}; stream.state={:?}", - stream.id, - stream.state - ); + let span = tracing::trace_span!("popped", ?stream.id, ?stream.state); + let _e = span.enter(); // It's possible that this stream, besides having data to send, // is also queued to send a reset, and thus is already in the queue @@ -662,11 +664,7 @@ impl Prioritize { // To be safe, we just always ask the stream. 
let is_pending_reset = stream.is_pending_reset_expiration(); - tracing::trace!( - " --> stream={:?}; is_pending_reset={:?};", - stream.id, - is_pending_reset - ); + tracing::trace!(is_pending_reset); let frame = match stream.pending_send.pop_front(buffer) { Some(Frame::Data(mut frame)) => { @@ -676,24 +674,19 @@ impl Prioritize { let sz = frame.payload().remaining(); tracing::trace!( - " --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \ - available={}; requested={}; buffered={};", - frame.stream_id(), sz, - frame.is_end_stream(), - stream_capacity, - stream.send_flow.available(), - stream.requested_send_capacity, - stream.buffered_send_data, + eos = frame.is_end_stream(), + window = %stream_capacity, + available = %stream.send_flow.available(), + requested = stream.requested_send_capacity, + buffered = stream.buffered_send_data, + "data frame" ); // Zero length data frames always have capacity to // be sent. if sz > 0 && stream_capacity == 0 { - tracing::trace!( - " --> stream capacity is 0; requested={}", - stream.requested_send_capacity - ); + tracing::trace!("stream capacity is 0"); // Ensure that the stream is waiting for // connection level capacity @@ -721,34 +714,38 @@ impl Prioritize { // capacity at this point. debug_assert!(len <= self.flow.window_size()); - tracing::trace!(" --> sending data frame; len={}", len); + tracing::trace!(len, "sending data frame"); // Update the flow control - tracing::trace!(" -- updating stream flow --"); - stream.send_flow.send_data(len); - - // Decrement the stream's buffered data counter - debug_assert!(stream.buffered_send_data >= len); - stream.buffered_send_data -= len; - stream.requested_send_capacity -= len; - - // Assign the capacity back to the connection that - // was just consumed from the stream in the previous - // line. - self.flow.assign_capacity(len); - - tracing::trace!(" -- updating connection flow --"); - self.flow.send_data(len); - - // Wrap the frame's data payload to ensure that the - // correct amount of data gets written. - - let eos = frame.is_end_stream(); - let len = len as usize; - - if frame.payload().remaining() > len { - frame.set_end_stream(false); - } + tracing::trace_span!("updating stream flow").in_scope(|| { + stream.send_flow.send_data(len); + + // Decrement the stream's buffered data counter + debug_assert!(stream.buffered_send_data >= len); + stream.buffered_send_data -= len; + stream.requested_send_capacity -= len; + + // Assign the capacity back to the connection that + // was just consumed from the stream in the previous + // line. + self.flow.assign_capacity(len); + }); + + let (eos, len) = tracing::trace_span!("updating connection flow") + .in_scope(|| { + self.flow.send_data(len); + + // Wrap the frame's data payload to ensure that the + // correct amount of data gets written. + + let eos = frame.is_end_stream(); + let len = len as usize; + + if frame.payload().remaining() > len { + frame.set_end_stream(false); + } + (eos, len) + }); Frame::Data(frame.map(|buf| Prioritized { inner: buf.take(len), diff --git a/src/server.rs b/src/server.rs index 69ba16a7f..3c093f7ee 100644 --- a/src/server.rs +++ b/src/server.rs @@ -128,6 +128,7 @@ use std::task::{Context, Poll}; use std::time::Duration; use std::{convert, fmt, io, mem}; use tokio::io::{AsyncRead, AsyncWrite}; +use tracing_futures::{Instrument, Instrumented}; /// In progress HTTP/2.0 connection handshake future. /// @@ -149,6 +150,8 @@ pub struct Handshake { builder: Builder, /// The current state of the handshake. 
state: Handshaking, + /// Span tracking the handshake + span: tracing::Span, } /// Accepts inbound HTTP/2.0 streams on a connection. @@ -290,9 +293,9 @@ impl fmt::Debug for SendPushedResponse { /// Stages of an in-progress handshake. enum Handshaking { /// State 1. Connection is flushing pending SETTINGS frame. - Flushing(Flush>), + Flushing(Instrumented>>), /// State 2. Connection is waiting for the client preface. - ReadingPreface(ReadPreface>), + ReadingPreface(Instrumented>>), /// Dummy state for `mem::replace`. Empty, } @@ -359,6 +362,9 @@ where B: Buf + 'static, { fn handshake2(io: T, builder: Builder) -> Handshake { + let span = tracing::trace_span!("server_handshake", io = %std::any::type_name::()); + let entered = span.enter(); + // Create the codec. let mut codec = Codec::new(io); @@ -378,7 +384,13 @@ where // Create the handshake future. let state = Handshaking::from(codec); - Handshake { builder, state } + drop(entered); + + Handshake { + builder, + state, + span, + } } /// Accept the next incoming request on this connection. @@ -1179,7 +1191,9 @@ where type Output = Result, crate::Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - tracing::trace!("Handshake::poll(); state={:?};", self.state); + let span = self.span.clone(); // XXX(eliza): T_T + let _e = span.enter(); + tracing::trace!(state = ?self.state); use crate::server::Handshaking::*; self.state = if let Flushing(ref mut flush) = self.state { @@ -1188,11 +1202,11 @@ where // for the client preface. let codec = match Pin::new(flush).poll(cx)? { Poll::Pending => { - tracing::trace!("Handshake::poll(); flush.poll()=Pending"); + tracing::trace!(flush.poll = %"Pending"); return Poll::Pending; } Poll::Ready(flushed) => { - tracing::trace!("Handshake::poll(); flush.poll()=Ready"); + tracing::trace!(flush.poll = %"Ready"); flushed } }; @@ -1229,7 +1243,7 @@ where }, ); - tracing::trace!("Handshake::poll(); connection established!"); + tracing::trace!("connection established!"); let mut c = Connection { connection }; if let Some(sz) = self.builder.initial_target_connection_window_size { c.set_target_window_size(sz); @@ -1290,14 +1304,14 @@ impl Peer { use PushPromiseHeaderError::*; match e { NotSafeAndCacheable => tracing::debug!( - "convert_push_message: method {} is not safe and cacheable; promised_id={:?}", + ?promised_id, + "convert_push_message: method {} is not safe and cacheable", request.method(), - promised_id, ), InvalidContentLength(e) => tracing::debug!( - "convert_push_message; promised request has invalid content-length {:?}; promised_id={:?}", + ?promised_id, + "convert_push_message; promised request has invalid content-length {:?}", e, - promised_id, ), } return Err(UserError::MalformedHeaders); @@ -1328,6 +1342,8 @@ impl Peer { impl proto::Peer for Peer { type Poll = Request<()>; + const NAME: &'static str = "Server"; + fn is_server() -> bool { true } @@ -1471,7 +1487,7 @@ where { #[inline] fn from(flush: Flush>) -> Self { - Handshaking::Flushing(flush) + Handshaking::Flushing(flush.instrument(tracing::trace_span!("flush"))) } } @@ -1482,7 +1498,7 @@ where { #[inline] fn from(read: ReadPreface>) -> Self { - Handshaking::ReadingPreface(read) + Handshaking::ReadingPreface(read.instrument(tracing::trace_span!("read_preface"))) } } diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index b48dc36a6..c4e68b1ee 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -8,7 +8,8 @@ edition = "2018" h2 = { path = "../..", features = ["stream", "unstable"] } 
bytes = "0.5" -env_logger = "0.5.9" +tracing = "0.1" +tracing-subscriber = { version = "0.2", default-features = false, features = ["fmt", "chrono", "ansi"] } futures = { version = "0.3", default-features = false } http = "0.2" tokio = { version = "0.2", features = ["time"] } diff --git a/tests/h2-support/src/lib.rs b/tests/h2-support/src/lib.rs index d88f6cabf..3c13c0afe 100644 --- a/tests/h2-support/src/lib.rs +++ b/tests/h2-support/src/lib.rs @@ -8,6 +8,7 @@ pub mod raw; pub mod frames; pub mod mock; pub mod prelude; +pub mod trace; pub mod util; mod client_ext; @@ -24,3 +25,19 @@ pub type Codec = h2::Codec; // This is the frame type that is sent pub type SendFrame = h2::frame::Frame; + +#[macro_export] +macro_rules! trace_init { + () => { + let _guard = $crate::trace::init(); + let span = $crate::prelude::tracing::info_span!( + "test", + "{}", + // get the name of the test thread to generate a unique span for the test + std::thread::current() + .name() + .expect("test threads must be named") + ); + let _e = span.enter(); + }; +} diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index 2e95b68b0..dafdd29f0 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -28,7 +28,7 @@ pub use super::assert::assert_frame_eq; // Re-export useful crates pub use tokio_test::io as mock_io; -pub use {bytes, env_logger, futures, http, tokio::io as tokio_io}; +pub use {bytes, futures, http, tokio::io as tokio_io, tracing, tracing_subscriber}; // Re-export primary future types pub use futures::{Future, Sink, Stream}; diff --git a/tests/h2-support/src/trace.rs b/tests/h2-support/src/trace.rs new file mode 100644 index 000000000..4ac11742c --- /dev/null +++ b/tests/h2-support/src/trace.rs @@ -0,0 +1,41 @@ +use std::{io, str}; +pub use tracing; +pub use tracing_subscriber; + +pub fn init() -> tracing::dispatcher::DefaultGuard { + tracing::subscriber::set_default( + tracing_subscriber::fmt() + .with_max_level(tracing::Level::TRACE) + .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE) + .with_writer(PrintlnWriter { _p: () }) + .finish(), + ) +} + +struct PrintlnWriter { + _p: (), +} + +impl tracing_subscriber::fmt::MakeWriter for PrintlnWriter { + type Writer = PrintlnWriter; + fn make_writer(&self) -> Self::Writer { + PrintlnWriter { _p: () } + } +} + +impl io::Write for PrintlnWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + let s = str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?; + println!("{}", s); + Ok(s.len()) + } + + fn write_fmt(&mut self, fmt: std::fmt::Arguments<'_>) -> io::Result<()> { + println!("{}", fmt); + Ok(()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } +} diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 8175ce4af..41f8f6459 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -7,7 +7,7 @@ use std::task::Context; #[tokio::test] async fn handshake() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() @@ -24,7 +24,7 @@ async fn handshake() { #[tokio::test] async fn client_other_thread() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -60,7 +60,7 @@ async fn client_other_thread() { #[tokio::test] async fn recv_invalid_server_stream_id() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = 
mock_io::Builder::new() .handshake() @@ -96,7 +96,7 @@ async fn recv_invalid_server_stream_id() { #[tokio::test] async fn request_stream_id_overflows() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let h2 = async move { @@ -149,7 +149,7 @@ async fn request_stream_id_overflows() { #[tokio::test] async fn client_builder_max_concurrent_streams() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mut settings = frame::Settings::default(); @@ -187,7 +187,7 @@ async fn client_builder_max_concurrent_streams() { #[tokio::test] async fn request_over_max_concurrent_streams_errors() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -286,7 +286,7 @@ async fn request_over_max_concurrent_streams_errors() { #[tokio::test] async fn send_request_poll_ready_when_connection_error() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -379,7 +379,7 @@ async fn send_request_poll_ready_when_connection_error() { #[tokio::test] async fn send_reset_notifies_recv_stream() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -432,7 +432,7 @@ async fn send_reset_notifies_recv_stream() { #[tokio::test] async fn http_11_request_without_scheme_or_authority() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -462,7 +462,7 @@ async fn http_11_request_without_scheme_or_authority() { #[tokio::test] async fn http_2_request_without_scheme_or_authority() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -499,7 +499,7 @@ fn request_with_h1_version() {} #[tokio::test] async fn request_with_connection_headers() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); // can't assert full handshake, since client never sends a request, and @@ -542,7 +542,7 @@ async fn request_with_connection_headers() { #[tokio::test] async fn connection_close_notifies_response_future() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { let settings = srv.assert_client_handshake().await; @@ -581,7 +581,7 @@ async fn connection_close_notifies_response_future() { #[tokio::test] async fn connection_close_notifies_client_poll_ready() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -626,7 +626,7 @@ async fn connection_close_notifies_client_poll_ready() { #[tokio::test] async fn sending_request_on_closed_connection() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -688,7 +688,7 @@ async fn sending_request_on_closed_connection() { #[tokio::test] async fn recv_too_big_headers() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -751,7 +751,7 @@ async fn recv_too_big_headers() { #[tokio::test] async fn pending_send_request_gets_reset_by_peer_properly() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let payload = Bytes::from(vec![0; (frame::DEFAULT_INITIAL_WINDOW_SIZE * 2) as usize]); @@ -823,7 +823,7 @@ async fn 
pending_send_request_gets_reset_by_peer_properly() { #[tokio::test] async fn request_without_path() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -854,7 +854,7 @@ async fn request_without_path() { #[tokio::test] async fn request_options_with_star() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); // Note the lack of trailing slash. @@ -899,7 +899,7 @@ async fn notify_on_send_capacity() { // stream, the client is notified. use tokio::sync::oneshot; - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let (done_tx, done_rx) = oneshot::channel(); @@ -979,7 +979,7 @@ async fn notify_on_send_capacity() { #[tokio::test] async fn send_stream_poll_reset() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -1017,7 +1017,7 @@ async fn drop_pending_open() { // This test checks that a stream queued for pending open behaves correctly when its // client drops. use tokio::sync::oneshot; - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let (init_tx, init_rx) = oneshot::channel(); @@ -1105,7 +1105,7 @@ async fn malformed_response_headers_dont_unlink_stream() { // no remaining references correctly resets the stream, without prematurely // unlinking it. use tokio::sync::oneshot; - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let (drop_tx, drop_rx) = oneshot::channel(); diff --git a/tests/h2-tests/tests/codec_read.rs b/tests/h2-tests/tests/codec_read.rs index 6ebe54d6e..95e895ddd 100644 --- a/tests/h2-tests/tests/codec_read.rs +++ b/tests/h2-tests/tests/codec_read.rs @@ -130,7 +130,7 @@ fn read_headers_empty_payload() {} #[tokio::test] async fn read_continuation_frames() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let large = build_large_headers(); @@ -191,7 +191,7 @@ async fn read_continuation_frames() { async fn update_max_frame_len_at_rest() { use futures::StreamExt; - let _ = env_logger::try_init(); + h2_support::trace_init!(); // TODO: add test for updating max frame length in flight as well? let mut codec = raw_codec! { read => [ diff --git a/tests/h2-tests/tests/codec_write.rs b/tests/h2-tests/tests/codec_write.rs index 2347f63b2..0b85a2238 100644 --- a/tests/h2-tests/tests/codec_write.rs +++ b/tests/h2-tests/tests/codec_write.rs @@ -5,7 +5,7 @@ use h2_support::prelude::*; async fn write_continuation_frames() { // An invalid dependency ID results in a stream level error. The hpack // payload should still be decoded. - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let large = build_large_headers(); @@ -56,7 +56,7 @@ async fn write_continuation_frames() { async fn client_settings_header_table_size() { // A server sets the SETTINGS_HEADER_TABLE_SIZE to 0, test that the // client doesn't send indexed headers. - let _ = env_logger::try_init(); + h2_support::trace_init!(); let io = mock_io::Builder::new() // Read SETTINGS_HEADER_TABLE_SIZE = 0 @@ -99,7 +99,7 @@ async fn client_settings_header_table_size() { async fn server_settings_header_table_size() { // A client sets the SETTINGS_HEADER_TABLE_SIZE to 0, test that the // server doesn't send indexed headers. 
- let _ = env_logger::try_init(); + h2_support::trace_init!(); let io = mock_io::Builder::new() .read(MAGIC_PREFACE) diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 3ca65ac73..4b6fe7a85 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -7,7 +7,7 @@ use h2_support::util::yield_once; // explicitly requested. #[tokio::test] async fn send_data_without_requesting_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0; 1024]; @@ -53,7 +53,7 @@ async fn send_data_without_requesting_capacity() { #[tokio::test] async fn release_capacity_sends_window_update() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0u8; 16_384]; let payload_len = payload.len(); @@ -120,7 +120,7 @@ async fn release_capacity_sends_window_update() { #[tokio::test] async fn release_capacity_of_small_amount_does_not_send_window_update() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = [0; 16]; @@ -175,7 +175,7 @@ fn expand_window_calls_are_coalesced() {} #[tokio::test] async fn recv_data_overflows_connection_window() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); @@ -238,7 +238,7 @@ async fn recv_data_overflows_connection_window() { #[tokio::test] async fn recv_data_overflows_stream_window() { // this tests for when streams have smaller windows than their connection - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); @@ -295,7 +295,7 @@ fn recv_window_update_causes_overflow() { #[tokio::test] async fn stream_error_release_connection_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -371,7 +371,7 @@ async fn stream_error_release_connection_capacity() { #[tokio::test] async fn stream_close_by_data_frame_releases_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let window_size = frame::DEFAULT_INITIAL_WINDOW_SIZE as usize; @@ -443,7 +443,7 @@ async fn stream_close_by_data_frame_releases_capacity() { #[tokio::test] async fn stream_close_by_trailers_frame_releases_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let window_size = frame::DEFAULT_INITIAL_WINDOW_SIZE as usize; @@ -516,7 +516,7 @@ async fn stream_close_by_trailers_frame_releases_capacity() { #[tokio::test] async fn stream_close_by_send_reset_frame_releases_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -575,7 +575,7 @@ fn stream_close_by_recv_reset_frame_releases_capacity() {} #[tokio::test] async fn recv_window_update_on_stream_closed_by_data_frame() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let h2 = async move { @@ -620,7 +620,7 @@ async fn recv_window_update_on_stream_closed_by_data_frame() { #[tokio::test] async fn reserved_capacity_assigned_in_multi_window_updates() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let h2 = async move { @@ -685,7 +685,7 @@ async fn reserved_capacity_assigned_in_multi_window_updates() { async fn connection_notified_on_released_capacity() { use tokio::sync::{mpsc, oneshot}; - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = 
mock::new(); // We're going to run the connection on a thread in order to isolate task @@ -794,7 +794,7 @@ async fn connection_notified_on_released_capacity() { #[tokio::test] async fn recv_settings_removes_available_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mut settings = frame::Settings::default(); @@ -841,7 +841,7 @@ async fn recv_settings_removes_available_capacity() { #[tokio::test] async fn recv_settings_keeps_assigned_capacity() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let (sent_settings, sent_settings_rx) = futures::channel::oneshot::channel(); @@ -886,7 +886,7 @@ async fn recv_settings_keeps_assigned_capacity() { #[tokio::test] async fn recv_no_init_window_then_receive_some_init_window() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mut settings = frame::Settings::default(); @@ -942,7 +942,7 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() { use futures::channel::oneshot; use futures::future::{select, Either}; - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let (tx1, rx1) = oneshot::channel(); let (tx2, rx2) = oneshot::channel(); @@ -1049,7 +1049,7 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() { #[tokio::test] async fn client_increase_target_window_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -1069,7 +1069,7 @@ async fn client_increase_target_window_size() { #[tokio::test] async fn increase_target_window_size_after_using_some() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -1110,7 +1110,7 @@ async fn increase_target_window_size_after_using_some() { #[tokio::test] async fn decrease_target_window_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -1155,7 +1155,7 @@ async fn decrease_target_window_size() { #[tokio::test] async fn client_update_initial_window_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let window_size = frame::DEFAULT_INITIAL_WINDOW_SIZE * 2; @@ -1230,7 +1230,7 @@ async fn client_update_initial_window_size() { #[tokio::test] async fn client_decrease_initial_window_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -1355,7 +1355,7 @@ async fn client_decrease_initial_window_size() { #[tokio::test] async fn server_target_window_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -1377,7 +1377,7 @@ async fn server_target_window_size() { #[tokio::test] async fn recv_settings_increase_window_size_after_using_some() { // See https://github.com/hyperium/h2/issues/208 - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let new_win_size = 16_384 * 4; // 1 bigger than default @@ -1419,7 +1419,7 @@ async fn recv_settings_increase_window_size_after_using_some() { #[tokio::test] async fn reserve_capacity_after_peer_closes() { // See https://github.com/hyperium/h2/issues/300 - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -1456,7 
+1456,7 @@ async fn reserve_capacity_after_peer_closes() { async fn reset_stream_waiting_for_capacity() { // This tests that receiving a reset on a stream that has some available // connection-level window reassigns that window to another stream. - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); @@ -1517,7 +1517,7 @@ async fn reset_stream_waiting_for_capacity() { #[tokio::test] async fn data_padding() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mut body = Vec::new(); diff --git a/tests/h2-tests/tests/ping_pong.rs b/tests/h2-tests/tests/ping_pong.rs index f093b43f6..a57f35c17 100644 --- a/tests/h2-tests/tests/ping_pong.rs +++ b/tests/h2-tests/tests/ping_pong.rs @@ -6,7 +6,7 @@ use h2_support::prelude::*; #[tokio::test] async fn recv_single_ping() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (m, mut mock) = mock::new(); // Create the handshake @@ -36,7 +36,7 @@ async fn recv_single_ping() { #[tokio::test] async fn recv_multiple_pings() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -58,7 +58,7 @@ async fn recv_multiple_pings() { #[tokio::test] async fn pong_has_highest_priority() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let data = Bytes::from(vec![0; 16_384]); @@ -96,7 +96,7 @@ async fn pong_has_highest_priority() { #[tokio::test] async fn user_ping_pong() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -138,7 +138,7 @@ async fn user_ping_pong() { #[tokio::test] async fn user_notifies_when_connection_closes() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { let settings = srv.assert_client_handshake().await; diff --git a/tests/h2-tests/tests/prioritization.rs b/tests/h2-tests/tests/prioritization.rs index 18084d91d..7c2681068 100644 --- a/tests/h2-tests/tests/prioritization.rs +++ b/tests/h2-tests/tests/prioritization.rs @@ -6,7 +6,7 @@ use std::task::Context; #[tokio::test] async fn single_stream_send_large_body() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0; 1024]; @@ -66,7 +66,7 @@ async fn single_stream_send_large_body() { #[tokio::test] async fn multiple_streams_with_payload_greater_than_default_window() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0; 16384 * 5 - 1]; let payload_clone = payload.clone(); @@ -129,7 +129,7 @@ async fn multiple_streams_with_payload_greater_than_default_window() { #[tokio::test] async fn single_stream_send_extra_large_body_multi_frames_one_buffer() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0; 32_768]; @@ -193,7 +193,7 @@ async fn single_stream_send_extra_large_body_multi_frames_one_buffer() { #[tokio::test] async fn single_stream_send_body_greater_than_default_window() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0; 16384 * 5 - 1]; @@ -279,7 +279,7 @@ async fn single_stream_send_body_greater_than_default_window() { #[tokio::test] async fn single_stream_send_extra_large_body_multi_frames_multi_buffer() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let payload = vec![0; 32_768]; @@ -341,7 +341,7 @@ async fn single_stream_send_extra_large_body_multi_frames_multi_buffer() { 
#[tokio::test] async fn send_data_receive_window_update() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (m, mut mock) = mock::new(); let h2 = async move { diff --git a/tests/h2-tests/tests/push_promise.rs b/tests/h2-tests/tests/push_promise.rs index f786a72b7..a5a7dfe97 100644 --- a/tests/h2-tests/tests/push_promise.rs +++ b/tests/h2-tests/tests/push_promise.rs @@ -4,7 +4,7 @@ use h2_support::prelude::*; #[tokio::test] async fn recv_push_works() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mock = async move { @@ -62,7 +62,7 @@ async fn recv_push_works() { #[tokio::test] async fn pushed_streams_arent_dropped_too_early() { // tests that by default, received push promises work - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mock = async move { @@ -128,7 +128,7 @@ async fn pushed_streams_arent_dropped_too_early() { #[tokio::test] async fn recv_push_when_push_disabled_is_conn_error() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mock = async move { @@ -186,7 +186,7 @@ async fn recv_push_when_push_disabled_is_conn_error() { #[tokio::test] async fn pending_push_promises_reset_when_dropped() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -233,7 +233,7 @@ async fn pending_push_promises_reset_when_dropped() { #[tokio::test] async fn recv_push_promise_over_max_header_list_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -284,7 +284,7 @@ async fn recv_push_promise_over_max_header_list_size() { #[tokio::test] async fn recv_invalid_push_promise_headers_is_stream_protocol_error() { // Unsafe method or content length is stream protocol error - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mock = async move { @@ -348,7 +348,7 @@ fn recv_push_promise_with_wrong_authority_is_stream_error() { #[tokio::test] async fn recv_push_promise_skipped_stream_id() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mock = async move { @@ -402,7 +402,7 @@ async fn recv_push_promise_skipped_stream_id() { #[tokio::test] async fn recv_push_promise_dup_stream_id() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let mock = async move { diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 1916138b3..3a7649135 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -10,7 +10,7 @@ const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; #[tokio::test] async fn read_preface_in_multiple_frames() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .read(b"PRI * HTTP/2.0") @@ -28,7 +28,7 @@ async fn read_preface_in_multiple_frames() { #[tokio::test] async fn server_builder_set_max_concurrent_streams() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let mut settings = frame::Settings::default(); @@ -72,7 +72,7 @@ async fn server_builder_set_max_concurrent_streams() { #[tokio::test] async fn serve_request() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -107,7 +107,7 @@ async fn serve_request() { 
#[tokio::test] async fn serve_connect() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -138,7 +138,7 @@ async fn serve_connect() { #[tokio::test] async fn push_request() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -222,7 +222,7 @@ async fn push_request() { #[tokio::test] async fn push_request_against_concurrency() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -306,7 +306,7 @@ async fn push_request_against_concurrency() { #[tokio::test] async fn push_request_with_data() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -372,7 +372,7 @@ async fn push_request_with_data() { #[tokio::test] async fn push_request_between_data() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -443,7 +443,7 @@ fn accept_with_pending_connections_after_socket_close() {} #[tokio::test] async fn recv_invalid_authority() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let bad_auth = util::byte_str("not:a/good authority"); @@ -470,7 +470,7 @@ async fn recv_invalid_authority() { #[tokio::test] async fn recv_connection_header() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let req = |id, name, val| { @@ -507,7 +507,7 @@ async fn recv_connection_header() { #[tokio::test] async fn sends_reset_cancel_when_req_body_is_dropped() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -539,7 +539,7 @@ async fn sends_reset_cancel_when_req_body_is_dropped() { #[tokio::test] async fn abrupt_shutdown() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -583,7 +583,7 @@ async fn abrupt_shutdown() { #[tokio::test] async fn graceful_shutdown() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -658,7 +658,7 @@ async fn graceful_shutdown() { #[tokio::test] async fn goaway_even_if_client_sent_goaway() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -707,7 +707,7 @@ async fn goaway_even_if_client_sent_goaway() { #[tokio::test] async fn sends_reset_cancel_when_res_body_is_dropped() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -761,7 +761,7 @@ async fn sends_reset_cancel_when_res_body_is_dropped() { #[tokio::test] async fn too_big_headers_sends_431() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -797,7 +797,7 @@ async fn too_big_headers_sends_431() { #[tokio::test] async fn too_big_headers_sends_reset_after_431_if_not_eos() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -832,7 +832,7 @@ async fn too_big_headers_sends_reset_after_431_if_not_eos() { #[tokio::test] async fn poll_reset() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = 
mock::new(); let client = async move { @@ -872,7 +872,7 @@ async fn poll_reset() { #[tokio::test] async fn poll_reset_io_error() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -913,7 +913,7 @@ async fn poll_reset_io_error() { #[tokio::test] async fn poll_reset_after_send_response_is_user_error() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { @@ -967,7 +967,7 @@ async fn poll_reset_after_send_response_is_user_error() { #[tokio::test] async fn server_error_on_unclean_shutdown() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let srv = server::Builder::new().handshake::<_, Bytes>(io); @@ -980,7 +980,7 @@ async fn server_error_on_unclean_shutdown() { #[tokio::test] async fn request_without_authority() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let client = async move { diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 2082d5f3e..16d1a7502 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -9,7 +9,7 @@ use tokio::sync::oneshot; #[tokio::test] async fn send_recv_headers_only() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() @@ -42,7 +42,7 @@ async fn send_recv_headers_only() { #[tokio::test] async fn send_recv_data() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() @@ -104,7 +104,7 @@ async fn send_recv_data() { #[tokio::test] async fn send_headers_recv_data_single_frame() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() @@ -153,7 +153,7 @@ async fn send_headers_recv_data_single_frame() { #[tokio::test] async fn closed_streams_are_released() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let h2 = async move { @@ -196,7 +196,7 @@ async fn closed_streams_are_released() { #[tokio::test] async fn errors_if_recv_frame_exceeds_max_frame_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let h2 = async move { @@ -239,7 +239,7 @@ async fn errors_if_recv_frame_exceeds_max_frame_size() { #[tokio::test] async fn configure_max_frame_size() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let h2 = async move { @@ -278,7 +278,7 @@ async fn configure_max_frame_size() { #[tokio::test] async fn recv_goaway_finishes_processed_streams() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -332,7 +332,7 @@ async fn recv_goaway_finishes_processed_streams() { #[tokio::test] async fn recv_goaway_with_higher_last_processed_id() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -366,7 +366,7 @@ async fn recv_goaway_with_higher_last_processed_id() { #[tokio::test] async fn recv_next_stream_id_updated_by_malformed_headers() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut client) = mock::new(); let bad_auth = util::byte_str("not:a/good authority"); @@ -404,7 +404,7 @@ async fn recv_next_stream_id_updated_by_malformed_headers() { #[tokio::test] async 
fn skipped_stream_ids_are_implicitly_closed() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -445,7 +445,7 @@ async fn skipped_stream_ids_are_implicitly_closed() { #[tokio::test] async fn send_rst_stream_allows_recv_data() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -490,7 +490,7 @@ async fn send_rst_stream_allows_recv_data() { #[tokio::test] async fn send_rst_stream_allows_recv_trailers() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -531,7 +531,7 @@ async fn send_rst_stream_allows_recv_trailers() { #[tokio::test] async fn rst_stream_expires() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -582,7 +582,7 @@ async fn rst_stream_expires() { #[tokio::test] async fn rst_stream_max() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -653,7 +653,7 @@ async fn rst_stream_max() { #[tokio::test] async fn reserved_state_recv_window_update() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { @@ -692,7 +692,7 @@ async fn reserved_state_recv_window_update() { /* #[test] fn send_data_after_headers_eos() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() @@ -733,7 +733,7 @@ async fn rst_while_closing() { // Test to reproduce panic in issue #246 --- receipt of a RST_STREAM frame // on a stream in the Half Closed (remote) state with a queued EOS causes // a panic. - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); // Rendevous when we've queued a trailers frame @@ -794,7 +794,7 @@ async fn rst_with_buffered_data() { // the data is fully flushed. Given that resetting a stream requires // clearing all associated state for that stream, this test ensures that the // buffered up frame is correctly handled. - let _ = env_logger::try_init(); + h2_support::trace_init!(); // This allows the settings + headers frame through let (io, mut srv) = mock::new_with_write_capacity(73); @@ -846,7 +846,7 @@ async fn err_with_buffered_data() { // the data is fully flushed. Given that resetting a stream requires // clearing all associated state for that stream, this test ensures that the // buffered up frame is correctly handled. - let _ = env_logger::try_init(); + h2_support::trace_init!(); // This allows the settings + headers frame through let (io, mut srv) = mock::new_with_write_capacity(73); @@ -901,7 +901,7 @@ async fn send_err_with_buffered_data() { // the data is fully flushed. Given that resetting a stream requires // clearing all associated state for that stream, this test ensures that the // buffered up frame is correctly handled. 
- let _ = env_logger::try_init(); + h2_support::trace_init!(); // This allows the settings + headers frame through let (io, mut srv) = mock::new_with_write_capacity(73); @@ -963,7 +963,7 @@ async fn send_err_with_buffered_data() { #[tokio::test] async fn srv_window_update_on_lower_stream_id() { // See https://github.com/hyperium/h2/issues/208 - let _ = env_logger::try_init(); + h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { diff --git a/tests/h2-tests/tests/trailers.rs b/tests/h2-tests/tests/trailers.rs index 078665551..08a463ab7 100644 --- a/tests/h2-tests/tests/trailers.rs +++ b/tests/h2-tests/tests/trailers.rs @@ -3,7 +3,7 @@ use h2_support::prelude::*; #[tokio::test] async fn recv_trailers_only() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() @@ -53,7 +53,7 @@ async fn recv_trailers_only() { #[tokio::test] async fn send_trailers_immediately() { - let _ = env_logger::try_init(); + h2_support::trace_init!(); let mock = mock_io::Builder::new() .handshake() From a19323727bfcda46e7144a504c8603011f8f2263 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Wed, 26 Aug 2020 18:58:52 +0100 Subject: [PATCH 011/178] fix h2-fuzz testing, import futures with FuturesUnordered (#482) --- tests/h2-fuzz/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index d119aedf4..8bb121959 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -9,7 +9,7 @@ edition = "2018" h2 = { path = "../.." } env_logger = { version = "0.5.3", default-features = false } -futures = { version = "0.3", default-features = false } +futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" http = "0.2" tokio = { version = "0.2", features = [] } From 2b19acf1323257e1d2a99ad9889e0619593561fd Mon Sep 17 00:00:00 2001 From: eggyal Date: Fri, 18 Sep 2020 01:25:31 +0100 Subject: [PATCH 012/178] Handle client-disabled server push (#486) --- src/codec/error.rs | 4 +++ src/frame/settings.rs | 4 +-- src/proto/connection.rs | 2 +- src/proto/streams/send.rs | 11 ++++++++ tests/h2-support/src/frames.rs | 5 ++++ tests/h2-tests/tests/server.rs | 47 ++++++++++++++++++++++++++++++++++ 6 files changed, 70 insertions(+), 3 deletions(-) diff --git a/src/codec/error.rs b/src/codec/error.rs index 2c6b2961d..5d6659223 100644 --- a/src/codec/error.rs +++ b/src/codec/error.rs @@ -63,6 +63,9 @@ pub enum UserError { /// Tries to update local SETTINGS while ACK has not been received. 
SendSettingsWhilePending, + + /// Tries to send push promise to peer who has disabled server push + PeerDisabledServerPush, } // ===== impl RecvError ===== @@ -136,6 +139,7 @@ impl fmt::Display for UserError { PollResetAfterSendResponse => "poll_reset after send_response is illegal", SendPingWhilePending => "send_ping before received previous pong", SendSettingsWhilePending => "sending SETTINGS before received previous ACK", + PeerDisabledServerPush => "sending PUSH_PROMISE to peer who disabled server push", }) } } diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 06de9cf12..523f20b06 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -99,8 +99,8 @@ impl Settings { self.max_header_list_size = size; } - pub fn is_push_enabled(&self) -> bool { - self.enable_push.unwrap_or(1) != 0 + pub fn is_push_enabled(&self) -> Option { + self.enable_push.map(|val| val != 0) } pub fn set_enable_push(&mut self, enable: bool) { diff --git a/src/proto/connection.rs b/src/proto/connection.rs index ffa2945c6..1c1c8ce1b 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -86,7 +86,7 @@ where .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), initial_max_send_streams: config.initial_max_send_streams, local_next_stream_id: config.next_stream_id, - local_push_enabled: config.settings.is_push_enabled(), + local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), local_reset_duration: config.reset_stream_duration, local_reset_max: config.reset_stream_max, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 220a8b461..10934de48 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -32,6 +32,8 @@ pub(super) struct Send { /// Prioritization layer prioritize: Prioritize, + + is_push_enabled: bool, } /// A value to detect which public API has called `poll_reset`. 
@@ -49,6 +51,7 @@ impl Send { max_stream_id: StreamId::MAX, next_stream_id: Ok(config.local_next_stream_id), prioritize: Prioritize::new(config), + is_push_enabled: true, } } @@ -95,6 +98,10 @@ impl Send { stream: &mut store::Ptr, task: &mut Option, ) -> Result<(), UserError> { + if !self.is_push_enabled { + return Err(UserError::PeerDisabledServerPush); + } + tracing::trace!( "send_push_promise; frame={:?}; init_window={:?}", frame, @@ -496,6 +503,10 @@ impl Send { } } + if let Some(val) = settings.is_push_enabled() { + self.is_push_enabled = val + } + Ok(()) } diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index b9393b2b5..05fb3202f 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -339,6 +339,11 @@ impl Mock { self.0.set_max_header_list_size(Some(val)); self } + + pub fn disable_push(mut self) -> Self { + self.0.set_enable_push(false); + self + } } impl From> for frame::Settings { diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 3a7649135..4be70902b 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -220,6 +220,53 @@ async fn push_request() { join(client, srv).await; } +#[tokio::test] +async fn push_request_disabled() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + client + .assert_server_handshake_with_settings(frames::settings().disable_push()) + .await; + client + .send_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + client + .recv_frame(frames::headers(1).response(200).eos()) + .await; + }; + + let srv = async move { + let mut srv = server::handshake(io).await.expect("handshake"); + let (req, mut stream) = srv.next().await.unwrap().unwrap(); + + assert_eq!(req.method(), &http::Method::GET); + + // attempt to push - expect failure + let req = http::Request::builder() + .method("GET") + .uri("https://http2.akamai.com/style.css") + .body(()) + .unwrap(); + stream + .push_request(req) + .expect_err("push_request should error"); + + // send normal response + let rsp = http::Response::builder().status(200).body(()).unwrap(); + stream.send_response(rsp, true).unwrap(); + + assert!(srv.next().await.is_none()); + }; + + join(client, srv).await; +} + #[tokio::test] async fn push_request_against_concurrency() { h2_support::trace_init!(); From cb2c7ac72b0053496d5bb110f2c0a2678610a53f Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 22 Oct 2020 14:35:30 -0700 Subject: [PATCH 013/178] Fix h2psec CI script, since debug output of socket addresses changed (#493) --- ci/h2spec.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/h2spec.sh b/ci/h2spec.sh index c55af5513..ff7295515 100755 --- a/ci/h2spec.sh +++ b/ci/h2spec.sh @@ -13,7 +13,7 @@ SERVER_PID=$! # wait 'til the server is listening before running h2spec, and pipe server's # stdout to a log file. 
-sed '/listening on Ok(V4(127.0.0.1:5928))/q' <&3 ; cat <&3 > "${LOGFILE}" & +sed '/listening on Ok(127.0.0.1:5928)/q' <&3 ; cat <&3 > "${LOGFILE}" & # run h2spec against the server, printing the server log if h2spec failed ./h2spec -p 5928 From 0ba7d13ae51972c451228ccdac867dbfd8766971 Mon Sep 17 00:00:00 2001 From: Yuchen Wu Date: Thu, 22 Oct 2020 14:36:41 -0700 Subject: [PATCH 014/178] Allow responses of HEAD requests to have empty DATA frames (#490) --- src/proto/streams/stream.rs | 6 +++- tests/h2-tests/tests/client_request.rs | 44 ++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index c2b647c93..5bbda250c 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -286,7 +286,11 @@ impl Stream { Some(val) => *rem = val, None => return Err(()), }, - ContentLength::Head => return Err(()), + ContentLength::Head => { + if len != 0 { + return Err(()); + } + } _ => {} } diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 41f8f6459..35b4beacf 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1171,6 +1171,50 @@ async fn malformed_response_headers_dont_unlink_stream() { join(srv, client).await; } +#[tokio::test] +async fn allow_empty_data_for_head() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame( + frames::headers(1) + .request("HEAD", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame( + frames::headers(1) + .response(200) + .field("content-length", 100), + ) + .await; + srv.send_frame(frames::data(1, "").eos()).await; + }; + + let h2 = async move { + let (mut client, h2) = client::Builder::new() + .handshake::<_, Bytes>(io) + .await + .unwrap(); + tokio::spawn(async { + h2.await.expect("connection failed"); + }); + let request = Request::builder() + .method(Method::HEAD) + .uri("https://example.com/") + .body(()) + .unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + let (_, mut body) = response.await.unwrap().into_parts(); + assert_eq!(body.data().await.unwrap().unwrap(), ""); + }; + + join(srv, h2).await; +} + const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From 3bb01aafaea7b9be4f3a71cf201274670986e253 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 22 Oct 2020 16:12:46 -0700 Subject: [PATCH 015/178] v0.2.7 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ef7b487cb..25a273e8a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.2.7 (October 22, 2020) + +* Fix stream ref count when sending a push promise +* Fix receiving empty DATA frames in response to a HEAD request +* Fix handling of client disabling SERVER_PUSH + # 0.2.6 (July 13, 2020) * Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.) diff --git a/Cargo.toml b/Cargo.toml index a6e820047..186259952 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. 
# - Create git tag -version = "0.2.6" +version = "0.2.7" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.2.6/h2/" +documentation = "https://docs.rs/h2/0.2.7/h2/" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 484052e96..79d93dc8a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.2.6")] +#![doc(html_root_url = "https://docs.rs/h2/0.2.7")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 676a068fd4e881d09ff985b60c4378f193b98d14 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 23 Oct 2020 08:18:51 -0700 Subject: [PATCH 016/178] Prepare for 0.3.x changes --- Cargo.toml | 6 ++++-- src/lib.rs | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 186259952..784c14d86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.2.7" +version = "0.3.0" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.2.7/h2/" +documentation = "https://docs.rs/h2/0.3.0/h2/" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] @@ -20,6 +20,8 @@ categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] edition = "2018" +publish = false + [features] # Enables `futures::Stream` implementations for various types. stream = [] diff --git a/src/lib.rs b/src/lib.rs index 79d93dc8a..8a3368837 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.2.7")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.0")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From cbbdd305b1afc1eaf19f2e3b26f9419048041e7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Fri, 23 Oct 2020 18:45:09 +0100 Subject: [PATCH 017/178] update to tokio 0.3 (#491) --- Cargo.toml | 12 ++++---- examples/server.rs | 2 +- src/codec/framed_write.rs | 43 ++++++++++++++-------------- src/server.rs | 8 ++++-- tests/h2-fuzz/Cargo.toml | 2 +- tests/h2-support/Cargo.toml | 4 +-- tests/h2-support/src/mock.rs | 41 +++++++++++++------------- tests/h2-tests/Cargo.toml | 2 +- tests/h2-tests/tests/codec_read.rs | 5 ++++ tests/h2-tests/tests/flow_control.rs | 4 +-- tests/h2-tests/tests/hammer.rs | 6 ++-- 11 files changed, 68 insertions(+), 61 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 784c14d86..6c806de24 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,14 +45,14 @@ members = [ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } -tokio-util = { version = "0.3.1", features = ["codec"] } -tokio = { version = "0.2", features = ["io-util"] } +tokio-util = { version = "0.4.0", features = ["codec"] } +tokio = { version = "0.3", features = ["io-util"] } bytes = "0.5.2" http = "0.2" tracing = { version = "0.1.13", default-features = false, features = ["std", "log"] } tracing-futures = { version = "0.2", default-features = false, features = ["std-future"]} fnv = "1.0.5" -slab = "0.4.0" +slab = "0.4.2" indexmap = "1.0" [dev-dependencies] @@ -68,9 +68,9 @@ serde = "1.0.0" serde_json = "1.0.0" # Examples -tokio = { version = "0.2", features = ["dns", "macros", "rt-core", "sync", "tcp"] } +tokio = { version = "0.3", features = ["rt-multi-thread", "macros", "sync", "net"] } env_logger = { version = "0.5.3", default-features = false } -rustls = "0.16" -tokio-rustls = "0.12.0" +rustls = "0.18" +tokio-rustls = "0.20.0" webpki = "0.21" webpki-roots = "0.17" diff --git a/examples/server.rs b/examples/server.rs index 1753b7a2e..777f4ea14 100644 --- a/examples/server.rs +++ b/examples/server.rs @@ -8,7 +8,7 @@ use tokio::net::{TcpListener, TcpStream}; async fn main() -> Result<(), Box> { let _ = env_logger::try_init(); - let mut listener = TcpListener::bind("127.0.0.1:5928").await?; + let listener = TcpListener::bind("127.0.0.1:5928").await?; println!("listening on {:?}", listener.local_addr()); diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 201bba26c..53032ce23 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -3,13 +3,10 @@ use crate::codec::UserError::*; use crate::frame::{self, Frame, FrameSize}; use crate::hpack; -use bytes::{ - buf::{BufExt, BufMutExt}, - Buf, BufMut, BytesMut, -}; +use bytes::{buf::BufMutExt, Buf, BufMut, BytesMut}; use std::pin::Pin; use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use std::io::{self, Cursor}; @@ -193,12 +190,26 @@ where match self.next { Some(Next::Data(ref mut frame)) => { tracing::trace!(queued_data_frame = true); - let mut buf = (&mut self.buf).chain(frame.payload_mut()); - ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?; + + if self.buf.has_remaining() { + let n = + ready!(Pin::new(&mut self.inner).poll_write(cx, 
self.buf.bytes()))?; + self.buf.advance(n); + } + + let buf = frame.payload_mut(); + + if !self.buf.has_remaining() && buf.has_remaining() { + let n = ready!(Pin::new(&mut self.inner).poll_write(cx, buf.bytes()))?; + buf.advance(n); + } } _ => { tracing::trace!(queued_data_frame = false); - ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?; + let n = ready!( + Pin::new(&mut self.inner).poll_write(cx, &mut self.buf.bytes()) + )?; + self.buf.advance(n); } } } @@ -290,25 +301,13 @@ impl FramedWrite { } impl AsyncRead for FramedWrite { - unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit]) -> bool { - self.inner.prepare_uninitialized_buffer(buf) - } - fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf, + ) -> Poll> { Pin::new(&mut self.inner).poll_read(cx, buf) } - - fn poll_read_buf( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut Buf, - ) -> Poll> { - Pin::new(&mut self.inner).poll_read_buf(cx, buf) - } } // We never project the Pin to `B`. diff --git a/src/server.rs b/src/server.rs index 3c093f7ee..32433121a 100644 --- a/src/server.rs +++ b/src/server.rs @@ -127,7 +127,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; use std::{convert, fmt, io, mem}; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tracing_futures::{Instrument, Instrumented}; /// In progress HTTP/2.0 connection handshake future. @@ -1158,8 +1158,10 @@ where let mut rem = PREFACE.len() - self.pos; while rem > 0 { - let n = ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf[..rem])) + let mut buf = ReadBuf::new(&mut buf[..rem]); + ready!(Pin::new(self.inner_mut()).poll_read(cx, &mut buf)) .map_err(crate::Error::from_io)?; + let n = buf.filled().len(); if n == 0 { return Poll::Ready(Err(crate::Error::from_io(io::Error::new( io::ErrorKind::UnexpectedEof, @@ -1167,7 +1169,7 @@ where )))); } - if PREFACE[self.pos..self.pos + n] != buf[..n] { + if &PREFACE[self.pos..self.pos + n] != buf.filled() { proto_err!(conn: "read_preface: invalid preface"); // TODO: Should this just write the GO_AWAY frame directly? 
return Poll::Ready(Err(Reason::PROTOCOL_ERROR.into())); diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index 8bb121959..40e985de6 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -12,4 +12,4 @@ env_logger = { version = "0.5.3", default-features = false } futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" http = "0.2" -tokio = { version = "0.2", features = [] } +tokio = { version = "0.3", features = [] } diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index c4e68b1ee..183013f14 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -12,5 +12,5 @@ tracing = "0.1" tracing-subscriber = { version = "0.2", default-features = false, features = ["fmt", "chrono", "ansi"] } futures = { version = "0.3", default-features = false } http = "0.2" -tokio = { version = "0.2", features = ["time"] } -tokio-test = "0.2" +tokio = { version = "0.3", features = ["time"] } +tokio-test = "0.3" diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index 08837fa56..ebfc094c1 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -6,7 +6,7 @@ use h2::{self, RecvError, SendError}; use futures::future::poll_fn; use futures::{ready, Stream, StreamExt}; -use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf}; use super::assert::assert_frame_eq; use std::pin::Pin; @@ -147,10 +147,11 @@ impl Handle { poll_fn(move |cx| { while buf.has_remaining() { let res = Pin::new(self.codec.get_mut()) - .poll_write_buf(cx, &mut buf) + .poll_write(cx, &mut buf.bytes()) .map_err(|e| panic!("write err={:?}", e)); - ready!(res).unwrap(); + let n = ready!(res).unwrap(); + buf.advance(n); } Poll::Ready(()) @@ -294,8 +295,8 @@ impl AsyncRead for Handle { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf, + ) -> Poll> { Pin::new(self.codec.get_mut()).poll_read(cx, buf) } } @@ -344,10 +345,10 @@ impl AsyncRead for Mock { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf, + ) -> Poll> { assert!( - buf.len() > 0, + buf.remaining() > 0, "attempted read with zero length buffer... wut?" ); @@ -355,18 +356,18 @@ impl AsyncRead for Mock { if me.rx.is_empty() { if me.closed { - return Poll::Ready(Ok(0)); + return Poll::Ready(Ok(())); } me.rx_task = Some(cx.waker().clone()); return Poll::Pending; } - let n = cmp::min(buf.len(), me.rx.len()); - buf[..n].copy_from_slice(&me.rx[..n]); + let n = cmp::min(buf.remaining(), me.rx.len()); + buf.put_slice(&me.rx[..n]); me.rx.drain(..n); - Poll::Ready(Ok(n)) + Poll::Ready(Ok(())) } } @@ -427,10 +428,10 @@ impl AsyncRead for Pipe { fn poll_read( self: Pin<&mut Self>, cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { + buf: &mut ReadBuf, + ) -> Poll> { assert!( - buf.len() > 0, + buf.remaining() > 0, "attempted read with zero length buffer... wut?" 
); @@ -438,18 +439,18 @@ impl AsyncRead for Pipe { if me.tx.is_empty() { if me.closed { - return Poll::Ready(Ok(0)); + return Poll::Ready(Ok(())); } me.tx_task = Some(cx.waker().clone()); return Poll::Pending; } - let n = cmp::min(buf.len(), me.tx.len()); - buf[..n].copy_from_slice(&me.tx[..n]); + let n = cmp::min(buf.remaining(), me.tx.len()); + buf.put_slice(&me.tx[..n]); me.tx.drain(..n); - Poll::Ready(Ok(n)) + Poll::Ready(Ok(())) } } @@ -479,5 +480,5 @@ impl AsyncWrite for Pipe { } pub async fn idle_ms(ms: u64) { - tokio::time::delay_for(Duration::from_millis(ms)).await + tokio::time::sleep(Duration::from_millis(ms)).await } diff --git a/tests/h2-tests/Cargo.toml b/tests/h2-tests/Cargo.toml index 4c711fe24..b5f3c6eeb 100644 --- a/tests/h2-tests/Cargo.toml +++ b/tests/h2-tests/Cargo.toml @@ -11,4 +11,4 @@ edition = "2018" h2-support = { path = "../h2-support" } tracing = "0.1.13" futures = { version = "0.3", default-features = false, features = ["alloc"] } -tokio = { version = "0.2", features = ["macros", "tcp"] } +tokio = { version = "0.3", features = ["macros", "net", "rt", "io-util"] } diff --git a/tests/h2-tests/tests/codec_read.rs b/tests/h2-tests/tests/codec_read.rs index 95e895ddd..fe3cfea97 100644 --- a/tests/h2-tests/tests/codec_read.rs +++ b/tests/h2-tests/tests/codec_read.rs @@ -190,6 +190,7 @@ async fn read_continuation_frames() { #[tokio::test] async fn update_max_frame_len_at_rest() { use futures::StreamExt; + use tokio::io::AsyncReadExt; h2_support::trace_init!(); // TODO: add test for updating max frame length in flight as well? @@ -211,6 +212,10 @@ async fn update_max_frame_len_at_rest() { codec.next().await.unwrap().unwrap_err().to_string(), "frame with invalid size" ); + + // drain codec buffer + let mut buf = Vec::new(); + codec.get_mut().read_to_end(&mut buf).await.unwrap(); } #[tokio::test] diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 4b6fe7a85..08019bbae 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -972,7 +972,7 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() { // // A timeout is used here to avoid blocking forever if there is a // failure - let result = select(rx2, tokio::time::delay_for(Duration::from_secs(5))).await; + let result = select(rx2, tokio::time::sleep(Duration::from_secs(5))).await; if let Either::Right((_, _)) = result { panic!("Timed out"); } @@ -1004,7 +1004,7 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() { }); // Wait for server handshake to complete. 
- let result = select(rx1, tokio::time::delay_for(Duration::from_secs(5))).await; + let result = select(rx1, tokio::time::sleep(Duration::from_secs(5))).await; if let Either::Right((_, _)) = result { panic!("Timed out"); } diff --git a/tests/h2-tests/tests/hammer.rs b/tests/h2-tests/tests/hammer.rs index cf7051814..9a200537a 100644 --- a/tests/h2-tests/tests/hammer.rs +++ b/tests/h2-tests/tests/hammer.rs @@ -26,8 +26,8 @@ impl Server { { let mk_data = Arc::new(mk_data); - let mut rt = tokio::runtime::Runtime::new().unwrap(); - let mut listener = rt + let rt = tokio::runtime::Runtime::new().unwrap(); + let listener = rt .block_on(TcpListener::bind(SocketAddr::from(([127, 0, 0, 1], 0)))) .unwrap(); let addr = listener.local_addr().unwrap(); @@ -140,7 +140,7 @@ fn hammer_client_concurrency() { }) }); - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); rt.block_on(tcp); println!("...done"); } From 5a92f256c0ab63fd12e2180f99e603230955dfbc Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Thu, 19 Nov 2020 23:38:56 +0100 Subject: [PATCH 018/178] Upgrade to bytes 0.6 (#497) * Upgrade to bytes 0.6 * Update Cargo.toml Co-authored-by: Eliza Weisman * Update tests/h2-support/Cargo.toml Co-authored-by: Eliza Weisman Co-authored-by: Eliza Weisman --- Cargo.toml | 8 ++++---- src/codec/framed_write.rs | 2 +- src/frame/headers.rs | 2 +- src/hpack/encoder.rs | 4 ++-- src/hpack/test/fixture.rs | 2 +- src/hpack/test/fuzz.rs | 2 +- src/proto/streams/prioritize.rs | 2 +- tests/h2-fuzz/Cargo.toml | 4 ++-- tests/h2-support/Cargo.toml | 4 ++-- tests/h2-support/src/prelude.rs | 5 +---- tests/h2-tests/Cargo.toml | 2 +- 11 files changed, 17 insertions(+), 20 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6c806de24..3b55d080b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,9 +45,9 @@ members = [ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } -tokio-util = { version = "0.4.0", features = ["codec"] } -tokio = { version = "0.3", features = ["io-util"] } -bytes = "0.5.2" +tokio-util = { version = "0.5", features = ["codec"] } +tokio = { version = "0.3.2", features = ["io-util"] } +bytes = "0.6" http = "0.2" tracing = { version = "0.1.13", default-features = false, features = ["std", "log"] } tracing-futures = { version = "0.2", default-features = false, features = ["std-future"]} @@ -68,7 +68,7 @@ serde = "1.0.0" serde_json = "1.0.0" # Examples -tokio = { version = "0.3", features = ["rt-multi-thread", "macros", "sync", "net"] } +tokio = { version = "0.3.2", features = ["rt-multi-thread", "macros", "sync", "net"] } env_logger = { version = "0.5.3", default-features = false } rustls = "0.18" tokio-rustls = "0.20.0" diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 53032ce23..870b5589a 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -3,7 +3,7 @@ use crate::codec::UserError::*; use crate::frame::{self, Frame, FrameSize}; use crate::hpack; -use bytes::{buf::BufMutExt, Buf, BufMut, BytesMut}; +use bytes::{buf::BufMut, Buf, BytesMut}; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 0719f140c..ad37393b4 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -10,7 +10,7 @@ use bytes::{Bytes, BytesMut}; use std::fmt; use std::io::Cursor; -type EncodeBuf<'a> = 
bytes::buf::ext::Limit<&'a mut BytesMut>; +type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>; // Minimum MAX_FRAME_SIZE is 16kb, so save some arbitrary space for frame // head and other header bits. diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index e6881dd05..ee264d5c6 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -1,7 +1,7 @@ use super::table::{Index, Table}; use super::{huffman, Header}; -use bytes::{buf::ext::Limit, BufMut, BytesMut}; +use bytes::{buf::Limit, BufMut, BytesMut}; use http::header::{HeaderName, HeaderValue}; type DstBuf<'a> = Limit<&'a mut BytesMut>; @@ -428,7 +428,7 @@ fn rewind(buf: &mut DstBuf<'_>, pos: usize) { mod test { use super::*; use crate::hpack::Header; - use bytes::buf::BufMutExt; + use bytes::buf::BufMut; use http::*; #[test] diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 20ee1275b..9828f0488 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -1,6 +1,6 @@ use crate::hpack::{Decoder, Encoder, Header}; -use bytes::{buf::BufMutExt, BytesMut}; +use bytes::{buf::BufMut, BytesMut}; use hex::FromHex; use serde_json::Value; diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index dbf9b3c8f..0abb66aca 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -2,7 +2,7 @@ use crate::hpack::{Decoder, Encode, Encoder, Header}; use http::header::{HeaderName, HeaderValue}; -use bytes::{buf::BufMutExt, Bytes, BytesMut}; +use bytes::{buf::BufMut, Bytes, BytesMut}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use rand::{Rng, SeedableRng, StdRng}; diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 937982086..96b65d7ad 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -6,7 +6,7 @@ use crate::frame::{Reason, StreamId}; use crate::codec::UserError; use crate::codec::UserError::*; -use bytes::buf::ext::{BufExt, Take}; +use bytes::buf::{Buf, Take}; use std::io; use std::task::{Context, Poll, Waker}; use std::{cmp, fmt, mem}; diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index 40e985de6..d76a8f609 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -11,5 +11,5 @@ h2 = { path = "../.." 
} env_logger = { version = "0.5.3", default-features = false } futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" -http = "0.2" -tokio = { version = "0.3", features = [] } +http = { git = "https://github.com/paolobarbolini/http.git", branch = "bytes06" } +tokio = { version = "0.3.2", features = [] } diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index 183013f14..c441ded9c 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -7,10 +7,10 @@ edition = "2018" [dependencies] h2 = { path = "../..", features = ["stream", "unstable"] } -bytes = "0.5" +bytes = "0.6" tracing = "0.1" tracing-subscriber = { version = "0.2", default-features = false, features = ["fmt", "chrono", "ansi"] } futures = { version = "0.3", default-features = false } http = "0.2" -tokio = { version = "0.3", features = ["time"] } +tokio = { version = "0.3.2", features = ["time"] } tokio-test = "0.3" diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index dafdd29f0..f4b2e823f 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -42,10 +42,7 @@ pub use super::client_ext::SendRequestExt; // Re-export HTTP types pub use http::{uri, HeaderMap, Method, Request, Response, StatusCode, Version}; -pub use bytes::{ - buf::{BufExt, BufMutExt}, - Buf, BufMut, Bytes, BytesMut, -}; +pub use bytes::{Buf, BufMut, Bytes, BytesMut}; pub use tokio::io::{AsyncRead, AsyncWrite}; diff --git a/tests/h2-tests/Cargo.toml b/tests/h2-tests/Cargo.toml index b5f3c6eeb..ac16043b3 100644 --- a/tests/h2-tests/Cargo.toml +++ b/tests/h2-tests/Cargo.toml @@ -11,4 +11,4 @@ edition = "2018" h2-support = { path = "../h2-support" } tracing = "0.1.13" futures = { version = "0.3", default-features = false, features = ["alloc"] } -tokio = { version = "0.3", features = ["macros", "net", "rt", "io-util"] } +tokio = { version = "0.3.2", features = ["macros", "net", "rt", "io-util"] } From 73bf6a61ada04f332138b1eb0f79ecade219f00c Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Mon, 23 Nov 2020 16:35:48 -0800 Subject: [PATCH 019/178] re-enable vectored writes (#500) Tokio's AsyncWrite trait once again has support for vectored writes in Tokio 0.3.4 (see tokio-rs/tokio#3149. This branch re-enables vectored writes in h2. This change doesn't make all that big of a performance improvement in Hyper's HTTP/2 benchmarks, but they use a BytesMut as the buffer. With a buffer that turns into more IO vectors in bytes_vectored, there might be a more noticeable performance improvement. I spent a bit trying to refactor the flush logic to coalesce into fewer writev calls with more buffers, but the current implementation seems like about the best we're going to get without a bigger refactor. It's basically the same as what h2 did previously, so it's probably fine. 
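For illustration only, here is a minimal sketch of the same idea outside of h2: it flushes a multi-chunk buffer with one vectored write when the underlying `AsyncWrite` reports support for it, and falls back to plain writes otherwise. It assumes the tokio 1.x / bytes 1.x method names that this series later migrates to (`chunks_vectored` and `chunk` rather than `bytes_vectored` and `bytes`), and the `poll_flush_buf` helper and `MAX_IOVS` constant are illustrative names mirroring the diff below, not h2 API:

    use std::io::{self, IoSlice};
    use std::pin::Pin;
    use std::task::{Context, Poll};

    use bytes::Buf;
    use tokio::io::AsyncWrite;

    // Upper bound on gathered slices per write, mirroring the fixed-size
    // IoSlice array used in the diff below.
    const MAX_IOVS: usize = 64;

    // Flush `buf`, preferring a single vectored write over per-chunk writes.
    fn poll_flush_buf<W, B>(
        io: &mut W,
        buf: &mut B,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<()>>
    where
        W: AsyncWrite + Unpin,
        B: Buf,
    {
        while buf.has_remaining() {
            let poll = if io.is_write_vectored() {
                // Gather up to MAX_IOVS chunks (e.g. a frame head chained with
                // its payload) so a single writev-style call can cover them all
                // without first copying them into one contiguous buffer.
                let mut iovs = [IoSlice::new(&[]); MAX_IOVS];
                let cnt = buf.chunks_vectored(&mut iovs);
                Pin::new(&mut *io).poll_write_vectored(cx, &iovs[..cnt])
            } else {
                // Fallback: write the first contiguous chunk only.
                Pin::new(&mut *io).poll_write(cx, buf.chunk())
            };
            match poll {
                Poll::Ready(Ok(0)) => {
                    return Poll::Ready(Err(io::ErrorKind::WriteZero.into()))
                }
                Poll::Ready(Ok(n)) => buf.advance(n),
                Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
                Poll::Pending => return Poll::Pending,
            }
        }
        Poll::Ready(Ok(()))
    }

The benefit shows up when the buffer splits into several non-contiguous chunks: the frame head and the payload can go out in one syscall instead of two writes or an extra copy, which is also why a buffer that yields more IO vectors than a single `BytesMut` would be expected to benefit more.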
--- Cargo.toml | 4 ++-- src/codec/framed_write.rs | 47 ++++++++++++++++++++++++--------------- 2 files changed, 31 insertions(+), 20 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 3b55d080b..3b73efe4e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,7 +46,7 @@ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } tokio-util = { version = "0.5", features = ["codec"] } -tokio = { version = "0.3.2", features = ["io-util"] } +tokio = { version = "0.3.4", features = ["io-util"] } bytes = "0.6" http = "0.2" tracing = { version = "0.1.13", default-features = false, features = ["std", "log"] } @@ -68,7 +68,7 @@ serde = "1.0.0" serde_json = "1.0.0" # Examples -tokio = { version = "0.3.2", features = ["rt-multi-thread", "macros", "sync", "net"] } +tokio = { version = "0.3.4", features = ["rt-multi-thread", "macros", "sync", "net"] } env_logger = { version = "0.5.3", default-features = false } rustls = "0.18" tokio-rustls = "0.20.0" diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 870b5589a..e2151d660 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -3,12 +3,12 @@ use crate::codec::UserError::*; use crate::frame::{self, Frame, FrameSize}; use crate::hpack; -use bytes::{buf::BufMut, Buf, BytesMut}; +use bytes::{Buf, BufMut, BytesMut}; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use std::io::{self, Cursor}; +use std::io::{self, Cursor, IoSlice}; // A macro to get around a method needing to borrow &mut self macro_rules! limited_write_buf { @@ -39,6 +39,9 @@ pub struct FramedWrite { /// Max frame size, this is specified by the peer max_frame_size: FrameSize, + + /// Whether or not the wrapped `AsyncWrite` supports vectored IO. + is_write_vectored: bool, } #[derive(Debug)] @@ -68,6 +71,7 @@ where B: Buf, { pub fn new(inner: T) -> FramedWrite { + let is_write_vectored = inner.is_write_vectored(); FramedWrite { inner, hpack: hpack::Encoder::default(), @@ -75,6 +79,7 @@ where next: None, last_data_frame: None, max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, + is_write_vectored, } } @@ -182,6 +187,8 @@ where /// Flush buffered data to the wire pub fn flush(&mut self, cx: &mut Context) -> Poll> { + const MAX_IOVS: usize = 64; + let span = tracing::trace_span!("FramedWrite::flush"); let _e = span.enter(); @@ -190,25 +197,29 @@ where match self.next { Some(Next::Data(ref mut frame)) => { tracing::trace!(queued_data_frame = true); - - if self.buf.has_remaining() { - let n = - ready!(Pin::new(&mut self.inner).poll_write(cx, self.buf.bytes()))?; - self.buf.advance(n); - } - - let buf = frame.payload_mut(); - - if !self.buf.has_remaining() && buf.has_remaining() { - let n = ready!(Pin::new(&mut self.inner).poll_write(cx, buf.bytes()))?; - buf.advance(n); - } + let mut buf = (&mut self.buf).chain(frame.payload_mut()); + // TODO(eliza): when tokio-util 0.5.1 is released, this + // could just use `poll_write_buf`... + let n = if self.is_write_vectored { + let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; + let cnt = buf.bytes_vectored(&mut bufs); + ready!(Pin::new(&mut self.inner).poll_write_vectored(cx, &bufs[..cnt]))? + } else { + ready!(Pin::new(&mut self.inner).poll_write(cx, buf.bytes()))? 
+ }; + buf.advance(n); } _ => { tracing::trace!(queued_data_frame = false); - let n = ready!( - Pin::new(&mut self.inner).poll_write(cx, &mut self.buf.bytes()) - )?; + let n = if self.is_write_vectored { + let mut iovs = [IoSlice::new(&[]); MAX_IOVS]; + let cnt = self.buf.bytes_vectored(&mut iovs); + ready!( + Pin::new(&mut self.inner).poll_write_vectored(cx, &mut iovs[..cnt]) + )? + } else { + ready!(Pin::new(&mut self.inner).poll_write(cx, &mut self.buf.bytes()))? + }; self.buf.advance(n); } } From dc3079ab89ca9fa7b79e014f5b2a835f30f4916b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 25 Nov 2020 16:25:36 -0800 Subject: [PATCH 020/178] Remove log feature from tracing dependency (#501) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 3b73efe4e..390a1576f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,7 +49,7 @@ tokio-util = { version = "0.5", features = ["codec"] } tokio = { version = "0.3.4", features = ["io-util"] } bytes = "0.6" http = "0.2" -tracing = { version = "0.1.13", default-features = false, features = ["std", "log"] } +tracing = { version = "0.1.13", default-features = false, features = ["std"] } tracing-futures = { version = "0.2", default-features = false, features = ["std-future"]} fnv = "1.0.5" slab = "0.4.2" From b4976675fa9af48fff61c05842154c4c04cded49 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 23 Dec 2020 10:01:44 -0800 Subject: [PATCH 021/178] Update to Tokio and Bytes 1.0 (#504) --- Cargo.toml | 16 ++++++++-------- examples/akamai.rs | 5 +++++ src/codec/framed_write.rs | 8 ++++---- src/hpack/decoder.rs | 4 ++-- src/proto/streams/prioritize.rs | 8 ++++++-- tests/h2-fuzz/Cargo.toml | 4 ++-- tests/h2-support/Cargo.toml | 6 +++--- tests/h2-support/src/mock.rs | 2 +- tests/h2-tests/Cargo.toml | 2 +- tests/h2-tests/tests/flow_control.rs | 15 ++++++--------- 10 files changed, 38 insertions(+), 32 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 390a1576f..b6668993d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,9 +45,9 @@ members = [ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } -tokio-util = { version = "0.5", features = ["codec"] } -tokio = { version = "0.3.4", features = ["io-util"] } -bytes = "0.6" +tokio-util = { version = "0.6", features = ["codec"] } +tokio = { version = "1", features = ["io-util"] } +bytes = "1" http = "0.2" tracing = { version = "0.1.13", default-features = false, features = ["std"] } tracing-futures = { version = "0.2", default-features = false, features = ["std-future"]} @@ -68,9 +68,9 @@ serde = "1.0.0" serde_json = "1.0.0" # Examples -tokio = { version = "0.3.4", features = ["rt-multi-thread", "macros", "sync", "net"] } +tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "net"] } env_logger = { version = "0.5.3", default-features = false } -rustls = "0.18" -tokio-rustls = "0.20.0" -webpki = "0.21" -webpki-roots = "0.17" +#rustls = "0.18" +#tokio-rustls = "0.20.0" +#webpki = "0.21" +#webpki-roots = "0.17" diff --git a/examples/akamai.rs b/examples/akamai.rs index 29d8a9347..ebd09ad0d 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -1,3 +1,7 @@ +fn main() { + eprintln!("TODO: Re-enable when tokio-rustls is upgraded."); +} +/* use h2::client; use http::{Method, Request}; use tokio::net::TcpStream; @@ -73,3 +77,4 @@ pub async fn main() -> Result<(), Box> { } Ok(()) } +*/ diff --git 
a/src/codec/framed_write.rs b/src/codec/framed_write.rs index e2151d660..8ec2045ce 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -202,10 +202,10 @@ where // could just use `poll_write_buf`... let n = if self.is_write_vectored { let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; - let cnt = buf.bytes_vectored(&mut bufs); + let cnt = buf.chunks_vectored(&mut bufs); ready!(Pin::new(&mut self.inner).poll_write_vectored(cx, &bufs[..cnt]))? } else { - ready!(Pin::new(&mut self.inner).poll_write(cx, buf.bytes()))? + ready!(Pin::new(&mut self.inner).poll_write(cx, buf.chunk()))? }; buf.advance(n); } @@ -213,12 +213,12 @@ where tracing::trace!(queued_data_frame = false); let n = if self.is_write_vectored { let mut iovs = [IoSlice::new(&[]); MAX_IOVS]; - let cnt = self.buf.bytes_vectored(&mut iovs); + let cnt = self.buf.chunks_vectored(&mut iovs); ready!( Pin::new(&mut self.inner).poll_write_vectored(cx, &mut iovs[..cnt]) )? } else { - ready!(Pin::new(&mut self.inner).poll_write(cx, &mut self.buf.bytes()))? + ready!(Pin::new(&mut self.inner).poll_write(cx, &mut self.buf.chunk()))? }; self.buf.advance(n); } diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index aba673d37..39afc8ad1 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -311,7 +311,7 @@ impl Decoder { if huff { let ret = { - let raw = &buf.bytes()[..len]; + let raw = &buf.chunk()[..len]; huffman::decode(raw, &mut self.buffer).map(BytesMut::freeze) }; @@ -419,7 +419,7 @@ fn decode_int(buf: &mut B, prefix_size: u8) -> Result(buf: &mut B) -> Option { if buf.has_remaining() { - Some(buf.bytes()[0]) + Some(buf.chunk()[0]) } else { None } diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 96b65d7ad..b7b616fac 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -847,8 +847,12 @@ where self.inner.remaining() } - fn bytes(&self) -> &[u8] { - self.inner.bytes() + fn chunk(&self) -> &[u8] { + self.inner.chunk() + } + + fn chunks_vectored<'a>(&'a self, dst: &mut [std::io::IoSlice<'a>]) -> usize { + self.inner.chunks_vectored(dst) } fn advance(&mut self, cnt: usize) { diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index d76a8f609..7fbf4c3f3 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -11,5 +11,5 @@ h2 = { path = "../.." 
} env_logger = { version = "0.5.3", default-features = false } futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" -http = { git = "https://github.com/paolobarbolini/http.git", branch = "bytes06" } -tokio = { version = "0.3.2", features = [] } +http = "0.2" +tokio = "1" diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index c441ded9c..e97c6b310 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -7,10 +7,10 @@ edition = "2018" [dependencies] h2 = { path = "../..", features = ["stream", "unstable"] } -bytes = "0.6" +bytes = "1" tracing = "0.1" tracing-subscriber = { version = "0.2", default-features = false, features = ["fmt", "chrono", "ansi"] } futures = { version = "0.3", default-features = false } http = "0.2" -tokio = { version = "0.3.2", features = ["time"] } -tokio-test = "0.3" +tokio = { version = "1", features = ["time"] } +tokio-test = "0.4" diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index ebfc094c1..4f81de239 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -147,7 +147,7 @@ impl Handle { poll_fn(move |cx| { while buf.has_remaining() { let res = Pin::new(self.codec.get_mut()) - .poll_write(cx, &mut buf.bytes()) + .poll_write(cx, &mut buf.chunk()) .map_err(|e| panic!("write err={:?}", e)); let n = ready!(res).unwrap(); diff --git a/tests/h2-tests/Cargo.toml b/tests/h2-tests/Cargo.toml index ac16043b3..33436f3c4 100644 --- a/tests/h2-tests/Cargo.toml +++ b/tests/h2-tests/Cargo.toml @@ -11,4 +11,4 @@ edition = "2018" h2-support = { path = "../h2-support" } tracing = "0.1.13" futures = { version = "0.3", default-features = false, features = ["alloc"] } -tokio = { version = "0.3.2", features = ["macros", "net", "rt", "io-util"] } +tokio = { version = "1", features = ["macros", "net", "rt", "io-util"] } diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 08019bbae..1b86cadb2 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -940,7 +940,6 @@ async fn recv_no_init_window_then_receive_some_init_window() { #[tokio::test] async fn settings_lowered_capacity_returns_capacity_to_connection() { use futures::channel::oneshot; - use futures::future::{select, Either}; h2_support::trace_init!(); let (io, mut srv) = mock::new(); @@ -972,10 +971,9 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() { // // A timeout is used here to avoid blocking forever if there is a // failure - let result = select(rx2, tokio::time::sleep(Duration::from_secs(5))).await; - if let Either::Right((_, _)) = result { - panic!("Timed out"); - } + let _ = tokio::time::timeout(Duration::from_secs(5), rx2) + .await + .unwrap(); idle_ms(500).await; @@ -1004,10 +1002,9 @@ async fn settings_lowered_capacity_returns_capacity_to_connection() { }); // Wait for server handshake to complete. 
- let result = select(rx1, tokio::time::sleep(Duration::from_secs(5))).await; - if let Either::Right((_, _)) = result { - panic!("Timed out"); - } + let _ = tokio::time::timeout(Duration::from_secs(5), rx1) + .await + .unwrap(); let request = Request::post("https://example.com/one").body(()).unwrap(); From eec547d0dd76612b8743fbfbb28afa09df58ebdd Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 23 Dec 2020 10:05:05 -0800 Subject: [PATCH 022/178] v0.3.0 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 -- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 25a273e8a..be0116e7d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.0 (December 23, 2020) + +* Update to Tokio v1 and Bytes v1. +* Disable `tracing`'s `log` feature. (It can still be enabled by a user in their own `Cargo.toml`.) + # 0.2.7 (October 22, 2020) * Fix stream ref count when sending a push promise diff --git a/Cargo.toml b/Cargo.toml index b6668993d..42f78b808 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,8 +20,6 @@ categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] edition = "2018" -publish = false - [features] # Enables `futures::Stream` implementations for various types. stream = [] From 2c8c847cd5981a3b3e08b768b6c6cfd707422415 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Fri, 5 Feb 2021 09:27:27 -0800 Subject: [PATCH 023/178] Replace deprecated compare_and_swap with compare_exchange (#514) The `compare_and_swap` method on atomics is now deprecated in favor of `compare_exchange`. Since the author of #510 closed that PR, this is just #510 with rustfmt run. I also removed an unnecessary trailing semicolon that the latest rust compiler now complains about. Signed-off-by: Eliza Weisman Co-authored-by: Kornel --- src/proto/ping_pong.rs | 45 ++++++++++++++++++++++++++++-------------- src/server.rs | 2 +- 2 files changed, 31 insertions(+), 16 deletions(-) diff --git a/src/proto/ping_pong.rs b/src/proto/ping_pong.rs index e0442c838..844c5fbb9 100644 --- a/src/proto/ping_pong.rs +++ b/src/proto/ping_pong.rs @@ -211,11 +211,16 @@ impl ReceivedPing { impl UserPings { pub(crate) fn send_ping(&self) -> Result<(), Option> { - let prev = self.0.state.compare_and_swap( - USER_STATE_EMPTY, // current - USER_STATE_PENDING_PING, // new - Ordering::AcqRel, - ); + let prev = self + .0 + .state + .compare_exchange( + USER_STATE_EMPTY, // current + USER_STATE_PENDING_PING, // new + Ordering::AcqRel, + Ordering::Acquire, + ) + .unwrap_or_else(|v| v); match prev { USER_STATE_EMPTY => { @@ -234,11 +239,16 @@ impl UserPings { // Must register before checking state, in case state were to change // before we could register, and then the ping would just be lost. 
self.0.pong_task.register(cx.waker());
- let prev = self.0.state.compare_and_swap(
- USER_STATE_RECEIVED_PONG, // current
- USER_STATE_EMPTY, // new
- Ordering::AcqRel,
- );
+ let prev = self
+ .0
+ .state
+ .compare_exchange(
+ USER_STATE_RECEIVED_PONG, // current
+ USER_STATE_EMPTY, // new
+ Ordering::AcqRel,
+ Ordering::Acquire,
+ )
+ .unwrap_or_else(|v| v);
 match prev {
 USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())),
@@ -252,11 +262,16 @@ impl UserPings {
 impl UserPingsRx {
 fn receive_pong(&self) -> bool {
- let prev = self.0.state.compare_and_swap(
- USER_STATE_PENDING_PONG, // current
- USER_STATE_RECEIVED_PONG, // new
- Ordering::AcqRel,
- );
+ let prev = self
+ .0
+ .state
+ .compare_exchange(
+ USER_STATE_PENDING_PONG, // current
+ USER_STATE_RECEIVED_PONG, // new
+ Ordering::AcqRel,
+ Ordering::Acquire,
+ )
+ .unwrap_or_else(|v| v);
 if prev == USER_STATE_PENDING_PONG {
 self.0.pong_task.wake();
diff --git a/src/server.rs b/src/server.rs
index 32433121a..f6b43ef08 100644
--- a/src/server.rs
+++ b/src/server.rs
@@ -1371,7 +1371,7 @@ impl proto::Peer for Peer {
 reason: Reason::PROTOCOL_ERROR,
 });
 }}
- };
+ }
 b = b.version(Version::HTTP_2);

From 978c71270a952b43fc0004ae6363932bb15f11c5 Mon Sep 17 00:00:00 2001
From: Eliza Weisman
Date: Fri, 5 Feb 2021 09:58:10 -0800
Subject: [PATCH 024/178] add `Connection::max_concurrent_send_streams` (#513)

This PR adds accessors to `client::Connection` and `server::Connection` that return the send stream concurrency limit on that connection, as negotiated by the remote peer. This is part of issue #512.

I think we probably ought to expose similar accessors for other settings, but I thought it was better to add each one in a separate, focused PR.

Signed-off-by: Eliza Weisman
---
 src/client.rs | 13 +++++++++++++
 src/proto/connection.rs | 6 ++++++
 src/proto/streams/counts.rs | 6 ++++++
 src/proto/streams/streams.rs | 4 ++++
 src/server.rs | 13 +++++++++++++
 5 files changed, 42 insertions(+)

diff --git a/src/client.rs b/src/client.rs
index 1233f468f..9d0e8c879 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -1228,6 +1228,19 @@ where
 pub fn ping_pong(&mut self) -> Option {
 self.inner.take_user_pings().map(PingPong::new)
 }
+
+ /// Returns the maximum number of concurrent streams that may be initiated
+ /// by this client.
+ ///
+ /// This limit is configured by the server peer by sending the
+ /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][settings] in a `SETTINGS`
+ /// frame. This method returns the currently acknowledged value received
+ /// from the remote.
+ ///
+ /// [settings]: https://tools.ietf.org/html/rfc7540#section-5.1.2
+ pub fn max_concurrent_send_streams(&self) -> usize {
+ self.inner.max_send_streams()
+ }
 }
 impl Future for Connection
diff --git a/src/proto/connection.rs b/src/proto/connection.rs
index 1c1c8ce1b..887c8f0ce 100644
--- a/src/proto/connection.rs
+++ b/src/proto/connection.rs
@@ -120,6 +120,12 @@ where
 self.settings.send_settings(settings)
 }
+ /// Returns the maximum number of concurrent streams that may be initiated
+ /// by this peer.
+ pub(crate) fn max_send_streams(&self) -> usize {
+ self.streams.max_send_streams()
+ }
+
 /// Returns `Ready` when the connection is ready to receive a frame.
/// /// Returns `RecvError` as this may raise errors that are caused by delayed diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index a1b7c1df3..bb22ee44a 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -167,6 +167,12 @@ impl Counts { } } + /// Returns the maximum number of streams that can be initiated by this + /// peer. + pub(crate) fn max_send_streams(&self) -> usize { + self.max_send_streams + } + fn dec_num_streams(&mut self, stream: &mut store::Ptr) { assert!(stream.is_counted); diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 79a28e3bd..7e9b4035a 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -836,6 +836,10 @@ where Ok(()) } + pub(crate) fn max_send_streams(&self) -> usize { + self.inner.lock().unwrap().counts.max_send_streams() + } + #[cfg(feature = "unstable")] pub fn num_active_streams(&self) -> usize { let me = self.inner.lock().unwrap(); diff --git a/src/server.rs b/src/server.rs index f6b43ef08..97297e3e1 100644 --- a/src/server.rs +++ b/src/server.rs @@ -529,6 +529,19 @@ where pub fn ping_pong(&mut self) -> Option { self.connection.take_user_pings().map(PingPong::new) } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by the server on this connection. + /// + /// This limit is configured by the client peer by sending the + /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][settings] in a `SETTINGS` + /// frame. This method returns the currently acknowledged value recieved + /// from the remote. + /// + /// [settings]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + pub fn max_concurrent_send_streams(&self) -> usize { + self.connection.max_send_streams() + } } #[cfg(feature = "stream")] From 2b05c13298011c2cbce2c10d299a5a0f40d22fab Mon Sep 17 00:00:00 2001 From: nickelc Date: Tue, 9 Feb 2021 14:49:12 +0100 Subject: [PATCH 025/178] Re-enable the akamai example (#518) --- Cargo.toml | 8 ++++---- examples/akamai.rs | 5 ----- 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 42f78b808..e68eb81e8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,7 +68,7 @@ serde_json = "1.0.0" # Examples tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "net"] } env_logger = { version = "0.5.3", default-features = false } -#rustls = "0.18" -#tokio-rustls = "0.20.0" -#webpki = "0.21" -#webpki-roots = "0.17" +rustls = "0.19" +tokio-rustls = "0.22" +webpki = "0.21" +webpki-roots = "0.21" diff --git a/examples/akamai.rs b/examples/akamai.rs index ebd09ad0d..29d8a9347 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -1,7 +1,3 @@ -fn main() { - eprintln!("TODO: Re-enable when tokio-rustls is upgraded."); -} -/* use h2::client; use http::{Method, Request}; use tokio::net::TcpStream; @@ -77,4 +73,3 @@ pub async fn main() -> Result<(), Box> { } Ok(()) } -*/ From 9049e468c86b94fb041359bf3a7e7184b1372041 Mon Sep 17 00:00:00 2001 From: nickelc Date: Tue, 9 Feb 2021 14:50:11 +0100 Subject: [PATCH 026/178] Remove the obsolent tracing-future dependency (#517) --- Cargo.toml | 3 +-- src/client.rs | 2 +- src/server.rs | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e68eb81e8..4894e5e4a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,8 +47,7 @@ tokio-util = { version = "0.6", features = ["codec"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" -tracing = { version = "0.1.13", default-features = false, features 
= ["std"] } -tracing-futures = { version = "0.2", default-features = false, features = ["std-future"]} +tracing = { version = "0.1.21", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" indexmap = "1.0" diff --git a/src/client.rs b/src/client.rs index 9d0e8c879..099e1f921 100644 --- a/src/client.rs +++ b/src/client.rs @@ -149,7 +149,7 @@ use std::task::{Context, Poll}; use std::time::Duration; use std::usize; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; -use tracing_futures::Instrument; +use tracing::Instrument; /// Initializes new HTTP/2.0 streams on a connection by sending a request. /// diff --git a/src/server.rs b/src/server.rs index 97297e3e1..d2b799af7 100644 --- a/src/server.rs +++ b/src/server.rs @@ -128,7 +128,7 @@ use std::task::{Context, Poll}; use std::time::Duration; use std::{convert, fmt, io, mem}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing_futures::{Instrument, Instrumented}; +use tracing::instrument::{Instrument, Instrumented}; /// In progress HTTP/2.0 connection handshake future. /// From fb78fe960665cf0802fed6d17a31b5bd73c8b715 Mon Sep 17 00:00:00 2001 From: Kestrer Date: Mon, 15 Feb 2021 23:03:35 +0000 Subject: [PATCH 027/178] Explicitly enable the `std` feature of indexmap (#519) * Explicitly enable the `std` feature of indexmap This crate depends on it anyway, and by explicitly turning it on we avoid unreliable platform target detection that causes build failures on some platforms. * Bump indexmap to 1.5.2 This allows use of the `std` feature. Co-authored-by: Taiki Endo Co-authored-by: Taiki Endo --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 4894e5e4a..ab730b332 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ http = "0.2" tracing = { version = "0.1.21", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" -indexmap = "1.0" +indexmap = { version = "1.5.2", features = ["std"] } [dev-dependencies] From fe938cb81ced6fe73b2cb4ae5a4c0134d6d80497 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Tue, 16 Feb 2021 20:35:34 +0100 Subject: [PATCH 028/178] Fix the macro param name in set_pseudo (fixes #472) (#520) --- src/frame/headers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index ad37393b4..5b9fd8aea 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -825,7 +825,7 @@ impl HeaderBlock { } else { let __val = $val; headers_size += - decoded_header_size(stringify!($ident).len() + 1, __val.as_str().len()); + decoded_header_size(stringify!($field).len() + 1, __val.as_str().len()); if headers_size < max_header_list_size { self.pseudo.$field = Some(__val); } else if !self.is_over_size { From 6357e3256adf15de6b94181e787ff5b7b1cc79b0 Mon Sep 17 00:00:00 2001 From: Kornel Date: Tue, 16 Feb 2021 20:21:29 +0000 Subject: [PATCH 029/178] de-generify FramedRead::decode_frame (#509) * de-generify FramedRead::decode_frame * Rename arg to decode_frame Co-authored-by: Dan Burkert --- src/codec/framed_read.rs | 500 ++++++++++++++++++++------------------- 1 file changed, 256 insertions(+), 244 deletions(-) diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 8bba12545..9673c49a8 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -59,249 +59,6 @@ impl FramedRead { } } - fn decode_frame(&mut self, mut bytes: BytesMut) -> Result, RecvError> { - use self::RecvError::*; - let span = 
tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); - let _e = span.enter(); - - tracing::trace!("decoding frame from {}B", bytes.len()); - - // Parse the head - let head = frame::Head::parse(&bytes); - - if self.partial.is_some() && head.kind() != Kind::Continuation { - proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - - let kind = head.kind(); - - tracing::trace!(frame.kind = ?kind); - - macro_rules! header_block { - ($frame:ident, $head:ident, $bytes:ident) => ({ - // Drop the frame header - // TODO: Change to drain: carllerche/bytes#130 - let _ = $bytes.split_to(frame::HEADER_LEN); - - // Parse the header frame w/o parsing the payload - let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { - Ok(res) => res, - Err(frame::Error::InvalidDependencyId) => { - proto_err!(stream: "invalid HEADERS dependency ID"); - // A stream cannot depend on itself. An endpoint MUST - // treat this as a stream error (Section 5.4.2) of type - // `PROTOCOL_ERROR`. - return Err(Stream { - id: $head.stream_id(), - reason: Reason::PROTOCOL_ERROR, - }); - }, - Err(e) => { - proto_err!(conn: "failed to load frame; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - }; - - let is_end_headers = frame.is_end_headers(); - - // Load the HPACK encoded headers - match frame.load_hpack(&mut payload, self.max_header_list_size, &mut self.hpack) { - Ok(_) => {}, - Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, - Err(frame::Error::MalformedMessage) => { - let id = $head.stream_id(); - proto_err!(stream: "malformed header block; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); - }, - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - } - - if is_end_headers { - frame.into() - } else { - tracing::trace!("loaded partial header block"); - // Defer returning the frame - self.partial = Some(Partial { - frame: Continuable::$frame(frame), - buf: payload, - }); - - return Ok(None); - } - }); - } - - let frame = match kind { - Kind::Settings => { - let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Ping => { - let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load PING frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::WindowUpdate => { - let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); - - res.map_err(|e| { - proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Data => { - let _ = bytes.split_to(frame::HEADER_LEN); - let res = frame::Data::load(head, bytes.freeze()); - - // TODO: Should this always be connection level? Probably not... - res.map_err(|e| { - proto_err!(conn: "failed to load DATA frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::Headers => header_block!(Headers, head, bytes), - Kind::Reset => { - let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load RESET frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? 
- .into() - } - Kind::GoAway => { - let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); - res.map_err(|e| { - proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) - })? - .into() - } - Kind::PushPromise => header_block!(PushPromise, head, bytes), - Kind::Priority => { - if head.stream_id() == 0 { - // Invalid stream identifier - proto_err!(conn: "invalid stream ID 0"); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - - match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { - Ok(frame) => frame.into(), - Err(frame::Error::InvalidDependencyId) => { - // A stream cannot depend on itself. An endpoint MUST - // treat this as a stream error (Section 5.4.2) of type - // `PROTOCOL_ERROR`. - let id = head.stream_id(); - proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); - } - Err(e) => { - proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - } - } - Kind::Continuation => { - let is_end_headers = (head.flag() & 0x4) == 0x4; - - let mut partial = match self.partial.take() { - Some(partial) => partial, - None => { - proto_err!(conn: "received unexpected CONTINUATION frame"); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - }; - - // The stream identifiers must match - if partial.frame.stream_id() != head.stream_id() { - proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - - // Extend the buf - if partial.buf.is_empty() { - partial.buf = bytes.split_off(frame::HEADER_LEN); - } else { - if partial.frame.is_over_size() { - // If there was left over bytes previously, they may be - // needed to continue decoding, even though we will - // be ignoring this frame. This is done to keep the HPACK - // decoder state up-to-date. - // - // Still, we need to be careful, because if a malicious - // attacker were to try to send a gigantic string, such - // that it fits over multiple header blocks, we could - // grow memory uncontrollably again, and that'd be a shame. - // - // Instead, we use a simple heuristic to determine if - // we should continue to ignore decoding, or to tell - // the attacker to go away. - if partial.buf.len() + bytes.len() > self.max_header_list_size { - proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); - return Err(Connection(Reason::COMPRESSION_ERROR)); - } - } - partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); - } - - match partial.frame.load_hpack( - &mut partial.buf, - self.max_header_list_size, - &mut self.hpack, - ) { - Ok(_) => {} - Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) - if !is_end_headers => {} - Err(frame::Error::MalformedMessage) => { - let id = head.stream_id(); - proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); - } - Err(e) => { - proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); - } - } - - if is_end_headers { - partial.frame.into() - } else { - self.partial = Some(partial); - return Ok(None); - } - } - Kind::Unknown => { - // Unknown frames are ignored - return Ok(None); - } - }; - - Ok(Some(frame)) - } - pub fn get_ref(&self) -> &T { self.inner.get_ref() } @@ -333,6 +90,255 @@ impl FramedRead { } } +/// Decodes a frame. 
+/// +/// This method is intentionally de-generified and outlined because it is very large. +fn decode_frame( + hpack: &mut hpack::Decoder, + max_header_list_size: usize, + partial_inout: &mut Option, + mut bytes: BytesMut, +) -> Result, RecvError> { + use self::RecvError::*; + let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); + let _e = span.enter(); + + tracing::trace!("decoding frame from {}B", bytes.len()); + + // Parse the head + let head = frame::Head::parse(&bytes); + + if partial_inout.is_some() && head.kind() != Kind::Continuation { + proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + + let kind = head.kind(); + + tracing::trace!(frame.kind = ?kind); + + macro_rules! header_block { + ($frame:ident, $head:ident, $bytes:ident) => ({ + // Drop the frame header + // TODO: Change to drain: carllerche/bytes#130 + let _ = $bytes.split_to(frame::HEADER_LEN); + + // Parse the header frame w/o parsing the payload + let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) { + Ok(res) => res, + Err(frame::Error::InvalidDependencyId) => { + proto_err!(stream: "invalid HEADERS dependency ID"); + // A stream cannot depend on itself. An endpoint MUST + // treat this as a stream error (Section 5.4.2) of type + // `PROTOCOL_ERROR`. + return Err(Stream { + id: $head.stream_id(), + reason: Reason::PROTOCOL_ERROR, + }); + }, + Err(e) => { + proto_err!(conn: "failed to load frame; err={:?}", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + }; + + let is_end_headers = frame.is_end_headers(); + + // Load the HPACK encoded headers + match frame.load_hpack(&mut payload, max_header_list_size, hpack) { + Ok(_) => {}, + Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}, + Err(frame::Error::MalformedMessage) => { + let id = $head.stream_id(); + proto_err!(stream: "malformed header block; stream={:?}", id); + return Err(Stream { + id, + reason: Reason::PROTOCOL_ERROR, + }); + }, + Err(e) => { + proto_err!(conn: "failed HPACK decoding; err={:?}", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + } + + if is_end_headers { + frame.into() + } else { + tracing::trace!("loaded partial header block"); + // Defer returning the frame + *partial_inout = Some(Partial { + frame: Continuable::$frame(frame), + buf: payload, + }); + + return Ok(None); + } + }); + } + + let frame = match kind { + Kind::Settings => { + let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::Ping => { + let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + proto_err!(conn: "failed to load PING frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::WindowUpdate => { + let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]); + + res.map_err(|e| { + proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::Data => { + let _ = bytes.split_to(frame::HEADER_LEN); + let res = frame::Data::load(head, bytes.freeze()); + + // TODO: Should this always be connection level? Probably not... + res.map_err(|e| { + proto_err!(conn: "failed to load DATA frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })? 
+ .into() + } + Kind::Headers => header_block!(Headers, head, bytes), + Kind::Reset => { + let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); + res.map_err(|e| { + proto_err!(conn: "failed to load RESET frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::GoAway => { + let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); + res.map_err(|e| { + proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); + Connection(Reason::PROTOCOL_ERROR) + })? + .into() + } + Kind::PushPromise => header_block!(PushPromise, head, bytes), + Kind::Priority => { + if head.stream_id() == 0 { + // Invalid stream identifier + proto_err!(conn: "invalid stream ID 0"); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + + match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { + Ok(frame) => frame.into(), + Err(frame::Error::InvalidDependencyId) => { + // A stream cannot depend on itself. An endpoint MUST + // treat this as a stream error (Section 5.4.2) of type + // `PROTOCOL_ERROR`. + let id = head.stream_id(); + proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); + return Err(Stream { + id, + reason: Reason::PROTOCOL_ERROR, + }); + } + Err(e) => { + proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + } + } + Kind::Continuation => { + let is_end_headers = (head.flag() & 0x4) == 0x4; + + let mut partial = match partial_inout.take() { + Some(partial) => partial, + None => { + proto_err!(conn: "received unexpected CONTINUATION frame"); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + }; + + // The stream identifiers must match + if partial.frame.stream_id() != head.stream_id() { + proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + + // Extend the buf + if partial.buf.is_empty() { + partial.buf = bytes.split_off(frame::HEADER_LEN); + } else { + if partial.frame.is_over_size() { + // If there was left over bytes previously, they may be + // needed to continue decoding, even though we will + // be ignoring this frame. This is done to keep the HPACK + // decoder state up-to-date. + // + // Still, we need to be careful, because if a malicious + // attacker were to try to send a gigantic string, such + // that it fits over multiple header blocks, we could + // grow memory uncontrollably again, and that'd be a shame. + // + // Instead, we use a simple heuristic to determine if + // we should continue to ignore decoding, or to tell + // the attacker to go away. 
+ if partial.buf.len() + bytes.len() > max_header_list_size { + proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); + return Err(Connection(Reason::COMPRESSION_ERROR)); + } + } + partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); + } + + match partial + .frame + .load_hpack(&mut partial.buf, max_header_list_size, hpack) + { + Ok(_) => {} + Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {} + Err(frame::Error::MalformedMessage) => { + let id = head.stream_id(); + proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); + return Err(Stream { + id, + reason: Reason::PROTOCOL_ERROR, + }); + } + Err(e) => { + proto_err!(conn: "failed HPACK decoding; err={:?}", e); + return Err(Connection(Reason::PROTOCOL_ERROR)); + } + } + + if is_end_headers { + partial.frame.into() + } else { + *partial_inout = Some(partial); + return Ok(None); + } + } + Kind::Unknown => { + // Unknown frames are ignored + return Ok(None); + } + }; + + Ok(Some(frame)) +} + impl Stream for FramedRead where T: AsyncRead + Unpin, @@ -351,7 +357,13 @@ where }; tracing::trace!(read.bytes = bytes.len()); - if let Some(frame) = self.decode_frame(bytes)? { + let Self { + ref mut hpack, + max_header_list_size, + ref mut partial, + .. + } = *self; + if let Some(frame) = decode_frame(hpack, max_header_list_size, partial, bytes)? { tracing::debug!(?frame, "received"); return Poll::Ready(Some(Ok(frame))); } From 30ca83279056390bab51358844ab5a975b49d640 Mon Sep 17 00:00:00 2001 From: Markus Westerlind Date: Thu, 18 Feb 2021 20:17:49 +0100 Subject: [PATCH 030/178] Make some functions less-generic to reduce binary bloat (#503) * refactor: Extract FramedWrite::buffer to a less generic function Should cut out another 23 KiB (since I see it duplicated) * refactor: Extract some duplicated code to a function * refactor: Extract part of flush into a less generic function * refactor: Extract a less generic part of connection * refactor: Factor out a less generic part of Connection::poll2 * refactor: Extract a non-generic part of handshake2 * refactor: Don't duplicate Streams code on Peer (-3.5%) The `P: Peer` parameter is rarely used and there is already a mechanism for using it dynamically. 
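  As a rough sketch of the pattern applied throughout this change (the names
  below are made up for illustration and do not appear in the diff): keep the
  generic signature as a thin shim and move the body into a function that only
  takes concrete types, so the body is monomorphized once instead of once per
  type-parameter combination.

      fn process<T: AsRef<str>>(input: T) -> usize {
          // Generic shim: convert to a concrete type, then delegate.
          process_inner(input.as_ref())
      }

      fn process_inner(input: &str) -> usize {
          // All of the real work lives here and is compiled exactly once.
          input.split_whitespace().count()
      }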
* refactor: Make recv_frame less generic (-2.3%) * Move out part of Connection::poll * refactor: Extract parts of Connection * refactor: Extract a non-generic part of reclaim_frame * comments --- src/client.rs | 21 +- src/codec/framed_write.rs | 246 ++++++---- src/proto/connection.rs | 453 +++++++++++------- src/proto/mod.rs | 2 +- src/proto/streams/mod.rs | 2 +- src/proto/streams/prioritize.rs | 76 +-- src/proto/streams/streams.rs | 787 ++++++++++++++++++-------------- 7 files changed, 931 insertions(+), 656 deletions(-) diff --git a/src/client.rs b/src/client.rs index 099e1f921..62aea854c 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1124,6 +1124,20 @@ where // ===== impl Connection ===== +async fn bind_connection(io: &mut T) -> Result<(), crate::Error> +where + T: AsyncRead + AsyncWrite + Unpin, +{ + tracing::debug!("binding client connection"); + + let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; + io.write_all(msg).await.map_err(crate::Error::from_io)?; + + tracing::debug!("client connection bound"); + + Ok(()) +} + impl Connection where T: AsyncRead + AsyncWrite + Unpin, @@ -1133,12 +1147,7 @@ where mut io: T, builder: Builder, ) -> Result<(SendRequest, Connection), crate::Error> { - tracing::debug!("binding client connection"); - - let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; - io.write_all(msg).await.map_err(crate::Error::from_io)?; - - tracing::debug!("client connection bound"); + bind_connection(&mut io).await?; // Create the codec let mut codec = Codec::new(io); diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 8ec2045ce..b69979ac9 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -23,6 +23,11 @@ pub struct FramedWrite { /// Upstream `AsyncWrite` inner: T, + encoder: Encoder, +} + +#[derive(Debug)] +struct Encoder { /// HPACK encoder hpack: hpack::Encoder, @@ -74,12 +79,14 @@ where let is_write_vectored = inner.is_write_vectored(); FramedWrite { inner, - hpack: hpack::Encoder::default(), - buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), - next: None, - last_data_frame: None, - max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, - is_write_vectored, + encoder: Encoder { + hpack: hpack::Encoder::default(), + buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), + next: None, + last_data_frame: None, + max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, + is_write_vectored, + }, } } @@ -88,11 +95,11 @@ where /// Calling this function may result in the current contents of the buffer /// to be flushed to `T`. pub fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - if !self.has_capacity() { + if !self.encoder.has_capacity() { // Try flushing ready!(self.flush(cx))?; - if !self.has_capacity() { + if !self.encoder.has_capacity() { return Poll::Pending; } } @@ -105,6 +112,128 @@ where /// `poll_ready` must be called first to ensure that a frame may be /// accepted. pub fn buffer(&mut self, item: Frame) -> Result<(), UserError> { + self.encoder.buffer(item) + } + + /// Flush buffered data to the wire + pub fn flush(&mut self, cx: &mut Context) -> Poll> { + let span = tracing::trace_span!("FramedWrite::flush"); + let _e = span.enter(); + + loop { + while !self.encoder.is_empty() { + match self.encoder.next { + Some(Next::Data(ref mut frame)) => { + tracing::trace!(queued_data_frame = true); + let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut()); + ready!(write( + &mut self.inner, + self.encoder.is_write_vectored, + &mut buf, + cx, + ))? 
+ } + _ => { + tracing::trace!(queued_data_frame = false); + ready!(write( + &mut self.inner, + self.encoder.is_write_vectored, + &mut self.encoder.buf, + cx, + ))? + } + } + } + + match self.encoder.unset_frame() { + ControlFlow::Continue => (), + ControlFlow::Break => break, + } + } + + tracing::trace!("flushing buffer"); + // Flush the upstream + ready!(Pin::new(&mut self.inner).poll_flush(cx))?; + + Poll::Ready(Ok(())) + } + + /// Close the codec + pub fn shutdown(&mut self, cx: &mut Context) -> Poll> { + ready!(self.flush(cx))?; + Pin::new(&mut self.inner).poll_shutdown(cx) + } +} + +fn write( + writer: &mut T, + is_write_vectored: bool, + buf: &mut B, + cx: &mut Context<'_>, +) -> Poll> +where + T: AsyncWrite + Unpin, + B: Buf, +{ + // TODO(eliza): when tokio-util 0.5.1 is released, this + // could just use `poll_write_buf`... + const MAX_IOVS: usize = 64; + let n = if is_write_vectored { + let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; + let cnt = buf.chunks_vectored(&mut bufs); + ready!(Pin::new(writer).poll_write_vectored(cx, &bufs[..cnt]))? + } else { + ready!(Pin::new(writer).poll_write(cx, buf.chunk()))? + }; + buf.advance(n); + Ok(()).into() +} + +#[must_use] +enum ControlFlow { + Continue, + Break, +} + +impl Encoder +where + B: Buf, +{ + fn unset_frame(&mut self) -> ControlFlow { + // Clear internal buffer + self.buf.set_position(0); + self.buf.get_mut().clear(); + + // The data frame has been written, so unset it + match self.next.take() { + Some(Next::Data(frame)) => { + self.last_data_frame = Some(frame); + debug_assert!(self.is_empty()); + ControlFlow::Break + } + Some(Next::Continuation(frame)) => { + // Buffer the continuation frame, then try to write again + let mut buf = limited_write_buf!(self); + if let Some(continuation) = frame.encode(&mut self.hpack, &mut buf) { + // We previously had a CONTINUATION, and after encoding + // it, we got *another* one? Let's just double check + // that at least some progress is being made... + if self.buf.get_ref().len() == frame::HEADER_LEN { + // If *only* the CONTINUATION frame header was + // written, and *no* header fields, we're stuck + // in a loop... + panic!("CONTINUATION frame write loop; header value too big to encode"); + } + + self.next = Some(Next::Continuation(continuation)); + } + ControlFlow::Continue + } + None => ControlFlow::Break, + } + } + + fn buffer(&mut self, item: Frame) -> Result<(), UserError> { // Ensure that we have enough capacity to accept the write. assert!(self.has_capacity()); let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item); @@ -185,93 +314,6 @@ where Ok(()) } - /// Flush buffered data to the wire - pub fn flush(&mut self, cx: &mut Context) -> Poll> { - const MAX_IOVS: usize = 64; - - let span = tracing::trace_span!("FramedWrite::flush"); - let _e = span.enter(); - - loop { - while !self.is_empty() { - match self.next { - Some(Next::Data(ref mut frame)) => { - tracing::trace!(queued_data_frame = true); - let mut buf = (&mut self.buf).chain(frame.payload_mut()); - // TODO(eliza): when tokio-util 0.5.1 is released, this - // could just use `poll_write_buf`... - let n = if self.is_write_vectored { - let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; - let cnt = buf.chunks_vectored(&mut bufs); - ready!(Pin::new(&mut self.inner).poll_write_vectored(cx, &bufs[..cnt]))? - } else { - ready!(Pin::new(&mut self.inner).poll_write(cx, buf.chunk()))? 
- }; - buf.advance(n); - } - _ => { - tracing::trace!(queued_data_frame = false); - let n = if self.is_write_vectored { - let mut iovs = [IoSlice::new(&[]); MAX_IOVS]; - let cnt = self.buf.chunks_vectored(&mut iovs); - ready!( - Pin::new(&mut self.inner).poll_write_vectored(cx, &mut iovs[..cnt]) - )? - } else { - ready!(Pin::new(&mut self.inner).poll_write(cx, &mut self.buf.chunk()))? - }; - self.buf.advance(n); - } - } - } - - // Clear internal buffer - self.buf.set_position(0); - self.buf.get_mut().clear(); - - // The data frame has been written, so unset it - match self.next.take() { - Some(Next::Data(frame)) => { - self.last_data_frame = Some(frame); - debug_assert!(self.is_empty()); - break; - } - Some(Next::Continuation(frame)) => { - // Buffer the continuation frame, then try to write again - let mut buf = limited_write_buf!(self); - if let Some(continuation) = frame.encode(&mut self.hpack, &mut buf) { - // We previously had a CONTINUATION, and after encoding - // it, we got *another* one? Let's just double check - // that at least some progress is being made... - if self.buf.get_ref().len() == frame::HEADER_LEN { - // If *only* the CONTINUATION frame header was - // written, and *no* header fields, we're stuck - // in a loop... - panic!("CONTINUATION frame write loop; header value too big to encode"); - } - - self.next = Some(Next::Continuation(continuation)); - } - } - None => { - break; - } - } - } - - tracing::trace!("flushing buffer"); - // Flush the upstream - ready!(Pin::new(&mut self.inner).poll_flush(cx))?; - - Poll::Ready(Ok(())) - } - - /// Close the codec - pub fn shutdown(&mut self, cx: &mut Context) -> Poll> { - ready!(self.flush(cx))?; - Pin::new(&mut self.inner).poll_shutdown(cx) - } - fn has_capacity(&self) -> bool { self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY } @@ -284,26 +326,32 @@ where } } +impl Encoder { + fn max_frame_size(&self) -> usize { + self.max_frame_size as usize + } +} + impl FramedWrite { /// Returns the max frame size that can be sent pub fn max_frame_size(&self) -> usize { - self.max_frame_size as usize + self.encoder.max_frame_size() } /// Set the peer's max frame size. pub fn set_max_frame_size(&mut self, val: usize) { assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize); - self.max_frame_size = val as FrameSize; + self.encoder.max_frame_size = val as FrameSize; } /// Set the peer's header table size. pub fn set_header_table_size(&mut self, val: usize) { - self.hpack.update_max_size(val); + self.encoder.hpack.update_max_size(val); } /// Retrieve the last data frame that has been sent pub fn take_last_data_frame(&mut self) -> Option> { - self.last_data_frame.take() + self.encoder.last_data_frame.take() } pub fn get_mut(&mut self) -> &mut T { diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 887c8f0ce..d408f7cbb 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -17,6 +17,19 @@ use tokio::io::{AsyncRead, AsyncWrite}; /// An H2 connection #[derive(Debug)] pub(crate) struct Connection +where + P: Peer, +{ + /// Read / write frame values + codec: Codec>, + + inner: ConnectionInner, +} + +// Extracted part of `Connection` which does not depend on `T`. Reduces the amount of duplicated +// method instantiations. +#[derive(Debug)] +struct ConnectionInner where P: Peer, { @@ -29,9 +42,6 @@ where /// graceful shutdown. error: Option, - /// Read / write frame values - codec: Codec>, - /// Pending GOAWAY frames to write. go_away: GoAway, @@ -51,6 +61,18 @@ where _phantom: PhantomData
<P>
, } +struct DynConnection<'a, B: Buf = Bytes> { + state: &'a mut State, + + go_away: &'a mut GoAway, + + streams: DynStreams<'a, B>, + + error: &'a mut Option, + + ping_pong: &'a mut PingPong, +} + #[derive(Debug, Clone)] pub(crate) struct Config { pub next_stream_id: StreamId, @@ -79,51 +101,56 @@ where B: Buf, { pub fn new(codec: Codec>, config: Config) -> Connection { - let streams = Streams::new(streams::Config { - local_init_window_sz: config - .settings - .initial_window_size() - .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), - initial_max_send_streams: config.initial_max_send_streams, - local_next_stream_id: config.next_stream_id, - local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), - local_reset_duration: config.reset_stream_duration, - local_reset_max: config.reset_stream_max, - remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, - remote_max_initiated: config - .settings - .max_concurrent_streams() - .map(|max| max as usize), - }); + fn streams_config(config: &Config) -> streams::Config { + streams::Config { + local_init_window_sz: config + .settings + .initial_window_size() + .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), + initial_max_send_streams: config.initial_max_send_streams, + local_next_stream_id: config.next_stream_id, + local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), + local_reset_duration: config.reset_stream_duration, + local_reset_max: config.reset_stream_max, + remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, + remote_max_initiated: config + .settings + .max_concurrent_streams() + .map(|max| max as usize), + } + } + let streams = Streams::new(streams_config(&config)); Connection { - state: State::Open, - error: None, codec, - go_away: GoAway::new(), - ping_pong: PingPong::new(), - settings: Settings::new(config.settings), - streams, - span: tracing::debug_span!("Connection", peer = %P::NAME), - _phantom: PhantomData, + inner: ConnectionInner { + state: State::Open, + error: None, + go_away: GoAway::new(), + ping_pong: PingPong::new(), + settings: Settings::new(config.settings), + streams, + span: tracing::debug_span!("Connection", peer = %P::NAME), + _phantom: PhantomData, + }, } } /// connection flow control pub(crate) fn set_target_window_size(&mut self, size: WindowSize) { - self.streams.set_target_connection_window_size(size); + self.inner.streams.set_target_connection_window_size(size); } /// Send a new SETTINGS frame with an updated initial window size. pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> { let mut settings = frame::Settings::default(); settings.set_initial_window_size(Some(size)); - self.settings.send_settings(settings) + self.inner.settings.send_settings(settings) } /// Returns the maximum number of concurrent streams that may be initiated /// by this peer. pub(crate) fn max_send_streams(&self) -> usize { - self.streams.max_send_streams() + self.inner.streams.max_send_streams() } /// Returns `Ready` when the connection is ready to receive a frame. @@ -131,16 +158,17 @@ where /// Returns `RecvError` as this may raise errors that are caused by delayed /// processing of received frames. 
fn poll_ready(&mut self, cx: &mut Context) -> Poll> { - let _e = self.span.enter(); + let _e = self.inner.span.enter(); let span = tracing::trace_span!("poll_ready"); let _e = span.enter(); // The order of these calls don't really matter too much - ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?; - ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?; + ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?; + ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?; ready!(self + .inner .settings - .poll_send(cx, &mut self.codec, &mut self.streams))?; - ready!(self.streams.send_pending_refusal(cx, &mut self.codec))?; + .poll_send(cx, &mut self.codec, &mut self.inner.streams))?; + ready!(self.inner.streams.send_pending_refusal(cx, &mut self.codec))?; Poll::Ready(Ok(())) } @@ -150,32 +178,15 @@ where /// This will return `Some(reason)` if the connection should be closed /// afterwards. If this is a graceful shutdown, this returns `None`. fn poll_go_away(&mut self, cx: &mut Context) -> Poll>> { - self.go_away.send_pending_go_away(cx, &mut self.codec) - } - - fn go_away(&mut self, id: StreamId, e: Reason) { - let frame = frame::GoAway::new(id, e); - self.streams.send_go_away(id); - self.go_away.go_away(frame); - } - - fn go_away_now(&mut self, e: Reason) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::new(last_processed_id, e); - self.go_away.go_away_now(frame); + self.inner.go_away.send_pending_go_away(cx, &mut self.codec) } pub fn go_away_from_user(&mut self, e: Reason) { - let last_processed_id = self.streams.last_processed_id(); - let frame = frame::GoAway::new(last_processed_id, e); - self.go_away.go_away_from_user(frame); - - // Notify all streams of reason we're abruptly closing. - self.streams.recv_err(&proto::Error::Proto(e)); + self.inner.as_dyn().go_away_from_user(e) } fn take_error(&mut self, ours: Reason) -> Poll> { - let reason = if let Some(theirs) = self.error.take() { + let reason = if let Some(theirs) = self.inner.error.take() { match (ours, theirs) { // If either side reported an error, return that // to the user. @@ -202,13 +213,13 @@ where pub fn maybe_close_connection_if_no_streams(&mut self) { // If we poll() and realize that there are no streams or references // then we can close the connection by transitioning to GOAWAY - if !self.streams.has_streams_or_other_references() { - self.go_away_now(Reason::NO_ERROR); + if !self.inner.streams.has_streams_or_other_references() { + self.inner.as_dyn().go_away_now(Reason::NO_ERROR); } } pub(crate) fn take_user_pings(&mut self) -> Option { - self.ping_pong.take_user_pings() + self.inner.ping_pong.take_user_pings() } /// Advances the internal state of the connection. @@ -217,79 +228,39 @@ where // order to placate the borrow checker โ€” `self` is mutably borrowed by // `poll2`, which means that we can't borrow `self.span` to enter it. // The clone is just an atomic ref bump. 
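        // The same workaround in isolation (`PollLike` is a made-up type for
        // illustration): cloning the span into a local first means entering it
        // does not keep `self` borrowed across the later `&mut self` calls.
        struct PollLike {
            span: tracing::Span,
            polls: usize,
        }

        impl PollLike {
            fn poll_once(&mut self) {
                let span = self.span.clone(); // cheap: just a ref-count bump
                let _e = span.enter();
                self.advance(); // ok: `_e` borrows the local clone, not `self`
            }

            fn advance(&mut self) {
                self.polls += 1;
            }
        }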
- let span = self.span.clone(); + let span = self.inner.span.clone(); let _e = span.enter(); let span = tracing::trace_span!("poll"); let _e = span.enter(); - use crate::codec::RecvError::*; loop { - tracing::trace!(connection.state = ?self.state); + tracing::trace!(connection.state = ?self.inner.state); // TODO: probably clean up this glob of code - match self.state { + match self.inner.state { // When open, continue to poll a frame State::Open => { - match self.poll2(cx) { - // The connection has shutdown normally - Poll::Ready(Ok(())) => self.state = State::Closing(Reason::NO_ERROR), + let result = match self.poll2(cx) { + Poll::Ready(result) => result, // The connection is not ready to make progress Poll::Pending => { // Ensure all window updates have been sent. // // This will also handle flushing `self.codec` - ready!(self.streams.poll_complete(cx, &mut self.codec))?; + ready!(self.inner.streams.poll_complete(cx, &mut self.codec))?; - if (self.error.is_some() || self.go_away.should_close_on_idle()) - && !self.streams.has_streams() + if (self.inner.error.is_some() + || self.inner.go_away.should_close_on_idle()) + && !self.inner.streams.has_streams() { - self.go_away_now(Reason::NO_ERROR); + self.inner.as_dyn().go_away_now(Reason::NO_ERROR); continue; } return Poll::Pending; } - // Attempting to read a frame resulted in a connection level - // error. This is handled by setting a GOAWAY frame followed by - // terminating the connection. - Poll::Ready(Err(Connection(e))) => { - tracing::debug!(error = ?e, "Connection::poll; connection error"); - - // We may have already sent a GOAWAY for this error, - // if so, don't send another, just flush and close up. - if let Some(reason) = self.go_away.going_away_reason() { - if reason == e { - tracing::trace!(" -> already going away"); - self.state = State::Closing(e); - continue; - } - } + }; - // Reset all active streams - self.streams.recv_err(&e.into()); - self.go_away_now(e); - } - // Attempting to read a frame resulted in a stream level error. - // This is handled by resetting the frame then trying to read - // another frame. - Poll::Ready(Err(Stream { id, reason })) => { - tracing::trace!(?id, ?reason, "stream error"); - self.streams.send_reset(id, reason); - } - // Attempting to read a frame resulted in an I/O error. All - // active streams must be reset. - // - // TODO: Are I/O errors recoverable? - Poll::Ready(Err(Io(e))) => { - tracing::debug!(error = ?e, "Connection::poll; IO error"); - let e = e.into(); - - // Reset all active streams - self.streams.recv_err(&e); - - // Return the error - return Poll::Ready(Err(e)); - } - } + self.inner.as_dyn().handle_poll2_result(result)? } State::Closing(reason) => { tracing::trace!("connection closing after flush"); @@ -297,7 +268,7 @@ where ready!(self.codec.shutdown(cx))?; // Transition the state to error - self.state = State::Closed(reason); + self.inner.state = State::Closed(reason); } State::Closed(reason) => return self.take_error(reason), } @@ -305,8 +276,6 @@ where } fn poll2(&mut self, cx: &mut Context) -> Poll> { - use crate::frame::Frame::*; - // This happens outside of the loop to prevent needing to do a clock // check and then comparison of the queue possibly multiple times a // second (and thus, the clock wouldn't have changed enough to matter). @@ -319,8 +288,8 @@ where // - poll_go_away may buffer a graceful shutdown GOAWAY frame // - If it has, we've also added a PING to be sent in poll_ready if let Some(reason) = ready!(self.poll_go_away(cx)?) 
{ - if self.go_away.should_close_now() { - if self.go_away.is_user_initiated() { + if self.inner.go_away.should_close_now() { + if self.inner.go_away.is_user_initiated() { // A user initiated abrupt shutdown shouldn't return // the same error back to the user. return Poll::Ready(Ok(())); @@ -337,61 +306,20 @@ where } ready!(self.poll_ready(cx))?; - match ready!(Pin::new(&mut self.codec).poll_next(cx)?) { - Some(Headers(frame)) => { - tracing::trace!(?frame, "recv HEADERS"); - self.streams.recv_headers(frame)?; - } - Some(Data(frame)) => { - tracing::trace!(?frame, "recv DATA"); - self.streams.recv_data(frame)?; - } - Some(Reset(frame)) => { - tracing::trace!(?frame, "recv RST_STREAM"); - self.streams.recv_reset(frame)?; - } - Some(PushPromise(frame)) => { - tracing::trace!(?frame, "recv PUSH_PROMISE"); - self.streams.recv_push_promise(frame)?; - } - Some(Settings(frame)) => { - tracing::trace!(?frame, "recv SETTINGS"); - self.settings - .recv_settings(frame, &mut self.codec, &mut self.streams)?; - } - Some(GoAway(frame)) => { - tracing::trace!(?frame, "recv GOAWAY"); - // This should prevent starting new streams, - // but should allow continuing to process current streams - // until they are all EOS. Once they are, State should - // transition to GoAway. - self.streams.recv_go_away(&frame)?; - self.error = Some(frame.reason()); - } - Some(Ping(frame)) => { - tracing::trace!(?frame, "recv PING"); - let status = self.ping_pong.recv_ping(frame); - if status.is_shutdown() { - assert!( - self.go_away.is_going_away(), - "received unexpected shutdown ping" - ); - - let last_processed_id = self.streams.last_processed_id(); - self.go_away(last_processed_id, Reason::NO_ERROR); - } + match self + .inner + .as_dyn() + .recv_frame(ready!(Pin::new(&mut self.codec).poll_next(cx)?))? + { + ReceivedFrame::Settings(frame) => { + self.inner.settings.recv_settings( + frame, + &mut self.codec, + &mut self.inner.streams, + )?; } - Some(WindowUpdate(frame)) => { - tracing::trace!(?frame, "recv WINDOW_UPDATE"); - self.streams.recv_window_update(frame)?; - } - Some(Priority(frame)) => { - tracing::trace!(?frame, "recv PRIORITY"); - // TODO: handle - } - None => { - tracing::trace!("codec closed"); - self.streams.recv_eof(false).expect("mutex poisoned"); + ReceivedFrame::Continue => (), + ReceivedFrame::Done => { return Poll::Ready(Ok(())); } } @@ -399,17 +327,190 @@ where } fn clear_expired_reset_streams(&mut self) { - self.streams.clear_expired_reset_streams(); + self.inner.streams.clear_expired_reset_streams(); } } +impl ConnectionInner +where + P: Peer, + B: Buf, +{ + fn as_dyn(&mut self) -> DynConnection<'_, B> { + let ConnectionInner { + state, + go_away, + streams, + error, + ping_pong, + .. + } = self; + let streams = streams.as_dyn(); + DynConnection { + state, + go_away, + streams, + error, + ping_pong, + } + } +} + +impl DynConnection<'_, B> +where + B: Buf, +{ + fn go_away(&mut self, id: StreamId, e: Reason) { + let frame = frame::GoAway::new(id, e); + self.streams.send_go_away(id); + self.go_away.go_away(frame); + } + + fn go_away_now(&mut self, e: Reason) { + let last_processed_id = self.streams.last_processed_id(); + let frame = frame::GoAway::new(last_processed_id, e); + self.go_away.go_away_now(frame); + } + + fn go_away_from_user(&mut self, e: Reason) { + let last_processed_id = self.streams.last_processed_id(); + let frame = frame::GoAway::new(last_processed_id, e); + self.go_away.go_away_from_user(frame); + + // Notify all streams of reason we're abruptly closing. 
+ self.streams.recv_err(&proto::Error::Proto(e)); + } + + fn handle_poll2_result(&mut self, result: Result<(), RecvError>) -> Result<(), Error> { + use crate::codec::RecvError::*; + match result { + // The connection has shutdown normally + Ok(()) => { + *self.state = State::Closing(Reason::NO_ERROR); + Ok(()) + } + // Attempting to read a frame resulted in a connection level + // error. This is handled by setting a GOAWAY frame followed by + // terminating the connection. + Err(Connection(e)) => { + tracing::debug!(error = ?e, "Connection::poll; connection error"); + + // We may have already sent a GOAWAY for this error, + // if so, don't send another, just flush and close up. + if let Some(reason) = self.go_away.going_away_reason() { + if reason == e { + tracing::trace!(" -> already going away"); + *self.state = State::Closing(e); + return Ok(()); + } + } + + // Reset all active streams + self.streams.recv_err(&e.into()); + self.go_away_now(e); + Ok(()) + } + // Attempting to read a frame resulted in a stream level error. + // This is handled by resetting the frame then trying to read + // another frame. + Err(Stream { id, reason }) => { + tracing::trace!(?id, ?reason, "stream error"); + self.streams.send_reset(id, reason); + Ok(()) + } + // Attempting to read a frame resulted in an I/O error. All + // active streams must be reset. + // + // TODO: Are I/O errors recoverable? + Err(Io(e)) => { + tracing::debug!(error = ?e, "Connection::poll; IO error"); + let e = e.into(); + + // Reset all active streams + self.streams.recv_err(&e); + + // Return the error + Err(e) + } + } + } + + fn recv_frame(&mut self, frame: Option) -> Result { + use crate::frame::Frame::*; + match frame { + Some(Headers(frame)) => { + tracing::trace!(?frame, "recv HEADERS"); + self.streams.recv_headers(frame)?; + } + Some(Data(frame)) => { + tracing::trace!(?frame, "recv DATA"); + self.streams.recv_data(frame)?; + } + Some(Reset(frame)) => { + tracing::trace!(?frame, "recv RST_STREAM"); + self.streams.recv_reset(frame)?; + } + Some(PushPromise(frame)) => { + tracing::trace!(?frame, "recv PUSH_PROMISE"); + self.streams.recv_push_promise(frame)?; + } + Some(Settings(frame)) => { + tracing::trace!(?frame, "recv SETTINGS"); + return Ok(ReceivedFrame::Settings(frame)); + } + Some(GoAway(frame)) => { + tracing::trace!(?frame, "recv GOAWAY"); + // This should prevent starting new streams, + // but should allow continuing to process current streams + // until they are all EOS. Once they are, State should + // transition to GoAway. 
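                // The hand-off style used by this de-generified helper, shown in
                // isolation: work that would need the generic codec is reported
                // back through a small enum (as this function does with
                // `ReceivedFrame::Settings`), so only the caller stays generic.
                // `Step`, `next_step` and `drive` are made-up names for illustration.
                enum Step {
                    Done,
                    WriteGreeting,
                }

                // Non-generic part: decides what to do, never touches the writer.
                fn next_step(remaining: usize) -> Step {
                    if remaining == 0 {
                        Step::Done
                    } else {
                        Step::WriteGreeting
                    }
                }

                // Generic part: only the code that really needs `W` stays here.
                fn drive<W: std::io::Write>(w: &mut W, remaining: usize) -> std::io::Result<()> {
                    match next_step(remaining) {
                        Step::WriteGreeting => w.write_all(b"hello\n"),
                        Step::Done => Ok(()),
                    }
                }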
+ self.streams.recv_go_away(&frame)?; + *self.error = Some(frame.reason()); + } + Some(Ping(frame)) => { + tracing::trace!(?frame, "recv PING"); + let status = self.ping_pong.recv_ping(frame); + if status.is_shutdown() { + assert!( + self.go_away.is_going_away(), + "received unexpected shutdown ping" + ); + + let last_processed_id = self.streams.last_processed_id(); + self.go_away(last_processed_id, Reason::NO_ERROR); + } + } + Some(WindowUpdate(frame)) => { + tracing::trace!(?frame, "recv WINDOW_UPDATE"); + self.streams.recv_window_update(frame)?; + } + Some(Priority(frame)) => { + tracing::trace!(?frame, "recv PRIORITY"); + // TODO: handle + } + None => { + tracing::trace!("codec closed"); + self.streams.recv_eof(false).expect("mutex poisoned"); + return Ok(ReceivedFrame::Done); + } + } + Ok(ReceivedFrame::Continue) + } +} + +enum ReceivedFrame { + Settings(frame::Settings), + Continue, + Done, +} + impl Connection where T: AsyncRead + AsyncWrite, B: Buf, { pub(crate) fn streams(&self) -> &Streams { - &self.streams + &self.inner.streams } } @@ -419,12 +520,12 @@ where B: Buf, { pub fn next_incoming(&mut self) -> Option> { - self.streams.next_incoming() + self.inner.streams.next_incoming() } // Graceful shutdown only makes sense for server peers. pub fn go_away_gracefully(&mut self) { - if self.go_away.is_going_away() { + if self.inner.go_away.is_going_away() { // No reason to start a new one. return; } @@ -440,11 +541,11 @@ where // > send another GOAWAY frame with an updated last stream identifier. // > This ensures that a connection can be cleanly shut down without // > losing requests. - self.go_away(StreamId::MAX, Reason::NO_ERROR); + self.inner.as_dyn().go_away(StreamId::MAX, Reason::NO_ERROR); // We take the advice of waiting 1 RTT literally, and wait // for a pong before proceeding. - self.ping_pong.ping_shutdown(); + self.inner.ping_pong.ping_shutdown(); } } @@ -455,6 +556,6 @@ where { fn drop(&mut self) { // Ignore errors as this indicates that the mutex is poisoned. 
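        // The same "best effort under poisoning" idiom in isolation (the function
        // below is illustrative, not part of h2): a lock can only be poisoned if
        // another thread panicked while holding it, so cleanup code in `Drop`
        // simply skips the work instead of panicking again.
        fn best_effort_clear(shared: &std::sync::Mutex<Vec<u8>>) {
            if let Ok(mut guard) = shared.lock() {
                guard.clear();
            }
        }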
- let _ = self.streams.recv_eof(true); + let _ = self.inner.streams.recv_eof(true); } } diff --git a/src/proto/mod.rs b/src/proto/mod.rs index f9e068b58..84fd8542e 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -10,7 +10,7 @@ pub(crate) use self::connection::{Config, Connection}; pub(crate) use self::error::Error; pub(crate) use self::peer::{Dyn as DynPeer, Peer}; pub(crate) use self::ping_pong::UserPings; -pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams}; +pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; pub(crate) use self::streams::{Open, PollReset, Prioritized}; use crate::codec::Codec; diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index 508d9a1e3..608395c0f 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -12,7 +12,7 @@ mod streams; pub(crate) use self::prioritize::Prioritized; pub(crate) use self::recv::Open; pub(crate) use self::send::PollReset; -pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams}; +pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; use self::buffer::Buffer; use self::counts::Counts; diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index b7b616fac..701b8f4b8 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -545,43 +545,57 @@ impl Prioritize { // First check if there are any data chunks to take back if let Some(frame) = dst.take_last_data_frame() { - tracing::trace!( - ?frame, - sz = frame.payload().inner.get_ref().remaining(), - "reclaimed" - ); - - let mut eos = false; - let key = frame.payload().stream; - - match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { - InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), - InFlightData::Drop => { - tracing::trace!("not reclaiming frame for cancelled stream"); - return false; - } - InFlightData::DataFrame(k) => { - debug_assert_eq!(k, key); - } - } + self.reclaim_frame_inner(buffer, store, frame) + } else { + false + } + } + + fn reclaim_frame_inner( + &mut self, + buffer: &mut Buffer>, + store: &mut Store, + frame: frame::Data>, + ) -> bool + where + B: Buf, + { + tracing::trace!( + ?frame, + sz = frame.payload().inner.get_ref().remaining(), + "reclaimed" + ); - let mut frame = frame.map(|prioritized| { - // TODO: Ensure fully written - eos = prioritized.end_of_stream; - prioritized.inner.into_inner() - }); + let mut eos = false; + let key = frame.payload().stream; - if frame.payload().has_remaining() { - let mut stream = store.resolve(key); + match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { + InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), + InFlightData::Drop => { + tracing::trace!("not reclaiming frame for cancelled stream"); + return false; + } + InFlightData::DataFrame(k) => { + debug_assert_eq!(k, key); + } + } - if eos { - frame.set_end_stream(true); - } + let mut frame = frame.map(|prioritized| { + // TODO: Ensure fully written + eos = prioritized.end_of_stream; + prioritized.inner.into_inner() + }); - self.push_back_frame(frame.into(), buffer, &mut stream); + if frame.payload().has_remaining() { + let mut stream = store.resolve(key); - return true; + if eos { + frame.set_end_stream(true); } + + self.push_back_frame(frame.into(), buffer, &mut stream); + + return true; } false diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 7e9b4035a..7ba87eb70 100644 --- 
a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -37,6 +37,17 @@ where _p: ::std::marker::PhantomData
<P>
, } +// Like `Streams` but with a `peer::Dyn` field instead of a static `P: Peer` type parameter. +// Ensures that the methods only get one instantiation, instead of two (client and server) +#[derive(Debug)] +pub(crate) struct DynStreams<'a, B> { + inner: &'a Mutex, + + send_buffer: &'a SendBuffer, + + peer: peer::Dyn, +} + /// Reference to the stream state #[derive(Debug)] pub(crate) struct StreamRef { @@ -101,17 +112,7 @@ where let peer = P::r#dyn(); Streams { - inner: Arc::new(Mutex::new(Inner { - counts: Counts::new(peer, &config), - actions: Actions { - recv: Recv::new(peer, &config), - send: Send::new(&config), - task: None, - conn_error: None, - }, - store: Store::new(), - refs: 1, - })), + inner: Inner::new(peer, config), send_buffer: Arc::new(SendBuffer::new()), _p: ::std::marker::PhantomData, } @@ -126,24 +127,266 @@ where .set_target_connection_window(size, &mut me.actions.task) } - /// Process inbound headers - pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), RecvError> { - let id = frame.stream_id(); + pub fn next_incoming(&mut self) -> Option> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + me.actions.recv.next_incoming(&mut me.store).map(|key| { + let stream = &mut me.store.resolve(key); + tracing::trace!( + "next_incoming; id={:?}, state={:?}", + stream.id, + stream.state + ); + // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding + // the lock, so it can't. + me.refs += 1; + StreamRef { + opaque: OpaqueStreamRef::new(self.inner.clone(), stream), + send_buffer: self.send_buffer.clone(), + } + }) + } + + pub fn send_pending_refusal( + &mut self, + cx: &mut Context, + dst: &mut Codec>, + ) -> Poll> + where + T: AsyncWrite + Unpin, + { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + me.actions.recv.send_pending_refusal(cx, dst) + } + + pub fn clear_expired_reset_streams(&mut self) { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + me.actions + .recv + .clear_expired_reset_streams(&mut me.store, &mut me.counts); + } + + pub fn poll_complete( + &mut self, + cx: &mut Context, + dst: &mut Codec>, + ) -> Poll> + where + T: AsyncWrite + Unpin, + { + let mut me = self.inner.lock().unwrap(); + me.poll_complete(&self.send_buffer, cx, dst) + } + + pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.counts.apply_remote_settings(frame); + + me.actions.send.apply_remote_settings( + frame, + send_buffer, + &mut me.store, + &mut me.counts, + &mut me.actions.task, + ) + } + + pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; + me.actions.recv.apply_local_settings(frame, &mut me.store) + } + + pub fn send_request( + &mut self, + request: Request<()>, + end_of_stream: bool, + pending: Option<&OpaqueStreamRef>, + ) -> Result, SendError> { + use super::stream::ContentLength; + use http::Method; + + // TODO: There is a hazard with assigning a stream ID before the + // prioritize layer. If prioritization reorders new streams, this + // implicitly closes the earlier stream IDs. 
+ // + // See: hyperium/h2#11 + let mut me = self.inner.lock().unwrap(); + let me = &mut *me; + + let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + me.actions.ensure_no_conn_error()?; + me.actions.send.ensure_next_stream_id()?; + + // The `pending` argument is provided by the `Client`, and holds + // a store `Key` of a `Stream` that may have been not been opened + // yet. + // + // If that stream is still pending, the Client isn't allowed to + // queue up another pending stream. They should use `poll_ready`. + if let Some(stream) = pending { + if me.store.resolve(stream.key).is_pending_open { + return Err(UserError::Rejected.into()); + } + } + + if me.counts.peer().is_server() { + // Servers cannot open streams. PushPromise must first be reserved. + return Err(UserError::UnexpectedFrameType.into()); + } + + let stream_id = me.actions.send.open()?; + + let mut stream = Stream::new( + stream_id, + me.actions.send.init_window_sz(), + me.actions.recv.init_window_sz(), + ); + + if *request.method() == Method::HEAD { + stream.content_length = ContentLength::Head; + } + + // Convert the message + let headers = client::Peer::convert_send_message(stream_id, request, end_of_stream)?; + + let mut stream = me.store.insert(stream.id, stream); + + let sent = me.actions.send.send_headers( + headers, + send_buffer, + &mut stream, + &mut me.counts, + &mut me.actions.task, + ); + + // send_headers can return a UserError, if it does, + // we should forget about this stream. + if let Err(err) = sent { + stream.unlink(); + stream.remove(); + return Err(err.into()); + } + + // Given that the stream has been initialized, it should not be in the + // closed state. + debug_assert!(!stream.state.is_closed()); + + // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding + // the lock, so it can't. + me.refs += 1; + + Ok(StreamRef { + opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), + send_buffer: self.send_buffer.clone(), + }) + } +} + +impl DynStreams<'_, B> { + pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + + me.recv_headers(self.peer, &self.send_buffer, frame) + } + + pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + me.recv_data(self.peer, &self.send_buffer, frame) + } + + pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + + me.recv_reset(&self.send_buffer, frame) + } + + /// Handle a received error and return the ID of the last processed stream. 
+ pub fn recv_err(&mut self, err: &proto::Error) -> StreamId { + let mut me = self.inner.lock().unwrap(); + me.recv_err(&self.send_buffer, err) + } + + pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + me.recv_go_away(&self.send_buffer, frame) + } + + pub fn last_processed_id(&self) -> StreamId { + self.inner.lock().unwrap().actions.recv.last_processed_id() + } + + pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + me.recv_window_update(&self.send_buffer, frame) + } + + pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), RecvError> { + let mut me = self.inner.lock().unwrap(); + me.recv_push_promise(&self.send_buffer, frame) + } + + pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { + let mut me = self.inner.lock().map_err(|_| ())?; + me.recv_eof(&self.send_buffer, clear_pending_accept) + } + + pub fn send_reset(&mut self, id: StreamId, reason: Reason) { + let mut me = self.inner.lock().unwrap(); + me.send_reset(&self.send_buffer, id, reason) + } + + pub fn send_go_away(&mut self, last_processed_id: StreamId) { + let mut me = self.inner.lock().unwrap(); + me.actions.recv.go_away(last_processed_id); + } +} + +impl Inner { + fn new(peer: peer::Dyn, config: Config) -> Arc> { + Arc::new(Mutex::new(Inner { + counts: Counts::new(peer, &config), + actions: Actions { + recv: Recv::new(peer, &config), + send: Send::new(&config), + task: None, + conn_error: None, + }, + store: Store::new(), + refs: 1, + })) + } + + fn recv_headers( + &mut self, + peer: peer::Dyn, + send_buffer: &SendBuffer, + frame: frame::Headers, + ) -> Result<(), RecvError> { + let id = frame.stream_id(); + // The GOAWAY process has begun. All streams with a greater ID than // specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { + if id > self.actions.recv.max_stream_id() { tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring HEADERS", id, - me.actions.recv.max_stream_id() + self.actions.recv.max_stream_id() ); return Ok(()); } - let key = match me.store.find_entry(id) { + let key = match self.store.find_entry(id) { Entry::Occupied(e) => e.key(), Entry::Vacant(e) => { // Client: it's possible to send a request, and then send @@ -151,10 +394,10 @@ where // // Server: we can't reset a stream before having received // the request headers, so don't allow. - if !P::is_server() { + if !peer.is_server() { // This may be response headers for a stream we've already // forgotten about... - if me.actions.may_have_forgotten_stream::
<P>
(id) { + if self.actions.may_have_forgotten_stream(peer, id) { tracing::debug!( "recv_headers for old stream={:?}, sending STREAM_CLOSED", id, @@ -166,12 +409,16 @@ where } } - match me.actions.recv.open(id, Open::Headers, &mut me.counts)? { + match self + .actions + .recv + .open(id, Open::Headers, &mut self.counts)? + { Some(stream_id) => { let stream = Stream::new( stream_id, - me.actions.send.init_window_sz(), - me.actions.recv.init_window_sz(), + self.actions.send.init_window_sz(), + self.actions.recv.init_window_sz(), ); e.insert(stream) @@ -181,7 +428,7 @@ where } }; - let stream = me.store.resolve(key); + let stream = self.store.resolve(key); if stream.state.is_local_reset() { // Locally reset streams must ignore frames "for some time". @@ -191,11 +438,11 @@ where return Ok(()); } - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let actions = &mut self.actions; + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - me.counts.transition(stream, |counts, stream| { + self.counts.transition(stream, |counts, stream| { tracing::trace!( "recv_headers; stream={:?}; state={:?}", stream.id, @@ -247,27 +494,29 @@ where }) } - pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - + fn recv_data( + &mut self, + peer: peer::Dyn, + send_buffer: &SendBuffer, + frame: frame::Data, + ) -> Result<(), RecvError> { let id = frame.stream_id(); - let stream = match me.store.find_mut(&id) { + let stream = match self.store.find_mut(&id) { Some(stream) => stream, None => { // The GOAWAY process has begun. All streams with a greater ID // than specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { + if id > self.actions.recv.max_stream_id() { tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring DATA", id, - me.actions.recv.max_stream_id() + self.actions.recv.max_stream_id() ); return Ok(()); } - if me.actions.may_have_forgotten_stream::
<P>
(id) { + if self.actions.may_have_forgotten_stream(peer, id) { tracing::debug!("recv_data for old stream={:?}, sending STREAM_CLOSED", id,); let sz = frame.payload().len(); @@ -276,7 +525,7 @@ where assert!(sz <= super::MAX_WINDOW_SIZE as usize); let sz = sz as WindowSize; - me.actions.recv.ignore_data(sz)?; + self.actions.recv.ignore_data(sz)?; return Err(RecvError::Stream { id, reason: Reason::STREAM_CLOSED, @@ -288,11 +537,11 @@ where } }; - let actions = &mut me.actions; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let actions = &mut self.actions; + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - me.counts.transition(stream, |counts, stream| { + self.counts.transition(stream, |counts, stream| { let sz = frame.payload().len(); let res = actions.recv.recv_data(frame, stream); @@ -308,10 +557,11 @@ where }) } - pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - + fn recv_reset( + &mut self, + send_buffer: &SendBuffer, + frame: frame::Reset, + ) -> Result<(), RecvError> { let id = frame.stream_id(); if id.is_zero() { @@ -321,33 +571,33 @@ where // The GOAWAY process has begun. All streams with a greater ID than // specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { + if id > self.actions.recv.max_stream_id() { tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring RST_STREAM", id, - me.actions.recv.max_stream_id() + self.actions.recv.max_stream_id() ); return Ok(()); } - let stream = match me.store.find_mut(&id) { + let stream = match self.store.find_mut(&id) { Some(stream) => stream, None => { // TODO: Are there other error cases? - me.actions - .ensure_not_idle(me.counts.peer(), id) + self.actions + .ensure_not_idle(self.counts.peer(), id) .map_err(RecvError::Connection)?; return Ok(()); } }; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - let actions = &mut me.actions; + let actions = &mut self.actions; - me.counts.transition(stream, |counts, stream| { + self.counts.transition(stream, |counts, stream| { actions.recv.recv_reset(frame, stream); actions.send.recv_err(send_buffer, stream, counts); assert!(stream.state.is_closed()); @@ -355,19 +605,54 @@ where }) } - /// Handle a received error and return the ID of the last processed stream. - pub fn recv_err(&mut self, err: &proto::Error) -> StreamId { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; + fn recv_window_update( + &mut self, + send_buffer: &SendBuffer, + frame: frame::WindowUpdate, + ) -> Result<(), RecvError> { + let id = frame.stream_id(); - let actions = &mut me.actions; - let counts = &mut me.counts; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; + + if id.is_zero() { + self.actions + .send + .recv_connection_window_update(frame, &mut self.store, &mut self.counts) + .map_err(RecvError::Connection)?; + } else { + // The remote may send window updates for streams that the local now + // considers closed. It's ok... + if let Some(mut stream) = self.store.find_mut(&id) { + // This result is ignored as there is nothing to do when there + // is an error. The stream is reset by the function on error and + // the error is informational. 
+ let _ = self.actions.send.recv_stream_window_update( + frame.size_increment(), + send_buffer, + &mut stream, + &mut self.counts, + &mut self.actions.task, + ); + } else { + self.actions + .ensure_not_idle(self.counts.peer(), id) + .map_err(RecvError::Connection)?; + } + } + + Ok(()) + } + + fn recv_err(&mut self, send_buffer: &SendBuffer, err: &proto::Error) -> StreamId { + let actions = &mut self.actions; + let counts = &mut self.counts; + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; let last_processed_id = actions.recv.last_processed_id(); - me.store + self.store .for_each(|stream| { counts.transition(stream, |counts, stream| { actions.recv.recv_err(err, &mut *stream); @@ -382,13 +667,14 @@ where last_processed_id } - pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let actions = &mut me.actions; - let counts = &mut me.counts; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + fn recv_go_away( + &mut self, + send_buffer: &SendBuffer, + frame: &frame::GoAway, + ) -> Result<(), RecvError> { + let actions = &mut self.actions; + let counts = &mut self.counts; + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; let last_stream_id = frame.last_stream_id(); @@ -397,83 +683,43 @@ where let err = frame.reason().into(); - me.store + self.store .for_each(|stream| { if stream.id > last_stream_id { counts.transition(stream, |counts, stream| { actions.recv.recv_err(&err, &mut *stream); actions.send.recv_err(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) - } else { - Ok::<_, ()>(()) - } - }) - .unwrap(); - - actions.conn_error = Some(err); - - Ok(()) - } - - pub fn last_processed_id(&self) -> StreamId { - self.inner.lock().unwrap().actions.recv.last_processed_id() - } - - pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), RecvError> { - let id = frame.stream_id(); - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; + Ok::<_, ()>(()) + }) + } else { + Ok::<_, ()>(()) + } + }) + .unwrap(); - if id.is_zero() { - me.actions - .send - .recv_connection_window_update(frame, &mut me.store, &mut me.counts) - .map_err(RecvError::Connection)?; - } else { - // The remote may send window updates for streams that the local now - // considers closed. It's ok... - if let Some(mut stream) = me.store.find_mut(&id) { - // This result is ignored as there is nothing to do when there - // is an error. The stream is reset by the function on error and - // the error is informational. - let _ = me.actions.send.recv_stream_window_update( - frame.size_increment(), - send_buffer, - &mut stream, - &mut me.counts, - &mut me.actions.task, - ); - } else { - me.actions - .ensure_not_idle(me.counts.peer(), id) - .map_err(RecvError::Connection)?; - } - } + actions.conn_error = Some(err); Ok(()) } - pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - + fn recv_push_promise( + &mut self, + send_buffer: &SendBuffer, + frame: frame::PushPromise, + ) -> Result<(), RecvError> { let id = frame.stream_id(); let promised_id = frame.promised_id(); // First, ensure that the initiating stream is still in a valid state. 
- let parent_key = match me.store.find_mut(&id) { + let parent_key = match self.store.find_mut(&id) { Some(stream) => { // The GOAWAY process has begun. All streams with a greater ID // than specified as part of GOAWAY should be ignored. - if id > me.actions.recv.max_stream_id() { + if id > self.actions.recv.max_stream_id() { tracing::trace!( "id ({:?}) > max_stream_id ({:?}), ignoring PUSH_PROMISE", id, - me.actions.recv.max_stream_id() + self.actions.recv.max_stream_id() ); return Ok(()); } @@ -493,16 +739,16 @@ where // could grow in memory indefinitely. // Ensure that we can reserve streams - me.actions.recv.ensure_can_reserve()?; + self.actions.recv.ensure_can_reserve()?; // Next, open the stream. // // If `None` is returned, then the stream is being refused. There is no // further work to be done. - if me + if self .actions .recv - .open(promised_id, Open::PushPromise, &mut me.counts)? + .open(promised_id, Open::PushPromise, &mut self.counts)? .is_none() { return Ok(()); @@ -512,23 +758,23 @@ where // this requires a bit of indirection to make the borrow checker happy. let child_key: Option = { // Create state for the stream - let stream = me.store.insert(promised_id, { + let stream = self.store.insert(promised_id, { Stream::new( promised_id, - me.actions.send.init_window_sz(), - me.actions.recv.init_window_sz(), + self.actions.send.init_window_sz(), + self.actions.recv.init_window_sz(), ) }); - let actions = &mut me.actions; + let actions = &mut self.actions; - me.counts.transition(stream, |counts, stream| { + self.counts.transition(stream, |counts, stream| { let stream_valid = actions.recv.recv_push_promise(frame, stream); match stream_valid { Ok(()) => Ok(Some(stream.key())), _ => { - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let mut send_buffer = send_buffer.inner.lock().unwrap(); actions .reset_on_recv_stream_err( &mut *send_buffer, @@ -543,10 +789,10 @@ where }; // If we're successful, push the headers and stream... if let Some(child) = child_key { - let mut ppp = me.store[parent_key].pending_push_promises.take(); - ppp.push(&mut me.store.resolve(child)); + let mut ppp = self.store[parent_key].pending_push_promises.take(); + ppp.push(&mut self.store.resolve(child)); - let parent = &mut me.store.resolve(parent_key); + let parent = &mut self.store.resolve(parent_key); parent.pending_push_promises = ppp; parent.notify_recv(); }; @@ -554,199 +800,78 @@ where Ok(()) } - pub fn next_incoming(&mut self) -> Option> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - me.actions.recv.next_incoming(&mut me.store).map(|key| { - let stream = &mut me.store.resolve(key); - tracing::trace!( - "next_incoming; id={:?}, state={:?}", - stream.id, - stream.state - ); - // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding - // the lock, so it can't. 
- me.refs += 1; - StreamRef { - opaque: OpaqueStreamRef::new(self.inner.clone(), stream), - send_buffer: self.send_buffer.clone(), - } - }) - } - - pub fn send_pending_refusal( + fn recv_eof( &mut self, - cx: &mut Context, - dst: &mut Codec>, - ) -> Poll> - where - T: AsyncWrite + Unpin, - { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - me.actions.recv.send_pending_refusal(cx, dst) - } + send_buffer: &SendBuffer, + clear_pending_accept: bool, + ) -> Result<(), ()> { + let actions = &mut self.actions; + let counts = &mut self.counts; + let mut send_buffer = send_buffer.inner.lock().unwrap(); + let send_buffer = &mut *send_buffer; - pub fn clear_expired_reset_streams(&mut self) { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - me.actions - .recv - .clear_expired_reset_streams(&mut me.store, &mut me.counts); + if actions.conn_error.is_none() { + actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); + } + + tracing::trace!("Streams::recv_eof"); + + self.store + .for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.recv_eof(stream); + + // This handles resetting send state associated with the + // stream + actions.send.recv_err(send_buffer, stream, counts); + Ok::<_, ()>(()) + }) + }) + .expect("recv_eof"); + + actions.clear_queues(clear_pending_accept, &mut self.store, counts); + Ok(()) } - pub fn poll_complete( + fn poll_complete( &mut self, + send_buffer: &SendBuffer, cx: &mut Context, dst: &mut Codec>, ) -> Poll> where T: AsyncWrite + Unpin, + B: Buf, { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; // Send WINDOW_UPDATE frames first // // TODO: It would probably be better to interleave updates w/ data // frames. - ready!(me + ready!(self .actions .recv - .poll_complete(cx, &mut me.store, &mut me.counts, dst))?; + .poll_complete(cx, &mut self.store, &mut self.counts, dst))?; // Send any other pending frames - ready!(me - .actions - .send - .poll_complete(cx, send_buffer, &mut me.store, &mut me.counts, dst))?; + ready!(self.actions.send.poll_complete( + cx, + send_buffer, + &mut self.store, + &mut self.counts, + dst + ))?; // Nothing else to do, track the task - me.actions.task = Some(cx.waker().clone()); + self.actions.task = Some(cx.waker().clone()); Poll::Ready(Ok(())) } - pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.counts.apply_remote_settings(frame); - - me.actions.send.apply_remote_settings( - frame, - send_buffer, - &mut me.store, - &mut me.counts, - &mut me.actions.task, - ) - } - - pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - me.actions.recv.apply_local_settings(frame, &mut me.store) - } - - pub fn send_request( - &mut self, - request: Request<()>, - end_of_stream: bool, - pending: Option<&OpaqueStreamRef>, - ) -> Result, SendError> { - use super::stream::ContentLength; - use http::Method; - - // TODO: There is a hazard with assigning a stream ID before the - // prioritize layer. If prioritization reorders new streams, this - // implicitly closes the earlier stream IDs. 
- // - // See: hyperium/h2#11 - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - me.actions.ensure_no_conn_error()?; - me.actions.send.ensure_next_stream_id()?; - - // The `pending` argument is provided by the `Client`, and holds - // a store `Key` of a `Stream` that may have been not been opened - // yet. - // - // If that stream is still pending, the Client isn't allowed to - // queue up another pending stream. They should use `poll_ready`. - if let Some(stream) = pending { - if me.store.resolve(stream.key).is_pending_open { - return Err(UserError::Rejected.into()); - } - } - - if me.counts.peer().is_server() { - // Servers cannot open streams. PushPromise must first be reserved. - return Err(UserError::UnexpectedFrameType.into()); - } - - let stream_id = me.actions.send.open()?; - - let mut stream = Stream::new( - stream_id, - me.actions.send.init_window_sz(), - me.actions.recv.init_window_sz(), - ); - - if *request.method() == Method::HEAD { - stream.content_length = ContentLength::Head; - } - - // Convert the message - let headers = client::Peer::convert_send_message(stream_id, request, end_of_stream)?; - - let mut stream = me.store.insert(stream.id, stream); - - let sent = me.actions.send.send_headers( - headers, - send_buffer, - &mut stream, - &mut me.counts, - &mut me.actions.task, - ); - - // send_headers can return a UserError, if it does, - // we should forget about this stream. - if let Err(err) = sent { - stream.unlink(); - stream.remove(); - return Err(err.into()); - } - - // Given that the stream has been initialized, it should not be in the - // closed state. - debug_assert!(!stream.state.is_closed()); - - // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding - // the lock, so it can't. - me.refs += 1; - - Ok(StreamRef { - opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), - send_buffer: self.send_buffer.clone(), - }) - } - - pub fn send_reset(&mut self, id: StreamId, reason: Reason) { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - - let key = match me.store.find_entry(id) { + fn send_reset(&mut self, send_buffer: &SendBuffer, id: StreamId, reason: Reason) { + let key = match self.store.find_entry(id) { Entry::Occupied(e) => e.key(), Entry::Vacant(e) => { let stream = Stream::new(id, 0, 0); @@ -755,18 +880,11 @@ where } }; - let stream = me.store.resolve(key); - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); + let stream = self.store.resolve(key); + let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - me.actions - .send_reset(stream, reason, &mut me.counts, send_buffer); - } - - pub fn send_go_away(&mut self, last_processed_id: StreamId) { - let mut me = self.inner.lock().unwrap(); - let me = &mut *me; - let actions = &mut me.actions; - actions.recv.go_away(last_processed_id); + self.actions + .send_reset(stream, reason, &mut self.counts, send_buffer); } } @@ -801,39 +919,24 @@ impl Streams where P: Peer, { + pub fn as_dyn(&self) -> DynStreams { + let Self { + inner, + send_buffer, + _p, + } = self; + DynStreams { + inner, + send_buffer, + peer: P::r#dyn(), + } + } + /// This function is safe to call multiple times. /// /// A `Result` is returned to avoid panicking if the mutex is poisoned. 
pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { - let mut me = self.inner.lock().map_err(|_| ())?; - let me = &mut *me; - - let actions = &mut me.actions; - let counts = &mut me.counts; - let mut send_buffer = self.send_buffer.inner.lock().unwrap(); - let send_buffer = &mut *send_buffer; - - if actions.conn_error.is_none() { - actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); - } - - tracing::trace!("Streams::recv_eof"); - - me.store - .for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.recv_eof(stream); - - // This handles resetting send state associated with the - // stream - actions.send.recv_err(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) - }) - .expect("recv_eof"); - - actions.clear_queues(clear_pending_accept, &mut me.store, counts); - Ok(()) + self.as_dyn().recv_eof(clear_pending_accept) } pub(crate) fn max_send_streams(&self) -> usize { @@ -1398,11 +1501,11 @@ impl Actions { /// is more likely to be latency/memory constraints that caused this, /// and not a bad actor. So be less catastrophic, the spec allows /// us to send another RST_STREAM of STREAM_CLOSED. - fn may_have_forgotten_stream(&self, id: StreamId) -> bool { + fn may_have_forgotten_stream(&self, peer: peer::Dyn, id: StreamId) -> bool { if id.is_zero() { return false; } - if P::is_local_init(id) { + if peer.is_local_init(id) { self.send.may_have_created_stream(id) } else { self.recv.may_have_created_stream(id) From 89d91b0a4f879bede3d3348bed061d33544f2682 Mon Sep 17 00:00:00 2001 From: Kornel Date: Thu, 25 Feb 2021 16:57:42 +0000 Subject: [PATCH 031/178] Ignore 1xx frames (#521) Closes #515 --- src/frame/headers.rs | 11 +++++++ src/proto/streams/recv.rs | 22 ++++++++------ src/proto/streams/state.rs | 10 ++++-- tests/h2-tests/tests/client_request.rs | 42 ++++++++++++++++++++++++++ 4 files changed, 73 insertions(+), 12 deletions(-) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 5b9fd8aea..b65951c21 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -254,6 +254,11 @@ impl Headers { &mut self.header_block.pseudo } + /// Whether it has status 1xx + pub(crate) fn is_informational(&self) -> bool { + self.header_block.pseudo.is_informational() + } + pub fn fields(&self) -> &HeaderMap { &self.header_block.fields } @@ -599,6 +604,12 @@ impl Pseudo { pub fn set_authority(&mut self, authority: BytesStr) { self.authority = Some(authority); } + + /// Whether it has status 1xx + pub(crate) fn is_informational(&self) -> bool { + self.status + .map_or(false, |status| status.is_informational()) + } } // ===== impl EncodingHeaderBlock ===== diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 682200d45..7a6ff8ad2 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -161,7 +161,7 @@ impl Recv { counts: &mut Counts, ) -> Result<(), RecvHeaderBlockError>> { tracing::trace!("opening stream; init_window={}", self.init_window_sz); - let is_initial = stream.state.recv_open(frame.is_end_stream())?; + let is_initial = stream.state.recv_open(&frame)?; if is_initial { // TODO: be smarter about this logic @@ -226,15 +226,17 @@ impl Recv { let stream_id = frame.stream_id(); let (pseudo, fields) = frame.into_parts(); - let message = counts - .peer() - .convert_poll_message(pseudo, fields, stream_id)?; - - // Push the frame onto the stream's recv buffer - stream - .pending_recv - .push_back(&mut self.buffer, Event::Headers(message)); - stream.notify_recv(); + if !pseudo.is_informational() { 
+ let message = counts + .peer() + .convert_poll_message(pseudo, fields, stream_id)?; + + // Push the frame onto the stream's recv buffer + stream + .pending_recv + .push_back(&mut self.buffer, Event::Headers(message)); + stream.notify_recv(); + } // Only servers can receive a headers frame that initiates the stream. // This is verified in `Streams` before calling this function. diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 45ec82f90..08d4dba00 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -2,7 +2,7 @@ use std::io; use crate::codec::UserError::*; use crate::codec::{RecvError, UserError}; -use crate::frame::Reason; +use crate::frame::{self, Reason}; use crate::proto::{self, PollReset}; use self::Inner::*; @@ -132,10 +132,13 @@ impl State { /// Opens the receive-half of the stream when a HEADERS frame is received. /// + /// is_informational: whether received a 1xx status code + /// /// Returns true if this transitions the state to Open. - pub fn recv_open(&mut self, eos: bool) -> Result { + pub fn recv_open(&mut self, frame: &frame::Headers) -> Result { let remote = Streaming; let mut initial = false; + let eos = frame.is_end_stream(); self.inner = match self.inner { Idle => { @@ -172,6 +175,9 @@ impl State { HalfClosedLocal(AwaitingHeaders) => { if eos { Closed(Cause::EndStream) + } else if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + HalfClosedLocal(AwaitingHeaders) } else { HalfClosedLocal(remote) } diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 35b4beacf..23b5dbb50 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1215,6 +1215,48 @@ async fn allow_empty_data_for_head() { join(srv, h2).await; } +#[tokio::test] +async fn early_hints() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(103)).await; + srv.send_frame(frames::headers(1).response(200).field("content-length", 2)) + .await; + srv.send_frame(frames::data(1, "ok").eos()).await; + }; + + let h2 = async move { + let (mut client, h2) = client::Builder::new() + .handshake::<_, Bytes>(io) + .await + .unwrap(); + tokio::spawn(async { + h2.await.expect("connection failed"); + }); + let request = Request::builder() + .method(Method::GET) + .uri("https://example.com/") + .body(()) + .unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + let (ha, mut body) = response.await.unwrap().into_parts(); + eprintln!("{:?}", ha); + assert_eq!(body.data().await.unwrap().unwrap(), "ok"); + }; + + join(srv, h2).await; +} + const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From c1b411fc14ea0d3fd90eb77bebd36c73a5fdbc03 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Thu, 25 Feb 2021 08:58:19 -0800 Subject: [PATCH 032/178] add `Connection::max_concurrent_recv_streams` (#516) This commit adds accessors to `client::Connection` and `server::Connection` that return the current value of the `SETTINGS_MAX_CONCURRENT_STREAMS` limit that has been sent by this peer and acknowledged by the remote. This is analogous to the `max_concurrent_send_streams` methods added in PR #513. 
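As a rough usage sketch (the binding name and transport are illustrative, not part of this change), a caller holding a client `Connection` returned by `handshake` could read both acknowledged limits:

    // `conn` is assumed to be an h2::client::Connection<_, Bytes> driving the socket.
    let recv_limit = conn.max_concurrent_recv_streams(); // limit we sent, acked by the peer
    let send_limit = conn.max_concurrent_send_streams(); // limit the peer sent to us
    println!("acknowledged stream limits: recv={}, send={}", recv_limit, send_limit);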
These accessors may be somewhat less useful than the ones for the values negotiated by the remote, since users who care about this limit are probably setting the builder parameter. However, it seems worth having for completeness sake --- and it might be useful for determining whether or not a configured concurrency limit has been acked yet... Part of #512 --- src/client.rs | 21 ++++++++++++++++++--- src/proto/connection.rs | 6 ++++++ src/proto/streams/counts.rs | 6 ++++++ src/proto/streams/streams.rs | 4 ++++ src/server.rs | 23 +++++++++++++++++++---- 5 files changed, 53 insertions(+), 7 deletions(-) diff --git a/src/client.rs b/src/client.rs index 62aea854c..5bbbaf499 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1242,14 +1242,29 @@ where /// by this client. /// /// This limit is configured by the server peer by sending the - /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][settings] in a `SETTINGS` - /// frame. This method returns the currently acknowledged value recieved - /// from the remote. + /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. /// /// [settings]: https://tools.ietf.org/html/rfc7540#section-5.1.2 pub fn max_concurrent_send_streams(&self) -> usize { self.inner.max_send_streams() } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by the server on this connection. + /// + /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS` + /// parameter][1] sent in a `SETTINGS` frame that has been + /// acknowledged by the remote peer. The value to be sent is configured by + /// the [`Builder::max_concurrent_streams`][2] method before handshaking + /// with the remote peer. + /// + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + /// [2]: ../struct.Builder.html#method.max_concurrent_streams + pub fn max_concurrent_recv_streams(&self) -> usize { + self.inner.max_recv_streams() + } } impl Future for Connection diff --git a/src/proto/connection.rs b/src/proto/connection.rs index d408f7cbb..b44fdcd5c 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -153,6 +153,12 @@ where self.inner.streams.max_send_streams() } + /// Returns the maximum number of concurrent streams that may be initiated + /// by the remote peer. + pub(crate) fn max_recv_streams(&self) -> usize { + self.inner.streams.max_recv_streams() + } + /// Returns `Ready` when the connection is ready to receive a frame. /// /// Returns `RecvError` as this may raise errors that are caused by delayed diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index bb22ee44a..70dfc7851 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -173,6 +173,12 @@ impl Counts { self.max_send_streams } + /// Returns the maximum number of streams that can be initiated by the + /// remote peer. 
+ pub(crate) fn max_recv_streams(&self) -> usize { + self.max_recv_streams + } + fn dec_num_streams(&mut self, stream: &mut store::Ptr) { assert!(stream.is_counted); diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 7ba87eb70..c694203a8 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -943,6 +943,10 @@ where self.inner.lock().unwrap().counts.max_send_streams() } + pub(crate) fn max_recv_streams(&self) -> usize { + self.inner.lock().unwrap().counts.max_recv_streams() + } + #[cfg(feature = "unstable")] pub fn num_active_streams(&self) -> usize { let me = self.inner.lock().unwrap(); diff --git a/src/server.rs b/src/server.rs index d2b799af7..50591bcf5 100644 --- a/src/server.rs +++ b/src/server.rs @@ -534,14 +534,29 @@ where /// by the server on this connection. /// /// This limit is configured by the client peer by sending the - /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][settings] in a `SETTINGS` - /// frame. This method returns the currently acknowledged value recieved - /// from the remote. + /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. /// - /// [settings]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 pub fn max_concurrent_send_streams(&self) -> usize { self.connection.max_send_streams() } + + /// Returns the maximum number of concurrent streams that may be initiated + /// by the client on this connection. + /// + /// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS` + /// parameter][1] sent in a `SETTINGS` frame that has been + /// acknowledged by the remote peer. The value to be sent is configured by + /// the [`Builder::max_concurrent_streams`][2] method before handshaking + /// with the remote peer. + /// + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + /// [2]: ../struct.Builder.html#method.max_concurrent_streams + pub fn max_concurrent_recv_streams(&self) -> usize { + self.connection.max_recv_streams() + } } #[cfg(feature = "stream")] From bcaaaf6dd98d8da69e4755b0bcb3e0f4dd3cfee6 Mon Sep 17 00:00:00 2001 From: Josh Soref Date: Thu, 25 Feb 2021 11:59:18 -0500 Subject: [PATCH 033/178] Spelling fixes in comments (#508) --- src/codec/framed_write.rs | 2 +- src/frame/data.rs | 4 ++-- src/hpack/encoder.rs | 2 +- src/lib.rs | 2 +- src/proto/streams/prioritize.rs | 2 +- src/proto/streams/recv.rs | 2 +- src/server.rs | 4 ++-- tests/h2-tests/tests/client_request.rs | 2 +- tests/h2-tests/tests/stream_states.rs | 4 ++-- 9 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index b69979ac9..4191e03c5 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -55,7 +55,7 @@ enum Next { Continuation(frame::Continuation), } -/// Initialze the connection with this amount of write buffer. +/// Initialize the connection with this amount of write buffer. /// /// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS /// frame that big. diff --git a/src/frame/data.rs b/src/frame/data.rs index 91de52df9..e253d5e23 100644 --- a/src/frame/data.rs +++ b/src/frame/data.rs @@ -36,7 +36,7 @@ impl Data { } } - /// Returns the stream identifer that this frame is associated with. + /// Returns the stream identifier that this frame is associated with. /// /// This cannot be a zero stream identifier. 
pub fn stream_id(&self) -> StreamId { @@ -63,7 +63,7 @@ impl Data { } } - /// Returns whther the `PADDED` flag is set on this frame. + /// Returns whether the `PADDED` flag is set on this frame. #[cfg(feature = "unstable")] pub fn is_padded(&self) -> bool { self.flags.is_padded() diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index ee264d5c6..cafd405e2 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -324,7 +324,7 @@ fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { // Write the string head dst.get_mut()[idx] = 0x80 | huff_len as u8; } else { - // Write the head to a placeholer + // Write the head to a placeholder const PLACEHOLDER_LEN: usize = 8; let mut buf = [0u8; PLACEHOLDER_LEN]; diff --git a/src/lib.rs b/src/lib.rs index 8a3368837..b69aafad5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -126,7 +126,7 @@ pub use codec::{Codec, RecvError, SendError, UserError}; use std::task::Poll; // TODO: Get rid of this trait once https://github.com/rust-lang/rust/pull/63512 -// is stablized. +// is stabilized. trait PollExt { /// Changes the success value of this `Poll` with the closure provided. fn map_ok_(self, f: F) -> Poll>> diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 701b8f4b8..77eb507db 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -18,7 +18,7 @@ use std::{cmp, fmt, mem}; /// This is because "idle" stream IDs โ€“ those which have been initiated but /// have yet to receive frames โ€“ will be implicitly closed on receipt of a /// frame on a higher stream ID. If these queues was not ordered by stream -/// IDs, some mechanism would be necessary to ensure that the lowest-numberedh] +/// IDs, some mechanism would be necessary to ensure that the lowest-numbered] /// idle stream is opened first. #[derive(Debug)] pub(super) struct Prioritize { diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 7a6ff8ad2..252fd8687 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -54,7 +54,7 @@ pub(super) struct Recv { /// Refused StreamId, this represents a frame that must be sent out. refused: Option, - /// If push promises are allowed to be recevied. + /// If push promises are allowed to be received. is_push_enabled: bool, } diff --git a/src/server.rs b/src/server.rs index 50591bcf5..6ad010bd1 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1059,7 +1059,7 @@ impl SendResponse { /// /// # Panics /// - /// If the lock on the strean store has been poisoned. + /// If the lock on the stream store has been poisoned. pub fn stream_id(&self) -> crate::StreamId { crate::StreamId::from_internal(self.inner.stream_id()) } @@ -1131,7 +1131,7 @@ impl SendPushedResponse { /// /// # Panics /// - /// If the lock on the strean store has been poisoned. + /// If the lock on the stream store has been poisoned. 
pub fn stream_id(&self) -> crate::StreamId { self.inner.stream_id() } diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 23b5dbb50..7dc2680b1 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -708,7 +708,7 @@ async fn recv_too_big_headers() { .await; srv.send_frame(frames::headers(1).response(200).eos()).await; srv.send_frame(frames::headers(3).response(200)).await; - // no reset for 1, since it's closed anyways + // no reset for 1, since it's closed anyway // but reset for 3, since server hasn't closed stream srv.recv_frame(frames::reset(3).refused()).await; idle_ms(10).await; diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 16d1a7502..cd2644d06 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -460,7 +460,7 @@ async fn send_rst_stream_allows_recv_data() { srv.send_frame(frames::headers(1).response(200)).await; srv.recv_frame(frames::reset(1).cancel()).await; // sending frames after canceled! - // note: sending 2 to cosume 50% of connection window + // note: sending 2 to consume 50% of connection window srv.send_frame(frames::data(1, vec![0; 16_384])).await; srv.send_frame(frames::data(1, vec![0; 16_384]).eos()).await; // make sure we automatically free the connection window @@ -736,7 +736,7 @@ async fn rst_while_closing() { h2_support::trace_init!(); let (io, mut srv) = mock::new(); - // Rendevous when we've queued a trailers frame + // Rendezvous when we've queued a trailers frame let (tx, rx) = oneshot::channel(); let srv = async move { From 11229702a03626ca1463f9216d7511616711acc7 Mon Sep 17 00:00:00 2001 From: Yuchen Wu Date: Wed, 23 Dec 2020 14:08:50 -0800 Subject: [PATCH 034/178] Return an error instead of panicking when stuck in a CONTINUATION loop It is not rare a large header can trigger such a CONTINUATION loop. While the stream cannot recover from this issue, returning an error instead of panicking makes the impact radius under better control. --- src/codec/framed_write.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 4191e03c5..30888c67f 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -148,6 +148,12 @@ where match self.encoder.unset_frame() { ControlFlow::Continue => (), ControlFlow::Break => break, + ControlFlow::EndlessLoopHeaderTooBig => { + return Poll::Ready(Err(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + UserError::HeaderTooBig, + ))); + } } } @@ -193,6 +199,7 @@ where enum ControlFlow { Continue, Break, + EndlessLoopHeaderTooBig, } impl Encoder @@ -222,7 +229,10 @@ where // If *only* the CONTINUATION frame header was // written, and *no* header fields, we're stuck // in a loop... 
- panic!("CONTINUATION frame write loop; header value too big to encode"); + tracing::warn!( + "CONTINUATION frame write loop; header value too big to encode" + ); + return ControlFlow::EndlessLoopHeaderTooBig; } self.next = Some(Next::Continuation(continuation)); From 7a5b574d8e61fd852538eb6fc7e587640dd5f6d2 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 26 Feb 2021 10:15:58 -0800 Subject: [PATCH 035/178] v0.3.1 --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- src/lib.rs | 4 ++-- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index be0116e7d..c2800fa00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.3.1 (February 26, 2021) + +* Add `Connection::max_concurrent_recv_streams()` getter. +* Add `Connection::max_concurrent_send_streams()` getter. +* Fix client to ignore receipt of 1xx headers frames. +* Fix incorrect calculation of pseudo header lengths when determining if a received header is too big. +* Reduce monomorphized code size of internal code. + # 0.3.0 (December 23, 2020) * Update to Tokio v1 and Bytes v1. diff --git a/Cargo.toml b/Cargo.toml index ab730b332..a683674b2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.0" +version = "0.3.1" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index b69aafad5..1ee37d9aa 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,7 +11,7 @@ //! //! ```toml //! [dependencies] -//! h2 = "0.2" +//! h2 = "0.3" //! ``` //! //! # Layout @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.0")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.1")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From a1f914f46f797a13d3559a690223b732e5006b92 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 24 Mar 2021 16:03:12 +0100 Subject: [PATCH 036/178] Skip 1xx frames in more states (#527) #524 Co-authored-by: Kornel --- src/proto/streams/state.rs | 25 +++++++++++---- tests/h2-tests/tests/client_request.rs | 44 ++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 6 deletions(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 08d4dba00..3e739daf9 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -132,11 +132,8 @@ impl State { /// Opens the receive-half of the stream when a HEADERS frame is received. /// - /// is_informational: whether received a 1xx status code - /// /// Returns true if this transitions the state to Open. 
pub fn recv_open(&mut self, frame: &frame::Headers) -> Result { - let remote = Streaming; let mut initial = false; let eos = frame.is_end_stream(); @@ -149,7 +146,12 @@ impl State { } else { Open { local: AwaitingHeaders, - remote, + remote: if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + AwaitingHeaders + } else { + Streaming + }, } } } @@ -158,6 +160,9 @@ impl State { if eos { Closed(Cause::EndStream) + } else if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + ReservedRemote } else { HalfClosedLocal(Streaming) } @@ -169,7 +174,15 @@ impl State { if eos { HalfClosedRemote(local) } else { - Open { local, remote } + Open { + local, + remote: if frame.is_informational() { + tracing::trace!("skipping 1xx response headers"); + AwaitingHeaders + } else { + Streaming + }, + } } } HalfClosedLocal(AwaitingHeaders) => { @@ -179,7 +192,7 @@ impl State { tracing::trace!("skipping 1xx response headers"); HalfClosedLocal(AwaitingHeaders) } else { - HalfClosedLocal(remote) + HalfClosedLocal(Streaming) } } state => { diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 7dc2680b1..b574df5aa 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1257,6 +1257,50 @@ async fn early_hints() { join(srv, h2).await; } +#[tokio::test] +async fn informational_while_local_streaming() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://example.com/")) + .await; + srv.send_frame(frames::headers(1).response(103)).await; + srv.send_frame(frames::headers(1).response(200).field("content-length", 2)) + .await; + srv.recv_frame(frames::data(1, "hello").eos()).await; + srv.send_frame(frames::data(1, "ok").eos()).await; + }; + + let h2 = async move { + let (mut client, h2) = client::Builder::new() + .handshake::<_, Bytes>(io) + .await + .unwrap(); + tokio::spawn(async { + h2.await.expect("connection failed"); + }); + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + // don't EOS stream yet.. + let (response, mut body_tx) = client.send_request(request, false).unwrap(); + // eventual response is 200, not 103 + let resp = response.await.expect("response"); + // assert_eq!(resp.status(), 200); + // now we can end the stream + body_tx.send_data("hello".into(), true).expect("send_data"); + let mut body = resp.into_body(); + assert_eq!(body.data().await.unwrap().unwrap(), "ok"); + }; + + join(srv, h2).await; +} + const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From 2c53d600989209e263c8157df6e6e2280a9a3355 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 24 Mar 2021 16:57:27 -0700 Subject: [PATCH 037/178] v0.3.2 --- CHANGELOG.md | 4 ++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c2800fa00..e701b8a6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.2 (March 24, 2021) + +* Fix incorrect handling of received 1xx responses on the client when the request body is still streaming. + # 0.3.1 (February 26, 2021) * Add `Connection::max_concurrent_recv_streams()` getter. 
diff --git a/Cargo.toml b/Cargo.toml index a683674b2..c1f481846 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.1" +version = "0.3.2" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.3.0/h2/" +documentation = "https://docs.rs/h2" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 1ee37d9aa..88a63de84 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.1")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.2")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 9c7f47af95ea0c59f6ef630f04865b7245ec1d6c Mon Sep 17 00:00:00 2001 From: DavidKorczynski Date: Fri, 16 Apr 2021 22:58:07 +0100 Subject: [PATCH 038/178] Initial oss-fuzz integration. (#529) Signed-off-by: davkor --- fuzz/.gitignore | 4 + fuzz/Cargo.toml | 43 +++++ fuzz/fuzz_targets/fuzz_client.rs | 31 ++++ fuzz/fuzz_targets/fuzz_e2e.rs | 129 +++++++++++++++ fuzz/fuzz_targets/fuzz_hpack.rs | 6 + src/fuzz_bridge.rs | 28 ++++ src/lib.rs | 4 + tests/h2-fuzz/Cargo.toml | 2 +- tests/h2-fuzz/src/main.rs | 260 +++++++++++++++---------------- 9 files changed, 374 insertions(+), 133 deletions(-) create mode 100644 fuzz/.gitignore create mode 100644 fuzz/Cargo.toml create mode 100644 fuzz/fuzz_targets/fuzz_client.rs create mode 100644 fuzz/fuzz_targets/fuzz_e2e.rs create mode 100644 fuzz/fuzz_targets/fuzz_hpack.rs create mode 100644 src/fuzz_bridge.rs diff --git a/fuzz/.gitignore b/fuzz/.gitignore new file mode 100644 index 000000000..572e03bdf --- /dev/null +++ b/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml new file mode 100644 index 000000000..ca32138e2 --- /dev/null +++ b/fuzz/Cargo.toml @@ -0,0 +1,43 @@ + +[package] +name = "h2-oss-fuzz" +version = "0.0.0" +authors = [ "David Korczynski " ] +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +arbitrary = { version = "1", features = ["derive"] } +libfuzzer-sys = { version = "0.4.0", features = ["arbitrary-derive"] } +tokio = { version = "1", features = [ "full" ] } +bytes = "0.5.2" +h2 = { path = "../", features = [ "unstable" ] } +h2-support = { path = "../tests/h2-support" } +futures = { version = "0.3", default-features = false, features = ["std"] } +http = "0.2" +env_logger = { version = "0.5.3", default-features = false } + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "fuzz_client" +path = "fuzz_targets/fuzz_client.rs" +test = false +doc = false + +[[bin]] +name = "fuzz_hpack" +path = "fuzz_targets/fuzz_hpack.rs" +test = false +doc = false + +[[bin]] +name = "fuzz_e2e" +path = "fuzz_targets/fuzz_e2e.rs" +test = false +doc = false diff --git a/fuzz/fuzz_targets/fuzz_client.rs b/fuzz/fuzz_targets/fuzz_client.rs new file mode 100644 index 000000000..8d558a935 --- /dev/null +++ b/fuzz/fuzz_targets/fuzz_client.rs @@ -0,0 +1,31 @@ +#![no_main] +use h2_support::prelude::*; +use libfuzzer_sys::{arbitrary::Arbitrary, fuzz_target}; + +#[derive(Debug, Arbitrary)] +struct HttpSpec { + uri: Vec, + header_name: Vec, + header_value: Vec, +} + +async fn 
fuzz_entry(inp: HttpSpec) { + if let Ok(req) = Request::builder() + .uri(&inp.uri[..]) + .header(&inp.header_name[..], &inp.header_value[..]) + .body(()) + { + let (io, mut _srv) = mock::new(); + let (mut client, _h2) = client::Builder::new() + .handshake::<_, Bytes>(io) + .await + .unwrap(); + + let (_, _) = client.send_request(req, true).unwrap(); + } +} + +fuzz_target!(|inp: HttpSpec| { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(fuzz_entry(inp)); +}); diff --git a/fuzz/fuzz_targets/fuzz_e2e.rs b/fuzz/fuzz_targets/fuzz_e2e.rs new file mode 100644 index 000000000..a8d021ada --- /dev/null +++ b/fuzz/fuzz_targets/fuzz_e2e.rs @@ -0,0 +1,129 @@ +#![no_main] +use libfuzzer_sys::fuzz_target; + +use futures::future; +use futures::stream::FuturesUnordered; +use futures::Stream; +use http::{Method, Request}; +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + +struct MockIo<'a> { + input: &'a [u8], +} + +impl<'a> MockIo<'a> { + fn next_byte(&mut self) -> Option { + if let Some(&c) = self.input.first() { + self.input = &self.input[1..]; + Some(c) + } else { + None + } + } + + fn next_u32(&mut self) -> u32 { + (self.next_byte().unwrap_or(0) as u32) << 8 | self.next_byte().unwrap_or(0) as u32 + } +} + +impl<'a> AsyncRead for MockIo<'a> { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf, + ) -> Poll> { + + + let mut len = self.next_u32() as usize; + if self.input.is_empty() { + Poll::Ready(Ok(())) + } else if len == 0 { + cx.waker().clone().wake(); + Poll::Pending + } else { + if len > self.input.len() { + len = self.input.len(); + } + + if len > buf.remaining() { + len = buf.remaining(); + } + buf.put_slice(&self.input[len..]); + self.input = &self.input[len..]; + Poll::Ready(Ok(())) + } + } +} + +impl<'a> AsyncWrite for MockIo<'a> { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let len = std::cmp::min(self.next_u32() as usize, buf.len()); + if len == 0 { + if self.input.is_empty() { + Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } else { + cx.waker().clone().wake(); + Poll::Pending + } + } else { + Poll::Ready(Ok(len)) + } + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +async fn run(script: &[u8]) -> Result<(), h2::Error> { + let io = MockIo { input: script }; + let (mut h2, mut connection) = h2::client::handshake(io).await?; + let mut futs = FuturesUnordered::new(); + let future = future::poll_fn(|cx| { + if let Poll::Ready(()) = Pin::new(&mut connection).poll(cx)? 
{ + return Poll::Ready(Ok::<_, h2::Error>(())); + } + while futs.len() < 128 { + if !h2.poll_ready(cx)?.is_ready() { + break; + } + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + let (resp, mut send) = h2.send_request(request, false)?; + send.send_data(vec![0u8; 32769].into(), true).unwrap(); + drop(send); + futs.push(resp); + } + loop { + match Pin::new(&mut futs).poll_next(cx) { + Poll::Pending | Poll::Ready(None) => break, + r @ Poll::Ready(Some(Ok(_))) | r @ Poll::Ready(Some(Err(_))) => { + eprintln!("{:?}", r); + } + } + } + Poll::Pending + }); + future.await?; + Ok(()) +} + +fuzz_target!(|data: &[u8]| { + let rt = tokio::runtime::Runtime::new().unwrap(); + let _res = rt.block_on(run(data)); +}); + diff --git a/fuzz/fuzz_targets/fuzz_hpack.rs b/fuzz/fuzz_targets/fuzz_hpack.rs new file mode 100644 index 000000000..c2597bb94 --- /dev/null +++ b/fuzz/fuzz_targets/fuzz_hpack.rs @@ -0,0 +1,6 @@ +#![no_main] +use libfuzzer_sys::fuzz_target; + +fuzz_target!(|data_: &[u8]| { + let _decoder_ = h2::fuzz_bridge::fuzz_logic::fuzz_hpack(data_); +}); diff --git a/src/fuzz_bridge.rs b/src/fuzz_bridge.rs new file mode 100644 index 000000000..5379a4de8 --- /dev/null +++ b/src/fuzz_bridge.rs @@ -0,0 +1,28 @@ +#[cfg(fuzzing)] +pub mod fuzz_logic { + use crate::hpack; + use bytes::{BufMut, BytesMut}; + use http::header::HeaderName; + use std::io::Cursor; + + pub fn fuzz_hpack(data_: &[u8]) { + let mut decoder_ = hpack::Decoder::new(0); + let mut buf = BytesMut::new(); + buf.extend(data_); + let _dec_res = decoder_.decode(&mut Cursor::new(&mut buf), |_h| {}); + + if let Ok(s) = std::str::from_utf8(data_) { + if let Ok(h) = http::Method::from_bytes(s.as_bytes()) { + let m_ = hpack::Header::Method(h); + let mut encoder = hpack::Encoder::new(0, 0); + let _res = encode(&mut encoder, vec![m_]); + } + } + } + + fn encode(e: &mut hpack::Encoder, hdrs: Vec>>) -> BytesMut { + let mut dst = BytesMut::with_capacity(1024); + e.encode(None, &mut hdrs.into_iter(), &mut (&mut dst).limit(1024)); + dst + } +} diff --git a/src/lib.rs b/src/lib.rs index 88a63de84..2942ac864 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -117,6 +117,10 @@ pub mod client; pub mod server; mod share; +#[cfg(fuzzing)] +#[cfg_attr(feature = "unstable", allow(missing_docs))] +pub mod fuzz_bridge; + pub use crate::error::{Error, Reason}; pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId}; diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index 7fbf4c3f3..524627f31 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -12,4 +12,4 @@ env_logger = { version = "0.5.3", default-features = false } futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" http = "0.2" -tokio = "1" +tokio = { version = "1", features = [ "full" ] } diff --git a/tests/h2-fuzz/src/main.rs b/tests/h2-fuzz/src/main.rs index a57fb76a5..e67a54071 100644 --- a/tests/h2-fuzz/src/main.rs +++ b/tests/h2-fuzz/src/main.rs @@ -1,132 +1,128 @@ -use futures::future; -use futures::stream::FuturesUnordered; -use futures::Stream; -use http::{Method, Request}; -use std::future::Future; -use std::io; -use std::pin::Pin; -use std::task::{Context, Poll}; -use tokio::io::{AsyncRead, AsyncWrite}; - -struct MockIo<'a> { - input: &'a [u8], -} - -impl<'a> MockIo<'a> { - fn next_byte(&mut self) -> Option { - if let Some(&c) = self.input.first() { - self.input = &self.input[1..]; - Some(c) - } else { - None - } - } - - fn 
next_u32(&mut self) -> u32 { - (self.next_byte().unwrap_or(0) as u32) << 8 | self.next_byte().unwrap_or(0) as u32 - } -} - -impl<'a> AsyncRead for MockIo<'a> { - unsafe fn prepare_uninitialized_buffer(&self, _buf: &mut [std::mem::MaybeUninit]) -> bool { - false - } - - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - let mut len = self.next_u32() as usize; - if self.input.is_empty() { - Poll::Ready(Ok(0)) - } else if len == 0 { - cx.waker().clone().wake(); - Poll::Pending - } else { - if len > self.input.len() { - len = self.input.len(); - } - - if len > buf.len() { - len = buf.len(); - } - buf[0..len].copy_from_slice(&self.input[0..len]); - self.input = &self.input[len..]; - Poll::Ready(Ok(len)) - } - } -} - -impl<'a> AsyncWrite for MockIo<'a> { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - let len = std::cmp::min(self.next_u32() as usize, buf.len()); - if len == 0 { - if self.input.is_empty() { - Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) - } else { - cx.waker().clone().wake(); - Poll::Pending - } - } else { - Poll::Ready(Ok(len)) - } - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } -} - -async fn run(script: &[u8]) -> Result<(), h2::Error> { - let io = MockIo { input: script }; - let (mut h2, mut connection) = h2::client::handshake(io).await?; - let mut futs = FuturesUnordered::new(); - let future = future::poll_fn(|cx| { - if let Poll::Ready(()) = Pin::new(&mut connection).poll(cx)? { - return Poll::Ready(Ok::<_, h2::Error>(())); - } - while futs.len() < 128 { - if !h2.poll_ready(cx)?.is_ready() { - break; - } - let request = Request::builder() - .method(Method::POST) - .uri("https://example.com/") - .body(()) - .unwrap(); - let (resp, mut send) = h2.send_request(request, false)?; - send.send_data(vec![0u8; 32769].into(), true).unwrap(); - drop(send); - futs.push(resp); - } - loop { - match Pin::new(&mut futs).poll_next(cx) { - Poll::Pending | Poll::Ready(None) => break, - r @ Poll::Ready(Some(Ok(_))) | r @ Poll::Ready(Some(Err(_))) => { - eprintln!("{:?}", r); - } - } - } - Poll::Pending - }); - future.await?; - Ok(()) -} - -fn main() { - env_logger::init(); - let mut rt = tokio::runtime::Runtime::new().unwrap(); - loop { - honggfuzz::fuzz!(|data: &[u8]| { - eprintln!("{:?}", rt.block_on(run(data))); - }); - } -} +use futures::future; +use futures::stream::FuturesUnordered; +use futures::Stream; +use http::{Method, Request}; +use std::future::Future; +use std::io; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; + +struct MockIo<'a> { + input: &'a [u8], +} + +impl<'a> MockIo<'a> { + fn next_byte(&mut self) -> Option { + if let Some(&c) = self.input.first() { + self.input = &self.input[1..]; + Some(c) + } else { + None + } + } + + fn next_u32(&mut self) -> u32 { + (self.next_byte().unwrap_or(0) as u32) << 8 | self.next_byte().unwrap_or(0) as u32 + } +} + +impl<'a> AsyncRead for MockIo<'a> { + fn poll_read( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &mut ReadBuf, + ) -> Poll> { + let mut len = self.next_u32() as usize; + if self.input.is_empty() { + Poll::Ready(Ok(())) + } else if len == 0 { + cx.waker().clone().wake(); + Poll::Pending + } else { + if len > self.input.len() { + len = self.input.len(); + } + + if len > buf.remaining() { + len = 
buf.remaining(); + } + buf.put_slice(&self.input[len..]); + self.input = &self.input[len..]; + Poll::Ready(Ok(())) + } + } +} + +impl<'a> AsyncWrite for MockIo<'a> { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + let len = std::cmp::min(self.next_u32() as usize, buf.len()); + if len == 0 { + if self.input.is_empty() { + Poll::Ready(Err(io::ErrorKind::BrokenPipe.into())) + } else { + cx.waker().clone().wake(); + Poll::Pending + } + } else { + Poll::Ready(Ok(len)) + } + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } +} + +async fn run(script: &[u8]) -> Result<(), h2::Error> { + let io = MockIo { input: script }; + let (mut h2, mut connection) = h2::client::handshake(io).await?; + let mut futs = FuturesUnordered::new(); + let future = future::poll_fn(|cx| { + if let Poll::Ready(()) = Pin::new(&mut connection).poll(cx)? { + return Poll::Ready(Ok::<_, h2::Error>(())); + } + while futs.len() < 128 { + if !h2.poll_ready(cx)?.is_ready() { + break; + } + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + let (resp, mut send) = h2.send_request(request, false)?; + send.send_data(vec![0u8; 32769].into(), true).unwrap(); + drop(send); + futs.push(resp); + } + loop { + match Pin::new(&mut futs).poll_next(cx) { + Poll::Pending | Poll::Ready(None) => break, + r @ Poll::Ready(Some(Ok(_))) | r @ Poll::Ready(Some(Err(_))) => { + eprintln!("{:?}", r); + } + } + } + Poll::Pending + }); + future.await?; + Ok(()) +} + +fn main() { + env_logger::init(); + let mut rt = tokio::runtime::Runtime::new().unwrap(); + loop { + honggfuzz::fuzz!(|data: &[u8]| { + eprintln!("{:?}", rt.block_on(run(data))); + }); + } +} From 35699e721af35fdf64ecfbb6a19089ee7d845c96 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 22 Apr 2021 14:54:26 -0700 Subject: [PATCH 039/178] Fix fuzzing of client sending to not fail on user errors (#533) --- fuzz/fuzz_targets/fuzz_client.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fuzz/fuzz_targets/fuzz_client.rs b/fuzz/fuzz_targets/fuzz_client.rs index 8d558a935..0b4672653 100644 --- a/fuzz/fuzz_targets/fuzz_client.rs +++ b/fuzz/fuzz_targets/fuzz_client.rs @@ -21,7 +21,10 @@ async fn fuzz_entry(inp: HttpSpec) { .await .unwrap(); - let (_, _) = client.send_request(req, true).unwrap(); + // this could still trigger a user error: + // - if the uri isn't absolute + // - if the header name isn't allowed in http2 (like connection) + let _ = client.send_request(req, true); } } From 10d17e5f6202a611105d6d1e45aa497be19a3af2 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 28 Apr 2021 03:55:46 +0200 Subject: [PATCH 040/178] Don't override empty path for CONNECT requests (#534) --- src/frame/headers.rs | 10 +++++++--- tests/h2-tests/tests/server.rs | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index b65951c21..979ac702e 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -552,15 +552,19 @@ impl Pseudo { .map(|v| Bytes::copy_from_slice(v.as_str().as_bytes())) .unwrap_or_else(Bytes::new); - if path.is_empty() && method != Method::OPTIONS { - path = Bytes::from_static(b"/"); + match method { + Method::OPTIONS | Method::CONNECT => {} + _ if path.is_empty() => { + path = 
Bytes::from_static(b"/"); + } + _ => {} } let mut pseudo = Pseudo { method: Some(method), scheme: None, authority: None, - path: Some(unsafe { BytesStr::from_utf8_unchecked(path) }), + path: Some(unsafe { BytesStr::from_utf8_unchecked(path) }).filter(|p| !p.is_empty()), status: None, }; diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 4be70902b..03ce43fe1 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -114,7 +114,7 @@ async fn serve_connect() { let settings = client.assert_server_handshake().await; assert_default_settings!(settings); client - .send_frame(frames::headers(1).method("CONNECT").eos()) + .send_frame(frames::headers(1).request("CONNECT", "localhost").eos()) .await; client .recv_frame(frames::headers(1).response(200).eos()) From 13dd80be8d86e0445d74a7a1bf2853174bdd509a Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Thu, 22 Apr 2021 12:02:05 +0200 Subject: [PATCH 041/178] Remove useless mut --- tests/h2-fuzz/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/h2-fuzz/src/main.rs b/tests/h2-fuzz/src/main.rs index e67a54071..28905524b 100644 --- a/tests/h2-fuzz/src/main.rs +++ b/tests/h2-fuzz/src/main.rs @@ -119,7 +119,7 @@ async fn run(script: &[u8]) -> Result<(), h2::Error> { fn main() { env_logger::init(); - let mut rt = tokio::runtime::Runtime::new().unwrap(); + let rt = tokio::runtime::Runtime::new().unwrap(); loop { honggfuzz::fuzz!(|data: &[u8]| { eprintln!("{:?}", rt.block_on(run(data))); From c3bc09550e20d53895450458d0446ea8c2b2b428 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Thu, 22 Apr 2021 12:02:22 +0200 Subject: [PATCH 042/178] Remove commented-out code --- src/proto/streams/buffer.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/proto/streams/buffer.rs b/src/proto/streams/buffer.rs index 652f2eda1..2648a410e 100644 --- a/src/proto/streams/buffer.rs +++ b/src/proto/streams/buffer.rs @@ -92,13 +92,4 @@ impl Deque { None => None, } } - - /* - pub fn peek_front<'a, T>(&self, buf: &'a Buffer) -> Option<&'a T> { - match self.indices { - Some(idxs) => Some(&buf.slab[idxs.head].value), - None => None, - } - } - */ } From 869e7162c4cb5d888155cb14f9c2fb57ea98d56e Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Thu, 29 Apr 2021 11:48:46 +0200 Subject: [PATCH 043/178] Remove obsolete note in the docs The Stream implementationa also wants you to take care of managing capacity yourself. --- src/share.rs | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/share.rs b/src/share.rs index 06291068d..108e4a6eb 100644 --- a/src/share.rs +++ b/src/share.rs @@ -125,11 +125,6 @@ pub struct StreamId(u32); /// See method level documentation for more details on receiving data. See /// [`FlowControl`] for more details on inbound flow control. /// -/// Note that this type implements [`Stream`], yielding the received data frames. -/// When this implementation is used, the capacity is immediately released when -/// the data is yielded. It is recommended to only use this API when the data -/// will not be retained in memory for extended periods of time. -/// /// [`client::ResponseFuture`]: client/struct.ResponseFuture.html /// [`server::Connection`]: server/struct.Connection.html /// [`FlowControl`]: struct.FlowControl.html From 0a738e65888176823fdfc160eec7d80b824f42fb Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Thu, 22 Apr 2021 12:02:29 +0200 Subject: [PATCH 044/178] Document RecvStream::poll_data I need it in another polling function. 
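(For context, a minimal sketch — not part of the patch — of the kind of caller this change serves: a hypothetical `Body` wrapper that implements `futures::Stream` by delegating to the now-documented `RecvStream::poll_data`.)

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

struct Body {
    inner: h2::RecvStream,
}

impl futures::Stream for Body {
    type Item = Result<bytes::Bytes, h2::Error>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Delegate to RecvStream::poll_data, which yields the next DATA frame.
        self.get_mut().inner.poll_data(cx)
    }
}
```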
--- src/share.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/share.rs b/src/share.rs index 108e4a6eb..eb071dced 100644 --- a/src/share.rs +++ b/src/share.rs @@ -401,7 +401,7 @@ impl RecvStream { futures_util::future::poll_fn(move |cx| self.poll_trailers(cx)).await } - #[doc(hidden)] + /// Poll for the next data frame. pub fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { self.inner.inner.poll_data(cx).map_err_(Into::into) } From 2ccf8dd280c2a0e345e68548e27873e0cd19b039 Mon Sep 17 00:00:00 2001 From: Justin Mayhew Date: Sun, 28 Jun 2020 13:59:11 -0300 Subject: [PATCH 045/178] Fix calculation in FlowControl documentation --- src/share.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/share.rs b/src/share.rs index eb071dced..3249e5550 100644 --- a/src/share.rs +++ b/src/share.rs @@ -173,7 +173,7 @@ pub struct RecvStream { /// /// * A new stream is activated. The receive window is initialized to 1024 (the /// value of the initial window size for this connection). -/// * A `DATA` frame is received containing a payload of 400 bytes. +/// * A `DATA` frame is received containing a payload of 600 bytes. /// * The receive window size is reduced to 424 bytes. /// * [`release_capacity`] is called with 200. /// * The receive window size is now 624 bytes. The peer may send no more than From a6b414458fd7687f53df68861f4833cf142e5b76 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Thu, 29 Apr 2021 10:58:49 +0200 Subject: [PATCH 046/178] v0.3.3 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e701b8a6d..76f95b22d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.3 (April 29, 2021) + +* Fix client being able to make `CONNECT` requests without a `:path`. +* Expose `RecvStream::poll_data`. +* Fix some docs. + # 0.3.2 (March 24, 2021) * Fix incorrect handling of received 1xx responses on the client when the request body is still streaming. diff --git a/Cargo.toml b/Cargo.toml index c1f481846..5b20fa544 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.2" +version = "0.3.3" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2" +documentation = "https://docs.rs/h2/0.3.3" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 2942ac864..6049eec51 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.2")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.3")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 50d6297d23df6fe5ae83bf88e5b316b7d21bf85e Mon Sep 17 00:00:00 2001 From: Jason Hinch <594559+jhinch@users.noreply.github.com> Date: Tue, 7 Jan 2020 22:20:04 +1100 Subject: [PATCH 047/178] Replace unsafe code by ByteStr (fixes #440) --- src/frame/headers.rs | 24 +++++++++++------------- src/hpack/decoder.rs | 14 +++++--------- src/hpack/header.rs | 8 ++++++-- src/hpack/test/fuzz.rs | 5 ++--- 4 files changed, 24 insertions(+), 27 deletions(-) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 979ac702e..0b900de55 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -5,7 +5,7 @@ use crate::hpack::{self, BytesStr}; use http::header::{self, HeaderName, HeaderValue}; use http::{uri, HeaderMap, Method, Request, StatusCode, Uri}; -use bytes::{Bytes, BytesMut}; +use bytes::BytesMut; use std::fmt; use std::io::Cursor; @@ -549,13 +549,13 @@ impl Pseudo { let mut path = parts .path_and_query - .map(|v| Bytes::copy_from_slice(v.as_str().as_bytes())) - .unwrap_or_else(Bytes::new); + .map(|v| BytesStr::from(v.as_str())) + .unwrap_or(BytesStr::from_static("")); match method { Method::OPTIONS | Method::CONNECT => {} _ if path.is_empty() => { - path = Bytes::from_static(b"/"); + path = BytesStr::from_static("/"); } _ => {} } @@ -564,7 +564,7 @@ impl Pseudo { method: Some(method), scheme: None, authority: None, - path: Some(unsafe { BytesStr::from_utf8_unchecked(path) }).filter(|p| !p.is_empty()), + path: Some(path).filter(|p| !p.is_empty()), status: None, }; @@ -578,9 +578,7 @@ impl Pseudo { // If the URI includes an authority component, add it to the pseudo // headers if let Some(authority) = parts.authority { - pseudo.set_authority(unsafe { - BytesStr::from_utf8_unchecked(Bytes::copy_from_slice(authority.as_str().as_bytes())) - }); + pseudo.set_authority(BytesStr::from(authority.as_str())); } pseudo @@ -597,12 +595,12 @@ impl Pseudo { } pub fn set_scheme(&mut self, scheme: uri::Scheme) { - let bytes = match scheme.as_str() { - "http" => Bytes::from_static(b"http"), - "https" => Bytes::from_static(b"https"), - s => Bytes::copy_from_slice(s.as_bytes()), + let bytes_str = match scheme.as_str() { + "http" => BytesStr::from_static("http"), + "https" => BytesStr::from_static("https"), + s => BytesStr::from(s), }; - self.scheme = Some(unsafe { BytesStr::from_utf8_unchecked(bytes) }); + self.scheme = Some(bytes_str); } pub fn set_authority(&mut self, authority: BytesStr) { diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index 39afc8ad1..dacbbd992 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -577,13 +577,13 @@ pub fn get_static(idx: usize) -> Header { use http::header::HeaderValue; match idx { - 1 => Header::Authority(from_static("")), + 1 => Header::Authority(BytesStr::from_static("")), 2 => Header::Method(Method::GET), 3 => Header::Method(Method::POST), - 4 => Header::Path(from_static("/")), - 5 => Header::Path(from_static("/index.html")), - 6 => Header::Scheme(from_static("http")), - 7 => Header::Scheme(from_static("https")), + 4 => Header::Path(BytesStr::from_static("/")), + 5 => Header::Path(BytesStr::from_static("/index.html")), + 6 => Header::Scheme(BytesStr::from_static("http")), + 7 => Header::Scheme(BytesStr::from_static("https")), 8 => Header::Status(StatusCode::OK), 9 => Header::Status(StatusCode::NO_CONTENT), 
10 => Header::Status(StatusCode::PARTIAL_CONTENT), @@ -783,10 +783,6 @@ pub fn get_static(idx: usize) -> Header { } } -fn from_static(s: &'static str) -> BytesStr { - unsafe { BytesStr::from_utf8_unchecked(Bytes::from_static(s.as_bytes())) } -} - #[cfg(test)] mod test { use super::*; diff --git a/src/hpack/header.rs b/src/hpack/header.rs index 74369506c..e5b1a342d 100644 --- a/src/hpack/header.rs +++ b/src/hpack/header.rs @@ -246,8 +246,12 @@ impl<'a> Name<'a> { // ===== impl BytesStr ===== impl BytesStr { - pub(crate) unsafe fn from_utf8_unchecked(bytes: Bytes) -> Self { - BytesStr(bytes) + pub(crate) const fn from_static(value: &'static str) -> Self { + BytesStr(Bytes::from_static(value.as_bytes())) + } + + pub(crate) fn from(value: &str) -> Self { + BytesStr(Bytes::copy_from_slice(value.as_bytes())) } #[doc(hidden)] diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index 0abb66aca..d4e6534f3 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -2,7 +2,7 @@ use crate::hpack::{Decoder, Encode, Encoder, Header}; use http::header::{HeaderName, HeaderValue}; -use bytes::{buf::BufMut, Bytes, BytesMut}; +use bytes::{buf::BufMut, BytesMut}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use rand::{Rng, SeedableRng, StdRng}; @@ -404,6 +404,5 @@ fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { } fn to_shared(src: String) -> crate::hpack::BytesStr { - let b: Bytes = src.into(); - unsafe { crate::hpack::BytesStr::from_utf8_unchecked(b) } + crate::hpack::BytesStr::from(src.as_str()) } From 361de985a082246484830b15e438af2b64731004 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Tue, 4 May 2021 11:04:24 +0200 Subject: [PATCH 048/178] Enable all features on docs.rs (fixes #437) --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 5b20fa544..39888a1b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,3 +71,6 @@ rustls = "0.19" tokio-rustls = "0.22" webpki = "0.21" webpki-roots = "0.21" + +[package.metadata.docs.rs] +features = ["stream"] From 04570652b75a2a5176fb296c0c8309a865dc9d79 Mon Sep 17 00:00:00 2001 From: boxdot Date: Wed, 5 May 2021 18:28:45 +0200 Subject: [PATCH 049/178] Do not use `Instant::now` when zero reset streams are configured. (#537) This allows to use `h2` on wasm platforms which lack an `Instant::now` implementation by setting the number of streams to 0 with: `h2::client::Builder::max_concurrent_reset_streams`. 
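(A hedged sketch of the configuration described above — not part of the patch; the transport `io` is assumed to be supplied by the caller.)

```rust
// With zero allowed locally-reset streams, h2 no longer needs to consult
// Instant::now(), which is what makes this usable on clock-less wasm targets.
async fn connect<T>(
    io: T,
) -> Result<(h2::client::SendRequest<bytes::Bytes>, h2::client::Connection<T>), h2::Error>
where
    T: tokio::io::AsyncRead + tokio::io::AsyncWrite + Unpin,
{
    h2::client::Builder::new()
        .max_concurrent_reset_streams(0)
        .handshake(io)
        .await
}
```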
---
 src/proto/streams/recv.rs  | 16 +++++++++-------
 src/proto/streams/store.rs |  4 ++++
 2 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs
index 252fd8687..1f30450fe 100644
--- a/src/proto/streams/recv.rs
+++ b/src/proto/streams/recv.rs
@@ -866,13 +866,15 @@ impl Recv {
     }
 
     pub fn clear_expired_reset_streams(&mut self, store: &mut Store, counts: &mut Counts) {
-        let now = Instant::now();
-        let reset_duration = self.reset_duration;
-        while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| {
-            let reset_at = stream.reset_at.expect("reset_at must be set if in queue");
-            now - reset_at > reset_duration
-        }) {
-            counts.transition_after(stream, true);
+        if !self.pending_reset_expired.is_empty() {
+            let now = Instant::now();
+            let reset_duration = self.reset_duration;
+            while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| {
+                let reset_at = stream.reset_at.expect("reset_at must be set if in queue");
+                now - reset_at > reset_duration
+            }) {
+                counts.transition_after(stream, true);
+            }
         }
     }
 
diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs
index 9b66cf904..ac58f43ac 100644
--- a/src/proto/streams/store.rs
+++ b/src/proto/streams/store.rs
@@ -304,6 +304,10 @@ where
         None
     }
 
+    pub fn is_empty(&self) -> bool {
+        self.indices.is_none()
+    }
+
     pub fn pop_if<'a, R, F>(&mut self, store: &'a mut R, f: F) -> Option<store::Ptr<'a>>
     where
         R: Resolve,

From 5c72713e2a1c5105d7e923246a4a2e54a5622c6b Mon Sep 17 00:00:00 2001
From: Anthony Ramine <123095+nox@users.noreply.github.com>
Date: Thu, 6 May 2021 15:26:01 +0200
Subject: [PATCH 050/178] Remove panic (fixes #395) (#541)

I don't even understand why that should panic, what's wrong with values
greater than 0x0fff_ffff? If we truly wish to avoid very large dynamic
tables, we should do so when we get the large SETTINGS_HEADER_TABLE_SIZE
value, not when encoding it.
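(For reference, the value exercised by the test added below is well within what HPACK can represent: a dynamic table size update uses a 5-bit prefix, so 1_912_930_560 encodes as 0b0011_1111 = 63 followed by the remainder 1_912_930_529 in 7-bit groups with continuation bits — 225, 129, 148, 144, 7 — which is exactly the byte sequence the new test asserts.)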
--- src/hpack/encoder.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index cafd405e2..15a117526 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -384,10 +384,6 @@ fn encode_int( value -= low; - if value > 0x0fff_ffff { - panic!("value out of range"); - } - dst.put_u8(first_byte | low as u8); rem -= 1; @@ -851,6 +847,20 @@ mod test { assert_eq!("sup", huff_decode(&dst[9..])); } + #[test] + fn test_large_size_update() { + let mut encoder = Encoder::default(); + + encoder.update_max_size(1912930560); + assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update); + + let mut dst = BytesMut::with_capacity(6); + encoder + .encode_size_updates(&mut (&mut dst).limit(6)) + .unwrap(); + assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]); + } + #[test] #[ignore] fn test_evicted_overflow() { From fea3ae6ca916712c2e32c7fa31be8289a19582cd Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Thu, 6 May 2021 20:42:29 +0200 Subject: [PATCH 051/178] Read body in the example server (#544) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Kornel Lesinฬski --- examples/server.rs | 41 +++++++++++++++++++++++++++++++---------- src/server.rs | 8 +++++--- 2 files changed, 36 insertions(+), 13 deletions(-) diff --git a/examples/server.rs b/examples/server.rs index 777f4ea14..6d6490db0 100644 --- a/examples/server.rs +++ b/examples/server.rs @@ -1,7 +1,9 @@ use std::error::Error; use bytes::Bytes; -use h2::server; +use h2::server::{self, SendResponse}; +use h2::RecvStream; +use http::Request; use tokio::net::{TcpListener, TcpStream}; #[tokio::main] @@ -15,7 +17,7 @@ async fn main() -> Result<(), Box> { loop { if let Ok((socket, _peer_addr)) = listener.accept().await { tokio::spawn(async move { - if let Err(e) = handle(socket).await { + if let Err(e) = serve(socket).await { println!(" -> err={:?}", e); } }); @@ -23,22 +25,41 @@ async fn main() -> Result<(), Box> { } } -async fn handle(socket: TcpStream) -> Result<(), Box> { +async fn serve(socket: TcpStream) -> Result<(), Box> { let mut connection = server::handshake(socket).await?; println!("H2 connection bound"); while let Some(result) = connection.accept().await { - let (request, mut respond) = result?; - println!("GOT request: {:?}", request); - let response = http::Response::new(()); + let (request, respond) = result?; + tokio::spawn(async move { + if let Err(e) = handle_request(request, respond).await { + println!("error while handling request: {}", e); + } + }); + } + + println!("~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); + Ok(()) +} - let mut send = respond.send_response(response, false)?; +async fn handle_request( + mut request: Request, + mut respond: SendResponse, +) -> Result<(), Box> { + println!("GOT request: {:?}", request); - println!(">>>> sending data"); - send.send_data(Bytes::from_static(b"hello world"), true)?; + let body = request.body_mut(); + while let Some(data) = body.data().await { + let data = data?; + println!("<<<< recv {:?}", data); + let _ = body.flow_control().release_capacity(data.len()); } - println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~ H2 connection CLOSE !!!!!! 
~~~~~~~~~~~"); + let response = http::Response::new(()); + let mut send = respond.send_response(response, false)?; + println!(">>>> send"); + send.send_data(Bytes::from_static(b"hello "), false)?; + send.send_data(Bytes::from_static(b"world\n"), true)?; Ok(()) } diff --git a/src/server.rs b/src/server.rs index 6ad010bd1..f71315363 100644 --- a/src/server.rs +++ b/src/server.rs @@ -182,9 +182,11 @@ pub struct Handshake { /// # async fn doc(my_io: T) { /// let mut server = server::handshake(my_io).await.unwrap(); /// while let Some(request) = server.accept().await { -/// let (request, respond) = request.unwrap(); -/// // Process the request and send the response back to the client -/// // using `respond`. +/// tokio::spawn(async move { +/// let (request, respond) = request.unwrap(); +/// // Process the request and send the response back to the client +/// // using `respond`. +/// }); /// } /// # } /// # From 47d107aa174a4186ea02491b8852329e3e63e572 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Thu, 6 May 2021 20:57:44 +0200 Subject: [PATCH 052/178] Wake up connection when dropping SendRequest (#538) Fixes #502 --- src/proto/streams/streams.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index c694203a8..8606654c2 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -990,7 +990,14 @@ where P: Peer, { fn drop(&mut self) { - let _ = self.inner.lock().map(|mut inner| inner.refs -= 1); + if let Ok(mut inner) = self.inner.lock() { + inner.refs -= 1; + if inner.refs == 1 { + if let Some(task) = inner.actions.task.take() { + task.wake(); + } + } + } } } From b66f3aea99917c662f391b14919746d9e413cb02 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 24 Jun 2021 08:02:06 -0700 Subject: [PATCH 053/178] fuzz: fix e2e MockIo reading too much for the buffer --- fuzz/fuzz_targets/fuzz_e2e.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fuzz/fuzz_targets/fuzz_e2e.rs b/fuzz/fuzz_targets/fuzz_e2e.rs index a8d021ada..02792c134 100644 --- a/fuzz/fuzz_targets/fuzz_e2e.rs +++ b/fuzz/fuzz_targets/fuzz_e2e.rs @@ -52,7 +52,7 @@ impl<'a> AsyncRead for MockIo<'a> { if len > buf.remaining() { len = buf.remaining(); } - buf.put_slice(&self.input[len..]); + buf.put_slice(&self.input[..len]); self.input = &self.input[len..]; Poll::Ready(Ok(())) } From 288a5f086faa7bd63bac29d7096d97d75fa70721 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 21 Jul 2021 07:33:26 -0700 Subject: [PATCH 054/178] remove unused macro_escape attribute --- src/frame/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/frame/mod.rs b/src/frame/mod.rs index 4c49d6bb1..5a682b634 100644 --- a/src/frame/mod.rs +++ b/src/frame/mod.rs @@ -15,7 +15,6 @@ use std::fmt; /// let buf: [u8; 4] = [0, 0, 0, 1]; /// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); /// ``` -#[macro_escape] macro_rules! unpack_octets_4 { // TODO: Get rid of this macro ($buf:expr, $offset:expr, $tip:ty) => { From ab6f148ee105a563d8b1d08ec750047ca0c6b5da Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 20 Aug 2021 14:30:49 -0700 Subject: [PATCH 055/178] Fix potential hang if extensions contain same StreamRef If a user stored a `StreamRef` to the same stream in the request or response extensions, they would be dropped while the internal store lock was held. That would lead to a deadlock, since dropping a stream ref will try to take the store lock to clean up. 
Clear extensions of Request and Response before locking the store, to
prevent this.

Fixes hyperium/hyper#2621
---
 src/proto/streams/streams.rs   | 16 +++++++++++++---
 tests/h2-tests/tests/server.rs | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs
index 8606654c2..e3e02c2fa 100644
--- a/src/proto/streams/streams.rs
+++ b/src/proto/streams/streams.rs
@@ -207,13 +207,16 @@ where
 
     pub fn send_request(
         &mut self,
-        request: Request<()>,
+        mut request: Request<()>,
        end_of_stream: bool,
         pending: Option<&OpaqueStreamRef>,
     ) -> Result<StreamRef<B>, SendError> {
         use super::stream::ContentLength;
         use http::Method;
 
+        // Clear before taking lock, incase extensions contain a StreamRef.
+        request.extensions_mut().clear();
+
         // TODO: There is a hazard with assigning a stream ID before the
         // prioritize layer. If prioritization reorders new streams, this
         // implicitly closes the earlier stream IDs.
@@ -1062,9 +1065,11 @@ impl<B> StreamRef<B> {
 
     pub fn send_response(
         &mut self,
-        response: Response<()>,
+        mut response: Response<()>,
         end_of_stream: bool,
     ) -> Result<(), UserError> {
+        // Clear before taking lock, incase extensions contain a StreamRef.
+        response.extensions_mut().clear();
         let mut me = self.opaque.inner.lock().unwrap();
         let me = &mut *me;
 
@@ -1082,7 +1087,12 @@ impl<B> StreamRef<B> {
         })
     }
 
-    pub fn send_push_promise(&mut self, request: Request<()>) -> Result<StreamRef<B>, UserError> {
+    pub fn send_push_promise(
+        &mut self,
+        mut request: Request<()>,
+    ) -> Result<StreamRef<B>, UserError> {
+        // Clear before taking lock, incase extensions contain a StreamRef.
+        request.extensions_mut().clear();
         let mut me = self.opaque.inner.lock().unwrap();
         let me = &mut *me;
 
diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs
index 03ce43fe1..556b53c71 100644
--- a/tests/h2-tests/tests/server.rs
+++ b/tests/h2-tests/tests/server.rs
@@ -1059,3 +1059,37 @@ async fn request_without_authority() {
 
     join(client, srv).await;
 }
+
+#[tokio::test]
+async fn serve_when_request_in_response_extensions() {
+    h2_support::trace_init!();
+    let (io, mut client) = mock::new();
+
+    let client = async move {
+        let settings = client.assert_server_handshake().await;
+        assert_default_settings!(settings);
+        client
+            .send_frame(
+                frames::headers(1)
+                    .request("GET", "https://example.com/")
+                    .eos(),
+            )
+            .await;
+        client
+            .recv_frame(frames::headers(1).response(200).eos())
+            .await;
+    };
+
+    let srv = async move {
+        let mut srv = server::handshake(io).await.expect("handshake");
+        let (req, mut stream) = srv.next().await.unwrap().unwrap();
+
+        let mut rsp = http::Response::new(());
+        rsp.extensions_mut().insert(req);
+        stream.send_response(rsp, true).unwrap();
+
+        assert!(srv.next().await.is_none());
+    };
+
+    join(client, srv).await;
+}

From 507229232095e7492076d0819fa5f6199d5b1630 Mon Sep 17 00:00:00 2001
From: Sean McArthur
Date: Fri, 20 Aug 2021 15:23:56 -0700
Subject: [PATCH 056/178] v0.3.4

---
 CHANGELOG.md | 7 +++++++
 Cargo.toml   | 4 ++--
 src/lib.rs   | 2 +-
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 76f95b22d..574e1fbca 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,10 @@
+# 0.3.4 (August 20, 2021)
+
+* Fix panic when encoding header size update over a certain size.
+* Fix `SendRequest` to wake up connection when dropped.
+* Fix potential hang if `RecvStream` is placed in the request or response `extensions`.
+* Stop calling `Instant::now` if zero reset streams are configured.
+ # 0.3.3 (April 29, 2021) * Fix client being able to make `CONNECT` requests without a `:path`. diff --git a/Cargo.toml b/Cargo.toml index 39888a1b6..fed704593 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.3" +version = "0.3.4" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.3.3" +documentation = "https://docs.rs/h2/0.3.4" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 6049eec51..951cd96b3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.3")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.4")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From e9a13700cb344fff70f7fdf72687511107f09b1d Mon Sep 17 00:00:00 2001 From: Alex Touchet Date: Wed, 8 Sep 2021 00:44:22 -0700 Subject: [PATCH 057/178] Update version number in Readme (#549) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 79072f39a..63627b706 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ To use `h2`, first add this to your `Cargo.toml`: ```toml [dependencies] -h2 = "0.2" +h2 = "0.3" ``` Next, add this to your crate: From 61b4f8fc34709b7cdfeaf91f3a7a527105c2026b Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Fri, 20 Aug 2021 14:51:11 +0200 Subject: [PATCH 058/178] Support very large headers This completely refactors how headers are hpack-encoded. Instead of trying to be clever, constructing frames on the go while hpack-encoding, we just make a blob of all the hpack-encoded headers first, and then we split that blob in as many frames as necessary. --- src/codec/error.rs | 4 - src/codec/framed_write.rs | 22 +-- src/frame/headers.rs | 188 +++++++++++++----------- src/hpack/decoder.rs | 2 +- src/hpack/encoder.rs | 253 ++++++-------------------------- src/hpack/huffman/mod.rs | 33 ++--- src/hpack/mod.rs | 4 +- src/hpack/test/fixture.rs | 8 +- src/hpack/test/fuzz.rs | 31 +--- src/proto/streams/send.rs | 8 - tests/h2-support/src/prelude.rs | 1 + 11 files changed, 164 insertions(+), 390 deletions(-) diff --git a/src/codec/error.rs b/src/codec/error.rs index 5d6659223..f505eb0f5 100644 --- a/src/codec/error.rs +++ b/src/codec/error.rs @@ -35,9 +35,6 @@ pub enum UserError { /// The payload size is too big PayloadTooBig, - /// A header size is too big - HeaderTooBig, - /// The application attempted to initiate too many streams to remote. 
Rejected, @@ -130,7 +127,6 @@ impl fmt::Display for UserError { InactiveStreamId => "inactive stream", UnexpectedFrameType => "unexpected frame type", PayloadTooBig => "payload too big", - HeaderTooBig => "header too big", Rejected => "rejected", ReleaseCapacityTooBig => "release capacity too big", OverflowedStreamId => "stream ID overflowed", diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 30888c67f..4b1b4accc 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -148,12 +148,6 @@ where match self.encoder.unset_frame() { ControlFlow::Continue => (), ControlFlow::Break => break, - ControlFlow::EndlessLoopHeaderTooBig => { - return Poll::Ready(Err(std::io::Error::new( - std::io::ErrorKind::InvalidInput, - UserError::HeaderTooBig, - ))); - } } } @@ -199,7 +193,6 @@ where enum ControlFlow { Continue, Break, - EndlessLoopHeaderTooBig, } impl Encoder @@ -221,20 +214,7 @@ where Some(Next::Continuation(frame)) => { // Buffer the continuation frame, then try to write again let mut buf = limited_write_buf!(self); - if let Some(continuation) = frame.encode(&mut self.hpack, &mut buf) { - // We previously had a CONTINUATION, and after encoding - // it, we got *another* one? Let's just double check - // that at least some progress is being made... - if self.buf.get_ref().len() == frame::HEADER_LEN { - // If *only* the CONTINUATION frame header was - // written, and *no* header fields, we're stuck - // in a loop... - tracing::warn!( - "CONTINUATION frame write loop; header value too big to encode" - ); - return ControlFlow::EndlessLoopHeaderTooBig; - } - + if let Some(continuation) = frame.encode(&mut buf) { self.next = Some(Next::Continuation(continuation)); } ControlFlow::Continue diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 0b900de55..cfc6a1a27 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -5,17 +5,12 @@ use crate::hpack::{self, BytesStr}; use http::header::{self, HeaderName, HeaderValue}; use http::{uri, HeaderMap, Method, Request, StatusCode, Uri}; -use bytes::BytesMut; +use bytes::{BufMut, Bytes, BytesMut}; use std::fmt; use std::io::Cursor; type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>; - -// Minimum MAX_FRAME_SIZE is 16kb, so save some arbitrary space for frame -// head and other header bits. -const MAX_HEADER_LENGTH: usize = 1024 * 16 - 100; - /// Header frame /// /// This could be either a request or a response. 
@@ -100,11 +95,7 @@ struct HeaderBlock { #[derive(Debug)] struct EncodingHeaderBlock { - /// Argument to pass to the HPACK encoder to resume encoding - hpack: Option, - - /// remaining headers to encode - headers: Iter, + hpack: Bytes, } const END_STREAM: u8 = 0x1; @@ -241,10 +232,6 @@ impl Headers { self.header_block.is_over_size } - pub(crate) fn has_too_big_field(&self) -> bool { - self.header_block.has_too_big_field() - } - pub fn into_parts(self) -> (Pseudo, HeaderMap) { (self.header_block.pseudo, self.header_block.fields) } @@ -279,8 +266,8 @@ impl Headers { let head = self.head(); self.header_block - .into_encoding() - .encode(&head, encoder, dst, |_| {}) + .into_encoding(encoder) + .encode(&head, dst, |_| {}) } fn head(&self) -> Head { @@ -480,8 +467,6 @@ impl PushPromise { encoder: &mut hpack::Encoder, dst: &mut EncodeBuf<'_>, ) -> Option { - use bytes::BufMut; - // At this point, the `is_end_headers` flag should always be set debug_assert!(self.flags.is_end_headers()); @@ -489,8 +474,8 @@ impl PushPromise { let promised_id = self.promised_id; self.header_block - .into_encoding() - .encode(&head, encoder, dst, |dst| { + .into_encoding(encoder) + .encode(&head, dst, |dst| { dst.put_u32(promised_id.into()); }) } @@ -529,15 +514,11 @@ impl Continuation { Head::new(Kind::Continuation, END_HEADERS, self.stream_id) } - pub fn encode( - self, - encoder: &mut hpack::Encoder, - dst: &mut EncodeBuf<'_>, - ) -> Option { + pub fn encode(self, dst: &mut EncodeBuf<'_>) -> Option { // Get the CONTINUATION frame head let head = self.head(); - self.header_block.encode(&head, encoder, dst, |_| {}) + self.header_block.encode(&head, dst, |_| {}) } } @@ -617,13 +598,7 @@ impl Pseudo { // ===== impl EncodingHeaderBlock ===== impl EncodingHeaderBlock { - fn encode( - mut self, - head: &Head, - encoder: &mut hpack::Encoder, - dst: &mut EncodeBuf<'_>, - f: F, - ) -> Option + fn encode(mut self, head: &Head, dst: &mut EncodeBuf<'_>, f: F) -> Option where F: FnOnce(&mut EncodeBuf<'_>), { @@ -639,15 +614,17 @@ impl EncodingHeaderBlock { f(dst); // Now, encode the header payload - let continuation = match encoder.encode(self.hpack, &mut self.headers, dst) { - hpack::Encode::Full => None, - hpack::Encode::Partial(state) => Some(Continuation { + let continuation = if self.hpack.len() > dst.remaining_mut() { + dst.put_slice(&self.hpack.split_to(dst.remaining_mut())); + + Some(Continuation { stream_id: head.stream_id(), - header_block: EncodingHeaderBlock { - hpack: Some(state), - headers: self.headers, - }, - }), + header_block: self, + }) + } else { + dst.put_slice(&self.hpack); + + None }; // Compute the header block length @@ -910,13 +887,17 @@ impl HeaderBlock { Ok(()) } - fn into_encoding(self) -> EncodingHeaderBlock { + fn into_encoding(self, encoder: &mut hpack::Encoder) -> EncodingHeaderBlock { + let mut hpack = BytesMut::new(); + let headers = Iter { + pseudo: Some(self.pseudo), + fields: self.fields.into_iter(), + }; + + encoder.encode(headers, &mut hpack); + EncodingHeaderBlock { - hpack: None, - headers: Iter { - pseudo: Some(self.pseudo), - fields: self.fields.into_iter(), - }, + hpack: hpack.freeze(), } } @@ -949,48 +930,79 @@ impl HeaderBlock { .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) .sum::() } - - /// Iterate over all pseudos and headers to see if any individual pair - /// would be too large to encode. - pub(crate) fn has_too_big_field(&self) -> bool { - macro_rules! 
pseudo_size { - ($name:ident) => {{ - self.pseudo - .$name - .as_ref() - .map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len())) - .unwrap_or(0) - }}; - } - - if pseudo_size!(method) > MAX_HEADER_LENGTH { - return true; - } - - if pseudo_size!(scheme) > MAX_HEADER_LENGTH { - return true; - } - - if pseudo_size!(authority) > MAX_HEADER_LENGTH { - return true; - } - - if pseudo_size!(path) > MAX_HEADER_LENGTH { - return true; - } - - // skip :status, its never going to be too big - - for (name, value) in &self.fields { - if decoded_header_size(name.as_str().len(), value.len()) > MAX_HEADER_LENGTH { - return true; - } - } - - false - } } fn decoded_header_size(name: usize, value: usize) -> usize { name + value + 32 } + +#[cfg(test)] +mod test { + use std::iter::FromIterator; + + use http::HeaderValue; + + use super::*; + use crate::frame; + use crate::hpack::{huffman, Encoder}; + + #[test] + fn test_nameless_header_at_resume() { + let mut encoder = Encoder::default(); + let mut dst = BytesMut::new(); + + let headers = Headers::new( + StreamId::ZERO, + Default::default(), + HeaderMap::from_iter(vec![ + ( + HeaderName::from_static("hello"), + HeaderValue::from_static("world"), + ), + ( + HeaderName::from_static("hello"), + HeaderValue::from_static("zomg"), + ), + ( + HeaderName::from_static("hello"), + HeaderValue::from_static("sup"), + ), + ]), + ); + + let continuation = headers + .encode(&mut encoder, &mut (&mut dst).limit(frame::HEADER_LEN + 8)) + .unwrap(); + + assert_eq!(17, dst.len()); + assert_eq!([0, 0, 8, 1, 0, 0, 0, 0, 0], &dst[0..9]); + assert_eq!(&[0x40, 0x80 | 4], &dst[9..11]); + assert_eq!("hello", huff_decode(&dst[11..15])); + assert_eq!(0x80 | 4, dst[15]); + + let mut world = dst[16..17].to_owned(); + + dst.clear(); + + assert!(continuation + .encode(&mut (&mut dst).limit(frame::HEADER_LEN + 16)) + .is_none()); + + world.extend_from_slice(&dst[9..12]); + assert_eq!("world", huff_decode(&world)); + + assert_eq!(24, dst.len()); + assert_eq!([0, 0, 15, 9, 4, 0, 0, 0, 0], &dst[0..9]); + + // // Next is not indexed + assert_eq!(&[15, 47, 0x80 | 3], &dst[12..15]); + assert_eq!("zomg", huff_decode(&dst[15..18])); + assert_eq!(&[15, 47, 0x80 | 3], &dst[18..21]); + assert_eq!("sup", huff_decode(&dst[21..])); + } + + fn huff_decode(src: &[u8]) -> BytesMut { + let mut buf = BytesMut::new(); + huffman::decode(src, &mut buf).unwrap() + } +} diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index dacbbd992..e4b34d1fc 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -847,7 +847,7 @@ mod test { fn huff_encode(src: &[u8]) -> BytesMut { let mut buf = BytesMut::new(); - huffman::encode(src, &mut buf).unwrap(); + huffman::encode(src, &mut buf); buf } } diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index 15a117526..bdff0a066 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -1,34 +1,21 @@ use super::table::{Index, Table}; use super::{huffman, Header}; -use bytes::{buf::Limit, BufMut, BytesMut}; +use bytes::{BufMut, BytesMut}; use http::header::{HeaderName, HeaderValue}; -type DstBuf<'a> = Limit<&'a mut BytesMut>; - #[derive(Debug)] pub struct Encoder { table: Table, size_update: Option, } -#[derive(Debug)] -pub enum Encode { - Full, - Partial(EncodeState), -} - #[derive(Debug)] pub struct EncodeState { index: Index, value: Option, } -#[derive(Debug, PartialEq, Eq)] -pub enum EncoderError { - BufferOverflow, -} - #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum SizeUpdate { One(usize), @@ -77,60 +64,24 @@ impl Encoder { } /// 
Encode a set of headers into the provide buffer - pub fn encode( - &mut self, - resume: Option, - headers: &mut I, - dst: &mut DstBuf<'_>, - ) -> Encode + pub fn encode(&mut self, headers: I, dst: &mut BytesMut) where - I: Iterator>>, + I: IntoIterator>>, { let span = tracing::trace_span!("hpack::encode"); let _e = span.enter(); - let pos = position(dst); - tracing::trace!(pos, "encoding at"); - - if let Err(e) = self.encode_size_updates(dst) { - if e == EncoderError::BufferOverflow { - rewind(dst, pos); - } - - unreachable!("encode_size_updates errored"); - } + self.encode_size_updates(dst); let mut last_index = None; - if let Some(resume) = resume { - let pos = position(dst); - - let res = match resume.value { - Some(ref value) => self.encode_header_without_name(&resume.index, value, dst), - None => self.encode_header(&resume.index, dst), - }; - - if res.is_err() { - rewind(dst, pos); - return Encode::Partial(resume); - } - last_index = Some(resume.index); - } - for header in headers { - let pos = position(dst); - match header.reify() { // The header has an associated name. In which case, try to // index it in the table. Ok(header) => { let index = self.table.index(header); - let res = self.encode_header(&index, dst); - - if res.is_err() { - rewind(dst, pos); - return Encode::Partial(EncodeState { index, value: None }); - } + self.encode_header(&index, dst); last_index = Some(index); } @@ -139,77 +90,61 @@ impl Encoder { // which case, we skip table lookup and just use the same index // as the previous entry. Err(value) => { - let res = self.encode_header_without_name( + self.encode_header_without_name( last_index.as_ref().unwrap_or_else(|| { panic!("encoding header without name, but no previous index to use for name"); }), &value, dst, ); - - if res.is_err() { - rewind(dst, pos); - return Encode::Partial(EncodeState { - index: last_index.unwrap(), // checked just above - value: Some(value), - }); - } } - }; + } } - - Encode::Full } - fn encode_size_updates(&mut self, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { + fn encode_size_updates(&mut self, dst: &mut BytesMut) { match self.size_update.take() { Some(SizeUpdate::One(val)) => { self.table.resize(val); - encode_size_update(val, dst)?; + encode_size_update(val, dst); } Some(SizeUpdate::Two(min, max)) => { self.table.resize(min); self.table.resize(max); - encode_size_update(min, dst)?; - encode_size_update(max, dst)?; + encode_size_update(min, dst); + encode_size_update(max, dst); } None => {} } - - Ok(()) } - fn encode_header(&mut self, index: &Index, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { + fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) { match *index { Index::Indexed(idx, _) => { - encode_int(idx, 7, 0x80, dst)?; + encode_int(idx, 7, 0x80, dst); } Index::Name(idx, _) => { let header = self.table.resolve(&index); - encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst)?; + encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst); } Index::Inserted(_) => { let header = self.table.resolve(&index); assert!(!header.is_sensitive()); - if !dst.has_remaining_mut() { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8(0b0100_0000); - encode_str(header.name().as_slice(), dst)?; - encode_str(header.value_slice(), dst)?; + encode_str(header.name().as_slice(), dst); + encode_str(header.value_slice(), dst); } Index::InsertedValue(idx, _) => { let header = self.table.resolve(&index); assert!(!header.is_sensitive()); - encode_int(idx, 6, 0b0100_0000, dst)?; - 
encode_str(header.value_slice(), dst)?; + encode_int(idx, 6, 0b0100_0000, dst); + encode_str(header.value_slice(), dst); } Index::NotIndexed(_) => { let header = self.table.resolve(&index); @@ -219,19 +154,17 @@ impl Encoder { header.value_slice(), header.is_sensitive(), dst, - )?; + ); } } - - Ok(()) } fn encode_header_without_name( &mut self, last: &Index, value: &HeaderValue, - dst: &mut DstBuf<'_>, - ) -> Result<(), EncoderError> { + dst: &mut BytesMut, + ) { match *last { Index::Indexed(..) | Index::Name(..) @@ -239,7 +172,7 @@ impl Encoder { | Index::InsertedValue(..) => { let idx = self.table.resolve_idx(last); - encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst)?; + encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst); } Index::NotIndexed(_) => { let last = self.table.resolve(last); @@ -249,11 +182,9 @@ impl Encoder { value.as_ref(), value.is_sensitive(), dst, - )?; + ); } } - - Ok(()) } } @@ -263,52 +194,32 @@ impl Default for Encoder { } } -fn encode_size_update(val: usize, dst: &mut B) -> Result<(), EncoderError> { +fn encode_size_update(val: usize, dst: &mut BytesMut) { encode_int(val, 5, 0b0010_0000, dst) } -fn encode_not_indexed( - name: usize, - value: &[u8], - sensitive: bool, - dst: &mut DstBuf<'_>, -) -> Result<(), EncoderError> { +fn encode_not_indexed(name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut) { if sensitive { - encode_int(name, 4, 0b10000, dst)?; + encode_int(name, 4, 0b10000, dst); } else { - encode_int(name, 4, 0, dst)?; + encode_int(name, 4, 0, dst); } - encode_str(value, dst)?; - Ok(()) + encode_str(value, dst); } -fn encode_not_indexed2( - name: &[u8], - value: &[u8], - sensitive: bool, - dst: &mut DstBuf<'_>, -) -> Result<(), EncoderError> { - if !dst.has_remaining_mut() { - return Err(EncoderError::BufferOverflow); - } - +fn encode_not_indexed2(name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut) { if sensitive { dst.put_u8(0b10000); } else { dst.put_u8(0); } - encode_str(name, dst)?; - encode_str(value, dst)?; - Ok(()) + encode_str(name, dst); + encode_str(value, dst); } -fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { - if !dst.has_remaining_mut() { - return Err(EncoderError::BufferOverflow); - } - +fn encode_str(val: &[u8], dst: &mut BytesMut) { if !val.is_empty() { let idx = position(dst); @@ -316,13 +227,13 @@ fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { dst.put_u8(0); // Encode with huffman - huffman::encode(val, dst)?; + huffman::encode(val, dst); let huff_len = position(dst) - (idx + 1); if encode_int_one_byte(huff_len, 7) { // Write the string head - dst.get_mut()[idx] = 0x80 | huff_len as u8; + dst[idx] = 0x80 | huff_len as u8; } else { // Write the head to a placeholder const PLACEHOLDER_LEN: usize = 8; @@ -330,36 +241,29 @@ fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { let head_len = { let mut head_dst = &mut buf[..]; - encode_int(huff_len, 7, 0x80, &mut head_dst)?; + encode_int(huff_len, 7, 0x80, &mut head_dst); PLACEHOLDER_LEN - head_dst.remaining_mut() }; - if dst.remaining_mut() < head_len { - return Err(EncoderError::BufferOverflow); - } - // This is just done to reserve space in the destination dst.put_slice(&buf[1..head_len]); - let written = dst.get_mut(); // Shift the header forward for i in 0..huff_len { let src_i = idx + 1 + (huff_len - (i + 1)); let dst_i = idx + head_len + (huff_len - (i + 1)); - written[dst_i] = written[src_i]; + dst[dst_i] = dst[src_i]; } // Copy in the head for i 
in 0..head_len { - written[idx + i] = buf[i]; + dst[idx + i] = buf[i]; } } } else { // Write an empty string dst.put_u8(0); } - - Ok(()) } /// Encode an integer into the given destination buffer @@ -368,16 +272,10 @@ fn encode_int( prefix_bits: usize, // The number of bits in the prefix first_byte: u8, // The base upon which to start encoding the int dst: &mut B, -) -> Result<(), EncoderError> { - let mut rem = dst.remaining_mut(); - - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - +) { if encode_int_one_byte(value, prefix_bits) { dst.put_u8(first_byte | value as u8); - return Ok(()); + return; } let low = (1 << prefix_bits) - 1; @@ -385,26 +283,14 @@ fn encode_int( value -= low; dst.put_u8(first_byte | low as u8); - rem -= 1; while value >= 128 { - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8(0b1000_0000 | value as u8); - rem -= 1; value >>= 7; } - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8(value as u8); - - Ok(()) } /// Returns true if the in the int can be fully encoded in the first byte. @@ -412,19 +298,14 @@ fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool { value < (1 << prefix_bits) - 1 } -fn position(buf: &DstBuf<'_>) -> usize { - buf.get_ref().len() -} - -fn rewind(buf: &mut DstBuf<'_>, pos: usize) { - buf.get_mut().truncate(pos); +fn position(buf: &BytesMut) -> usize { + buf.len() } #[cfg(test)] mod test { use super::*; use crate::hpack::Header; - use bytes::buf::BufMut; use http::*; #[test] @@ -801,52 +682,6 @@ mod test { assert_eq!("zomg", huff_decode(&res[14..])); } - #[test] - fn test_nameless_header_at_resume() { - let mut encoder = Encoder::default(); - let max_len = 15; - let mut dst = BytesMut::with_capacity(64); - - let mut input = vec![ - Header::Field { - name: Some("hello".parse().unwrap()), - value: HeaderValue::from_bytes(b"world").unwrap(), - }, - Header::Field { - name: None, - value: HeaderValue::from_bytes(b"zomg").unwrap(), - }, - Header::Field { - name: None, - value: HeaderValue::from_bytes(b"sup").unwrap(), - }, - ] - .into_iter(); - - let resume = match encoder.encode(None, &mut input, &mut (&mut dst).limit(max_len)) { - Encode::Partial(r) => r, - _ => panic!("encode should be partial"), - }; - - assert_eq!(&[0x40, 0x80 | 4], &dst[0..2]); - assert_eq!("hello", huff_decode(&dst[2..6])); - assert_eq!(0x80 | 4, dst[6]); - assert_eq!("world", huff_decode(&dst[7..11])); - - dst.clear(); - - match encoder.encode(Some(resume), &mut input, &mut (&mut dst).limit(max_len)) { - Encode::Full => {} - unexpected => panic!("resume returned unexpected: {:?}", unexpected), - } - - // Next is not indexed - assert_eq!(&[15, 47, 0x80 | 3], &dst[0..3]); - assert_eq!("zomg", huff_decode(&dst[3..6])); - assert_eq!(&[15, 47, 0x80 | 3], &dst[6..9]); - assert_eq!("sup", huff_decode(&dst[9..])); - } - #[test] fn test_large_size_update() { let mut encoder = Encoder::default(); @@ -855,9 +690,7 @@ mod test { assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update); let mut dst = BytesMut::with_capacity(6); - encoder - .encode_size_updates(&mut (&mut dst).limit(6)) - .unwrap(); + encoder.encode_size_updates(&mut dst); assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]); } @@ -869,7 +702,7 @@ mod test { fn encode(e: &mut Encoder, hdrs: Vec>>) -> BytesMut { let mut dst = BytesMut::with_capacity(1024); - e.encode(None, &mut hdrs.into_iter(), &mut (&mut dst).limit(1024)); + e.encode(&mut hdrs.into_iter(), &mut dst); dst } diff --git a/src/hpack/huffman/mod.rs b/src/hpack/huffman/mod.rs 
index b8db8b4d3..07b3fd925 100644 --- a/src/hpack/huffman/mod.rs +++ b/src/hpack/huffman/mod.rs @@ -1,7 +1,7 @@ mod table; use self::table::{DECODE_TABLE, ENCODE_TABLE}; -use crate::hpack::{DecoderError, EncoderError}; +use crate::hpack::DecoderError; use bytes::{BufMut, BytesMut}; @@ -40,11 +40,9 @@ pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result Ok(buf.split()) } -// TODO: return error when there is not enough room to encode the value -pub fn encode(src: &[u8], dst: &mut B) -> Result<(), EncoderError> { +pub fn encode(src: &[u8], dst: &mut BytesMut) { let mut bits: u64 = 0; let mut bits_left = 40; - let mut rem = dst.remaining_mut(); for &b in src { let (nbits, code) = ENCODE_TABLE[b as usize]; @@ -53,29 +51,18 @@ pub fn encode(src: &[u8], dst: &mut B) -> Result<(), EncoderError> { bits_left -= nbits; while bits_left <= 32 { - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - dst.put_u8((bits >> 32) as u8); bits <<= 8; bits_left += 8; - rem -= 1; } } if bits_left != 40 { - if rem == 0 { - return Err(EncoderError::BufferOverflow); - } - // This writes the EOS token bits |= (1 << bits_left) - 1; dst.put_u8((bits >> 32) as u8); } - - Ok(()) } impl Decoder { @@ -144,17 +131,17 @@ mod test { #[test] fn encode_single_byte() { - let mut dst = Vec::with_capacity(1); + let mut dst = BytesMut::with_capacity(1); - encode(b"o", &mut dst).unwrap(); + encode(b"o", &mut dst); assert_eq!(&dst[..], &[0b00111111]); dst.clear(); - encode(b"0", &mut dst).unwrap(); + encode(b"0", &mut dst); assert_eq!(&dst[..], &[0x0 + 7]); dst.clear(); - encode(b"A", &mut dst).unwrap(); + encode(b"A", &mut dst); assert_eq!(&dst[..], &[(0x21 << 2) + 3]); } @@ -185,9 +172,9 @@ mod test { ]; for s in DATA { - let mut dst = Vec::with_capacity(s.len()); + let mut dst = BytesMut::with_capacity(s.len()); - encode(s.as_bytes(), &mut dst).unwrap(); + encode(s.as_bytes(), &mut dst); let decoded = decode(&dst).unwrap(); @@ -201,9 +188,9 @@ mod test { &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; for s in DATA { - let mut dst = Vec::with_capacity(s.len()); + let mut dst = BytesMut::with_capacity(s.len()); - encode(s, &mut dst).unwrap(); + encode(s, &mut dst); let decoded = decode(&dst).unwrap(); diff --git a/src/hpack/mod.rs b/src/hpack/mod.rs index 365b0057f..b808e5238 100644 --- a/src/hpack/mod.rs +++ b/src/hpack/mod.rs @@ -1,12 +1,12 @@ mod decoder; mod encoder; pub(crate) mod header; -mod huffman; +pub(crate) mod huffman; mod table; #[cfg(test)] mod test; pub use self::decoder::{Decoder, DecoderError, NeedMore}; -pub use self::encoder::{Encode, EncodeState, Encoder, EncoderError}; +pub use self::encoder::{EncodeState, Encoder}; pub use self::header::{BytesStr, Header}; diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 9828f0488..6d0448425 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -1,6 +1,6 @@ use crate::hpack::{Decoder, Encoder, Header}; -use bytes::{buf::BufMut, BytesMut}; +use bytes::BytesMut; use hex::FromHex; use serde_json::Value; @@ -107,11 +107,7 @@ fn test_story(story: Value) { }) .collect(); - encoder.encode( - None, - &mut input.clone().into_iter(), - &mut (&mut buf).limit(limit), - ); + encoder.encode(&mut input.clone().into_iter(), &mut buf); decoder .decode(&mut Cursor::new(&mut buf), |e| { diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index d4e6534f3..0f84adc83 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -1,8 +1,8 @@ -use crate::hpack::{Decoder, Encode, Encoder, Header}; +use 
crate::hpack::{Decoder, Encoder, Header}; use http::header::{HeaderName, HeaderValue}; -use bytes::{buf::BufMut, BytesMut}; +use bytes::BytesMut; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use rand::{Rng, SeedableRng, StdRng}; @@ -144,7 +144,6 @@ impl FuzzHpack { } fn run(self) { - let mut chunks = self.chunks; let frames = self.frames; let mut expect = vec![]; @@ -173,11 +172,7 @@ impl FuzzHpack { } } - let mut input = frame.headers.into_iter(); - let mut index = None; - - let mut max_chunk = chunks.pop().unwrap_or(MAX_CHUNK); - let mut buf = BytesMut::with_capacity(max_chunk); + let mut buf = BytesMut::new(); if let Some(max) = frame.resizes.iter().max() { decoder.queue_size_update(*max); @@ -188,25 +183,7 @@ impl FuzzHpack { encoder.update_max_size(*resize); } - loop { - match encoder.encode(index.take(), &mut input, &mut (&mut buf).limit(max_chunk)) { - Encode::Full => break, - Encode::Partial(i) => { - index = Some(i); - - // Decode the chunk! - decoder - .decode(&mut Cursor::new(&mut buf), |h| { - let e = expect.remove(0); - assert_eq!(h, e); - }) - .expect("partial decode"); - - max_chunk = chunks.pop().unwrap_or(MAX_CHUNK); - buf = BytesMut::with_capacity(max_chunk); - } - } - } + encoder.encode(frame.headers, &mut buf); // Decode the chunk! decoder diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 10934de48..e8804127b 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -133,10 +133,6 @@ impl Send { Self::check_headers(frame.fields())?; - if frame.has_too_big_field() { - return Err(UserError::HeaderTooBig); - } - let end_stream = frame.is_end_stream(); // Update the state @@ -270,10 +266,6 @@ impl Send { return Err(UserError::UnexpectedFrameType); } - if frame.has_too_big_field() { - return Err(UserError::HeaderTooBig); - } - stream.state.send_close(); tracing::trace!("send_trailers -- queuing; frame={:?}", frame); diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index f4b2e823f..1fcb0dcc4 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -121,6 +121,7 @@ pub fn build_large_headers() -> Vec<(&'static str, String)> { ("eight", build_large_string('8', 4 * 1024)), ("nine", "nine".to_string()), ("ten", build_large_string('0', 4 * 1024)), + ("eleven", build_large_string('1', 32 * 1024)), ] } From 96d9277454e501dba64a36bca5c0de6e8d5e88d9 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Mon, 13 Sep 2021 10:23:09 +0200 Subject: [PATCH 059/178] Remove code that was made obsolete by #555 --- src/hpack/encoder.rs | 6 ------ src/hpack/mod.rs | 2 +- src/hpack/test/fuzz.rs | 24 +----------------------- src/proto/streams/recv.rs | 6 ------ 4 files changed, 2 insertions(+), 36 deletions(-) diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index bdff0a066..76b373830 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -10,12 +10,6 @@ pub struct Encoder { size_update: Option, } -#[derive(Debug)] -pub struct EncodeState { - index: Index, - value: Option, -} - #[derive(Debug, Copy, Clone, Eq, PartialEq)] enum SizeUpdate { One(usize), diff --git a/src/hpack/mod.rs b/src/hpack/mod.rs index b808e5238..12c75d553 100644 --- a/src/hpack/mod.rs +++ b/src/hpack/mod.rs @@ -8,5 +8,5 @@ mod table; mod test; pub use self::decoder::{Decoder, DecoderError, NeedMore}; -pub use self::encoder::{EncodeState, Encoder}; +pub use self::encoder::Encoder; pub use self::header::{BytesStr, Header}; diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index 
0f84adc83..1d05a97c5 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -8,7 +8,6 @@ use rand::{Rng, SeedableRng, StdRng}; use std::io::Cursor; -const MIN_CHUNK: usize = 16; const MAX_CHUNK: usize = 2 * 1024; #[test] @@ -36,17 +35,8 @@ fn hpack_fuzz_seeded() { #[derive(Debug, Clone)] struct FuzzHpack { - // The magic seed that makes the test case reproducible - seed: [usize; 4], - // The set of headers to encode / decode frames: Vec, - - // The list of chunk sizes to do it in - chunks: Vec, - - // Number of times reduced - reduced: usize, } #[derive(Debug, Clone)] @@ -128,19 +118,7 @@ impl FuzzHpack { frames.push(frame); } - // Now, generate the buffer sizes used to encode - let mut chunks = vec![]; - - for _ in 0..rng.gen_range(0, 100) { - chunks.push(rng.gen_range(MIN_CHUNK, MAX_CHUNK)); - } - - FuzzHpack { - seed: seed, - frames: frames, - chunks: chunks, - reduced: 0, - } + FuzzHpack { frames } } fn run(self) { diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 1f30450fe..08a2fc336 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -77,12 +77,6 @@ pub(crate) enum Open { Headers, } -#[derive(Debug, Clone, Copy)] -struct Indices { - head: store::Key, - tail: store::Key, -} - impl Recv { pub fn new(peer: peer::Dyn, config: &Config) -> Self { let next_stream_id = if peer.is_server() { 1 } else { 2 }; From cab307d2edaa37e1b1ff62585067139138214081 Mon Sep 17 00:00:00 2001 From: David Korczynski Date: Fri, 24 Sep 2021 11:57:36 +0100 Subject: [PATCH 060/178] fuzzing: fix build --- src/fuzz_bridge.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/fuzz_bridge.rs b/src/fuzz_bridge.rs index 5379a4de8..6132deeb4 100644 --- a/src/fuzz_bridge.rs +++ b/src/fuzz_bridge.rs @@ -22,7 +22,7 @@ pub mod fuzz_logic { fn encode(e: &mut hpack::Encoder, hdrs: Vec>>) -> BytesMut { let mut dst = BytesMut::with_capacity(1024); - e.encode(None, &mut hdrs.into_iter(), &mut (&mut dst).limit(1024)); + e.encode(&mut hdrs.into_iter(), &mut dst); dst } } From 465f0337f84e120141b9cc70429c3ee7299df095 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Tue, 28 Sep 2021 18:04:35 +0200 Subject: [PATCH 061/178] Refactor errors internals (#556) h2::Error now knows whether protocol errors happened because the user sent them, because it was received from the remote peer, or because the library itself emitted an error because it detected a protocol violation. It also keeps track of whether it came from a RST_STREAM or GO_AWAY frame, and in the case of the latter, it includes the additional debug data if any. 
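Condensed from the `src/proto/error.rs` hunk below (imports, derives and doc comments elided), the reworked internal error type and its initiator tag look like this:

    pub enum Error {
        Reset(StreamId, Reason, Initiator),  // a RST_STREAM was sent or received
        GoAway(Bytes, Reason, Initiator),    // a GOAWAY was sent or received, plus its debug data
        Io(io::ErrorKind, Option<String>),   // transport failure, stored as kind plus message
                                             // string (io::Error itself is not Clone)
    }

    pub enum Initiator {
        User,    // the local user of the library triggered the error
        Library, // h2 itself detected a protocol violation
        Remote,  // the peer sent the error
    }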
Fixes #530 --- src/client.rs | 11 +-- src/codec/error.rs | 49 +--------- src/codec/framed_read.rs | 61 +++++------- src/codec/mod.rs | 5 +- src/error.rs | 70 ++++++++++---- src/frame/go_away.rs | 3 +- src/frame/reset.rs | 2 +- src/lib.rs | 8 +- src/proto/connection.rs | 109 +++++++++++----------- src/proto/error.rs | 90 ++++++++++++------ src/proto/go_away.rs | 16 +++- src/proto/mod.rs | 2 +- src/proto/peer.rs | 13 ++- src/proto/settings.rs | 8 +- src/proto/streams/prioritize.rs | 5 +- src/proto/streams/recv.rs | 99 ++++++++------------ src/proto/streams/send.rs | 37 +++++--- src/proto/streams/state.rs | 103 ++++++++++----------- src/proto/streams/streams.rs | 123 +++++++++++++------------ src/server.rs | 20 ++-- tests/h2-support/src/mock.rs | 5 +- tests/h2-tests/tests/client_request.rs | 8 +- tests/h2-tests/tests/codec_read.rs | 2 +- tests/h2-tests/tests/flow_control.rs | 8 +- tests/h2-tests/tests/push_promise.rs | 26 ++++-- tests/h2-tests/tests/stream_states.rs | 15 ++- 26 files changed, 465 insertions(+), 433 deletions(-) diff --git a/src/client.rs b/src/client.rs index 5bbbaf499..9c211ab32 100644 --- a/src/client.rs +++ b/src/client.rs @@ -135,9 +135,9 @@ //! [`Builder`]: struct.Builder.html //! [`Error`]: ../struct.Error.html -use crate::codec::{Codec, RecvError, SendError, UserError}; +use crate::codec::{Codec, SendError, UserError}; use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId}; -use crate::proto; +use crate::proto::{self, Error}; use crate::{FlowControl, PingPong, RecvStream, SendStream}; use bytes::{Buf, Bytes}; @@ -1493,7 +1493,7 @@ impl proto::Peer for Peer { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result { + ) -> Result { let mut b = Response::builder(); b = b.version(Version::HTTP_2); @@ -1507,10 +1507,7 @@ impl proto::Peer for Peer { Err(_) => { // TODO: Should there be more specialized handling for different // kinds of errors - return Err(RecvError::Stream { - id: stream_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); } }; diff --git a/src/codec/error.rs b/src/codec/error.rs index f505eb0f5..0acb913e5 100644 --- a/src/codec/error.rs +++ b/src/codec/error.rs @@ -1,26 +1,12 @@ -use crate::frame::{Reason, StreamId}; +use crate::proto::Error; use std::{error, fmt, io}; -/// Errors that are received -#[derive(Debug)] -pub enum RecvError { - Connection(Reason), - Stream { id: StreamId, reason: Reason }, - Io(io::Error), -} - /// Errors caused by sending a message #[derive(Debug)] pub enum SendError { - /// User error + Connection(Error), User(UserError), - - /// Connection error prevents sending. - Connection(Reason), - - /// I/O error - Io(io::Error), } /// Errors caused by users of the library @@ -65,47 +51,22 @@ pub enum UserError { PeerDisabledServerPush, } -// ===== impl RecvError ===== - -impl From for RecvError { - fn from(src: io::Error) -> Self { - RecvError::Io(src) - } -} - -impl error::Error for RecvError {} - -impl fmt::Display for RecvError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::RecvError::*; - - match *self { - Connection(ref reason) => reason.fmt(fmt), - Stream { ref reason, .. 
} => reason.fmt(fmt), - Io(ref e) => e.fmt(fmt), - } - } -} - // ===== impl SendError ===== impl error::Error for SendError {} impl fmt::Display for SendError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::SendError::*; - match *self { - User(ref e) => e.fmt(fmt), - Connection(ref reason) => reason.fmt(fmt), - Io(ref e) => e.fmt(fmt), + Self::Connection(ref e) => e.fmt(fmt), + Self::User(ref e) => e.fmt(fmt), } } } impl From for SendError { fn from(src: io::Error) -> Self { - SendError::Io(src) + Self::Connection(src.into()) } } diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 9673c49a8..7c3bbb3ba 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -1,8 +1,8 @@ -use crate::codec::RecvError; use crate::frame::{self, Frame, Kind, Reason}; use crate::frame::{ DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE, }; +use crate::proto::Error; use crate::hpack; @@ -98,8 +98,7 @@ fn decode_frame( max_header_list_size: usize, partial_inout: &mut Option, mut bytes: BytesMut, -) -> Result, RecvError> { - use self::RecvError::*; +) -> Result, Error> { let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len()); let _e = span.enter(); @@ -110,7 +109,7 @@ fn decode_frame( if partial_inout.is_some() && head.kind() != Kind::Continuation { proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } let kind = head.kind(); @@ -131,14 +130,11 @@ fn decode_frame( // A stream cannot depend on itself. An endpoint MUST // treat this as a stream error (Section 5.4.2) of type // `PROTOCOL_ERROR`. - return Err(Stream { - id: $head.stream_id(), - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR)); }, Err(e) => { proto_err!(conn: "failed to load frame; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -151,14 +147,11 @@ fn decode_frame( Err(frame::Error::MalformedMessage) => { let id = $head.stream_id(); proto_err!(stream: "malformed header block; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); }, Err(e) => { proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -183,7 +176,7 @@ fn decode_frame( res.map_err(|e| { proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) + Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() } @@ -192,7 +185,7 @@ fn decode_frame( res.map_err(|e| { proto_err!(conn: "failed to load PING frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) + Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() } @@ -201,7 +194,7 @@ fn decode_frame( res.map_err(|e| { proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) + Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() } @@ -212,7 +205,7 @@ fn decode_frame( // TODO: Should this always be connection level? Probably not... res.map_err(|e| { proto_err!(conn: "failed to load DATA frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) + Error::library_go_away(Reason::PROTOCOL_ERROR) })? 
.into() } @@ -221,7 +214,7 @@ fn decode_frame( let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load RESET frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) + Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() } @@ -229,7 +222,7 @@ fn decode_frame( let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]); res.map_err(|e| { proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e); - Connection(Reason::PROTOCOL_ERROR) + Error::library_go_away(Reason::PROTOCOL_ERROR) })? .into() } @@ -238,7 +231,7 @@ fn decode_frame( if head.stream_id() == 0 { // Invalid stream identifier proto_err!(conn: "invalid stream ID 0"); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { @@ -249,14 +242,11 @@ fn decode_frame( // `PROTOCOL_ERROR`. let id = head.stream_id(); proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); } Err(e) => { proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } } @@ -267,14 +257,14 @@ fn decode_frame( Some(partial) => partial, None => { proto_err!(conn: "received unexpected CONTINUATION frame"); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } }; // The stream identifiers must match if partial.frame.stream_id() != head.stream_id() { proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } // Extend the buf @@ -297,7 +287,7 @@ fn decode_frame( // the attacker to go away. 
if partial.buf.len() + bytes.len() > max_header_list_size { proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); - return Err(Connection(Reason::COMPRESSION_ERROR)); + return Err(Error::library_go_away(Reason::COMPRESSION_ERROR).into()); } } partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); @@ -312,14 +302,11 @@ fn decode_frame( Err(frame::Error::MalformedMessage) => { let id = head.stream_id(); proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id); - return Err(Stream { - id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR)); } Err(e) => { proto_err!(conn: "failed HPACK decoding; err={:?}", e); - return Err(Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -343,7 +330,7 @@ impl Stream for FramedRead where T: AsyncRead + Unpin, { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let span = tracing::trace_span!("FramedRead::poll_next"); @@ -371,11 +358,11 @@ where } } -fn map_err(err: io::Error) -> RecvError { +fn map_err(err: io::Error) -> Error { if let io::ErrorKind::InvalidData = err.kind() { if let Some(custom) = err.get_ref() { if custom.is::() { - return RecvError::Connection(Reason::FRAME_SIZE_ERROR); + return Error::library_go_away(Reason::FRAME_SIZE_ERROR); } } } diff --git a/src/codec/mod.rs b/src/codec/mod.rs index 7d0ab73d8..359adf6e4 100644 --- a/src/codec/mod.rs +++ b/src/codec/mod.rs @@ -2,12 +2,13 @@ mod error; mod framed_read; mod framed_write; -pub use self::error::{RecvError, SendError, UserError}; +pub use self::error::{SendError, UserError}; use self::framed_read::FramedRead; use self::framed_write::FramedWrite; use crate::frame::{self, Data, Frame}; +use crate::proto::Error; use bytes::Buf; use futures_core::Stream; @@ -155,7 +156,7 @@ impl Stream for Codec where T: AsyncRead + Unpin, { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.inner).poll_next(cx) diff --git a/src/error.rs b/src/error.rs index 372bac2ee..0421f4030 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,6 +1,8 @@ use crate::codec::{SendError, UserError}; -use crate::proto; +use crate::frame::StreamId; +use crate::proto::{self, Initiator}; +use bytes::Bytes; use std::{error, fmt, io}; pub use crate::frame::Reason; @@ -22,11 +24,14 @@ pub struct Error { #[derive(Debug)] enum Kind { - /// An error caused by an action taken by the remote peer. - /// - /// This is either an error received by the peer or caused by an invalid - /// action taken by the peer (i.e. a protocol error). - Proto(Reason), + /// A RST_STREAM frame was received or sent. + Reset(StreamId, Reason, Initiator), + + /// A GO_AWAY frame was received or sent. + GoAway(Bytes, Reason, Initiator), + + /// The user created an error from a bare Reason. + Reason(Reason), /// An error resulting from an invalid action taken by the user of this /// library. @@ -45,7 +50,7 @@ impl Error { /// action taken by the peer (i.e. a protocol error). 
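As a caller-side illustration (not part of this patch), one common use of the accessor documented here is deciding whether a failed request may be retried; the helper below is hypothetical and only uses the public `h2::Error` and `h2::Reason` API:

    fn should_retry(err: &h2::Error) -> bool {
        // A REFUSED_STREAM reset means the peer never processed the request,
        // so it is safe to retry it on another stream or connection.
        err.reason() == Some(h2::Reason::REFUSED_STREAM)
    }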
pub fn reason(&self) -> Option { match self.kind { - Kind::Proto(reason) => Some(reason), + Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) => Some(reason), _ => None, } } @@ -87,8 +92,13 @@ impl From for Error { Error { kind: match src { - Proto(reason) => Kind::Proto(reason), - Io(e) => Kind::Io(e), + Reset(stream_id, reason, initiator) => Kind::Reset(stream_id, reason, initiator), + GoAway(debug_data, reason, initiator) => { + Kind::GoAway(debug_data, reason, initiator) + } + Io(kind, inner) => { + Kind::Io(inner.map_or_else(|| kind.into(), |inner| io::Error::new(kind, inner))) + } }, } } @@ -97,7 +107,7 @@ impl From for Error { impl From for Error { fn from(src: Reason) -> Error { Error { - kind: Kind::Proto(src), + kind: Kind::Reason(src), } } } @@ -106,8 +116,7 @@ impl From for Error { fn from(src: SendError) -> Error { match src { SendError::User(e) => e.into(), - SendError::Connection(reason) => reason.into(), - SendError::Io(e) => Error::from_io(e), + SendError::Connection(e) => e.into(), } } } @@ -122,13 +131,38 @@ impl From for Error { impl fmt::Display for Error { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - use self::Kind::*; - - match self.kind { - Proto(ref reason) => write!(fmt, "protocol error: {}", reason), - User(ref e) => write!(fmt, "user error: {}", e), - Io(ref e) => fmt::Display::fmt(e, fmt), + let debug_data = match self.kind { + Kind::Reset(_, reason, Initiator::User) => { + return write!(fmt, "stream error sent by user: {}", reason) + } + Kind::Reset(_, reason, Initiator::Library) => { + return write!(fmt, "stream error detected: {}", reason) + } + Kind::Reset(_, reason, Initiator::Remote) => { + return write!(fmt, "stream error received: {}", reason) + } + Kind::GoAway(ref debug_data, reason, Initiator::User) => { + write!(fmt, "connection error sent by user: {}", reason)?; + debug_data + } + Kind::GoAway(ref debug_data, reason, Initiator::Library) => { + write!(fmt, "connection error detected: {}", reason)?; + debug_data + } + Kind::GoAway(ref debug_data, reason, Initiator::Remote) => { + write!(fmt, "connection error received: {}", reason)?; + debug_data + } + Kind::Reason(reason) => return write!(fmt, "protocol error: {}", reason), + Kind::User(ref e) => return write!(fmt, "user error: {}", e), + Kind::Io(ref e) => return e.fmt(fmt), + }; + + if !debug_data.is_empty() { + write!(fmt, " ({:?})", debug_data)?; } + + Ok(()) } } diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs index 52dd91d4c..91d9c4c6b 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -29,8 +29,7 @@ impl GoAway { self.error_code } - #[cfg(feature = "unstable")] - pub fn debug_data(&self) -> &[u8] { + pub fn debug_data(&self) -> &Bytes { &self.debug_data } diff --git a/src/frame/reset.rs b/src/frame/reset.rs index b2613028d..39f6ac202 100644 --- a/src/frame/reset.rs +++ b/src/frame/reset.rs @@ -2,7 +2,7 @@ use crate::frame::{self, Error, Head, Kind, Reason, StreamId}; use bytes::BufMut; -#[derive(Debug, Eq, PartialEq)] +#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct Reset { stream_id: StreamId, error_code: Reason, diff --git a/src/lib.rs b/src/lib.rs index 951cd96b3..381a62a46 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -104,8 +104,14 @@ macro_rules! 
ready { mod codec; mod error; mod hpack; + +#[cfg(not(feature = "unstable"))] mod proto; +#[cfg(feature = "unstable")] +#[allow(missing_docs)] +pub mod proto; + #[cfg(not(feature = "unstable"))] mod frame; @@ -125,7 +131,7 @@ pub use crate::error::{Error, Reason}; pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId}; #[cfg(feature = "unstable")] -pub use codec::{Codec, RecvError, SendError, UserError}; +pub use codec::{Codec, SendError, UserError}; use std::task::Poll; diff --git a/src/proto/connection.rs b/src/proto/connection.rs index b44fdcd5c..a75df4369 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -1,6 +1,6 @@ -use crate::codec::{RecvError, UserError}; +use crate::codec::UserError; use crate::frame::{Reason, StreamId}; -use crate::{client, frame, proto, server}; +use crate::{client, frame, server}; use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; use crate::proto::*; @@ -40,7 +40,7 @@ where /// /// This exists separately from State in order to support /// graceful shutdown. - error: Option, + error: Option, /// Pending GOAWAY frames to write. go_away: GoAway, @@ -68,7 +68,7 @@ struct DynConnection<'a, B: Buf = Bytes> { streams: DynStreams<'a, B>, - error: &'a mut Option, + error: &'a mut Option, ping_pong: &'a mut PingPong, } @@ -88,10 +88,10 @@ enum State { Open, /// The codec must be flushed - Closing(Reason), + Closing(Reason, Initiator), /// In a closed state - Closed(Reason), + Closed(Reason, Initiator), } impl Connection @@ -161,9 +161,9 @@ where /// Returns `Ready` when the connection is ready to receive a frame. /// - /// Returns `RecvError` as this may raise errors that are caused by delayed + /// Returns `Error` as this may raise errors that are caused by delayed /// processing of received frames. - fn poll_ready(&mut self, cx: &mut Context) -> Poll> { + fn poll_ready(&mut self, cx: &mut Context) -> Poll> { let _e = self.inner.span.enter(); let span = tracing::trace_span!("poll_ready"); let _e = span.enter(); @@ -191,26 +191,24 @@ where self.inner.as_dyn().go_away_from_user(e) } - fn take_error(&mut self, ours: Reason) -> Poll> { - let reason = if let Some(theirs) = self.inner.error.take() { - match (ours, theirs) { - // If either side reported an error, return that - // to the user. - (Reason::NO_ERROR, err) | (err, Reason::NO_ERROR) => err, - // If both sides reported an error, give their - // error back to th user. We assume our error - // was a consequence of their error, and less - // important. - (_, theirs) => theirs, - } - } else { - ours - }; - - if reason == Reason::NO_ERROR { - Poll::Ready(Ok(())) - } else { - Poll::Ready(Err(proto::Error::Proto(reason))) + fn take_error(&mut self, ours: Reason, initiator: Initiator) -> Result<(), Error> { + let (debug_data, theirs) = self + .inner + .error + .take() + .as_ref() + .map_or((Bytes::new(), Reason::NO_ERROR), |frame| { + (frame.debug_data().clone(), frame.reason()) + }); + + match (ours, theirs) { + (Reason::NO_ERROR, Reason::NO_ERROR) => return Ok(()), + (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)), + // If both sides reported an error, give their + // error back to th user. We assume our error + // was a consequence of their error, and less + // important. + (_, theirs) => Err(Error::remote_go_away(debug_data, theirs)), } } @@ -229,7 +227,7 @@ where } /// Advances the internal state of the connection. 
- pub fn poll(&mut self, cx: &mut Context) -> Poll> { + pub fn poll(&mut self, cx: &mut Context) -> Poll> { // XXX(eliza): cloning the span is unfortunately necessary here in // order to placate the borrow checker โ€” `self` is mutably borrowed by // `poll2`, which means that we can't borrow `self.span` to enter it. @@ -268,20 +266,22 @@ where self.inner.as_dyn().handle_poll2_result(result)? } - State::Closing(reason) => { + State::Closing(reason, initiator) => { tracing::trace!("connection closing after flush"); // Flush/shutdown the codec ready!(self.codec.shutdown(cx))?; // Transition the state to error - self.inner.state = State::Closed(reason); + self.inner.state = State::Closed(reason, initiator); + } + State::Closed(reason, initiator) => { + return Poll::Ready(self.take_error(reason, initiator)); } - State::Closed(reason) => return self.take_error(reason), } } } - fn poll2(&mut self, cx: &mut Context) -> Poll> { + fn poll2(&mut self, cx: &mut Context) -> Poll> { // This happens outside of the loop to prevent needing to do a clock // check and then comparison of the queue possibly multiple times a // second (and thus, the clock wouldn't have changed enough to matter). @@ -300,7 +300,7 @@ where // the same error back to the user. return Poll::Ready(Ok(())); } else { - return Poll::Ready(Err(RecvError::Connection(reason))); + return Poll::Ready(Err(Error::library_go_away(reason))); } } // Only NO_ERROR should be waiting for idle @@ -384,42 +384,45 @@ where self.go_away.go_away_from_user(frame); // Notify all streams of reason we're abruptly closing. - self.streams.recv_err(&proto::Error::Proto(e)); + self.streams.handle_error(Error::user_go_away(e)); } - fn handle_poll2_result(&mut self, result: Result<(), RecvError>) -> Result<(), Error> { - use crate::codec::RecvError::*; + fn handle_poll2_result(&mut self, result: Result<(), Error>) -> Result<(), Error> { match result { // The connection has shutdown normally Ok(()) => { - *self.state = State::Closing(Reason::NO_ERROR); + *self.state = State::Closing(Reason::NO_ERROR, Initiator::Library); Ok(()) } // Attempting to read a frame resulted in a connection level // error. This is handled by setting a GOAWAY frame followed by // terminating the connection. - Err(Connection(e)) => { + Err(Error::GoAway(debug_data, reason, initiator)) => { + let e = Error::GoAway(debug_data, reason, initiator); tracing::debug!(error = ?e, "Connection::poll; connection error"); // We may have already sent a GOAWAY for this error, // if so, don't send another, just flush and close up. - if let Some(reason) = self.go_away.going_away_reason() { - if reason == e { - tracing::trace!(" -> already going away"); - *self.state = State::Closing(e); - return Ok(()); - } + if self + .go_away + .going_away() + .map_or(false, |frame| frame.reason() == reason) + { + tracing::trace!(" -> already going away"); + *self.state = State::Closing(reason, initiator); + return Ok(()); } // Reset all active streams - self.streams.recv_err(&e.into()); - self.go_away_now(e); + self.streams.handle_error(e); + self.go_away_now(reason); Ok(()) } // Attempting to read a frame resulted in a stream level error. // This is handled by resetting the frame then trying to read // another frame. - Err(Stream { id, reason }) => { + Err(Error::Reset(id, reason, initiator)) => { + debug_assert_eq!(initiator, Initiator::Library); tracing::trace!(?id, ?reason, "stream error"); self.streams.send_reset(id, reason); Ok(()) @@ -428,12 +431,12 @@ where // active streams must be reset. 
// // TODO: Are I/O errors recoverable? - Err(Io(e)) => { + Err(Error::Io(e, inner)) => { tracing::debug!(error = ?e, "Connection::poll; IO error"); - let e = e.into(); + let e = Error::Io(e, inner); // Reset all active streams - self.streams.recv_err(&e); + self.streams.handle_error(e.clone()); // Return the error Err(e) @@ -441,7 +444,7 @@ where } } - fn recv_frame(&mut self, frame: Option) -> Result { + fn recv_frame(&mut self, frame: Option) -> Result { use crate::frame::Frame::*; match frame { Some(Headers(frame)) => { @@ -471,7 +474,7 @@ where // until they are all EOS. Once they are, State should // transition to GoAway. self.streams.recv_go_away(&frame)?; - *self.error = Some(frame.reason()); + *self.error = Some(frame); } Some(Ping(frame)) => { tracing::trace!(?frame, "recv PING"); diff --git a/src/proto/error.rs b/src/proto/error.rs index c3ee20d03..197237263 100644 --- a/src/proto/error.rs +++ b/src/proto/error.rs @@ -1,53 +1,87 @@ -use crate::codec::{RecvError, SendError}; -use crate::frame::Reason; +use crate::codec::SendError; +use crate::frame::{Reason, StreamId}; +use bytes::Bytes; +use std::fmt; use std::io; /// Either an H2 reason or an I/O error -#[derive(Debug)] +#[derive(Clone, Debug)] pub enum Error { - Proto(Reason), - Io(io::Error), + Reset(StreamId, Reason, Initiator), + GoAway(Bytes, Reason, Initiator), + Io(io::ErrorKind, Option), +} + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum Initiator { + User, + Library, + Remote, } impl Error { - /// Clone the error for internal purposes. - /// - /// `io::Error` is not `Clone`, so we only copy the `ErrorKind`. - pub(super) fn shallow_clone(&self) -> Error { + pub(crate) fn is_local(&self) -> bool { match *self { - Error::Proto(reason) => Error::Proto(reason), - Error::Io(ref io) => Error::Io(io::Error::from(io.kind())), + Self::Reset(_, _, initiator) | Self::GoAway(_, _, initiator) => initiator.is_local(), + Self::Io(..) 
=> true, } } -} -impl From for Error { - fn from(src: Reason) -> Self { - Error::Proto(src) + pub(crate) fn user_go_away(reason: Reason) -> Self { + Self::GoAway(Bytes::new(), reason, Initiator::User) + } + + pub(crate) fn library_reset(stream_id: StreamId, reason: Reason) -> Self { + Self::Reset(stream_id, reason, Initiator::Library) + } + + pub(crate) fn library_go_away(reason: Reason) -> Self { + Self::GoAway(Bytes::new(), reason, Initiator::Library) + } + + pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self { + Self::Reset(stream_id, reason, Initiator::Remote) + } + + pub(crate) fn remote_go_away(debug_data: Bytes, reason: Reason) -> Self { + Self::GoAway(debug_data, reason, Initiator::Remote) } } -impl From for Error { - fn from(src: io::Error) -> Self { - Error::Io(src) +impl Initiator { + fn is_local(&self) -> bool { + match *self { + Self::User | Self::Library => true, + Self::Remote => false, + } } } -impl From for RecvError { - fn from(src: Error) -> RecvError { - match src { - Error::Proto(reason) => RecvError::Connection(reason), - Error::Io(e) => RecvError::Io(e), +impl fmt::Display for Error { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + match *self { + Self::Reset(_, reason, _) | Self::GoAway(_, reason, _) => reason.fmt(fmt), + Self::Io(_, Some(ref inner)) => inner.fmt(fmt), + Self::Io(kind, None) => io::Error::from(kind).fmt(fmt), } } } +impl From for Error { + fn from(src: io::ErrorKind) -> Self { + Error::Io(src.into(), None) + } +} + +impl From for Error { + fn from(src: io::Error) -> Self { + Error::Io(src.kind(), src.get_ref().map(|inner| inner.to_string())) + } +} + impl From for SendError { - fn from(src: Error) -> SendError { - match src { - Error::Proto(reason) => SendError::Connection(reason), - Error::Io(e) => SendError::Io(e), - } + fn from(src: Error) -> Self { + Self::Connection(src) } } diff --git a/src/proto/go_away.rs b/src/proto/go_away.rs index 91d37b642..759427878 100644 --- a/src/proto/go_away.rs +++ b/src/proto/go_away.rs @@ -31,7 +31,7 @@ pub(super) struct GoAway { /// well, and we wouldn't want to save that here to accidentally dump in logs, /// or waste struct space.) #[derive(Debug)] -struct GoingAway { +pub(crate) struct GoingAway { /// Stores the highest stream ID of a GOAWAY that has been sent. /// /// It's illegal to send a subsequent GOAWAY with a higher ID. @@ -98,9 +98,9 @@ impl GoAway { self.is_user_initiated } - /// Return the last Reason we've sent. - pub fn going_away_reason(&self) -> Option { - self.going_away.as_ref().map(|g| g.reason) + /// Returns the going away info, if any. + pub fn going_away(&self) -> Option<&GoingAway> { + self.going_away.as_ref() } /// Returns if the connection should close now, or wait until idle. 
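For orientation, the constructors added above map onto the variants as follows (illustrative fragment, with `id` and `data` standing in for a `StreamId` and a `Bytes` value already in scope):

    let local = Error::library_reset(id, Reason::PROTOCOL_ERROR);
    // == Error::Reset(id, Reason::PROTOCOL_ERROR, Initiator::Library)
    assert!(local.is_local());

    let remote = Error::remote_go_away(data, Reason::NO_ERROR);
    // == Error::GoAway(data, Reason::NO_ERROR, Initiator::Remote)
    assert!(!remote.is_local());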
@@ -141,7 +141,7 @@ impl GoAway { return Poll::Ready(Some(Ok(reason))); } else if self.should_close_now() { - return match self.going_away_reason() { + return match self.going_away().map(|going_away| going_away.reason) { Some(reason) => Poll::Ready(Some(Ok(reason))), None => Poll::Ready(None), }; @@ -150,3 +150,9 @@ impl GoAway { Poll::Ready(None) } } + +impl GoingAway { + pub(crate) fn reason(&self) -> Reason { + self.reason + } +} diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 84fd8542e..d505e77f3 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -7,7 +7,7 @@ mod settings; mod streams; pub(crate) use self::connection::{Config, Connection}; -pub(crate) use self::error::Error; +pub use self::error::{Error, Initiator}; pub(crate) use self::peer::{Dyn as DynPeer, Peer}; pub(crate) use self::ping_pong::UserPings; pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams}; diff --git a/src/proto/peer.rs b/src/proto/peer.rs index 3bcc77224..d62d9e24e 100644 --- a/src/proto/peer.rs +++ b/src/proto/peer.rs @@ -1,7 +1,6 @@ -use crate::codec::RecvError; use crate::error::Reason; use crate::frame::{Pseudo, StreamId}; -use crate::proto::Open; +use crate::proto::{Error, Open}; use http::{HeaderMap, Request, Response}; @@ -21,7 +20,7 @@ pub(crate) trait Peer { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result; + ) -> Result; fn is_local_init(id: StreamId) -> bool { assert!(!id.is_zero()); @@ -61,7 +60,7 @@ impl Dyn { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result { + ) -> Result { if self.is_server() { crate::server::Peer::convert_poll_message(pseudo, fields, stream_id) .map(PollMessage::Server) @@ -72,12 +71,12 @@ impl Dyn { } /// Returns true if the remote peer can initiate a stream with the given ID. - pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), RecvError> { + pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), Error> { if self.is_server() { // Ensure that the ID is a valid client initiated ID if mode.is_push_promise() || !id.is_client_initiated() { proto_err!(conn: "cannot open stream {:?} - not client initiated", id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } Ok(()) @@ -85,7 +84,7 @@ impl Dyn { // Ensure that the ID is a valid server initiated ID if !mode.is_push_promise() || !id.is_server_initiated() { proto_err!(conn: "cannot open stream {:?} - not server initiated", id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } Ok(()) diff --git a/src/proto/settings.rs b/src/proto/settings.rs index 453292324..44f4c2df4 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -1,4 +1,4 @@ -use crate::codec::{RecvError, UserError}; +use crate::codec::UserError; use crate::error::Reason; use crate::frame; use crate::proto::*; @@ -40,7 +40,7 @@ impl Settings { frame: frame::Settings, codec: &mut Codec, streams: &mut Streams, - ) -> Result<(), RecvError> + ) -> Result<(), Error> where T: AsyncWrite + Unpin, B: Buf, @@ -68,7 +68,7 @@ impl Settings { // We haven't sent any SETTINGS frames to be ACKed, so // this is very bizarre! Remote is either buggy or malicious. 
proto_err!(conn: "received unexpected settings ack"); - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } } else { @@ -97,7 +97,7 @@ impl Settings { cx: &mut Context, dst: &mut Codec, streams: &mut Streams, - ) -> Poll> + ) -> Poll> where T: AsyncWrite + Unpin, B: Buf, diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 77eb507db..9671d5898 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -791,7 +791,10 @@ impl Prioritize { }), None => { if let Some(reason) = stream.state.get_scheduled_reset() { - stream.state.set_reset(reason); + let stream_id = stream.id; + stream + .state + .set_reset(stream_id, reason, Initiator::Library); let frame = frame::Reset::new(stream.id, reason); Frame::Reset(frame) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 08a2fc336..be996b963 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -1,7 +1,7 @@ use super::*; -use crate::codec::{RecvError, UserError}; -use crate::frame::{PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; -use crate::{frame, proto}; +use crate::codec::UserError; +use crate::frame::{self, PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; +use crate::proto::{self, Error}; use std::task::Context; use http::{HeaderMap, Request, Response}; @@ -68,7 +68,7 @@ pub(super) enum Event { #[derive(Debug)] pub(super) enum RecvHeaderBlockError { Oversize(T), - State(RecvError), + State(Error), } #[derive(Debug)] @@ -124,7 +124,7 @@ impl Recv { id: StreamId, mode: Open, counts: &mut Counts, - ) -> Result, RecvError> { + ) -> Result, Error> { assert!(self.refused.is_none()); counts.peer().ensure_can_open(id, mode)?; @@ -132,7 +132,7 @@ impl Recv { let next_id = self.next_stream_id()?; if id < next_id { proto_err!(conn: "id ({:?}) < next_id ({:?})", id, next_id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } self.next_stream_id = id.next_id(); @@ -176,11 +176,7 @@ impl Recv { Ok(v) => v, Err(()) => { proto_err!(stream: "could not parse content-length; stream={:?}", stream.id); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - } - .into()); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } }; @@ -312,16 +308,13 @@ impl Recv { &mut self, frame: frame::Headers, stream: &mut store::Ptr, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { // Transition the state stream.state.recv_close()?; if stream.ensure_content_length_zero().is_err() { proto_err!(stream: "recv_trailers: content-length is not zero; stream={:?};", stream.id); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } let trailers = frame.into_fields(); @@ -455,7 +448,7 @@ impl Recv { &mut self, settings: &frame::Settings, store: &mut Store, - ) -> Result<(), RecvError> { + ) -> Result<(), proto::Error> { let target = if let Some(val) = settings.initial_window_size() { val } else { @@ -502,7 +495,7 @@ impl Recv { stream .recv_flow .inc_window(inc) - .map_err(RecvError::Connection)?; + .map_err(proto::Error::library_go_away)?; stream.recv_flow.assign_capacity(inc); Ok(()) }) @@ -520,11 +513,7 @@ impl Recv { stream.pending_recv.is_empty() } - pub fn recv_data( - &mut self, - frame: frame::Data, - stream: &mut store::Ptr, - ) -> Result<(), RecvError> { + 
pub fn recv_data(&mut self, frame: frame::Data, stream: &mut store::Ptr) -> Result<(), Error> { let sz = frame.payload().len(); // This should have been enforced at the codec::FramedRead layer, so @@ -542,7 +531,7 @@ impl Recv { // Receiving a DATA frame when not expecting one is a protocol // error. proto_err!(conn: "unexpected DATA frame; stream={:?}", stream.id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } tracing::trace!( @@ -557,7 +546,7 @@ impl Recv { "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); - return self.ignore_data(sz); + return Ok(self.ignore_data(sz)?); } // Ensure that there is enough capacity on the connection before acting @@ -573,10 +562,7 @@ impl Recv { // So, for violating the **stream** window, we can send either a // stream or connection error. We've opted to send a stream // error. - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::FLOW_CONTROL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::FLOW_CONTROL_ERROR)); } if stream.dec_content_length(frame.payload().len()).is_err() { @@ -585,10 +571,7 @@ impl Recv { stream.id, frame.payload().len(), ); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } if frame.is_end_stream() { @@ -598,15 +581,12 @@ impl Recv { stream.id, frame.payload().len(), ); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } if stream.state.recv_close().is_err() { proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } } @@ -625,7 +605,7 @@ impl Recv { Ok(()) } - pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), RecvError> { + pub fn ignore_data(&mut self, sz: WindowSize) -> Result<(), Error> { // Ensure that there is enough capacity on the connection... 
self.consume_connection_window(sz)?; @@ -641,14 +621,14 @@ impl Recv { Ok(()) } - pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), RecvError> { + pub fn consume_connection_window(&mut self, sz: WindowSize) -> Result<(), Error> { if self.flow.window_size() < sz { tracing::debug!( "connection error FLOW_CONTROL_ERROR -- window_size ({:?}) < sz ({:?});", self.flow.window_size(), sz, ); - return Err(RecvError::Connection(Reason::FLOW_CONTROL_ERROR)); + return Err(Error::library_go_away(Reason::FLOW_CONTROL_ERROR)); } // Update connection level flow control @@ -663,7 +643,7 @@ impl Recv { &mut self, frame: frame::PushPromise, stream: &mut store::Ptr, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { stream.state.reserve_remote()?; if frame.is_over_size() { // A frame is over size if the decoded header block was bigger than @@ -682,10 +662,10 @@ impl Recv { headers frame is over size; promised_id={:?};", frame.promised_id(), ); - return Err(RecvError::Stream { - id: frame.promised_id(), - reason: Reason::REFUSED_STREAM, - }); + return Err(Error::library_reset( + frame.promised_id(), + Reason::REFUSED_STREAM, + )); } let promised_id = frame.promised_id(); @@ -708,10 +688,7 @@ impl Recv { promised_id, ), } - return Err(RecvError::Stream { - id: promised_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(promised_id, Reason::PROTOCOL_ERROR)); } use super::peer::PollMessage::*; @@ -741,18 +718,16 @@ impl Recv { /// Handle remote sending an explicit RST_STREAM. pub fn recv_reset(&mut self, frame: frame::Reset, stream: &mut Stream) { // Notify the stream - stream - .state - .recv_reset(frame.reason(), stream.is_pending_send); + stream.state.recv_reset(frame, stream.is_pending_send); stream.notify_send(); stream.notify_recv(); } - /// Handle a received error - pub fn recv_err(&mut self, err: &proto::Error, stream: &mut Stream) { + /// Handle a connection-level error + pub fn handle_error(&mut self, err: &proto::Error, stream: &mut Stream) { // Receive an error - stream.state.recv_err(err); + stream.state.handle_error(err); // If a receiver is waiting, notify it stream.notify_send(); @@ -783,11 +758,11 @@ impl Recv { self.max_stream_id } - pub fn next_stream_id(&self) -> Result { + pub fn next_stream_id(&self) -> Result { if let Ok(id) = self.next_stream_id { Ok(id) } else { - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } @@ -802,10 +777,10 @@ impl Recv { } /// Returns true if the remote peer can reserve a stream with the given ID. 
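The connection-level guard in `consume_connection_window` above reduces to a bounds check plus a decrement; a minimal sketch with an illustrative struct (not h2's real flow-control type), assuming `Reason` is in scope:

    struct ConnRecvWindow {
        window_size: u32,
    }

    impl ConnRecvWindow {
        // Charge `sz` bytes of received DATA against the advertised window.
        fn consume(&mut self, sz: u32) -> Result<(), Reason> {
            if self.window_size < sz {
                // The peer sent more than we advertised: connection error.
                return Err(Reason::FLOW_CONTROL_ERROR);
            }
            self.window_size -= sz;
            Ok(())
        }
    }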
- pub fn ensure_can_reserve(&self) -> Result<(), RecvError> { + pub fn ensure_can_reserve(&self) -> Result<(), Error> { if !self.is_push_enabled { proto_err!(conn: "recv_push_promise: push is disabled"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } Ok(()) @@ -1092,8 +1067,8 @@ impl Open { // ===== impl RecvHeaderBlockError ===== -impl From for RecvHeaderBlockError { - fn from(err: RecvError) -> Self { +impl From for RecvHeaderBlockError { + fn from(err: Error) -> Self { RecvHeaderBlockError::State(err) } } diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index e8804127b..d4d64cd80 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -2,8 +2,9 @@ use super::{ store, Buffer, Codec, Config, Counts, Frame, Prioritize, Prioritized, Store, Stream, StreamId, StreamIdOverflow, WindowSize, }; -use crate::codec::{RecvError, UserError}; +use crate::codec::UserError; use crate::frame::{self, Reason}; +use crate::proto::{Error, Initiator}; use bytes::Buf; use http; @@ -161,6 +162,7 @@ impl Send { pub fn send_reset( &mut self, reason: Reason, + initiator: Initiator, buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, @@ -169,14 +171,16 @@ impl Send { let is_reset = stream.state.is_reset(); let is_closed = stream.state.is_closed(); let is_empty = stream.pending_send.is_empty(); + let stream_id = stream.id; tracing::trace!( - "send_reset(..., reason={:?}, stream={:?}, ..., \ + "send_reset(..., reason={:?}, initiator={:?}, stream={:?}, ..., \ is_reset={:?}; is_closed={:?}; pending_send.is_empty={:?}; \ state={:?} \ ", reason, - stream.id, + initiator, + stream_id, is_reset, is_closed, is_empty, @@ -187,13 +191,13 @@ impl Send { // Don't double reset tracing::trace!( " -> not sending RST_STREAM ({:?} is already reset)", - stream.id + stream_id ); return; } // Transition the state to reset no matter what. - stream.state.set_reset(reason); + stream.state.set_reset(stream_id, reason, initiator); // If closed AND the send queue is flushed, then the stream cannot be // reset explicitly, either. Implicit resets can still be queued. 
@@ -201,7 +205,7 @@ impl Send { tracing::trace!( " -> not sending explicit RST_STREAM ({:?} was closed \ and send queue was flushed)", - stream.id + stream_id ); return; } @@ -371,7 +375,14 @@ impl Send { if let Err(e) = self.prioritize.recv_stream_window_update(sz, stream) { tracing::debug!("recv_stream_window_update !!; err={:?}", e); - self.send_reset(Reason::FLOW_CONTROL_ERROR, buffer, stream, counts, task); + self.send_reset( + Reason::FLOW_CONTROL_ERROR, + Initiator::Library, + buffer, + stream, + counts, + task, + ); return Err(e); } @@ -379,7 +390,7 @@ impl Send { Ok(()) } - pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), RecvError> { + pub(super) fn recv_go_away(&mut self, last_stream_id: StreamId) -> Result<(), Error> { if last_stream_id > self.max_stream_id { // The remote endpoint sent a `GOAWAY` frame indicating a stream // that we never sent, or that we have already terminated on account @@ -392,14 +403,14 @@ impl Send { "recv_go_away: last_stream_id ({:?}) > max_stream_id ({:?})", last_stream_id, self.max_stream_id, ); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } self.max_stream_id = last_stream_id; Ok(()) } - pub fn recv_err( + pub fn handle_error( &mut self, buffer: &mut Buffer>, stream: &mut store::Ptr, @@ -417,7 +428,7 @@ impl Send { store: &mut Store, counts: &mut Counts, task: &mut Option, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { // Applies an update to the remote endpoint's initial window size. // // Per RFC 7540 ยง6.9.2: @@ -480,7 +491,7 @@ impl Send { // of a stream is reduced? Maybe it should if the capacity // is reduced to zero, allowing the producer to stop work. - Ok::<_, RecvError>(()) + Ok::<_, Error>(()) })?; self.prioritize @@ -490,7 +501,7 @@ impl Send { store.for_each(|mut stream| { self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) - .map_err(RecvError::Connection) + .map_err(Error::library_go_away) })?; } } diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 3e739daf9..9931d41b1 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -1,9 +1,8 @@ use std::io; -use crate::codec::UserError::*; -use crate::codec::{RecvError, UserError}; -use crate::frame::{self, Reason}; -use crate::proto::{self, PollReset}; +use crate::codec::UserError; +use crate::frame::{self, Reason, StreamId}; +use crate::proto::{self, Error, Initiator, PollReset}; use self::Inner::*; use self::Peer::*; @@ -53,7 +52,7 @@ pub struct State { inner: Inner, } -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] enum Inner { Idle, // TODO: these states shouldn't count against concurrency limits: @@ -71,12 +70,10 @@ enum Peer { Streaming, } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Clone)] enum Cause { EndStream, - Proto(Reason), - LocallyReset(Reason), - Io, + Error(Error), /// This indicates to the connection that a reset frame must be sent out /// once the send queue has been flushed. @@ -85,7 +82,7 @@ enum Cause { /// - User drops all references to a stream, so we want to CANCEL the it. /// - Header block size was too large, so we want to REFUSE, possibly /// after sending a 431 response frame. 
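Taken together with the rename just below, the cause stored for a closed stream after this patch is, in condensed form:

    enum Cause {
        EndStream,                     // the stream ended cleanly
        Error(Error),                  // carries the full error, including who initiated it
        ScheduledLibraryReset(Reason), // a RST_STREAM to emit once the send queue has drained
    }

The prioritize.rs hunk earlier in this patch is what turns `ScheduledLibraryReset` into an actual RST_STREAM frame, via `state.set_reset(stream_id, reason, Initiator::Library)`.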
- Scheduled(Reason), + ScheduledLibraryReset(Reason), } impl State { @@ -123,7 +120,7 @@ impl State { } _ => { // All other transitions result in a protocol error - return Err(UnexpectedFrameType); + return Err(UserError::UnexpectedFrameType); } }; @@ -133,7 +130,7 @@ impl State { /// Opens the receive-half of the stream when a HEADERS frame is received. /// /// Returns true if this transitions the state to Open. - pub fn recv_open(&mut self, frame: &frame::Headers) -> Result { + pub fn recv_open(&mut self, frame: &frame::Headers) -> Result { let mut initial = false; let eos = frame.is_end_stream(); @@ -195,10 +192,10 @@ impl State { HalfClosedLocal(Streaming) } } - state => { + ref state => { // All other transitions result in a protocol error proto_err!(conn: "recv_open: in unexpected state {:?}", state); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -206,15 +203,15 @@ impl State { } /// Transition from Idle -> ReservedRemote - pub fn reserve_remote(&mut self) -> Result<(), RecvError> { + pub fn reserve_remote(&mut self) -> Result<(), Error> { match self.inner { Idle => { self.inner = ReservedRemote; Ok(()) } - state => { + ref state => { proto_err!(conn: "reserve_remote: in unexpected state {:?}", state); - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } } @@ -231,7 +228,7 @@ impl State { } /// Indicates that the remote side will not send more data to the local. - pub fn recv_close(&mut self) -> Result<(), RecvError> { + pub fn recv_close(&mut self) -> Result<(), Error> { match self.inner { Open { local, .. } => { // The remote side will continue to receive data. @@ -244,9 +241,9 @@ impl State { self.inner = Closed(Cause::EndStream); Ok(()) } - state => { + ref state => { proto_err!(conn: "recv_close: in unexpected state {:?}", state); - Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) + Err(Error::library_go_away(Reason::PROTOCOL_ERROR)) } } } @@ -254,9 +251,9 @@ impl State { /// The remote explicitly sent a RST_STREAM. /// /// # Arguments - /// - `reason`: the reason field of the received RST_STREAM frame. + /// - `frame`: the received RST_STREAM frame. /// - `queued`: true if this stream has frames in the pending send queue. - pub fn recv_reset(&mut self, reason: Reason, queued: bool) { + pub fn recv_reset(&mut self, frame: frame::Reset, queued: bool) { match self.inner { // If the stream is already in a `Closed` state, do nothing, // provided that there are no frames still in the send queue. @@ -275,30 +272,28 @@ impl State { // In either of these cases, we want to overwrite the stream's // previous state with the received RST_STREAM, so that the queue // will be cleared by `Prioritize::pop_frame`. - state => { + ref state => { tracing::trace!( - "recv_reset; reason={:?}; state={:?}; queued={:?}", - reason, + "recv_reset; frame={:?}; state={:?}; queued={:?}", + frame, state, queued ); - self.inner = Closed(Cause::Proto(reason)); + self.inner = Closed(Cause::Error(Error::remote_reset( + frame.stream_id(), + frame.reason(), + ))); } } } - /// We noticed a protocol error. - pub fn recv_err(&mut self, err: &proto::Error) { - use crate::proto::Error::*; - + /// Handle a connection-level error. + pub fn handle_error(&mut self, err: &proto::Error) { match self.inner { Closed(..) => {} _ => { - tracing::trace!("recv_err; err={:?}", err); - self.inner = Closed(match *err { - Proto(reason) => Cause::LocallyReset(reason), - Io(..) 
=> Cause::Io, - }); + tracing::trace!("handle_error; err={:?}", err); + self.inner = Closed(Cause::Error(err.clone())); } } } @@ -306,9 +301,9 @@ impl State { pub fn recv_eof(&mut self) { match self.inner { Closed(..) => {} - s => { - tracing::trace!("recv_eof; state={:?}", s); - self.inner = Closed(Cause::Io); + ref state => { + tracing::trace!("recv_eof; state={:?}", state); + self.inner = Closed(Cause::Error(io::ErrorKind::BrokenPipe.into())); } } } @@ -325,39 +320,39 @@ impl State { tracing::trace!("send_close: HalfClosedRemote => Closed"); self.inner = Closed(Cause::EndStream); } - state => panic!("send_close: unexpected state {:?}", state), + ref state => panic!("send_close: unexpected state {:?}", state), } } /// Set the stream state to reset locally. - pub fn set_reset(&mut self, reason: Reason) { - self.inner = Closed(Cause::LocallyReset(reason)); + pub fn set_reset(&mut self, stream_id: StreamId, reason: Reason, initiator: Initiator) { + self.inner = Closed(Cause::Error(Error::Reset(stream_id, reason, initiator))); } /// Set the stream state to a scheduled reset. pub fn set_scheduled_reset(&mut self, reason: Reason) { debug_assert!(!self.is_closed()); - self.inner = Closed(Cause::Scheduled(reason)); + self.inner = Closed(Cause::ScheduledLibraryReset(reason)); } pub fn get_scheduled_reset(&self) -> Option { match self.inner { - Closed(Cause::Scheduled(reason)) => Some(reason), + Closed(Cause::ScheduledLibraryReset(reason)) => Some(reason), _ => None, } } pub fn is_scheduled_reset(&self) -> bool { match self.inner { - Closed(Cause::Scheduled(..)) => true, + Closed(Cause::ScheduledLibraryReset(..)) => true, _ => false, } } pub fn is_local_reset(&self) -> bool { match self.inner { - Closed(Cause::LocallyReset(_)) => true, - Closed(Cause::Scheduled(..)) => true, + Closed(Cause::Error(ref e)) => e.is_local(), + Closed(Cause::ScheduledLibraryReset(..)) => true, _ => false, } } @@ -436,10 +431,10 @@ impl State { pub fn ensure_recv_open(&self) -> Result { // TODO: Is this correct? match self.inner { - Closed(Cause::Proto(reason)) - | Closed(Cause::LocallyReset(reason)) - | Closed(Cause::Scheduled(reason)) => Err(proto::Error::Proto(reason)), - Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into())), + Closed(Cause::Error(ref e)) => Err(e.clone()), + Closed(Cause::ScheduledLibraryReset(reason)) => { + Err(proto::Error::library_go_away(reason)) + } Closed(Cause::EndStream) | HalfClosedRemote(..) | ReservedLocal => Ok(false), _ => Ok(true), } @@ -448,10 +443,10 @@ impl State { /// Returns a reason if the stream has been reset. pub(super) fn ensure_reason(&self, mode: PollReset) -> Result, crate::Error> { match self.inner { - Closed(Cause::Proto(reason)) - | Closed(Cause::LocallyReset(reason)) - | Closed(Cause::Scheduled(reason)) => Ok(Some(reason)), - Closed(Cause::Io) => Err(proto::Error::Io(io::ErrorKind::BrokenPipe.into()).into()), + Closed(Cause::Error(Error::Reset(_, reason, _))) + | Closed(Cause::Error(Error::GoAway(_, reason, _))) + | Closed(Cause::ScheduledLibraryReset(reason)) => Ok(Some(reason)), + Closed(Cause::Error(ref e)) => Err(e.clone().into()), Open { local: Streaming, .. 
} diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index e3e02c2fa..1281b11bd 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1,9 +1,9 @@ use super::recv::RecvHeaderBlockError; use super::store::{self, Entry, Resolve, Store}; use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId}; -use crate::codec::{Codec, RecvError, SendError, UserError}; +use crate::codec::{Codec, SendError, UserError}; use crate::frame::{self, Frame, Reason}; -use crate::proto::{peer, Open, Peer, WindowSize}; +use crate::proto::{peer, Error, Initiator, Open, Peer, WindowSize}; use crate::{client, proto, server}; use bytes::{Buf, Bytes}; @@ -180,7 +180,7 @@ where me.poll_complete(&self.send_buffer, cx, dst) } - pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { + pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; @@ -198,7 +198,7 @@ where ) } - pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), RecvError> { + pub fn apply_local_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; @@ -297,30 +297,30 @@ where } impl DynStreams<'_, B> { - pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), RecvError> { + pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); me.recv_headers(self.peer, &self.send_buffer, frame) } - pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), RecvError> { + pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); me.recv_data(self.peer, &self.send_buffer, frame) } - pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), RecvError> { + pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); me.recv_reset(&self.send_buffer, frame) } - /// Handle a received error and return the ID of the last processed stream. - pub fn recv_err(&mut self, err: &proto::Error) -> StreamId { + /// Notify all streams that a connection-level error happened. 
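In miniature, the broadcast that `handle_error` performs below looks like the following sketch; the types here are illustrative only, while the real code walks the `Store` and transitions each stream through `Counts::transition`:

    #[derive(Clone, Debug)]
    struct ConnError(String);

    struct StreamSlot {
        closed_with: Option<ConnError>,
    }

    fn handle_error(streams: &mut [StreamSlot], conn_error: &mut Option<ConnError>, err: ConnError) {
        for stream in streams.iter_mut() {
            // every stream observes the same connection-level error
            stream.closed_with = Some(err.clone());
        }
        // remember the error so later operations on the connection surface it
        *conn_error = Some(err);
    }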
+ pub fn handle_error(&mut self, err: proto::Error) -> StreamId { let mut me = self.inner.lock().unwrap(); - me.recv_err(&self.send_buffer, err) + me.handle_error(&self.send_buffer, err) } - pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), RecvError> { + pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); me.recv_go_away(&self.send_buffer, frame) } @@ -329,12 +329,12 @@ impl DynStreams<'_, B> { self.inner.lock().unwrap().actions.recv.last_processed_id() } - pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), RecvError> { + pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); me.recv_window_update(&self.send_buffer, frame) } - pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), RecvError> { + pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); me.recv_push_promise(&self.send_buffer, frame) } @@ -375,7 +375,7 @@ impl Inner { peer: peer::Dyn, send_buffer: &SendBuffer, frame: frame::Headers, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { let id = frame.stream_id(); // The GOAWAY process has begun. All streams with a greater ID than @@ -405,10 +405,7 @@ impl Inner { "recv_headers for old stream={:?}, sending STREAM_CLOSED", id, ); - return Err(RecvError::Stream { - id, - reason: Reason::STREAM_CLOSED, - }); + return Err(Error::library_reset(id, Reason::STREAM_CLOSED)); } } @@ -471,10 +468,7 @@ impl Inner { Ok(()) } else { - Err(RecvError::Stream { - id: stream.id, - reason: Reason::REFUSED_STREAM, - }) + Err(Error::library_reset(stream.id, Reason::REFUSED_STREAM)) } }, Err(RecvHeaderBlockError::State(err)) => Err(err), @@ -484,10 +478,7 @@ impl Inner { // Receiving trailers that don't set EOS is a "malformed" // message. Malformed messages are a stream error. proto_err!(stream: "recv_headers: trailers frame was not EOS; stream={:?}", stream.id); - return Err(RecvError::Stream { - id: stream.id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)); } actions.recv.recv_trailers(frame, stream) @@ -502,7 +493,7 @@ impl Inner { peer: peer::Dyn, send_buffer: &SendBuffer, frame: frame::Data, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { let id = frame.stream_id(); let stream = match self.store.find_mut(&id) { @@ -529,14 +520,11 @@ impl Inner { let sz = sz as WindowSize; self.actions.recv.ignore_data(sz)?; - return Err(RecvError::Stream { - id, - reason: Reason::STREAM_CLOSED, - }); + return Err(Error::library_reset(id, Reason::STREAM_CLOSED)); } proto_err!(conn: "recv_data: stream not found; id={:?}", id); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -551,7 +539,7 @@ impl Inner { // Any stream error after receiving a DATA frame means // we won't give the data to the user, and so they can't // release the capacity. We do it automatically. - if let Err(RecvError::Stream { .. 
}) = res { + if let Err(Error::Reset(..)) = res { actions .recv .release_connection_capacity(sz as WindowSize, &mut None); @@ -564,12 +552,12 @@ impl Inner { &mut self, send_buffer: &SendBuffer, frame: frame::Reset, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { let id = frame.stream_id(); if id.is_zero() { proto_err!(conn: "recv_reset: invalid stream ID 0"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } // The GOAWAY process has begun. All streams with a greater ID than @@ -589,7 +577,7 @@ impl Inner { // TODO: Are there other error cases? self.actions .ensure_not_idle(self.counts.peer(), id) - .map_err(RecvError::Connection)?; + .map_err(Error::library_go_away)?; return Ok(()); } @@ -602,7 +590,7 @@ impl Inner { self.counts.transition(stream, |counts, stream| { actions.recv.recv_reset(frame, stream); - actions.send.recv_err(send_buffer, stream, counts); + actions.send.handle_error(send_buffer, stream, counts); assert!(stream.state.is_closed()); Ok(()) }) @@ -612,7 +600,7 @@ impl Inner { &mut self, send_buffer: &SendBuffer, frame: frame::WindowUpdate, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { let id = frame.stream_id(); let mut send_buffer = send_buffer.inner.lock().unwrap(); @@ -622,7 +610,7 @@ impl Inner { self.actions .send .recv_connection_window_update(frame, &mut self.store, &mut self.counts) - .map_err(RecvError::Connection)?; + .map_err(Error::library_go_away)?; } else { // The remote may send window updates for streams that the local now // considers closed. It's ok... @@ -640,14 +628,14 @@ impl Inner { } else { self.actions .ensure_not_idle(self.counts.peer(), id) - .map_err(RecvError::Connection)?; + .map_err(Error::library_go_away)?; } } Ok(()) } - fn recv_err(&mut self, send_buffer: &SendBuffer, err: &proto::Error) -> StreamId { + fn handle_error(&mut self, send_buffer: &SendBuffer, err: proto::Error) -> StreamId { let actions = &mut self.actions; let counts = &mut self.counts; let mut send_buffer = send_buffer.inner.lock().unwrap(); @@ -658,14 +646,14 @@ impl Inner { self.store .for_each(|stream| { counts.transition(stream, |counts, stream| { - actions.recv.recv_err(err, &mut *stream); - actions.send.recv_err(send_buffer, stream, counts); + actions.recv.handle_error(&err, &mut *stream); + actions.send.handle_error(send_buffer, stream, counts); Ok::<_, ()>(()) }) }) .unwrap(); - actions.conn_error = Some(err.shallow_clone()); + actions.conn_error = Some(err); last_processed_id } @@ -674,7 +662,7 @@ impl Inner { &mut self, send_buffer: &SendBuffer, frame: &frame::GoAway, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { let actions = &mut self.actions; let counts = &mut self.counts; let mut send_buffer = send_buffer.inner.lock().unwrap(); @@ -684,14 +672,14 @@ impl Inner { actions.send.recv_go_away(last_stream_id)?; - let err = frame.reason().into(); + let err = Error::remote_go_away(frame.debug_data().clone(), frame.reason()); self.store .for_each(|stream| { if stream.id > last_stream_id { counts.transition(stream, |counts, stream| { - actions.recv.recv_err(&err, &mut *stream); - actions.send.recv_err(send_buffer, stream, counts); + actions.recv.handle_error(&err, &mut *stream); + actions.send.handle_error(send_buffer, stream, counts); Ok::<_, ()>(()) }) } else { @@ -709,7 +697,7 @@ impl Inner { &mut self, send_buffer: &SendBuffer, frame: frame::PushPromise, - ) -> Result<(), RecvError> { + ) -> Result<(), Error> { let id = frame.stream_id(); let promised_id = 
frame.promised_id(); @@ -733,7 +721,7 @@ impl Inner { } None => { proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); } }; @@ -826,7 +814,7 @@ impl Inner { // This handles resetting send state associated with the // stream - actions.send.recv_err(send_buffer, stream, counts); + actions.send.handle_error(send_buffer, stream, counts); Ok::<_, ()>(()) }) }) @@ -886,8 +874,13 @@ impl Inner { let stream = self.store.resolve(key); let mut send_buffer = send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - self.actions - .send_reset(stream, reason, &mut self.counts, send_buffer); + self.actions.send_reset( + stream, + reason, + Initiator::Library, + &mut self.counts, + send_buffer, + ); } } @@ -1060,7 +1053,7 @@ impl StreamRef { let send_buffer = &mut *send_buffer; me.actions - .send_reset(stream, reason, &mut me.counts, send_buffer); + .send_reset(stream, reason, Initiator::User, &mut me.counts, send_buffer); } pub fn send_response( @@ -1468,12 +1461,19 @@ impl Actions { &mut self, stream: store::Ptr, reason: Reason, + initiator: Initiator, counts: &mut Counts, send_buffer: &mut Buffer>, ) { counts.transition(stream, |counts, stream| { - self.send - .send_reset(reason, send_buffer, stream, counts, &mut self.task); + self.send.send_reset( + reason, + initiator, + send_buffer, + stream, + counts, + &mut self.task, + ); self.recv.enqueue_reset_expiration(stream, counts); // if a RecvStream is parked, ensure it's notified stream.notify_recv(); @@ -1485,12 +1485,13 @@ impl Actions { buffer: &mut Buffer>, stream: &mut store::Ptr, counts: &mut Counts, - res: Result<(), RecvError>, - ) -> Result<(), RecvError> { - if let Err(RecvError::Stream { reason, .. }) = res { + res: Result<(), Error>, + ) -> Result<(), Error> { + if let Err(Error::Reset(stream_id, reason, initiator)) = res { + debug_assert_eq!(stream_id, stream.id); // Reset the stream. self.send - .send_reset(reason, buffer, stream, counts, &mut self.task); + .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); Ok(()) } else { res @@ -1507,7 +1508,7 @@ impl Actions { fn ensure_no_conn_error(&self) -> Result<(), proto::Error> { if let Some(ref err) = self.conn_error { - Err(err.shallow_clone()) + Err(err.clone()) } else { Ok(()) } diff --git a/src/server.rs b/src/server.rs index f71315363..6662712db 100644 --- a/src/server.rs +++ b/src/server.rs @@ -115,9 +115,9 @@ //! [`SendStream`]: ../struct.SendStream.html //! [`TcpListener`]: https://docs.rs/tokio-core/0.1/tokio_core/net/struct.TcpListener.html -use crate::codec::{Codec, RecvError, UserError}; +use crate::codec::{Codec, UserError}; use crate::frame::{self, Pseudo, PushPromiseHeaderError, Reason, Settings, StreamId}; -use crate::proto::{self, Config, Prioritized}; +use crate::proto::{self, Config, Error, Prioritized}; use crate::{FlowControl, PingPong, RecvStream, SendStream}; use bytes::{Buf, Bytes}; @@ -1202,7 +1202,7 @@ where if &PREFACE[self.pos..self.pos + n] != buf.filled() { proto_err!(conn: "read_preface: invalid preface"); // TODO: Should this just write the GO_AWAY frame directly? 
- return Poll::Ready(Err(Reason::PROTOCOL_ERROR.into())); + return Poll::Ready(Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into())); } self.pos += n; @@ -1388,7 +1388,7 @@ impl proto::Peer for Peer { pseudo: Pseudo, fields: HeaderMap, stream_id: StreamId, - ) -> Result { + ) -> Result { use http::{uri, Version}; let mut b = Request::builder(); @@ -1396,10 +1396,7 @@ impl proto::Peer for Peer { macro_rules! malformed { ($($arg:tt)*) => {{ tracing::debug!($($arg)*); - return Err(RecvError::Stream { - id: stream_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); }} } @@ -1416,7 +1413,7 @@ impl proto::Peer for Peer { // Specifying :status for a request is a protocol error if pseudo.status.is_some() { tracing::trace!("malformed headers: :status field on request; PROTOCOL_ERROR"); - return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } // Convert the URI @@ -1483,10 +1480,7 @@ impl proto::Peer for Peer { // TODO: Should there be more specialized handling for different // kinds of errors proto_err!(stream: "error building request: {}; stream={:?}", e, stream_id); - return Err(RecvError::Stream { - id: stream_id, - reason: Reason::PROTOCOL_ERROR, - }); + return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR)); } }; diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index 4f81de239..b5df9ad9b 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -1,7 +1,8 @@ use crate::SendFrame; use h2::frame::{self, Frame}; -use h2::{self, RecvError, SendError}; +use h2::proto::Error; +use h2::{self, SendError}; use futures::future::poll_fn; use futures::{ready, Stream, StreamExt}; @@ -284,7 +285,7 @@ impl Handle { } impl Stream for Handle { - type Item = Result; + type Item = Result; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { Pin::new(&mut self.codec).poll_next(cx) diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index b574df5aa..23ddc1f36 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -410,7 +410,11 @@ async fn send_reset_notifies_recv_stream() { }; let rx = async { let mut body = res.into_body(); - body.next().await.unwrap().expect_err("RecvBody"); + let err = body.next().await.unwrap().expect_err("RecvBody"); + assert_eq!( + err.to_string(), + "stream error sent by user: refused stream before processing any application logic" + ); }; // a FuturesUnordered is used on purpose! 
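The test assertions updated in this hunk (and in the ones that follow) pin down the refactored error `Display` format introduced by this series: messages now state whether the failure was a stream- or connection-level error and who initiated it (the peer, the library, or the user), instead of the old generic "protocol error: …" prefix. As a rough sketch of what this means for callers — not code from the patch series — the snippet below classifies an `h2::Error` using only API visible in these patches (`Display` and `Error::reason()`); any other handling shown in comments is an assumption.

```rust
// Illustrative only (not part of the patch series). After the error-format
// refactor, the `Display` text of an `h2::Error` is prefixed with its scope
// and initiator, e.g. (strings taken from the updated tests):
//   "connection error detected: unspecific protocol error detected"
//   "stream error detected: flow-control protocol violated"
//   "stream error sent by user: refused stream before processing any application logic"
//   "connection error received: not a result of an error"
// `Error::reason()` still exposes the underlying HTTP/2 error code, if any.
fn log_h2_error(err: &h2::Error) {
    match err.reason() {
        // RST_STREAM / GOAWAY style failures carry an HTTP/2 reason code.
        Some(reason) => eprintln!("h2 protocol error ({:?}): {}", reason, err),
        // I/O and user errors have no reason code; the Display text still
        // says who initiated the failure and at what scope.
        None => eprintln!("h2 error: {}", err),
    }
}
```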
@@ -672,7 +676,7 @@ async fn sending_request_on_closed_connection() { }; let poll_err = poll_fn(|cx| client.poll_ready(cx)).await.unwrap_err(); - let msg = "protocol error: unspecific protocol error detected"; + let msg = "connection error detected: unspecific protocol error detected"; assert_eq!(poll_err.to_string(), msg); let request = Request::builder() diff --git a/tests/h2-tests/tests/codec_read.rs b/tests/h2-tests/tests/codec_read.rs index fe3cfea97..d955e186b 100644 --- a/tests/h2-tests/tests/codec_read.rs +++ b/tests/h2-tests/tests/codec_read.rs @@ -236,7 +236,7 @@ async fn read_goaway_with_debug_data() { let data = poll_frame!(GoAway, codec); assert_eq!(data.reason(), Reason::ENHANCE_YOUR_CALM); assert_eq!(data.last_stream_id(), 1); - assert_eq!(data.debug_data(), b"too_many_pings"); + assert_eq!(&**data.debug_data(), b"too_many_pings"); assert_closed!(codec); } diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 1b86cadb2..be04a61b7 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -217,7 +217,7 @@ async fn recv_data_overflows_connection_window() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: flow-control protocol violated" + "connection error detected: flow-control protocol violated" ); }; @@ -227,7 +227,7 @@ async fn recv_data_overflows_connection_window() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: flow-control protocol violated" + "connection error detected: flow-control protocol violated" ); }; join(conn, req).await; @@ -278,7 +278,7 @@ async fn recv_data_overflows_stream_window() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: flow-control protocol violated" + "stream error detected: flow-control protocol violated" ); }; @@ -358,7 +358,7 @@ async fn stream_error_release_connection_capacity() { .expect_err("body"); assert_eq!( err.to_string(), - "protocol error: unspecific protocol error detected" + "stream error detected: unspecific protocol error detected" ); cap.release_capacity(to_release).expect("release_capacity"); }; diff --git a/tests/h2-tests/tests/push_promise.rs b/tests/h2-tests/tests/push_promise.rs index a5a7dfe97..f52f781d5 100644 --- a/tests/h2-tests/tests/push_promise.rs +++ b/tests/h2-tests/tests/push_promise.rs @@ -164,7 +164,7 @@ async fn recv_push_when_push_disabled_is_conn_error() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: unspecific protocol error detected" + "connection error detected: unspecific protocol error detected" ); }; @@ -174,7 +174,7 @@ async fn recv_push_when_push_disabled_is_conn_error() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: unspecific protocol error detected" + "connection error detected: unspecific protocol error detected" ); }; @@ -380,8 +380,16 @@ async fn recv_push_promise_skipped_stream_id() { .unwrap(); let req = async move { - let res = client.send_request(request, true).unwrap().0.await; - assert!(res.is_err()); + let err = client + .send_request(request, true) + .unwrap() + .0 + .await + .unwrap_err(); + assert_eq!( + err.to_string(), + "connection error detected: unspecific protocol error detected" + ); }; // client should see a protocol error @@ -390,7 +398,7 @@ async fn recv_push_promise_skipped_stream_id() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: unspecific protocol error detected" + "connection error detected: unspecific protocol error 
detected" ); }; @@ -435,7 +443,11 @@ async fn recv_push_promise_dup_stream_id() { let req = async move { let res = client.send_request(request, true).unwrap().0.await; - assert!(res.is_err()); + let err = res.unwrap_err(); + assert_eq!( + err.to_string(), + "connection error detected: unspecific protocol error detected" + ); }; // client should see a protocol error @@ -444,7 +456,7 @@ async fn recv_push_promise_dup_stream_id() { let err = res.unwrap_err(); assert_eq!( err.to_string(), - "protocol error: unspecific protocol error detected" + "connection error detected: unspecific protocol error detected" ); }; diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index cd2644d06..91ef4939b 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -207,13 +207,19 @@ async fn errors_if_recv_frame_exceeds_max_frame_size() { let body = resp.into_parts().1; let res = util::concat(body).await; let err = res.unwrap_err(); - assert_eq!(err.to_string(), "protocol error: frame with invalid size"); + assert_eq!( + err.to_string(), + "connection error detected: frame with invalid size" + ); }; // client should see a conn error let conn = async move { let err = h2.await.unwrap_err(); - assert_eq!(err.to_string(), "protocol error: frame with invalid size"); + assert_eq!( + err.to_string(), + "connection error detected: frame with invalid size" + ); }; join(conn, req).await; }; @@ -321,7 +327,10 @@ async fn recv_goaway_finishes_processed_streams() { // this request will trigger a goaway let req2 = async move { let err = client.get("https://example.com/").await.unwrap_err(); - assert_eq!(err.to_string(), "protocol error: not a result of an error"); + assert_eq!( + err.to_string(), + "connection error received: not a result of an error" + ); }; join3(async move { h2.await.expect("client") }, req1, req2).await; From 508bcb1d27e1e90f58c3254cac5d5b4fa22c17bc Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Tue, 28 Sep 2021 18:05:06 +0200 Subject: [PATCH 062/178] Store buffered data size as usize (fixes #269) (#542) --- src/proto/streams/flow_control.rs | 30 ++++++-------------------- src/proto/streams/prioritize.rs | 36 ++++++++++++++++--------------- src/proto/streams/send.rs | 4 ++-- src/proto/streams/stream.rs | 2 +- 4 files changed, 29 insertions(+), 43 deletions(-) diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index bd0aadc09..4a47f08dd 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -173,7 +173,7 @@ impl FlowControl { ); // Ensure that the argument is correct - assert!(sz <= self.window_size); + assert!(self.window_size >= sz as usize); // Update values self.window_size -= sz; @@ -206,38 +206,22 @@ impl Window { } } -impl PartialEq for Window { - fn eq(&self, other: &WindowSize) -> bool { +impl PartialEq for Window { + fn eq(&self, other: &usize) -> bool { if self.0 < 0 { false } else { - (self.0 as WindowSize).eq(other) + (self.0 as usize).eq(other) } } } -impl PartialEq for WindowSize { - fn eq(&self, other: &Window) -> bool { - other.eq(self) - } -} - -impl PartialOrd for Window { - fn partial_cmp(&self, other: &WindowSize) -> Option<::std::cmp::Ordering> { +impl PartialOrd for Window { + fn partial_cmp(&self, other: &usize) -> Option<::std::cmp::Ordering> { if self.0 < 0 { Some(::std::cmp::Ordering::Less) } else { - (self.0 as WindowSize).partial_cmp(other) - } - } -} - -impl PartialOrd for WindowSize { - 
fn partial_cmp(&self, other: &Window) -> Option<::std::cmp::Ordering> { - if other.0 < 0 { - Some(::std::cmp::Ordering::Greater) - } else { - self.partial_cmp(&(other.0 as WindowSize)) + (self.0 as usize).partial_cmp(other) } } } diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 9671d5898..eaaee162b 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -158,7 +158,7 @@ impl Prioritize { } // Update the buffered data counter - stream.buffered_send_data += sz; + stream.buffered_send_data += sz as usize; let span = tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity); @@ -167,9 +167,10 @@ impl Prioritize { // Implicitly request more send capacity if not enough has been // requested yet. - if stream.requested_send_capacity < stream.buffered_send_data { + if (stream.requested_send_capacity as usize) < stream.buffered_send_data { // Update the target requested capacity - stream.requested_send_capacity = stream.buffered_send_data; + stream.requested_send_capacity = + cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize; self.try_assign_capacity(stream); } @@ -217,28 +218,28 @@ impl Prioritize { "reserve_capacity", ?stream.id, requested = capacity, - effective = capacity + stream.buffered_send_data, + effective = (capacity as usize) + stream.buffered_send_data, curr = stream.requested_send_capacity ); let _e = span.enter(); // Actual capacity is `capacity` + the current amount of buffered data. // If it were less, then we could never send out the buffered data. - let capacity = capacity + stream.buffered_send_data; + let capacity = (capacity as usize) + stream.buffered_send_data; - if capacity == stream.requested_send_capacity { + if capacity == stream.requested_send_capacity as usize { // Nothing to do - } else if capacity < stream.requested_send_capacity { + } else if capacity < stream.requested_send_capacity as usize { // Update the target requested capacity - stream.requested_send_capacity = capacity; + stream.requested_send_capacity = capacity as WindowSize; // Currently available capacity assigned to the stream let available = stream.send_flow.available().as_size(); // If the stream has more assigned capacity than requested, reclaim // some for the connection - if available > capacity { - let diff = available - capacity; + if available as usize > capacity { + let diff = available - capacity as WindowSize; stream.send_flow.claim_capacity(diff); @@ -252,7 +253,8 @@ impl Prioritize { } // Update the target requested capacity - stream.requested_send_capacity = capacity; + stream.requested_send_capacity = + cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; // Try to assign additional capacity to the stream. 
If none is // currently available, the stream will be queued to receive some @@ -316,8 +318,8 @@ impl Prioritize { /// it to the connection pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { // only reclaim requested capacity that isn't already buffered - if stream.requested_send_capacity > stream.buffered_send_data { - let reserved = stream.requested_send_capacity - stream.buffered_send_data; + if stream.requested_send_capacity as usize > stream.buffered_send_data { + let reserved = stream.requested_send_capacity - stream.buffered_send_data as WindowSize; stream.send_flow.claim_capacity(reserved); self.assign_connection_capacity(reserved, stream, counts); @@ -377,7 +379,7 @@ impl Prioritize { // Total requested should never go below actual assigned // (Note: the window size can go lower than assigned) - debug_assert!(total_requested >= stream.send_flow.available()); + debug_assert!(stream.send_flow.available() <= total_requested as usize); // The amount of additional capacity that the stream requests. // Don't assign more than the window has available! @@ -435,7 +437,7 @@ impl Prioritize { has_unavailable = %stream.send_flow.has_unavailable() ); - if stream.send_flow.available() < stream.requested_send_capacity + if stream.send_flow.available() < stream.requested_send_capacity as usize && stream.send_flow.has_unavailable() { // The stream requires additional capacity and the stream's @@ -735,8 +737,8 @@ impl Prioritize { stream.send_flow.send_data(len); // Decrement the stream's buffered data counter - debug_assert!(stream.buffered_send_data >= len); - stream.buffered_send_data -= len; + debug_assert!(stream.buffered_send_data >= len as usize); + stream.buffered_send_data -= len as usize; stream.requested_send_capacity -= len; // Assign the capacity back to the connection that diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index d4d64cd80..ec2014000 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -332,10 +332,10 @@ impl Send { let available = stream.send_flow.available().as_size(); let buffered = stream.buffered_send_data; - if available <= buffered { + if available as usize <= buffered { 0 } else { - available - buffered + available - buffered as WindowSize } } diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 5bbda250c..79de47a9a 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -45,7 +45,7 @@ pub(super) struct Stream { /// Amount of data buffered at the prioritization layer. /// TODO: Technically this could be greater than the window size... - pub buffered_send_data: WindowSize, + pub buffered_send_data: usize, /// Task tracking additional send capacity (i.e. window updates). send_task: Option, From 953112944a963dcfe0e5ae98bc0393a6c6a26ccf Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 29 Sep 2021 13:00:20 -0700 Subject: [PATCH 063/178] v0.3.5 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 574e1fbca..1ff389b56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.5 (September 29, 2021) + +* Fix sending of very large headers. Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly. +* Fix buffered data field to be a bigger integer size. 
+* Refactor error format to include what initiated the error (remote, local, or user), if it was a stream or connection-level error, and any received debug data. + # 0.3.4 (August 20, 2021) * Fix panic when encoding header size update over a certain size. diff --git a/Cargo.toml b/Cargo.toml index fed704593..f6e709759 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.4" +version = "0.3.5" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.3.4" +documentation = "https://docs.rs/h2/0.3.5" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 381a62a46..06064a2c8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.4")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.5")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 44cb57c9d95d71eb9bf8be0ccfd6596bf651c57c Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 30 Sep 2021 10:41:44 -0700 Subject: [PATCH 064/178] tests: add explicit SendResponse::send_reset test --- tests/h2-tests/tests/server.rs | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 556b53c71..770750d80 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1093,3 +1093,35 @@ async fn serve_when_request_in_response_extensions() { join(client, srv).await; } + +#[tokio::test] +async fn send_reset_explicitly() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + client + .send_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + client + .recv_frame(frames::reset(1).reason(Reason::ENHANCE_YOUR_CALM)) + .await; + }; + + let srv = async move { + let mut srv = server::handshake(io).await.expect("handshake"); + let (_req, mut stream) = srv.next().await.unwrap().unwrap(); + + stream.send_reset(Reason::ENHANCE_YOUR_CALM); + + assert!(srv.next().await.is_none()); + }; + + join(client, srv).await; +} From fc73fc987f6cc49595bf6e96dea2654406d74500 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 30 Sep 2021 10:42:21 -0700 Subject: [PATCH 065/178] fix: user created Error would not return provided Reason --- src/error.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index 0421f4030..5c6cfa2ed 100644 --- a/src/error.rs +++ b/src/error.rs @@ -50,7 +50,9 @@ impl Error { /// action taken by the peer (i.e. a protocol error). 
pub fn reason(&self) -> Option { match self.kind { - Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) => Some(reason), + Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) | Kind::Reason(reason) => { + Some(reason) + } _ => None, } } @@ -167,3 +169,15 @@ impl fmt::Display for Error { } impl error::Error for Error {} + +#[cfg(test)] +mod tests { + use super::Error; + use crate::Reason; + + #[test] + fn error_from_reason() { + let err = Error::from(Reason::HTTP_1_1_REQUIRED); + assert_eq!(err.reason(), Some(Reason::HTTP_1_1_REQUIRED)); + } +} From 8520f06f93b0f70298a750ec08672559f41cb652 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 30 Sep 2021 11:28:36 -0700 Subject: [PATCH 066/178] v0.3.6 --- CHANGELOG.md | 4 ++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1ff389b56..b9bbf5b4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.6 (September 30, 2021) + +* Fix regression of `h2::Error` that were created via `From` not returning their reason code in `Error::reason()`. + # 0.3.5 (September 29, 2021) * Fix sending of very large headers. Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly. diff --git a/Cargo.toml b/Cargo.toml index f6e709759..25a459caf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.5" +version = "0.3.6" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.3.5" +documentation = "https://docs.rs/h2/0.3.6" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 06064a2c8..ea1f9c804 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.5")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.6")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From c38c94cb16cc8076231f015b15d29c8b9287153b Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Tue, 19 Oct 2021 12:40:27 +0200 Subject: [PATCH 067/178] Make :status in requests be a stream error --- src/frame/headers.rs | 5 +++++ src/server.rs | 4 +--- tests/h2-support/src/frames.rs | 10 +++++++++- tests/h2-tests/tests/server.rs | 24 ++++++++++++++++++++++++ 4 files changed, 39 insertions(+), 4 deletions(-) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index cfc6a1a27..0851d7660 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -575,6 +575,11 @@ impl Pseudo { } } + #[cfg(feature = "unstable")] + pub fn set_status(&mut self, value: StatusCode) { + self.status = Some(value); + } + pub fn set_scheme(&mut self, scheme: uri::Scheme) { let bytes_str = match scheme.as_str() { "http" => BytesStr::from_static("http"), diff --git a/src/server.rs b/src/server.rs index 6662712db..e14cdb04f 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1410,10 +1410,8 @@ impl proto::Peer for Peer { malformed!("malformed headers: missing method"); } - // Specifying :status for a request is a protocol error if pseudo.status.is_some() { - tracing::trace!("malformed headers: :status field on request; PROTOCOL_ERROR"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + malformed!("malformed headers: :status field on request"); } // Convert the URI diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index 05fb3202f..824bc5c19 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -2,7 +2,7 @@ use std::convert::TryInto; use std::fmt; use bytes::Bytes; -use http::{self, HeaderMap}; +use http::{self, HeaderMap, StatusCode}; use h2::frame::{self, Frame, StreamId}; @@ -162,6 +162,14 @@ impl Mock { Mock(frame) } + pub fn status(self, value: StatusCode) -> Self { + let (id, mut pseudo, fields) = self.into_parts(); + + pseudo.set_status(value); + + Mock(frame::Headers::new(id, pseudo, fields)) + } + pub fn scheme(self, value: &str) -> Self { let (id, mut pseudo, fields) = self.into_parts(); let value = value.parse().unwrap(); diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 770750d80..ab47dfe96 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1025,6 +1025,30 @@ async fn server_error_on_unclean_shutdown() { srv.await.expect_err("should error"); } +#[tokio::test] +async fn server_error_on_status_in_request() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + client + .send_frame(frames::headers(1).status(StatusCode::OK)) + .await; + client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let mut srv = server::handshake(io).await.expect("handshake"); + + assert!(srv.next().await.is_none()); + }; + + join(client, srv).await; +} + #[tokio::test] async fn request_without_authority() { h2_support::trace_init!(); From f52d5e6290d1519c5b11fed14ea034f31bd7f654 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Tue, 19 Oct 2021 15:17:44 +0200 Subject: [PATCH 068/178] =?UTF-8?q?Replace=20HTTP/2.0=20by=20HTTP/2=20?= =?UTF-8?q?=F0=9F=98=85?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The protocol is named HTTP/2. --- src/client.rs | 96 +++++++++++++------------- src/error.rs | 2 +- src/frame/head.rs | 2 +- src/frame/reason.rs | 2 +- src/hpack/header.rs | 2 +- src/lib.rs | 18 ++--- src/proto/streams/streams.rs | 2 +- src/server.rs | 84 +++++++++++----------- src/share.rs | 10 +-- tests/h2-tests/tests/client_request.rs | 2 +- tests/h2-tests/tests/server.rs | 2 +- 11 files changed, 111 insertions(+), 111 deletions(-) diff --git a/src/client.rs b/src/client.rs index 9c211ab32..9cd0b8f46 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,18 +1,18 @@ -//! Client implementation of the HTTP/2.0 protocol. +//! Client implementation of the HTTP/2 protocol. //! //! # Getting started //! -//! Running an HTTP/2.0 client requires the caller to establish the underlying +//! Running an HTTP/2 client requires the caller to establish the underlying //! connection as well as get the connection to a state that is ready to begin -//! the HTTP/2.0 handshake. See [here](../index.html#handshake) for more +//! the HTTP/2 handshake. See [here](../index.html#handshake) for more //! details. //! //! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote //! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades. //! //! Once a connection is obtained, it is passed to [`handshake`], which will -//! begin the [HTTP/2.0 handshake]. This returns a future that completes once -//! the handshake process is performed and HTTP/2.0 streams may be initialized. +//! begin the [HTTP/2 handshake]. This returns a future that completes once +//! the handshake process is performed and HTTP/2 streams may be initialized. //! //! [`handshake`] uses default configuration values. There are a number of //! settings that can be changed by using [`Builder`] instead. @@ -26,16 +26,16 @@ //! # Making requests //! //! Requests are made using the [`SendRequest`] handle provided by the handshake -//! future. Once a request is submitted, an HTTP/2.0 stream is initialized and +//! future. Once a request is submitted, an HTTP/2 stream is initialized and //! the request is sent to the server. //! //! A request body and request trailers are sent using [`SendRequest`] and the //! server's response is returned once the [`ResponseFuture`] future completes. //! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by -//! [`SendRequest::send_request`] and are tied to the HTTP/2.0 stream +//! [`SendRequest::send_request`] and are tied to the HTTP/2 stream //! initialized by the sent request. //! -//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2.0 +//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2 //! stream can be created, i.e. as long as the current number of active streams //! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the //! caller will be notified once an existing stream closes, freeing capacity for @@ -131,7 +131,7 @@ //! [`SendRequest`]: struct.SendRequest.html //! [`ResponseFuture`]: struct.ResponseFuture.html //! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready -//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [`Builder`]: struct.Builder.html //! 
[`Error`]: ../struct.Error.html @@ -151,7 +151,7 @@ use std::usize; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tracing::Instrument; -/// Initializes new HTTP/2.0 streams on a connection by sending a request. +/// Initializes new HTTP/2 streams on a connection by sending a request. /// /// This type does no work itself. Instead, it is a handle to the inner /// connection state held by [`Connection`]. If the associated connection @@ -161,7 +161,7 @@ use tracing::Instrument; /// / threads than their associated [`Connection`] instance. Internally, there /// is a buffer used to stage requests before they get written to the /// connection. There is no guarantee that requests get written to the -/// connection in FIFO order as HTTP/2.0 prioritization logic can play a role. +/// connection in FIFO order as HTTP/2 prioritization logic can play a role. /// /// [`SendRequest`] implements [`Clone`], enabling the creation of many /// instances that are backed by a single connection. @@ -184,10 +184,10 @@ pub struct ReadySendRequest { inner: Option>, } -/// Manages all state associated with an HTTP/2.0 client connection. +/// Manages all state associated with an HTTP/2 client connection. /// /// A `Connection` is backed by an I/O resource (usually a TCP socket) and -/// implements the HTTP/2.0 client logic for that connection. It is responsible +/// implements the HTTP/2 client logic for that connection. It is responsible /// for driving the internal state forward, performing the work requested of the /// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`], /// [`RecvStream`]). @@ -220,7 +220,7 @@ pub struct ReadySendRequest { /// // Submit the connection handle to an executor. /// tokio::spawn(async { connection.await.expect("connection failed"); }); /// -/// // Now, use `send_request` to initialize HTTP/2.0 streams. +/// // Now, use `send_request` to initialize HTTP/2 streams. /// // ... /// # Ok(()) /// # } @@ -274,7 +274,7 @@ pub struct PushPromises { /// Methods can be chained in order to set the configuration values. /// /// The client is constructed by calling [`handshake`] and passing the I/O -/// handle that will back the HTTP/2.0 server. +/// handle that will back the HTTP/2 server. /// /// New instances of `Builder` are obtained via [`Builder::new`]. /// @@ -294,7 +294,7 @@ pub struct PushPromises { /// # async fn doc(my_io: T) /// -> Result<((SendRequest, Connection)), h2::Error> /// # { -/// // `client_fut` is a future representing the completion of the HTTP/2.0 +/// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -339,7 +339,7 @@ impl SendRequest where B: Buf + 'static, { - /// Returns `Ready` when the connection can initialize a new HTTP/2.0 + /// Returns `Ready` when the connection can initialize a new HTTP/2 /// stream. /// /// This function must return `Ready` before `send_request` is called. When @@ -387,16 +387,16 @@ where ReadySendRequest { inner: Some(self) } } - /// Sends a HTTP/2.0 request to the server. + /// Sends a HTTP/2 request to the server. /// - /// `send_request` initializes a new HTTP/2.0 stream on the associated + /// `send_request` initializes a new HTTP/2 stream on the associated /// connection, then sends the given request using this new stream. Only the /// request head is sent. /// /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance /// are returned. 
The [`ResponseFuture`] instance is used to get the /// server's response and the [`SendStream`] instance is used to send a - /// request body or trailers to the server over the same HTTP/2.0 stream. + /// request body or trailers to the server over the same HTTP/2 stream. /// /// To send a request body or trailers, set `end_of_stream` to `false`. /// Then, use the returned [`SendStream`] instance to stream request body @@ -601,7 +601,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -643,7 +643,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -678,7 +678,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_connection_window_size(1_000_000) @@ -693,7 +693,7 @@ impl Builder { self } - /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the + /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the /// configured client is able to accept. /// /// The sender may send data frames that are **smaller** than this value, @@ -712,7 +712,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_frame_size(1_000_000) @@ -752,7 +752,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_header_list_size(16 * 1024) @@ -787,7 +787,7 @@ impl Builder { /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// - /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// @@ -801,7 +801,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_concurrent_streams(1000) @@ -828,7 +828,7 @@ impl Builder { /// Sending streams past the limit returned by the peer will be treated /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. 
/// - /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// @@ -842,7 +842,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .initial_max_send_streams(1000) @@ -859,7 +859,7 @@ impl Builder { /// Sets the maximum number of concurrent locally reset streams. /// - /// When a stream is explicitly reset, the HTTP/2.0 specification requires + /// When a stream is explicitly reset, the HTTP/2 specification requires /// that any further frames received for that stream must be ignored for /// "some time". /// @@ -887,7 +887,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .max_concurrent_reset_streams(1000) @@ -904,7 +904,7 @@ impl Builder { /// Sets the duration to remember locally reset streams. /// - /// When a stream is explicitly reset, the HTTP/2.0 specification requires + /// When a stream is explicitly reset, the HTTP/2 specification requires /// that any further frames received for that stream must be ignored for /// "some time". /// @@ -933,7 +933,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .reset_stream_duration(Duration::from_secs(10)) @@ -955,7 +955,7 @@ impl Builder { /// false in the initial SETTINGS handshake guarantees that the remote server /// will never send a push promise. /// - /// This setting can be changed during the life of a single HTTP/2.0 + /// This setting can be changed during the life of a single HTTP/2 /// connection by sending another settings frame updating the value. /// /// Default value: `true`. @@ -971,7 +971,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .enable_push(false) @@ -997,22 +997,22 @@ impl Builder { self } - /// Creates a new configured HTTP/2.0 client backed by `io`. + /// Creates a new configured HTTP/2 client backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence - /// the [HTTP/2.0 handshake]. The handshake is completed once both the connection + /// the [HTTP/2 handshake]. The handshake is completed once both the connection /// preface and the initial settings frame is sent by the client. /// /// The handshake future does not wait for the initial settings frame from the /// server. /// /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] - /// tuple once the HTTP/2.0 handshake has been completed. 
+ /// tuple once the HTTP/2 handshake has been completed. /// /// This function also allows the caller to configure the send payload data /// type. See [Outbound data type] for more details. /// - /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader + /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [`Connection`]: struct.Connection.html /// [`SendRequest`]: struct.SendRequest.html /// [Outbound data type]: ../index.html#outbound-data-type. @@ -1029,7 +1029,7 @@ impl Builder { /// # async fn doc(my_io: T) /// -> Result<((SendRequest, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .handshake(my_io); @@ -1049,7 +1049,7 @@ impl Builder { /// # async fn doc(my_io: T) /// # -> Result<((SendRequest<&'static [u8]>, Connection)), h2::Error> /// # { - /// // `client_fut` is a future representing the completion of the HTTP/2.0 + /// // `client_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let client_fut = Builder::new() /// .handshake::<_, &'static [u8]>(my_io); @@ -1076,19 +1076,19 @@ impl Default for Builder { } } -/// Creates a new configured HTTP/2.0 client with default configuration +/// Creates a new configured HTTP/2 client with default configuration /// values backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence -/// the [HTTP/2.0 handshake]. See [Handshake] for more details. +/// the [HTTP/2 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] / [`SendRequest`] -/// tuple once the HTTP/2.0 handshake has been completed. The returned +/// tuple once the HTTP/2 handshake has been completed. The returned /// [`Connection`] instance will be using default configuration values. Use /// [`Builder`] to customize the configuration values used by a [`Connection`] /// instance. /// -/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [`SendRequest`]: struct.SendRequest.html @@ -1103,7 +1103,7 @@ impl Default for Builder { /// # async fn doc(my_io: T) -> Result<(), h2::Error> /// # { /// let (send_request, connection) = client::handshake(my_io).await?; -/// // The HTTP/2.0 handshake has completed, now start polling +/// // The HTTP/2 handshake has completed, now start polling /// // `connection` and use `send_request` to send requests to the /// // server. /// # Ok(()) @@ -1455,7 +1455,7 @@ impl Peer { return Err(UserError::MissingUriSchemeAndAuthority.into()); } else { // This is acceptable as per the above comment. However, - // HTTP/2.0 requires that a scheme is set. Since we are + // HTTP/2 requires that a scheme is set. Since we are // forwarding an HTTP 1.1 request, the scheme is set to // "http". pseudo.set_scheme(uri::Scheme::HTTP); diff --git a/src/error.rs b/src/error.rs index 5c6cfa2ed..fdbfc0d1b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -7,7 +7,7 @@ use std::{error, fmt, io}; pub use crate::frame::Reason; -/// Represents HTTP/2.0 operation errors. +/// Represents HTTP/2 operation errors. 
/// /// `Error` covers error cases raised by protocol errors caused by the /// peer, I/O (transport) errors, and errors caused by the user of the library. diff --git a/src/frame/head.rs b/src/frame/head.rs index 2abc08e1d..38be2f697 100644 --- a/src/frame/head.rs +++ b/src/frame/head.rs @@ -36,7 +36,7 @@ impl Head { } } - /// Parse an HTTP/2.0 frame header + /// Parse an HTTP/2 frame header pub fn parse(header: &[u8]) -> Head { let (stream_id, _) = StreamId::parse(&header[5..]); diff --git a/src/frame/reason.rs b/src/frame/reason.rs index 031b6cd92..ff5e2012f 100644 --- a/src/frame/reason.rs +++ b/src/frame/reason.rs @@ -1,6 +1,6 @@ use std::fmt; -/// HTTP/2.0 error codes. +/// HTTP/2 error codes. /// /// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the /// reasons for the stream or connection error. For example, diff --git a/src/hpack/header.rs b/src/hpack/header.rs index e5b1a342d..8d6136e16 100644 --- a/src/hpack/header.rs +++ b/src/hpack/header.rs @@ -5,7 +5,7 @@ use http::header::{HeaderName, HeaderValue}; use http::{Method, StatusCode}; use std::fmt; -/// HTTP/2.0 Header +/// HTTP/2 Header #[derive(Debug, Clone, Eq, PartialEq)] pub enum Header { Field { name: T, value: HeaderValue }, diff --git a/src/lib.rs b/src/lib.rs index ea1f9c804..5a689cdcf 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,6 @@ -//! An asynchronous, HTTP/2.0 server and client implementation. +//! An asynchronous, HTTP/2 server and client implementation. //! -//! This library implements the [HTTP/2.0] specification. The implementation is +//! This library implements the [HTTP/2] specification. The implementation is //! asynchronous, using [futures] as the basis for the API. The implementation //! is also decoupled from TCP or TLS details. The user must handle ALPN and //! HTTP/1.1 upgrades themselves. @@ -24,19 +24,19 @@ //! # Handshake //! //! Both the client and the server require a connection to already be in a state -//! ready to start the HTTP/2.0 handshake. This library does not provide +//! ready to start the HTTP/2 handshake. This library does not provide //! facilities to do this. //! -//! There are three ways to reach an appropriate state to start the HTTP/2.0 +//! There are three ways to reach an appropriate state to start the HTTP/2 //! handshake. //! //! * Opening an HTTP/1.1 connection and performing an [upgrade]. //! * Opening a connection with TLS and use ALPN to negotiate the protocol. //! * Open a connection with prior knowledge, i.e. both the client and the //! server assume that the connection is immediately ready to start the -//! HTTP/2.0 handshake once opened. +//! HTTP/2 handshake once opened. //! -//! Once the connection is ready to start the HTTP/2.0 handshake, it can be +//! Once the connection is ready to start the HTTP/2 handshake, it can be //! passed to [`server::handshake`] or [`client::handshake`]. At this point, the //! library will start the handshake process, which consists of: //! @@ -48,10 +48,10 @@ //! //! # Flow control //! -//! [Flow control] is a fundamental feature of HTTP/2.0. The `h2` library +//! [Flow control] is a fundamental feature of HTTP/2. The `h2` library //! exposes flow control to the user. //! -//! An HTTP/2.0 client or server may not send unlimited data to the peer. When a +//! An HTTP/2 client or server may not send unlimited data to the peer. When a //! stream is initiated, both the client and the server are provided with an //! initial window size for that stream. A window size is the number of bytes //! 
the endpoint can send to the peer. At any point in time, the peer may @@ -66,7 +66,7 @@ //! Managing flow control for outbound data is done through [`SendStream`]. See //! the struct level documentation for those two types for more details. //! -//! [HTTP/2.0]: https://http2.github.io/ +//! [HTTP/2]: https://http2.github.io/ //! [futures]: https://docs.rs/futures/ //! [`client`]: client/index.html //! [`server`]: server/index.html diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 1281b11bd..ac762c8f5 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -21,7 +21,7 @@ where P: Peer, { /// Holds most of the connection and stream related state for processing - /// HTTP/2.0 frames associated with streams. + /// HTTP/2 frames associated with streams. inner: Arc>, /// This is the queue of frames to be written to the wire. This is split out diff --git a/src/server.rs b/src/server.rs index e14cdb04f..491446460 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,10 +1,10 @@ -//! Server implementation of the HTTP/2.0 protocol. +//! Server implementation of the HTTP/2 protocol. //! //! # Getting started //! -//! Running an HTTP/2.0 server requires the caller to manage accepting the +//! Running an HTTP/2 server requires the caller to manage accepting the //! connections as well as getting the connections to a state that is ready to -//! begin the HTTP/2.0 handshake. See [here](../index.html#handshake) for more +//! begin the HTTP/2 handshake. See [here](../index.html#handshake) for more //! details. //! //! This could be as basic as using Tokio's [`TcpListener`] to accept @@ -12,8 +12,8 @@ //! upgrades. //! //! Once a connection is obtained, it is passed to [`handshake`], -//! which will begin the [HTTP/2.0 handshake]. This returns a future that -//! completes once the handshake process is performed and HTTP/2.0 streams may +//! which will begin the [HTTP/2 handshake]. This returns a future that +//! completes once the handshake process is performed and HTTP/2 streams may //! be received. //! //! [`handshake`] uses default configuration values. There are a number of @@ -21,7 +21,7 @@ //! //! # Inbound streams //! -//! The [`Connection`] instance is used to accept inbound HTTP/2.0 streams. It +//! The [`Connection`] instance is used to accept inbound HTTP/2 streams. It //! does this by implementing [`futures::Stream`]. When a new stream is //! received, a call to [`Connection::accept`] will return `(request, response)`. //! The `request` handle (of type [`http::Request`]) contains the @@ -59,9 +59,9 @@ //! //! # Example //! -//! A basic HTTP/2.0 server example that runs over TCP and assumes [prior +//! A basic HTTP/2 server example that runs over TCP and assumes [prior //! knowledge], i.e. both the client and the server assume that the TCP socket -//! will use the HTTP/2.0 protocol without prior negotiation. +//! will use the HTTP/2 protocol without prior negotiation. //! //! ```no_run //! use h2::server; @@ -77,9 +77,9 @@ //! if let Ok((socket, _peer_addr)) = listener.accept().await { //! // Spawn a new task to process each connection. //! tokio::spawn(async { -//! // Start the HTTP/2.0 connection handshake +//! // Start the HTTP/2 connection handshake //! let mut h2 = server::handshake(socket).await.unwrap(); -//! // Accept all inbound HTTP/2.0 streams sent over the +//! // Accept all inbound HTTP/2 streams sent over the //! // connection. //! while let Some(request) = h2.accept().await { //! 
let (request, mut respond) = request.unwrap(); @@ -104,7 +104,7 @@ //! //! [prior knowledge]: http://httpwg.org/specs/rfc7540.html#known-http //! [`handshake`]: fn.handshake.html -//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [`Builder`]: struct.Builder.html //! [`Connection`]: struct.Connection.html //! [`Connection::poll`]: struct.Connection.html#method.poll @@ -130,7 +130,7 @@ use std::{convert, fmt, io, mem}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tracing::instrument::{Instrument, Instrumented}; -/// In progress HTTP/2.0 connection handshake future. +/// In progress HTTP/2 connection handshake future. /// /// This type implements `Future`, yielding a `Connection` instance once the /// handshake has completed. @@ -154,10 +154,10 @@ pub struct Handshake { span: tracing::Span, } -/// Accepts inbound HTTP/2.0 streams on a connection. +/// Accepts inbound HTTP/2 streams on a connection. /// /// A `Connection` is backed by an I/O resource (usually a TCP socket) and -/// implements the HTTP/2.0 server logic for that connection. It is responsible +/// implements the HTTP/2 server logic for that connection. It is responsible /// for receiving inbound streams initiated by the client as well as driving the /// internal state forward. /// @@ -202,7 +202,7 @@ pub struct Connection { /// Methods can be chained in order to set the configuration values. /// /// The server is constructed by calling [`handshake`] and passing the I/O -/// handle that will back the HTTP/2.0 server. +/// handle that will back the HTTP/2 server. /// /// New instances of `Builder` are obtained via [`Builder::new`]. /// @@ -221,7 +221,7 @@ pub struct Connection { /// # fn doc(my_io: T) /// # -> Handshake /// # { -/// // `server_fut` is a future representing the completion of the HTTP/2.0 +/// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -257,7 +257,7 @@ pub struct Builder { /// stream. /// /// If the `SendResponse` instance is dropped without sending a response, then -/// the HTTP/2.0 stream will be reset. +/// the HTTP/2 stream will be reset. /// /// See [module] level docs for more details. /// @@ -276,7 +276,7 @@ pub struct SendResponse { /// It can not be used to initiate push promises. /// /// If the `SendPushedResponse` instance is dropped without sending a response, then -/// the HTTP/2.0 stream will be reset. +/// the HTTP/2 stream will be reset. /// /// See [module] level docs for more details. /// @@ -318,18 +318,18 @@ pub(crate) struct Peer; const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; -/// Creates a new configured HTTP/2.0 server with default configuration +/// Creates a new configured HTTP/2 server with default configuration /// values backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence -/// the [HTTP/2.0 handshake]. See [Handshake] for more details. +/// the [HTTP/2 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] instance once the -/// HTTP/2.0 handshake has been completed. The returned [`Connection`] +/// HTTP/2 handshake has been completed. The returned [`Connection`] /// instance will be using default configuration values. Use [`Builder`] to /// customize the configuration values used by a [`Connection`] instance. 
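
The module documentation above hands a prepared connection to [`server::handshake`] or [`client::handshake`], and only the server side gets a worked example in this stretch of the series. Purely as an editorial sketch, not taken from any diff here, a prior-knowledge client built on the same public API that the test code later in this section exercises might look roughly like the following; the address, the spawning strategy, and the error handling are placeholder choices:

    use h2::client;
    use http::Request;
    use tokio::net::TcpStream;

    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Assumes an HTTP/2 server using prior knowledge is listening here.
        let tcp = TcpStream::connect("127.0.0.1:5928").await?;

        // The handshake yields a `SendRequest` handle plus the `Connection`
        // future; the latter must be polled (spawned here) to drive the socket.
        let (mut client, connection) = client::handshake(tcp).await?;
        tokio::spawn(async move {
            if let Err(err) = connection.await {
                eprintln!("connection error: {}", err);
            }
        });

        // Send a simple GET and wait for the response head.
        let request = Request::get("https://example.com/").body(())?;
        let (response, _stream) = client.send_request(request, true)?;
        let response = response.await?;
        println!("status: {}", response.status());
        Ok(())
    }
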
/// -/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader +/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// @@ -343,8 +343,8 @@ const PREFACE: [u8; 24] = *b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// # async fn doc(my_io: T) /// # { /// let connection = server::handshake(my_io).await.unwrap(); -/// // The HTTP/2.0 handshake has completed, now use `connection` to -/// // accept inbound HTTP/2.0 streams. +/// // The HTTP/2 handshake has completed, now use `connection` to +/// // accept inbound HTTP/2 streams. /// # } /// # /// # pub fn main() {} @@ -603,7 +603,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -642,7 +642,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_window_size(1_000_000) @@ -676,7 +676,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .initial_connection_window_size(1_000_000) @@ -691,7 +691,7 @@ impl Builder { self } - /// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the + /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the /// configured server is able to accept. /// /// The sender may send data frames that are **smaller** than this value, @@ -709,7 +709,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_frame_size(1_000_000) @@ -748,7 +748,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_header_list_size(16 * 1024) @@ -783,7 +783,7 @@ impl Builder { /// a protocol level error. Instead, the `h2` library will immediately reset /// the stream. /// - /// See [Section 5.1.2] in the HTTP/2.0 spec for more details. + /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// @@ -796,7 +796,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. 
/// let server_fut = Builder::new() /// .max_concurrent_streams(1000) @@ -815,7 +815,7 @@ impl Builder { /// /// When a stream is explicitly reset by either calling /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance - /// before completing the stream, the HTTP/2.0 specification requires that + /// before completing the stream, the HTTP/2 specification requires that /// any further frames received for that stream must be ignored for "some /// time". /// @@ -842,7 +842,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .max_concurrent_reset_streams(1000) @@ -861,7 +861,7 @@ impl Builder { /// /// When a stream is explicitly reset by either calling /// [`SendResponse::send_reset`] or by dropping a [`SendResponse`] instance - /// before completing the stream, the HTTP/2.0 specification requires that + /// before completing the stream, the HTTP/2 specification requires that /// any further frames received for that stream must be ignored for "some /// time". /// @@ -889,7 +889,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .reset_stream_duration(Duration::from_secs(10)) @@ -904,18 +904,18 @@ impl Builder { self } - /// Creates a new configured HTTP/2.0 server backed by `io`. + /// Creates a new configured HTTP/2 server backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence - /// the [HTTP/2.0 handshake]. See [Handshake] for more details. + /// the [HTTP/2 handshake]. See [Handshake] for more details. /// /// Returns a future which resolves to the [`Connection`] instance once the - /// HTTP/2.0 handshake has been completed. + /// HTTP/2 handshake has been completed. /// /// This function also allows the caller to configure the send payload data /// type. See [Outbound data type] for more details. /// - /// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader + /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [Handshake]: ../index.html#handshake /// [`Connection`]: struct.Connection.html /// [Outbound data type]: ../index.html#outbound-data-type. @@ -931,7 +931,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut = Builder::new() /// .handshake(my_io); @@ -951,7 +951,7 @@ impl Builder { /// # fn doc(my_io: T) /// # -> Handshake /// # { - /// // `server_fut` is a future representing the completion of the HTTP/2.0 + /// // `server_fut` is a future representing the completion of the HTTP/2 /// // handshake. /// let server_fut: Handshake<_, &'static [u8]> = Builder::new() /// .handshake(my_io); diff --git a/src/share.rs b/src/share.rs index 3249e5550..2a4ff1cdd 100644 --- a/src/share.rs +++ b/src/share.rs @@ -16,7 +16,7 @@ use std::task::{Context, Poll}; /// # Overview /// /// A `SendStream` is provided by [`SendRequest`] and [`SendResponse`] once the -/// HTTP/2.0 message header has been sent sent. 
It is used to stream the message +/// HTTP/2 message header has been sent sent. It is used to stream the message /// body and send the message trailers. See method level documentation for more /// details. /// @@ -35,7 +35,7 @@ use std::task::{Context, Poll}; /// /// # Flow control /// -/// In HTTP/2.0, data cannot be sent to the remote peer unless there is +/// In HTTP/2, data cannot be sent to the remote peer unless there is /// available window capacity on both the stream and the connection. When a data /// frame is sent, both the stream window and the connection window are /// decremented. When the stream level window reaches zero, no further data can @@ -44,7 +44,7 @@ use std::task::{Context, Poll}; /// /// When the remote peer is ready to receive more data, it sends `WINDOW_UPDATE` /// frames. These frames increment the windows. See the [specification] for more -/// details on the principles of HTTP/2.0 flow control. +/// details on the principles of HTTP/2 flow control. /// /// The implications for sending data are that the caller **should** ensure that /// both the stream and the connection has available window capacity before @@ -115,7 +115,7 @@ pub struct StreamId(u32); /// Receives the body stream and trailers from the remote peer. /// /// A `RecvStream` is provided by [`client::ResponseFuture`] and -/// [`server::Connection`] with the received HTTP/2.0 message head (the response +/// [`server::Connection`] with the received HTTP/2 message head (the response /// and request head respectively). /// /// A `RecvStream` instance is used to receive the streaming message body and @@ -168,7 +168,7 @@ pub struct RecvStream { /// /// # Scenarios /// -/// Following is a basic scenario with an HTTP/2.0 connection containing a +/// Following is a basic scenario with an HTTP/2 connection containing a /// single active stream. /// /// * A new stream is activated. 
The receive window is initialized to 1024 (the diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 23ddc1f36..2af0bdeec 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -521,7 +521,7 @@ async fn request_with_connection_headers() { ("keep-alive", "5"), ("proxy-connection", "bar"), ("transfer-encoding", "chunked"), - ("upgrade", "HTTP/2.0"), + ("upgrade", "HTTP/2"), ("te", "boom"), ]; diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index ab47dfe96..e60483d0d 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -536,7 +536,7 @@ async fn recv_connection_header() { client .send_frame(req(7, "transfer-encoding", "chunked")) .await; - client.send_frame(req(9, "upgrade", "HTTP/2.0")).await; + client.send_frame(req(9, "upgrade", "HTTP/2")).await; client.recv_frame(frames::reset(1).protocol_error()).await; client.recv_frame(frames::reset(3).protocol_error()).await; client.recv_frame(frames::reset(5).protocol_error()).await; From 405972739bb34d801252d6dee3c4b588b8f582c5 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 21 Oct 2021 12:12:58 -0700 Subject: [PATCH 069/178] Fix panic if remote causes local to reset a stream before opening --- src/proto/streams/send.rs | 10 +++++ src/proto/streams/streams.rs | 18 +++++++++ tests/h2-tests/tests/stream_states.rs | 57 +++++++++++++++++++++++++++ 3 files changed, 85 insertions(+) diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index ec2014000..3735d13dd 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -544,4 +544,14 @@ impl Send { true } } + + pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) { + if let Ok(next_id) = self.next_stream_id { + // Peer::is_local_init should have been called beforehand + debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated()); + if id >= next_id { + self.next_stream_id = id.next_id(); + } + } + } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index ac762c8f5..4962db8d2 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -865,6 +865,24 @@ impl Inner { let key = match self.store.find_entry(id) { Entry::Occupied(e) => e.key(), Entry::Vacant(e) => { + // Resetting a stream we don't know about? That could be OK... + // + // 1. As a server, we just received a request, but that request + // was bad, so we're resetting before even accepting it. + // This is totally fine. + // + // 2. The remote may have sent us a frame on new stream that + // it's *not* supposed to have done, and thus, we don't know + // the stream. In that case, sending a reset will "open" the + // stream in our store. Maybe that should be a connection + // error instead? At least for now, we need to update what + // our vision of the next stream is. + if self.counts.peer().is_local_init(id) { + // We normally would open this stream, so update our + // next-send-id record. 
+ self.actions.send.maybe_reset_next_stream_id(id); + } + let stream = Stream::new(id, 0, 0); e.insert(stream) diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 91ef4939b..f2b2efc1e 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -1022,3 +1022,60 @@ async fn srv_window_update_on_lower_stream_id() { }; join(srv, client).await; } + +// See https://github.com/hyperium/h2/issues/570 +#[tokio::test] +async fn reset_new_stream_before_send() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + // Send unexpected headers, that depends on itself, causing a framing error. + srv.send_bytes(&[ + 0, 0, 0x6, // len + 0x1, // type (headers) + 0x25, // flags (eos, eoh, pri) + 0, 0, 0, 0x3, // stream id + 0, 0, 0, 0x3, // dependency + 2, // weight + 0x88, // HPACK :status=200 + ]) + .await; + srv.recv_frame(frames::reset(3).protocol_error()).await; + srv.recv_frame( + frames::headers(5) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(5).response(200).eos()).await; + }; + + let client = async move { + let (mut client, mut conn) = client::handshake(io).await.expect("handshake"); + let resp = conn + .drive(client.get("https://example.com/")) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + + // req number 2 + let resp = conn + .drive(client.get("https://example.com/")) + .await + .unwrap(); + assert_eq!(resp.status(), StatusCode::OK); + conn.await.expect("client"); + }; + + join(srv, client).await; +} From ce81583cf16556f93da5263db6f73a3d46c7ab54 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 22 Oct 2021 10:02:17 -0700 Subject: [PATCH 070/178] v0.3.7 --- CHANGELOG.md | 5 +++++ Cargo.toml | 4 ++-- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9bbf5b4b..14fa00350 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.7 (October 22, 2021) + +* Fix panic if server sends a malformed frame on a stream client was about to open. +* Fix server to treat `:status` in a request as a stream error instead of connection error. + # 0.3.6 (September 30, 2021) * Fix regression of `h2::Error` that were created via `From` not returning their reason code in `Error::reason()`. diff --git a/Cargo.toml b/Cargo.toml index 25a459caf..7bbf1647e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,14 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.6" +version = "0.3.7" license = "MIT" authors = [ "Carl Lerche ", "Sean McArthur ", ] description = "An HTTP/2.0 client and server" -documentation = "https://docs.rs/h2/0.3.6" +documentation = "https://docs.rs/h2" repository = "https://github.com/hyperium/h2" readme = "README.md" keywords = ["http", "async", "non-blocking"] diff --git a/src/lib.rs b/src/lib.rs index 5a689cdcf..cb02acaff 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.6")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.7")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From dbaa3a42856d23c6bdebb6125c2205d965532178 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 22 Nov 2021 17:04:24 -0800 Subject: [PATCH 071/178] fix: properly reject prioritized HEADERS with stream ID of zero --- src/frame/headers.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 0851d7660..2fc9561cf 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -146,6 +146,10 @@ impl Headers { tracing::trace!("loading headers; flags={:?}", flags); + if head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } + // Read the padding length if flags.is_padded() { if src.is_empty() { From 87969c1f296173a7838956165014b4828dc5d5db Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 24 Nov 2021 10:05:10 +0100 Subject: [PATCH 072/178] Implement the extended CONNECT protocol from RFC 8441 (#565) --- src/client.rs | 20 ++- src/ext.rs | 55 ++++++++ src/frame/headers.rs | 20 ++- src/frame/settings.rs | 27 ++++ src/hpack/header.rs | 18 +++ src/hpack/table.rs | 1 + src/hpack/test/fixture.rs | 2 + src/lib.rs | 1 + src/proto/connection.rs | 11 ++ src/proto/settings.rs | 4 +- src/proto/streams/mod.rs | 3 + src/proto/streams/recv.rs | 110 ++++++++------- src/proto/streams/send.rs | 18 ++- src/proto/streams/store.rs | 16 ++- src/proto/streams/streams.rs | 66 ++++----- src/server.rs | 36 ++++- tests/h2-support/src/assert.rs | 11 ++ tests/h2-support/src/frames.rs | 28 +++- tests/h2-support/src/mock.rs | 21 +-- tests/h2-support/src/prelude.rs | 5 +- tests/h2-tests/tests/client_request.rs | 147 +++++++++++++++++++ tests/h2-tests/tests/server.rs | 188 +++++++++++++++++++++++++ 22 files changed, 691 insertions(+), 117 deletions(-) create mode 100644 src/ext.rs diff --git a/src/client.rs b/src/client.rs index 9cd0b8f46..3a818a582 100644 --- a/src/client.rs +++ b/src/client.rs @@ -136,6 +136,7 @@ //! [`Error`]: ../struct.Error.html use crate::codec::{Codec, SendError, UserError}; +use crate::ext::Protocol; use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId}; use crate::proto::{self, Error}; use crate::{FlowControl, PingPong, RecvStream, SendStream}; @@ -517,6 +518,19 @@ where (response, stream) }) } + + /// Returns whether the [extended CONNECT protocol][1] is enabled or not. + /// + /// This setting is configured by the server peer by sending the + /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. + /// This method returns the currently acknowledged value recieved from the + /// remote. + /// + /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3 + pub fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner.is_extended_connect_protocol_enabled() + } } impl fmt::Debug for SendRequest @@ -1246,11 +1260,10 @@ where /// This method returns the currently acknowledged value recieved from the /// remote. /// - /// [settings]: https://tools.ietf.org/html/rfc7540#section-5.1.2 + /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 pub fn max_concurrent_send_streams(&self) -> usize { self.inner.max_send_streams() } - /// Returns the maximum number of concurrent streams that may be initiated /// by the server on this connection. 
/// @@ -1416,6 +1429,7 @@ impl Peer { pub fn convert_send_message( id: StreamId, request: Request<()>, + protocol: Option, end_of_stream: bool, ) -> Result { use http::request::Parts; @@ -1435,7 +1449,7 @@ impl Peer { // Build the set pseudo header set. All requests will include `method` // and `path`. - let mut pseudo = Pseudo::request(method, uri); + let mut pseudo = Pseudo::request(method, uri, protocol); if pseudo.scheme.is_none() { // If the scheme is not set, then there are a two options. diff --git a/src/ext.rs b/src/ext.rs new file mode 100644 index 000000000..cf383a495 --- /dev/null +++ b/src/ext.rs @@ -0,0 +1,55 @@ +//! Extensions specific to the HTTP/2 protocol. + +use crate::hpack::BytesStr; + +use bytes::Bytes; +use std::fmt; + +/// Represents the `:protocol` pseudo-header used by +/// the [Extended CONNECT Protocol]. +/// +/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 +#[derive(Clone, Eq, PartialEq)] +pub struct Protocol { + value: BytesStr, +} + +impl Protocol { + /// Converts a static string to a protocol name. + pub const fn from_static(value: &'static str) -> Self { + Self { + value: BytesStr::from_static(value), + } + } + + /// Returns a str representation of the header. + pub fn as_str(&self) -> &str { + self.value.as_str() + } + + pub(crate) fn try_from(bytes: Bytes) -> Result { + Ok(Self { + value: BytesStr::try_from(bytes)?, + }) + } +} + +impl<'a> From<&'a str> for Protocol { + fn from(value: &'a str) -> Self { + Self { + value: BytesStr::from(value), + } + } +} + +impl AsRef<[u8]> for Protocol { + fn as_ref(&self) -> &[u8] { + self.value.as_ref() + } +} + +impl fmt::Debug for Protocol { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.value.fmt(f) + } +} diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 2fc9561cf..05d77234f 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -1,4 +1,5 @@ use super::{util, StreamDependency, StreamId}; +use crate::ext::Protocol; use crate::frame::{Error, Frame, Head, Kind}; use crate::hpack::{self, BytesStr}; @@ -66,6 +67,7 @@ pub struct Pseudo { pub scheme: Option, pub authority: Option, pub path: Option, + pub protocol: Option, // Response pub status: Option, @@ -292,6 +294,10 @@ impl fmt::Debug for Headers { .field("stream_id", &self.stream_id) .field("flags", &self.flags); + if let Some(ref protocol) = self.header_block.pseudo.protocol { + builder.field("protocol", protocol); + } + if let Some(ref dep) = self.stream_dep { builder.field("stream_dep", dep); } @@ -529,7 +535,7 @@ impl Continuation { // ===== impl Pseudo ===== impl Pseudo { - pub fn request(method: Method, uri: Uri) -> Self { + pub fn request(method: Method, uri: Uri, protocol: Option) -> Self { let parts = uri::Parts::from(uri); let mut path = parts @@ -550,6 +556,7 @@ impl Pseudo { scheme: None, authority: None, path: Some(path).filter(|p| !p.is_empty()), + protocol, status: None, }; @@ -575,6 +582,7 @@ impl Pseudo { scheme: None, authority: None, path: None, + protocol: None, status: Some(status), } } @@ -593,6 +601,11 @@ impl Pseudo { self.scheme = Some(bytes_str); } + #[cfg(feature = "unstable")] + pub fn set_protocol(&mut self, protocol: Protocol) { + self.protocol = Some(protocol); + } + pub fn set_authority(&mut self, authority: BytesStr) { self.authority = Some(authority); } @@ -681,6 +694,10 @@ impl Iterator for Iter { return Some(Path(path)); } + if let Some(protocol) = pseudo.protocol.take() { + return Some(Protocol(protocol)); + } + if let Some(status) = 
pseudo.status.take() { return Some(Status(status)); } @@ -879,6 +896,7 @@ impl HeaderBlock { Method(v) => set_pseudo!(method, v), Scheme(v) => set_pseudo!(scheme, v), Path(v) => set_pseudo!(path, v), + Protocol(v) => set_pseudo!(protocol, v), Status(v) => set_pseudo!(status, v), } }); diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 523f20b06..080d0f4e5 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -13,6 +13,7 @@ pub struct Settings { initial_window_size: Option, max_frame_size: Option, max_header_list_size: Option, + enable_connect_protocol: Option, } /// An enum that lists all valid settings that can be sent in a SETTINGS @@ -27,6 +28,7 @@ pub enum Setting { InitialWindowSize(u32), MaxFrameSize(u32), MaxHeaderListSize(u32), + EnableConnectProtocol(u32), } #[derive(Copy, Clone, Eq, PartialEq, Default)] @@ -107,6 +109,14 @@ impl Settings { self.enable_push = Some(enable as u32); } + pub fn is_extended_connect_protocol_enabled(&self) -> Option { + self.enable_connect_protocol.map(|val| val != 0) + } + + pub fn set_enable_connect_protocol(&mut self, val: Option) { + self.enable_connect_protocol = val; + } + pub fn header_table_size(&self) -> Option { self.header_table_size } @@ -181,6 +191,14 @@ impl Settings { Some(MaxHeaderListSize(val)) => { settings.max_header_list_size = Some(val); } + Some(EnableConnectProtocol(val)) => match val { + 0 | 1 => { + settings.enable_connect_protocol = Some(val); + } + _ => { + return Err(Error::InvalidSettingValue); + } + }, None => {} } } @@ -236,6 +254,10 @@ impl Settings { if let Some(v) = self.max_header_list_size { f(MaxHeaderListSize(v)); } + + if let Some(v) = self.enable_connect_protocol { + f(EnableConnectProtocol(v)); + } } } @@ -269,6 +291,9 @@ impl fmt::Debug for Settings { Setting::MaxHeaderListSize(v) => { builder.field("max_header_list_size", &v); } + Setting::EnableConnectProtocol(v) => { + builder.field("enable_connect_protocol", &v); + } }); builder.finish() @@ -291,6 +316,7 @@ impl Setting { 4 => Some(InitialWindowSize(val)), 5 => Some(MaxFrameSize(val)), 6 => Some(MaxHeaderListSize(val)), + 8 => Some(EnableConnectProtocol(val)), _ => None, } } @@ -322,6 +348,7 @@ impl Setting { InitialWindowSize(v) => (4, v), MaxFrameSize(v) => (5, v), MaxHeaderListSize(v) => (6, v), + EnableConnectProtocol(v) => (8, v), }; dst.put_u16(kind); diff --git a/src/hpack/header.rs b/src/hpack/header.rs index 8d6136e16..e6df555ab 100644 --- a/src/hpack/header.rs +++ b/src/hpack/header.rs @@ -1,4 +1,5 @@ use super::{DecoderError, NeedMore}; +use crate::ext::Protocol; use bytes::Bytes; use http::header::{HeaderName, HeaderValue}; @@ -14,6 +15,7 @@ pub enum Header { Method(Method), Scheme(BytesStr), Path(BytesStr), + Protocol(Protocol), Status(StatusCode), } @@ -25,6 +27,7 @@ pub enum Name<'a> { Method, Scheme, Path, + Protocol, Status, } @@ -51,6 +54,7 @@ impl Header> { Method(v) => Method(v), Scheme(v) => Scheme(v), Path(v) => Path(v), + Protocol(v) => Protocol(v), Status(v) => Status(v), }) } @@ -79,6 +83,10 @@ impl Header { let value = BytesStr::try_from(value)?; Ok(Header::Path(value)) } + b"protocol" => { + let value = Protocol::try_from(value)?; + Ok(Header::Protocol(value)) + } b"status" => { let status = StatusCode::from_bytes(&value)?; Ok(Header::Status(status)) @@ -104,6 +112,7 @@ impl Header { Header::Method(ref v) => 32 + 7 + v.as_ref().len(), Header::Scheme(ref v) => 32 + 7 + v.len(), Header::Path(ref v) => 32 + 5 + v.len(), + Header::Protocol(ref v) => 32 + 9 + v.as_str().len(), Header::Status(_) => 32 + 7 + 3, } 
} @@ -116,6 +125,7 @@ impl Header { Header::Method(..) => Name::Method, Header::Scheme(..) => Name::Scheme, Header::Path(..) => Name::Path, + Header::Protocol(..) => Name::Protocol, Header::Status(..) => Name::Status, } } @@ -127,6 +137,7 @@ impl Header { Header::Method(ref v) => v.as_ref().as_ref(), Header::Scheme(ref v) => v.as_ref(), Header::Path(ref v) => v.as_ref(), + Header::Protocol(ref v) => v.as_ref(), Header::Status(ref v) => v.as_str().as_ref(), } } @@ -156,6 +167,10 @@ impl Header { Header::Path(ref b) => a == b, _ => false, }, + Header::Protocol(ref a) => match *other { + Header::Protocol(ref b) => a == b, + _ => false, + }, Header::Status(ref a) => match *other { Header::Status(ref b) => a == b, _ => false, @@ -205,6 +220,7 @@ impl From

for Header> { Header::Method(v) => Header::Method(v), Header::Scheme(v) => Header::Scheme(v), Header::Path(v) => Header::Path(v), + Header::Protocol(v) => Header::Protocol(v), Header::Status(v) => Header::Status(v), } } @@ -221,6 +237,7 @@ impl<'a> Name<'a> { Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)), Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)), + Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)), Name::Status => { match StatusCode::from_bytes(&value) { Ok(status) => Ok(Header::Status(status)), @@ -238,6 +255,7 @@ impl<'a> Name<'a> { Name::Method => b":method", Name::Scheme => b":scheme", Name::Path => b":path", + Name::Protocol => b":protocol", Name::Status => b":status", } } diff --git a/src/hpack/table.rs b/src/hpack/table.rs index 2328743a8..0124f216d 100644 --- a/src/hpack/table.rs +++ b/src/hpack/table.rs @@ -751,6 +751,7 @@ fn index_static(header: &Header) -> Option<(usize, bool)> { "/index.html" => Some((5, true)), _ => Some((4, false)), }, + Header::Protocol(..) => None, Header::Status(ref v) => match u16::from(*v) { 200 => Some((8, true)), 204 => Some((9, true)), diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 6d0448425..3428c3958 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -134,6 +134,7 @@ fn key_str(e: &Header) -> &str { Header::Method(..) => ":method", Header::Scheme(..) => ":scheme", Header::Path(..) => ":path", + Header::Protocol(..) => ":protocol", Header::Status(..) => ":status", } } @@ -145,6 +146,7 @@ fn value_str(e: &Header) -> &str { Header::Method(ref m) => m.as_str(), Header::Scheme(ref v) => &**v, Header::Path(ref v) => &**v, + Header::Protocol(ref v) => v.as_str(), Header::Status(ref v) => v.as_str(), } } diff --git a/src/lib.rs b/src/lib.rs index cb02acaff..db6b4888c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -120,6 +120,7 @@ mod frame; pub mod frame; pub mod client; +pub mod ext; pub mod server; mod share; diff --git a/src/proto/connection.rs b/src/proto/connection.rs index a75df4369..d1b8b5125 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -110,6 +110,10 @@ where initial_max_send_streams: config.initial_max_send_streams, local_next_stream_id: config.next_stream_id, local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), + extended_connect_protocol_enabled: config + .settings + .is_extended_connect_protocol_enabled() + .unwrap_or(false), local_reset_duration: config.reset_stream_duration, local_reset_max: config.reset_stream_max, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, @@ -147,6 +151,13 @@ where self.inner.settings.send_settings(settings) } + /// Send a new SETTINGS frame with extended CONNECT protocol enabled. + pub(crate) fn set_enable_connect_protocol(&mut self) -> Result<(), UserError> { + let mut settings = frame::Settings::default(); + settings.set_enable_connect_protocol(Some(1)); + self.inner.settings.send_settings(settings) + } + /// Returns the maximum number of concurrent streams that may be initiated /// by this peer. 
pub(crate) fn max_send_streams(&self) -> usize { diff --git a/src/proto/settings.rs b/src/proto/settings.rs index 44f4c2df4..6cc617209 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -117,6 +117,8 @@ impl Settings { tracing::trace!("ACK sent; applying settings"); + streams.apply_remote_settings(settings)?; + if let Some(val) = settings.header_table_size() { dst.set_send_header_table_size(val as usize); } @@ -124,8 +126,6 @@ impl Settings { if let Some(val) = settings.max_frame_size() { dst.set_max_send_frame_size(val as usize); } - - streams.apply_remote_settings(settings)?; } self.remote = None; diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index 608395c0f..0fd61a29a 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -47,6 +47,9 @@ pub struct Config { /// If the local peer is willing to receive push promises pub local_push_enabled: bool, + /// If extended connect protocol is enabled. + pub extended_connect_protocol_enabled: bool, + /// How long a locally reset stream should ignore frames pub local_reset_duration: Duration, diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index be996b963..e613c26b3 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -56,6 +56,9 @@ pub(super) struct Recv { /// If push promises are allowed to be received. is_push_enabled: bool, + + /// If extended connect protocol is enabled. + is_extended_connect_protocol_enabled: bool, } #[derive(Debug)] @@ -103,6 +106,7 @@ impl Recv { buffer: Buffer::new(), refused: None, is_push_enabled: config.local_push_enabled, + is_extended_connect_protocol_enabled: config.extended_connect_protocol_enabled, } } @@ -216,6 +220,14 @@ impl Recv { let stream_id = frame.stream_id(); let (pseudo, fields) = frame.into_parts(); + + if pseudo.protocol.is_some() { + if counts.peer().is_server() && !self.is_extended_connect_protocol_enabled { + proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); + } + } + if !pseudo.is_informational() { let message = counts .peer() @@ -449,60 +461,58 @@ impl Recv { settings: &frame::Settings, store: &mut Store, ) -> Result<(), proto::Error> { - let target = if let Some(val) = settings.initial_window_size() { - val - } else { - return Ok(()); - }; + if let Some(val) = settings.is_extended_connect_protocol_enabled() { + self.is_extended_connect_protocol_enabled = val; + } - let old_sz = self.init_window_sz; - self.init_window_sz = target; + if let Some(target) = settings.initial_window_size() { + let old_sz = self.init_window_sz; + self.init_window_sz = target; - tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); + tracing::trace!("update_initial_window_size; new={}; old={}", target, old_sz,); - // Per RFC 7540 ยง6.9.2: - // - // In addition to changing the flow-control window for streams that are - // not yet active, a SETTINGS frame can alter the initial flow-control - // window size for streams with active flow-control windows (that is, - // streams in the "open" or "half-closed (remote)" state). When the - // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust - // the size of all stream flow-control windows that it maintains by the - // difference between the new value and the old value. - // - // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available - // space in a flow-control window to become negative. 
A sender MUST - // track the negative flow-control window and MUST NOT send new - // flow-controlled frames until it receives WINDOW_UPDATE frames that - // cause the flow-control window to become positive. - - if target < old_sz { - // We must decrease the (local) window on every open stream. - let dec = old_sz - target; - tracing::trace!("decrementing all windows; dec={}", dec); - - store.for_each(|mut stream| { - stream.recv_flow.dec_recv_window(dec); - Ok(()) - }) - } else if target > old_sz { - // We must increase the (local) window on every open stream. - let inc = target - old_sz; - tracing::trace!("incrementing all windows; inc={}", inc); - store.for_each(|mut stream| { - // XXX: Shouldn't the peer have already noticed our - // overflow and sent us a GOAWAY? - stream - .recv_flow - .inc_window(inc) - .map_err(proto::Error::library_go_away)?; - stream.recv_flow.assign_capacity(inc); - Ok(()) - }) - } else { - // size is the same... so do nothing - Ok(()) + // Per RFC 7540 ยง6.9.2: + // + // In addition to changing the flow-control window for streams that are + // not yet active, a SETTINGS frame can alter the initial flow-control + // window size for streams with active flow-control windows (that is, + // streams in the "open" or "half-closed (remote)" state). When the + // value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust + // the size of all stream flow-control windows that it maintains by the + // difference between the new value and the old value. + // + // A change to `SETTINGS_INITIAL_WINDOW_SIZE` can cause the available + // space in a flow-control window to become negative. A sender MUST + // track the negative flow-control window and MUST NOT send new + // flow-controlled frames until it receives WINDOW_UPDATE frames that + // cause the flow-control window to become positive. + + if target < old_sz { + // We must decrease the (local) window on every open stream. + let dec = old_sz - target; + tracing::trace!("decrementing all windows; dec={}", dec); + + store.for_each(|mut stream| { + stream.recv_flow.dec_recv_window(dec); + }) + } else if target > old_sz { + // We must increase the (local) window on every open stream. + let inc = target - old_sz; + tracing::trace!("incrementing all windows; inc={}", inc); + store.try_for_each(|mut stream| { + // XXX: Shouldn't the peer have already noticed our + // overflow and sent us a GOAWAY? + stream + .recv_flow + .inc_window(inc) + .map_err(proto::Error::library_go_away)?; + stream.recv_flow.assign_capacity(inc); + Ok::<_, proto::Error>(()) + })?; + } } + + Ok(()) } pub fn is_end_stream(&self, stream: &store::Ptr) -> bool { diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 3735d13dd..e3fcf6d32 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -35,6 +35,9 @@ pub(super) struct Send { prioritize: Prioritize, is_push_enabled: bool, + + /// If extended connect protocol is enabled. + is_extended_connect_protocol_enabled: bool, } /// A value to detect which public API has called `poll_reset`. @@ -53,6 +56,7 @@ impl Send { next_stream_id: Ok(config.local_next_stream_id), prioritize: Prioritize::new(config), is_push_enabled: true, + is_extended_connect_protocol_enabled: false, } } @@ -429,6 +433,10 @@ impl Send { counts: &mut Counts, task: &mut Option, ) -> Result<(), Error> { + if let Some(val) = settings.is_extended_connect_protocol_enabled() { + self.is_extended_connect_protocol_enabled = val; + } + // Applies an update to the remote endpoint's initial window size. 
// // Per RFC 7540 ยง6.9.2: @@ -490,16 +498,14 @@ impl Send { // TODO: Should this notify the producer when the capacity // of a stream is reduced? Maybe it should if the capacity // is reduced to zero, allowing the producer to stop work. - - Ok::<_, Error>(()) - })?; + }); self.prioritize .assign_connection_capacity(total_reclaimed, store, counts); } else if val > old_val { let inc = val - old_val; - store.for_each(|mut stream| { + store.try_for_each(|mut stream| { self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) .map_err(Error::library_go_away) })?; @@ -554,4 +560,8 @@ impl Send { } } } + + pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { + self.is_extended_connect_protocol_enabled + } } diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index ac58f43ac..3e34b7cb2 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -4,6 +4,7 @@ use slab; use indexmap::{self, IndexMap}; +use std::convert::Infallible; use std::fmt; use std::marker::PhantomData; use std::ops; @@ -128,7 +129,20 @@ impl Store { } } - pub fn for_each(&mut self, mut f: F) -> Result<(), E> + pub(crate) fn for_each(&mut self, mut f: F) + where + F: FnMut(Ptr), + { + match self.try_for_each(|ptr| { + f(ptr); + Ok::<_, Infallible>(()) + }) { + Ok(()) => (), + Err(infallible) => match infallible {}, + } + } + + pub fn try_for_each(&mut self, mut f: F) -> Result<(), E> where F: FnMut(Ptr) -> Result<(), E>, { diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 4962db8d2..5c235c15c 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -2,6 +2,7 @@ use super::recv::RecvHeaderBlockError; use super::store::{self, Entry, Resolve, Store}; use super::{Buffer, Config, Counts, Prioritized, Recv, Send, Stream, StreamId}; use crate::codec::{Codec, SendError, UserError}; +use crate::ext::Protocol; use crate::frame::{self, Frame, Reason}; use crate::proto::{peer, Error, Initiator, Open, Peer, WindowSize}; use crate::{client, proto, server}; @@ -214,6 +215,8 @@ where use super::stream::ContentLength; use http::Method; + let protocol = request.extensions_mut().remove::(); + // Clear before taking lock, incase extensions contain a StreamRef. 
request.extensions_mut().clear(); @@ -261,7 +264,8 @@ where } // Convert the message - let headers = client::Peer::convert_send_message(stream_id, request, end_of_stream)?; + let headers = + client::Peer::convert_send_message(stream_id, request, protocol, end_of_stream)?; let mut stream = me.store.insert(stream.id, stream); @@ -294,6 +298,15 @@ where send_buffer: self.send_buffer.clone(), }) } + + pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { + self.inner + .lock() + .unwrap() + .actions + .send + .is_extended_connect_protocol_enabled() + } } impl DynStreams<'_, B> { @@ -643,15 +656,12 @@ impl Inner { let last_processed_id = actions.recv.last_processed_id(); - self.store - .for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.handle_error(&err, &mut *stream); - actions.send.handle_error(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) + self.store.for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.handle_error(&err, &mut *stream); + actions.send.handle_error(send_buffer, stream, counts); }) - .unwrap(); + }); actions.conn_error = Some(err); @@ -674,19 +684,14 @@ impl Inner { let err = Error::remote_go_away(frame.debug_data().clone(), frame.reason()); - self.store - .for_each(|stream| { - if stream.id > last_stream_id { - counts.transition(stream, |counts, stream| { - actions.recv.handle_error(&err, &mut *stream); - actions.send.handle_error(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) - } else { - Ok::<_, ()>(()) - } - }) - .unwrap(); + self.store.for_each(|stream| { + if stream.id > last_stream_id { + counts.transition(stream, |counts, stream| { + actions.recv.handle_error(&err, &mut *stream); + actions.send.handle_error(send_buffer, stream, counts); + }) + } + }); actions.conn_error = Some(err); @@ -807,18 +812,15 @@ impl Inner { tracing::trace!("Streams::recv_eof"); - self.store - .for_each(|stream| { - counts.transition(stream, |counts, stream| { - actions.recv.recv_eof(stream); + self.store.for_each(|stream| { + counts.transition(stream, |counts, stream| { + actions.recv.recv_eof(stream); - // This handles resetting send state associated with the - // stream - actions.send.handle_error(send_buffer, stream, counts); - Ok::<_, ()>(()) - }) + // This handles resetting send state associated with the + // stream + actions.send.handle_error(send_buffer, stream, counts); }) - .expect("recv_eof"); + }); actions.clear_queues(clear_pending_accept, &mut self.store, counts); Ok(()) diff --git a/src/server.rs b/src/server.rs index 491446460..1eb40312c 100644 --- a/src/server.rs +++ b/src/server.rs @@ -470,6 +470,19 @@ where Ok(()) } + /// Enables the [extended CONNECT protocol]. + /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + /// + /// # Errors + /// + /// Returns an error if a previous call is still pending acknowledgement + /// from the remote endpoint. + pub fn enable_connect_protocol(&mut self) -> Result<(), crate::Error> { + self.connection.set_enable_connect_protocol()?; + Ok(()) + } + /// Returns `Ready` when the underlying connection has closed. /// /// If any new inbound streams are received during a call to `poll_closed`, @@ -904,6 +917,14 @@ impl Builder { self } + /// Enables the [extended CONNECT protocol]. 
+ /// + /// [extended CONNECT protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 + pub fn enable_connect_protocol(&mut self) -> &mut Self { + self.settings.set_enable_connect_protocol(Some(1)); + self + } + /// Creates a new configured HTTP/2 server backed by `io`. /// /// It is expected that `io` already be in an appropriate state to commence @@ -1360,7 +1381,7 @@ impl Peer { _, ) = request.into_parts(); - let pseudo = Pseudo::request(method, uri); + let pseudo = Pseudo::request(method, uri, None); Ok(frame::PushPromise::new( stream_id, @@ -1410,6 +1431,11 @@ impl proto::Peer for Peer { malformed!("malformed headers: missing method"); } + let has_protocol = pseudo.protocol.is_some(); + if !is_connect && has_protocol { + malformed!("malformed headers: :protocol on non-CONNECT request"); + } + if pseudo.status.is_some() { malformed!("malformed headers: :status field on request"); } @@ -1432,7 +1458,7 @@ impl proto::Peer for Peer { // A :scheme is required, except CONNECT. if let Some(scheme) = pseudo.scheme { - if is_connect { + if is_connect && !has_protocol { malformed!(":scheme in CONNECT"); } let maybe_scheme = scheme.parse(); @@ -1450,12 +1476,12 @@ impl proto::Peer for Peer { if parts.authority.is_some() { parts.scheme = Some(scheme); } - } else if !is_connect { + } else if !is_connect || has_protocol { malformed!("malformed headers: missing scheme"); } if let Some(path) = pseudo.path { - if is_connect { + if is_connect && !has_protocol { malformed!(":path in CONNECT"); } @@ -1468,6 +1494,8 @@ impl proto::Peer for Peer { parts.path_and_query = Some(maybe_path.or_else(|why| { malformed!("malformed headers: malformed path ({:?}): {}", path, why,) })?); + } else if is_connect && has_protocol { + malformed!("malformed headers: missing path in extended CONNECT"); } b = b.uri(parts); diff --git a/tests/h2-support/src/assert.rs b/tests/h2-support/src/assert.rs index 8bc6d25c7..88e3d4f7c 100644 --- a/tests/h2-support/src/assert.rs +++ b/tests/h2-support/src/assert.rs @@ -47,6 +47,16 @@ macro_rules! assert_settings { }}; } +#[macro_export] +macro_rules! assert_go_away { + ($frame:expr) => {{ + match $frame { + h2::frame::Frame::GoAway(v) => v, + f => panic!("expected GO_AWAY; actual={:?}", f), + } + }}; +} + #[macro_export] macro_rules! poll_err { ($transport:expr) => {{ @@ -80,6 +90,7 @@ macro_rules! 
assert_default_settings { use h2::frame::Frame; +#[track_caller] pub fn assert_frame_eq, U: Into>(t: T, u: U) { let actual: Frame = t.into(); let expected: Frame = u.into(); diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index 824bc5c19..f2c07bacb 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -4,7 +4,10 @@ use std::fmt; use bytes::Bytes; use http::{self, HeaderMap, StatusCode}; -use h2::frame::{self, Frame, StreamId}; +use h2::{ + ext::Protocol, + frame::{self, Frame, StreamId}, +}; pub const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; pub const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; @@ -109,7 +112,9 @@ impl Mock { let method = method.try_into().unwrap(); let uri = uri.try_into().unwrap(); let (id, _, fields) = self.into_parts(); - let frame = frame::Headers::new(id, frame::Pseudo::request(method, uri), fields); + let extensions = Default::default(); + let pseudo = frame::Pseudo::request(method, uri, extensions); + let frame = frame::Headers::new(id, pseudo, fields); Mock(frame) } @@ -179,6 +184,15 @@ impl Mock { Mock(frame::Headers::new(id, pseudo, fields)) } + pub fn protocol(self, value: &str) -> Self { + let (id, mut pseudo, fields) = self.into_parts(); + let value = Protocol::from(value); + + pseudo.set_protocol(value); + + Mock(frame::Headers::new(id, pseudo, fields)) + } + pub fn eos(mut self) -> Self { self.0.set_end_stream(); self @@ -230,8 +244,9 @@ impl Mock { let method = method.try_into().unwrap(); let uri = uri.try_into().unwrap(); let (id, promised, _, fields) = self.into_parts(); - let frame = - frame::PushPromise::new(id, promised, frame::Pseudo::request(method, uri), fields); + let extensions = Default::default(); + let pseudo = frame::Pseudo::request(method, uri, extensions); + let frame = frame::PushPromise::new(id, promised, pseudo, fields); Mock(frame) } @@ -352,6 +367,11 @@ impl Mock { self.0.set_enable_push(false); self } + + pub fn enable_connect_protocol(mut self, val: u32) -> Self { + self.0.set_enable_connect_protocol(Some(val)); + self + } } impl From> for frame::Settings { diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index b5df9ad9b..cc314cd06 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -221,22 +221,15 @@ impl Handle { let settings = settings.into(); self.send(settings.into()).await.unwrap(); - let frame = self.next().await; - let settings = match frame { - Some(frame) => match frame.unwrap() { - Frame::Settings(settings) => { - // Send the ACK - let ack = frame::Settings::ack(); + let frame = self.next().await.expect("unexpected EOF").unwrap(); + let settings = assert_settings!(frame); - // TODO: Don't unwrap? - self.send(ack.into()).await.unwrap(); + // Send the ACK + let ack = frame::Settings::ack(); + + // TODO: Don't unwrap? 
+ self.send(ack.into()).await.unwrap(); - settings - } - frame => panic!("unexpected frame; frame={:?}", frame), - }, - None => panic!("unexpected EOF"), - }; let frame = self.next().await; let f = assert_settings!(frame.unwrap().unwrap()); diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index 1fcb0dcc4..86ef3249e 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -2,6 +2,7 @@ pub use h2; pub use h2::client; +pub use h2::ext::Protocol; pub use h2::frame::StreamId; pub use h2::server; pub use h2::*; @@ -20,8 +21,8 @@ pub use super::{Codec, SendFrame}; // Re-export macros pub use super::{ - assert_closed, assert_data, assert_default_settings, assert_headers, assert_ping, poll_err, - poll_frame, raw_codec, + assert_closed, assert_data, assert_default_settings, assert_go_away, assert_headers, + assert_ping, assert_settings, poll_err, poll_frame, raw_codec, }; pub use super::assert::assert_frame_eq; diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 2af0bdeec..9635bcc6c 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1305,6 +1305,153 @@ async fn informational_while_local_streaming() { join(srv, h2).await; } +#[tokio::test] +async fn extended_connect_protocol_disabled_by_default() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + + srv.recv_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.unwrap(); + + // we send a simple req here just to drive the connection so we can + // receive the server settings. + let request = Request::get("https://example.com/").body(()).unwrap(); + // first request is allowed + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + + assert!(!client.is_extended_connect_protocol_enabled()); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn extended_connect_protocol_enabled_during_handshake() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv + .assert_client_handshake_with_settings(frames::settings().enable_connect_protocol(1)) + .await; + assert_default_settings!(settings); + + srv.recv_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.unwrap(); + + // we send a simple req here just to drive the connection so we can + // receive the server settings. 
+ let request = Request::get("https://example.com/").body(()).unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + + assert!(client.is_extended_connect_protocol_enabled()); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn invalid_connect_protocol_enabled_setting() { + h2_support::trace_init!(); + + let (io, mut srv) = mock::new(); + + let srv = async move { + // Send a settings frame + srv.send(frames::settings().enable_connect_protocol(2).into()) + .await + .unwrap(); + srv.read_preface().await.unwrap(); + + let settings = assert_settings!(srv.next().await.expect("unexpected EOF").unwrap()); + assert_default_settings!(settings); + + // Send the ACK + let ack = frame::Settings::ack(); + + // TODO: Don't unwrap? + srv.send(ack.into()).await.unwrap(); + + let frame = srv.next().await.unwrap().unwrap(); + let go_away = assert_go_away!(frame); + assert_eq!(go_away.reason(), Reason::PROTOCOL_ERROR); + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.unwrap(); + + // we send a simple req here just to drive the connection so we can + // receive the server settings. + let request = Request::get("https://example.com/").body(()).unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + + let error = h2.drive(response).await.unwrap_err(); + assert_eq!(error.reason(), Some(Reason::PROTOCOL_ERROR)); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn extended_connect_request() { + h2_support::trace_init!(); + + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv + .assert_client_handshake_with_settings(frames::settings().enable_connect_protocol(1)) + .await; + assert_default_settings!(settings); + + srv.recv_frame( + frames::headers(1) + .request("CONNECT", "http://bread/baguette") + .protocol("the-bread-protocol") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.unwrap(); + + let request = Request::connect("http://bread/baguette") + .extension(Protocol::from("the-bread-protocol")) + .body(()) + .unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + }; + + join(srv, h2).await; +} + const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index e60483d0d..b3bf1a286 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1149,3 +1149,191 @@ async fn send_reset_explicitly() { join(client, srv).await; } + +#[tokio::test] +async fn extended_connect_protocol_disabled_by_default() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + + assert_eq!(settings.is_extended_connect_protocol_enabled(), None); + + client + .send_frame( + frames::headers(1) + .request("CONNECT", "http://bread/baguette") + .protocol("the-bread-protocol"), + ) + .await; + + client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let mut srv = server::handshake(io).await.expect("handshake"); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} + +#[tokio::test] +async fn 
extended_connect_protocol_enabled_during_handshake() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + + assert_eq!(settings.is_extended_connect_protocol_enabled(), Some(true)); + + client + .send_frame( + frames::headers(1) + .request("CONNECT", "http://bread/baguette") + .protocol("the-bread-protocol"), + ) + .await; + + client.recv_frame(frames::headers(1).response(200)).await; + }; + + let srv = async move { + let mut builder = server::Builder::new(); + + builder.enable_connect_protocol(); + + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + let (_req, mut stream) = srv.next().await.unwrap().unwrap(); + + let rsp = Response::new(()); + stream.send_response(rsp, false).unwrap(); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} + +#[tokio::test] +async fn reject_pseudo_protocol_on_non_connect_request() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + + assert_eq!(settings.is_extended_connect_protocol_enabled(), Some(true)); + + client + .send_frame( + frames::headers(1) + .request("GET", "http://bread/baguette") + .protocol("the-bread-protocol"), + ) + .await; + + client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let mut builder = server::Builder::new(); + + builder.enable_connect_protocol(); + + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + assert!(srv.next().await.is_none()); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} + +#[tokio::test] +async fn reject_authority_target_on_extended_connect_request() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + + assert_eq!(settings.is_extended_connect_protocol_enabled(), Some(true)); + + client + .send_frame( + frames::headers(1) + .request("CONNECT", "bread:80") + .protocol("the-bread-protocol"), + ) + .await; + + client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let mut builder = server::Builder::new(); + + builder.enable_connect_protocol(); + + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + assert!(srv.next().await.is_none()); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} + +#[tokio::test] +async fn reject_non_authority_target_on_connect_request() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + + assert_eq!(settings.is_extended_connect_protocol_enabled(), Some(true)); + + client + .send_frame(frames::headers(1).request("CONNECT", "https://bread/baguette")) + .await; + + client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let mut builder = server::Builder::new(); + + builder.enable_connect_protocol(); + + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + assert!(srv.next().await.is_none()); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} From e9e0f27b8013b61862e2e0aa81175ab08244d629 Mon Sep 17 00:00:00 2001 From: 
Anthony Ramine Date: Thu, 2 Dec 2021 12:46:50 +0100 Subject: [PATCH 073/178] Add test that would make wait_for_capacity hang if it doesn't loop --- tests/h2-support/src/util.rs | 12 +++---- tests/h2-tests/tests/flow_control.rs | 50 ++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 6 deletions(-) diff --git a/tests/h2-support/src/util.rs b/tests/h2-support/src/util.rs index ec768badc..b3322c4d2 100644 --- a/tests/h2-support/src/util.rs +++ b/tests/h2-support/src/util.rs @@ -54,14 +54,14 @@ impl Future for WaitForCapacity { type Output = h2::SendStream; fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - let _ = ready!(self.stream().poll_capacity(cx)).unwrap(); + loop { + let _ = ready!(self.stream().poll_capacity(cx)).unwrap(); - let act = self.stream().capacity(); + let act = self.stream().capacity(); - if act >= self.target { - return Poll::Ready(self.stream.take().unwrap().into()); + if act >= self.target { + return Poll::Ready(self.stream.take().unwrap().into()); + } } - - Poll::Pending } } diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index be04a61b7..e7d630808 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1561,3 +1561,53 @@ async fn data_padding() { join(srv, h2).await; } + +#[tokio::test] +async fn poll_capacity_after_send_data_and_reserve() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv + .assert_client_handshake_with_settings(frames::settings().initial_window_size(5)) + .await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://www.example.com/")) + .await; + srv.send_frame(frames::headers(1).response(200)).await; + srv.recv_frame(frames::data(1, &b"abcde"[..])).await; + srv.send_frame(frames::window_update(1, 5)).await; + srv.recv_frame(frames::data(1, &b""[..]).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri("https://www.example.com/") + .body(()) + .unwrap(); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + let response = h2.drive(response).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + stream.send_data("abcde".into(), false).unwrap(); + + stream.reserve_capacity(5); + + // Initial window size was 5 so current capacity is 0 even if we just reserved. + assert_eq!(stream.capacity(), 0); + + // The first call to `poll_capacity` in `wait_for_capacity` will return 0. 
+ let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; + + stream.send_data("".into(), true).unwrap(); + + // Wait for the connection to close + h2.await.unwrap(); + }; + + join(srv, h2).await; +} From efa113bac6252104cd65284091814f8d13cd36dc Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 8 Dec 2021 10:03:15 -0800 Subject: [PATCH 074/178] Add max send buffer per stream option (#580) --- src/client.rs | 23 +++++++++++ src/proto/connection.rs | 2 + src/proto/mod.rs | 1 + src/proto/streams/mod.rs | 3 ++ src/proto/streams/send.rs | 12 +++--- src/server.rs | 23 +++++++++++ tests/h2-tests/tests/flow_control.rs | 57 ++++++++++++++++++++++++++++ 7 files changed, 115 insertions(+), 6 deletions(-) diff --git a/src/client.rs b/src/client.rs index 3a818a582..d4ec3b906 100644 --- a/src/client.rs +++ b/src/client.rs @@ -320,6 +320,9 @@ pub struct Builder { /// Initial target window size for new connections. initial_target_connection_window_size: Option, + /// Maximum amount of bytes to "buffer" for writing per stream. + max_send_buffer_size: usize, + /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, @@ -628,6 +631,7 @@ impl Builder { /// ``` pub fn new() -> Builder { Builder { + max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, initial_target_connection_window_size: None, @@ -962,6 +966,24 @@ impl Builder { self } + /// Sets the maximum send buffer size per stream. + /// + /// Once a stream has buffered up to (or over) the maximum, the stream's + /// flow control will not "poll" additional capacity. Once bytes for the + /// stream have been written to the connection, the send buffer capacity + /// will be freed up again. + /// + /// The default is currently ~400MB, but may change. + /// + /// # Panics + /// + /// This function panics if `max` is larger than `u32::MAX`. + pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.max_send_buffer_size = max; + self + } + /// Enables or disables server push promises. /// /// This value is included in the initial SETTINGS handshake. 
When set, the @@ -1184,6 +1206,7 @@ where proto::Config { next_stream_id: builder.stream_id, initial_max_send_streams: builder.initial_max_send_streams, + max_send_buffer_size: builder.max_send_buffer_size, reset_stream_duration: builder.reset_stream_duration, reset_stream_max: builder.reset_stream_max, settings: builder.settings.clone(), diff --git a/src/proto/connection.rs b/src/proto/connection.rs index d1b8b5125..cd011a1d5 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -77,6 +77,7 @@ struct DynConnection<'a, B: Buf = Bytes> { pub(crate) struct Config { pub next_stream_id: StreamId, pub initial_max_send_streams: usize, + pub max_send_buffer_size: usize, pub reset_stream_duration: Duration, pub reset_stream_max: usize, pub settings: frame::Settings, @@ -108,6 +109,7 @@ where .initial_window_size() .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), initial_max_send_streams: config.initial_max_send_streams, + local_max_buffer_size: config.max_send_buffer_size, local_next_stream_id: config.next_stream_id, local_push_enabled: config.settings.is_push_enabled().unwrap_or(true), extended_connect_protocol_enabled: config diff --git a/src/proto/mod.rs b/src/proto/mod.rs index d505e77f3..5ec7bf992 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -33,3 +33,4 @@ pub type WindowSize = u32; pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; +pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index 0fd61a29a..de2a2c85a 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -41,6 +41,9 @@ pub struct Config { /// MAX_CONCURRENT_STREAMS specified in the frame. pub initial_max_send_streams: usize, + /// Max amount of DATA bytes to buffer per stream. + pub local_max_buffer_size: usize, + /// The stream ID to start the next local stream with pub local_next_stream_id: StreamId, diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index e3fcf6d32..b7230030e 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -28,6 +28,9 @@ pub(super) struct Send { /// > the identified last stream. max_stream_id: StreamId, + /// The maximum amount of bytes a stream should buffer. + max_buffer_size: usize, + /// Initial window size of locally initiated streams init_window_sz: WindowSize, @@ -52,6 +55,7 @@ impl Send { pub fn new(config: &Config) -> Self { Send { init_window_sz: config.remote_init_window_sz, + max_buffer_size: config.local_max_buffer_size, max_stream_id: StreamId::MAX, next_stream_id: Ok(config.local_next_stream_id), prioritize: Prioritize::new(config), @@ -333,14 +337,10 @@ impl Send { /// Current available stream send capacity pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { - let available = stream.send_flow.available().as_size(); + let available = stream.send_flow.available().as_size() as usize; let buffered = stream.buffered_send_data; - if available as usize <= buffered { - 0 - } else { - available - buffered as WindowSize - } + available.min(self.max_buffer_size).saturating_sub(buffered) as WindowSize } pub fn poll_reset( diff --git a/src/server.rs b/src/server.rs index 1eb40312c..87c300083 100644 --- a/src/server.rs +++ b/src/server.rs @@ -245,6 +245,9 @@ pub struct Builder { /// Initial target window size for new connections. initial_target_connection_window_size: Option, + + /// Maximum amount of bytes to "buffer" for writing per stream. 
+ max_send_buffer_size: usize, } /// Send a response back to the client @@ -633,6 +636,7 @@ impl Builder { reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, settings: Settings::default(), initial_target_connection_window_size: None, + max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, } } @@ -870,6 +874,24 @@ impl Builder { self } + /// Sets the maximum send buffer size per stream. + /// + /// Once a stream has buffered up to (or over) the maximum, the stream's + /// flow control will not "poll" additional capacity. Once bytes for the + /// stream have been written to the connection, the send buffer capacity + /// will be freed up again. + /// + /// The default is currently ~400MB, but may change. + /// + /// # Panics + /// + /// This function panics if `max` is larger than `u32::MAX`. + pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self { + assert!(max <= std::u32::MAX as usize); + self.max_send_buffer_size = max; + self + } + /// Sets the maximum number of concurrent locally reset streams. /// /// When a stream is explicitly reset by either calling @@ -1290,6 +1312,7 @@ where next_stream_id: 2.into(), // Server does not need to locally initiate any streams initial_max_send_streams: 0, + max_send_buffer_size: self.builder.max_send_buffer_size, reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, settings: self.builder.settings.clone(), diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index e7d630808..1a6018f73 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1611,3 +1611,60 @@ async fn poll_capacity_after_send_data_and_reserve() { join(srv, h2).await; } + +#[tokio::test] +async fn max_send_buffer_size_overflow() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://www.example.com/")) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + srv.recv_frame(frames::data(1, &[0; 10][..])).await; + srv.recv_frame(frames::data(1, &[][..]).eos()).await; + }; + + let client = async move { + let (mut client, mut conn) = client::Builder::new() + .max_send_buffer_size(5) + .handshake::<_, Bytes>(io) + .await + .unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri("https://www.example.com/") + .body(()) + .unwrap(); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + let response = conn.drive(response).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + assert_eq!(stream.capacity(), 0); + stream.reserve_capacity(10); + assert_eq!( + stream.capacity(), + 5, + "polled capacity not over max buffer size" + ); + + stream.send_data([0; 10][..].into(), false).unwrap(); + + stream.reserve_capacity(15); + assert_eq!( + stream.capacity(), + 0, + "now with buffered over the max, don't overflow" + ); + stream.send_data([0; 0][..].into(), true).unwrap(); + + // Wait for the connection to close + conn.await.unwrap(); + }; + + join(srv, client).await; +} From 88037ae0abb63eb1e25e02a2419136eaca34046b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 8 Dec 2021 10:08:35 -0800 Subject: [PATCH 075/178] v0.3.8 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
14fa00350..dc8424624 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.8 (December 8, 2021) + +* Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream. +* Add `max_send_buffer_size` options to client and server builders, and a default of ~400MB. This acts like a high-water mark for the `poll_capacity()` method. +* Fix panic if receiving malformed HEADERS with stream ID of 0. + # 0.3.7 (October 22, 2021) * Fix panic if server sends a malformed frame on a stream client was about to open. diff --git a/Cargo.toml b/Cargo.toml index 7bbf1647e..a92350242 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.7" +version = "0.3.8" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index db6b4888c..d407341ac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.7")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.8")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From a5c60b24dec550ab65e26b948960a421d2e60b3d Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 8 Dec 2021 17:38:41 -0800 Subject: [PATCH 076/178] Fix poll_capacity to wake in combination with max_send_buffer_size --- src/proto/streams/prioritize.rs | 5 ++ src/proto/streams/stream.rs | 11 +++++ tests/h2-tests/tests/flow_control.rs | 74 ++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+) diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index eaaee162b..2347f6f0b 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -741,6 +741,11 @@ impl Prioritize { stream.buffered_send_data -= len as usize; stream.requested_send_capacity -= len; + // If the capacity was limited because of the + // max_send_buffer_size, then consider waking + // the send task again... + stream.notify_if_can_buffer_more(); + // Assign the capacity back to the connection that // was just consumed from the stream in the previous // line. diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 79de47a9a..f67cc3642 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -279,6 +279,17 @@ impl Stream { } } + /// If the capacity was limited because of the max_send_buffer_size, + /// then consider waking the send task again... + pub fn notify_if_can_buffer_more(&mut self) { + // Only notify if the capacity exceeds the amount of buffered data + if self.send_flow.available() > self.buffered_send_data { + self.send_capacity_inc = true; + tracing::trace!(" notifying task"); + self.notify_send(); + } + } + /// Returns `Err` when the decrement cannot be completed due to overflow. 
pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> { match self.content_length { diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 1a6018f73..7adb3d730 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1668,3 +1668,77 @@ async fn max_send_buffer_size_overflow() { join(srv, client).await; } + +#[tokio::test] +async fn max_send_buffer_size_poll_capacity_wakes_task() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://www.example.com/")) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + srv.recv_frame(frames::data(1, &[0; 5][..])).await; + srv.recv_frame(frames::data(1, &[0; 5][..])).await; + srv.recv_frame(frames::data(1, &[0; 5][..])).await; + srv.recv_frame(frames::data(1, &[0; 5][..])).await; + srv.recv_frame(frames::data(1, &[][..]).eos()).await; + }; + + let client = async move { + let (mut client, mut conn) = client::Builder::new() + .max_send_buffer_size(5) + .handshake::<_, Bytes>(io) + .await + .unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri("https://www.example.com/") + .body(()) + .unwrap(); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + let response = conn.drive(response).await.unwrap(); + + assert_eq!(response.status(), StatusCode::OK); + + assert_eq!(stream.capacity(), 0); + const TO_SEND: usize = 20; + stream.reserve_capacity(TO_SEND); + assert_eq!( + stream.capacity(), + 5, + "polled capacity not over max buffer size" + ); + + let t1 = tokio::spawn(async move { + let mut sent = 0; + let buf = [0; TO_SEND]; + loop { + match poll_fn(|cx| stream.poll_capacity(cx)).await { + None => panic!("no cap"), + Some(Err(e)) => panic!("cap error: {:?}", e), + Some(Ok(cap)) => { + stream + .send_data(buf[sent..(sent + cap)].to_vec().into(), false) + .unwrap(); + sent += cap; + if sent >= TO_SEND { + break; + } + } + } + } + stream.send_data(Bytes::new(), true).unwrap(); + }); + + // Wait for the connection to close + conn.await.unwrap(); + t1.await.unwrap(); + }; + + join(srv, client).await; +} From 308663e71e26fee65d1f2aa691469c456d80cf14 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 9 Dec 2021 09:21:13 -0800 Subject: [PATCH 077/178] v0.3.9 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc8424624..971d5f8bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.9 (December 9, 2021) + +* Fix hang related to new `max_send_buffer_size`. + # 0.3.8 (December 8, 2021) * Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream. diff --git a/Cargo.toml b/Cargo.toml index a92350242..680c74f1b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.8" +version = "0.3.9" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index d407341ac..cd07f3e8e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.8")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.9")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From c876dda6d0c85665ed7e91ea2936f37764cb63fe Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 9 Dec 2021 14:21:20 -0800 Subject: [PATCH 078/178] Fix panic when receiving malformed push promise with stream id 0 --- src/frame/headers.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 05d77234f..bcb905013 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -400,6 +400,10 @@ impl PushPromise { let flags = PushPromiseFlag(head.flag()); let mut pad = 0; + if head.stream_id().is_zero() { + return Err(Error::InvalidStreamId); + } + // Read the padding length if flags.is_padded() { if src.is_empty() { From 6336cc3d7b8de71b15ff9ee9e11d537e443519df Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 6 Jan 2022 10:12:29 -0800 Subject: [PATCH 079/178] Add Error::is_go_away() and Error::is_remote() --- src/error.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index fdbfc0d1b..6c8f6ed80 100644 --- a/src/error.rs +++ b/src/error.rs @@ -57,7 +57,7 @@ impl Error { } } - /// Returns the true if the error is an io::Error + /// Returns true if the error is an io::Error pub fn is_io(&self) -> bool { match self.kind { Kind::Io(_) => true, @@ -86,6 +86,21 @@ impl Error { kind: Kind::Io(err), } } + + /// Returns true if the error is from a `GOAWAY`. + pub fn is_go_away(&self) -> bool { + matches!(self.kind, Kind::GoAway(..)) + } + + /// Returns true if the error was received in a frame from the remote. + /// + /// Such as from a received `RST_STREAM` or `GOAWAY` frame. + pub fn is_remote(&self) -> bool { + matches!( + self.kind, + Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote) + ) + } } impl From for Error { From b949d6ef998ce3a8bd805a03352b55985cf2fb0b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 6 Jan 2022 17:02:40 -0800 Subject: [PATCH 080/178] v0.3.10 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 971d5f8bc..052d7da44 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.10 (January 6, 2022) + +* Add `Error::is_go_away()` and `Error::is_remote()` methods. +* Fix panic if receiving malformed PUSH_PROMISE with stream ID of 0. + # 0.3.9 (December 9, 2021) * Fix hang related to new `max_send_buffer_size`. diff --git a/Cargo.toml b/Cargo.toml index 680c74f1b..c8f041837 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.9" +version = "0.3.10" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index cd07f3e8e..8e8e18a76 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.9")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.10")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From d92ba1c45ba4c68df9bdfb1a9e42f51c882d9c50 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 19 Jan 2022 19:49:53 +0100 Subject: [PATCH 081/178] Make SendStream::poll_capacity never return Ok(Some(0)) (#596) Fixes #270 --- src/proto/streams/prioritize.rs | 12 +++++- src/proto/streams/send.rs | 8 ++-- src/proto/streams/stream.rs | 21 +++++----- tests/h2-support/src/util.rs | 6 +++ tests/h2-tests/tests/flow_control.rs | 57 +++++++++++++++++++++++++++- 5 files changed, 85 insertions(+), 19 deletions(-) diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 2347f6f0b..c2904aca9 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -51,6 +51,9 @@ pub(super) struct Prioritize { /// What `DATA` frame is currently being sent in the codec. in_flight_data_frame: InFlightData, + + /// The maximum amount of bytes a stream should buffer. + max_buffer_size: usize, } #[derive(Debug, Eq, PartialEq)] @@ -93,9 +96,14 @@ impl Prioritize { flow, last_opened_id: StreamId::ZERO, in_flight_data_frame: InFlightData::Nothing, + max_buffer_size: config.local_max_buffer_size, } } + pub(crate) fn max_buffer_size(&self) -> usize { + self.max_buffer_size + } + /// Queue a frame to be sent to the remote pub fn queue_frame( &mut self, @@ -424,7 +432,7 @@ impl Prioritize { tracing::trace!(capacity = assign, "assigning"); // Assign the capacity to the stream - stream.assign_capacity(assign); + stream.assign_capacity(assign, self.max_buffer_size); // Claim the capacity from the connection self.flow.claim_capacity(assign); @@ -744,7 +752,7 @@ impl Prioritize { // If the capacity was limited because of the // max_send_buffer_size, then consider waking // the send task again... - stream.notify_if_can_buffer_more(); + stream.notify_if_can_buffer_more(self.max_buffer_size); // Assign the capacity back to the connection that // was just consumed from the stream in the previous diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index b7230030e..2c5a38c80 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -28,9 +28,6 @@ pub(super) struct Send { /// > the identified last stream. max_stream_id: StreamId, - /// The maximum amount of bytes a stream should buffer. 
- max_buffer_size: usize, - /// Initial window size of locally initiated streams init_window_sz: WindowSize, @@ -55,7 +52,6 @@ impl Send { pub fn new(config: &Config) -> Self { Send { init_window_sz: config.remote_init_window_sz, - max_buffer_size: config.local_max_buffer_size, max_stream_id: StreamId::MAX, next_stream_id: Ok(config.local_next_stream_id), prioritize: Prioritize::new(config), @@ -340,7 +336,9 @@ impl Send { let available = stream.send_flow.available().as_size() as usize; let buffered = stream.buffered_send_data; - available.min(self.max_buffer_size).saturating_sub(buffered) as WindowSize + available + .min(self.prioritize.max_buffer_size()) + .saturating_sub(buffered) as WindowSize } pub fn poll_reset( diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index f67cc3642..36d515bad 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -260,30 +260,29 @@ impl Stream { self.ref_count == 0 && !self.state.is_closed() } - pub fn assign_capacity(&mut self, capacity: WindowSize) { + pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { debug_assert!(capacity > 0); - self.send_capacity_inc = true; self.send_flow.assign_capacity(capacity); tracing::trace!( - " assigned capacity to stream; available={}; buffered={}; id={:?}", + " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={}", self.send_flow.available(), self.buffered_send_data, - self.id + self.id, + max_buffer_size ); - // Only notify if the capacity exceeds the amount of buffered data - if self.send_flow.available() > self.buffered_send_data { - tracing::trace!(" notifying task"); - self.notify_send(); - } + self.notify_if_can_buffer_more(max_buffer_size); } /// If the capacity was limited because of the max_send_buffer_size, /// then consider waking the send task again... - pub fn notify_if_can_buffer_more(&mut self) { + pub fn notify_if_can_buffer_more(&mut self, max_buffer_size: usize) { + let available = self.send_flow.available().as_size() as usize; + let buffered = self.buffered_send_data; + // Only notify if the capacity exceeds the amount of buffered data - if self.send_flow.available() > self.buffered_send_data { + if available.min(max_buffer_size) > buffered { self.send_capacity_inc = true; tracing::trace!(" notifying task"); self.notify_send(); diff --git a/tests/h2-support/src/util.rs b/tests/h2-support/src/util.rs index b3322c4d2..1150d5925 100644 --- a/tests/h2-support/src/util.rs +++ b/tests/h2-support/src/util.rs @@ -32,6 +32,7 @@ pub async fn yield_once() { .await; } +/// Should only be called after a non-0 capacity was requested for the stream. pub fn wait_for_capacity(stream: h2::SendStream, target: usize) -> WaitForCapacity { WaitForCapacity { stream: Some(stream), @@ -59,6 +60,11 @@ impl Future for WaitForCapacity { let act = self.stream().capacity(); + // If a non-0 capacity was requested for the stream before calling + // wait_for_capacity, then poll_capacity should return Pending + // until there is a non-0 capacity. + assert_ne!(act, 0); + if act >= self.target { return Poll::Ready(self.stream.take().unwrap().into()); } diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 7adb3d730..92e7a532f 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1600,7 +1600,62 @@ async fn poll_capacity_after_send_data_and_reserve() { // Initial window size was 5 so current capacity is 0 even if we just reserved. 
assert_eq!(stream.capacity(), 0); - // The first call to `poll_capacity` in `wait_for_capacity` will return 0. + // This will panic if there is a bug causing h2 to return Ok(0) from poll_capacity. + let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; + + stream.send_data("".into(), true).unwrap(); + + // Wait for the connection to close + h2.await.unwrap(); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn poll_capacity_after_send_data_and_reserve_with_max_send_buffer_size() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv + .assert_client_handshake_with_settings(frames::settings().initial_window_size(10)) + .await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://www.example.com/")) + .await; + srv.send_frame(frames::headers(1).response(200)).await; + srv.recv_frame(frames::data(1, &b"abcde"[..])).await; + srv.send_frame(frames::window_update(1, 10)).await; + srv.recv_frame(frames::data(1, &b""[..]).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::Builder::new() + .max_send_buffer_size(5) + .handshake::<_, Bytes>(io) + .await + .unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri("https://www.example.com/") + .body(()) + .unwrap(); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + let response = h2.drive(response).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + stream.send_data("abcde".into(), false).unwrap(); + + stream.reserve_capacity(5); + + // Initial window size was 10 but with a max send buffer size of 10 in the client, + // so current capacity is 0 even if we just reserved. + assert_eq!(stream.capacity(), 0); + + // This will panic if there is a bug causing h2 to return Ok(0) from poll_capacity. let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; stream.send_data("".into(), true).unwrap(); From a28a39ca4a489b5337bd60f1c49d3d845f65b50b Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Fri, 21 Jan 2022 19:59:11 +0100 Subject: [PATCH 082/178] Update tracing-subscriber and use tracing-tree when testing (#586) This makes reading the logs way easier on the eyes. 
--- tests/h2-support/Cargo.toml | 4 ++- tests/h2-support/src/trace.rs | 46 +++++++++-------------------------- 2 files changed, 14 insertions(+), 36 deletions(-) diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index e97c6b310..f178178eb 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -7,9 +7,11 @@ edition = "2018" [dependencies] h2 = { path = "../..", features = ["stream", "unstable"] } +atty = "0.2" bytes = "1" tracing = "0.1" -tracing-subscriber = { version = "0.2", default-features = false, features = ["fmt", "chrono", "ansi"] } +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } +tracing-tree = "0.2" futures = { version = "0.3", default-features = false } http = "0.2" tokio = { version = "1", features = ["time"] } diff --git a/tests/h2-support/src/trace.rs b/tests/h2-support/src/trace.rs index 4ac11742c..87038c350 100644 --- a/tests/h2-support/src/trace.rs +++ b/tests/h2-support/src/trace.rs @@ -1,41 +1,17 @@ -use std::{io, str}; pub use tracing; pub use tracing_subscriber; -pub fn init() -> tracing::dispatcher::DefaultGuard { - tracing::subscriber::set_default( - tracing_subscriber::fmt() - .with_max_level(tracing::Level::TRACE) - .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE) - .with_writer(PrintlnWriter { _p: () }) - .finish(), - ) -} - -struct PrintlnWriter { - _p: (), -} +use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::util::SubscriberInitExt; -impl tracing_subscriber::fmt::MakeWriter for PrintlnWriter { - type Writer = PrintlnWriter; - fn make_writer(&self) -> Self::Writer { - PrintlnWriter { _p: () } - } -} - -impl io::Write for PrintlnWriter { - fn write(&mut self, buf: &[u8]) -> io::Result { - let s = str::from_utf8(buf).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))?; - println!("{}", s); - Ok(s.len()) - } - - fn write_fmt(&mut self, fmt: std::fmt::Arguments<'_>) -> io::Result<()> { - println!("{}", fmt); - Ok(()) - } +pub fn init() -> tracing::dispatcher::DefaultGuard { + let use_colors = atty::is(atty::Stream::Stdout); + let layer = tracing_tree::HierarchicalLayer::default() + .with_writer(tracing_subscriber::fmt::writer::TestWriter::default()) + .with_indent_lines(true) + .with_ansi(use_colors) + .with_targets(true) + .with_indent_amount(2); - fn flush(&mut self) -> io::Result<()> { - Ok(()) - } + tracing_subscriber::registry().with(layer).set_default() } From 7de2ccc1a3ca2a1ec265a686dc70a64dbcd56270 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 21 Jan 2022 15:41:39 -0800 Subject: [PATCH 083/178] fix panic when receiving already reset push promise (#597) Found by oss-fuzz --- src/proto/streams/recv.rs | 10 ++++++++++ src/proto/streams/streams.rs | 4 ++++ 2 files changed, 14 insertions(+) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index e613c26b3..1754ab4dd 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -786,6 +786,16 @@ impl Recv { } } + pub(super) fn maybe_reset_next_stream_id(&mut self, id: StreamId) { + if let Ok(next_id) = self.next_stream_id { + // !Peer::is_local_init should have been called beforehand + debug_assert_eq!(id.is_server_initiated(), next_id.is_server_initiated()); + if id >= next_id { + self.next_stream_id = id.next_id(); + } + } + } + /// Returns true if the remote peer can reserve a stream with the given ID. 
pub fn ensure_can_reserve(&self) -> Result<(), Error> { if !self.is_push_enabled { diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 5c235c15c..3e7ae97d9 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -883,6 +883,10 @@ impl Inner { // We normally would open this stream, so update our // next-send-id record. self.actions.send.maybe_reset_next_stream_id(id); + } else { + // We normally would recv this stream, so update our + // next-recv-id record. + self.actions.recv.maybe_reset_next_stream_id(id); } let stream = Stream::new(id, 0, 0); From 556447c130dd38e905525c62e895d95dc8b04da3 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 26 Jan 2022 11:18:28 +0100 Subject: [PATCH 084/178] Make use of NLL to clean up handshaking logic (#576) --- .github/workflows/CI.yml | 29 ++++++++ src/server.rs | 151 +++++++++++++++------------------------ 2 files changed, 86 insertions(+), 94 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 0dab7b3dd..775ce32e2 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -83,3 +83,32 @@ jobs: - name: Check minimal versions run: cargo clean; cargo update -Zminimal-versions; cargo check if: matrix.rust == 'nightly' + + msrv: + name: Check MSRV (${{ matrix.rust }}) + needs: [style] + strategy: + matrix: + rust: + - 1.46 # never go past Hyper's own MSRV + + os: + - ubuntu-latest + + runs-on: ${{ matrix.os }} + + steps: + - name: Checkout + uses: actions/checkout@v1 + + - name: Install Rust (${{ matrix.rust }}) + uses: actions-rs/toolchain@v1 + with: + profile: minimal + toolchain: ${{ matrix.rust }} + override: true + + - name: Check + uses: actions-rs/cargo@v1 + with: + command: check diff --git a/src/server.rs b/src/server.rs index 87c300083..f82b05011 100644 --- a/src/server.rs +++ b/src/server.rs @@ -126,7 +126,7 @@ use std::future::Future; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; -use std::{convert, fmt, io, mem}; +use std::{fmt, io}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; use tracing::instrument::{Instrument, Instrumented}; @@ -301,8 +301,8 @@ enum Handshaking { Flushing(Instrumented>>), /// State 2. Connection is waiting for the client preface. ReadingPreface(Instrumented>>), - /// Dummy state for `mem::replace`. - Empty, + /// State 3. Handshake is done, polling again would panic. + Done, } /// Flush a Sink @@ -387,7 +387,8 @@ where .expect("invalid SETTINGS frame"); // Create the handshake future. - let state = Handshaking::from(codec); + let state = + Handshaking::Flushing(Flush::new(codec).instrument(tracing::trace_span!("flush"))); drop(entered); @@ -1269,63 +1270,58 @@ where let span = self.span.clone(); // XXX(eliza): T_T let _e = span.enter(); tracing::trace!(state = ?self.state); - use crate::server::Handshaking::*; - - self.state = if let Flushing(ref mut flush) = self.state { - // We're currently flushing a pending SETTINGS frame. Poll the - // flush future, and, if it's completed, advance our state to wait - // for the client preface. - let codec = match Pin::new(flush).poll(cx)? { - Poll::Pending => { - tracing::trace!(flush.poll = %"Pending"); - return Poll::Pending; + + loop { + match &mut self.state { + Handshaking::Flushing(flush) => { + // We're currently flushing a pending SETTINGS frame. Poll the + // flush future, and, if it's completed, advance our state to wait + // for the client preface. + let codec = match Pin::new(flush).poll(cx)? 
{ + Poll::Pending => { + tracing::trace!(flush.poll = %"Pending"); + return Poll::Pending; + } + Poll::Ready(flushed) => { + tracing::trace!(flush.poll = %"Ready"); + flushed + } + }; + self.state = Handshaking::ReadingPreface( + ReadPreface::new(codec).instrument(tracing::trace_span!("read_preface")), + ); } - Poll::Ready(flushed) => { - tracing::trace!(flush.poll = %"Ready"); - flushed + Handshaking::ReadingPreface(read) => { + let codec = ready!(Pin::new(read).poll(cx)?); + + self.state = Handshaking::Done; + + let connection = proto::Connection::new( + codec, + Config { + next_stream_id: 2.into(), + // Server does not need to locally initiate any streams + initial_max_send_streams: 0, + max_send_buffer_size: self.builder.max_send_buffer_size, + reset_stream_duration: self.builder.reset_stream_duration, + reset_stream_max: self.builder.reset_stream_max, + settings: self.builder.settings.clone(), + }, + ); + + tracing::trace!("connection established!"); + let mut c = Connection { connection }; + if let Some(sz) = self.builder.initial_target_connection_window_size { + c.set_target_window_size(sz); + } + + return Poll::Ready(Ok(c)); + } + Handshaking::Done => { + panic!("Handshaking::poll() called again after handshaking was complete") } - }; - Handshaking::from(ReadPreface::new(codec)) - } else { - // Otherwise, we haven't actually advanced the state, but we have - // to replace it with itself, because we have to return a value. - // (note that the assignment to `self.state` has to be outside of - // the `if let` block above in order to placate the borrow checker). - mem::replace(&mut self.state, Handshaking::Empty) - }; - let poll = if let ReadingPreface(ref mut read) = self.state { - // We're now waiting for the client preface. Poll the `ReadPreface` - // future. If it has completed, we will create a `Connection` handle - // for the connection. - Pin::new(read).poll(cx) - // Actually creating the `Connection` has to occur outside of this - // `if let` block, because we've borrowed `self` mutably in order - // to poll the state and won't be able to borrow the SETTINGS frame - // as well until we release the borrow for `poll()`. 
- } else { - unreachable!("Handshake::poll() state was not advanced completely!") - }; - poll?.map(|codec| { - let connection = proto::Connection::new( - codec, - Config { - next_stream_id: 2.into(), - // Server does not need to locally initiate any streams - initial_max_send_streams: 0, - max_send_buffer_size: self.builder.max_send_buffer_size, - reset_stream_duration: self.builder.reset_stream_duration, - reset_stream_max: self.builder.reset_stream_max, - settings: self.builder.settings.clone(), - }, - ); - - tracing::trace!("connection established!"); - let mut c = Connection { connection }; - if let Some(sz) = self.builder.initial_target_connection_window_size { - c.set_target_window_size(sz); } - Ok(c) - }) + } } } @@ -1548,42 +1544,9 @@ where #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { - Handshaking::Flushing(_) => write!(f, "Handshaking::Flushing(_)"), - Handshaking::ReadingPreface(_) => write!(f, "Handshaking::ReadingPreface(_)"), - Handshaking::Empty => write!(f, "Handshaking::Empty"), + Handshaking::Flushing(_) => f.write_str("Flushing(_)"), + Handshaking::ReadingPreface(_) => f.write_str("ReadingPreface(_)"), + Handshaking::Done => f.write_str("Done"), } } } - -impl convert::From>> for Handshaking -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - #[inline] - fn from(flush: Flush>) -> Self { - Handshaking::Flushing(flush.instrument(tracing::trace_span!("flush"))) - } -} - -impl convert::From>> for Handshaking -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - #[inline] - fn from(read: ReadPreface>) -> Self { - Handshaking::ReadingPreface(read.instrument(tracing::trace_span!("read_preface"))) - } -} - -impl convert::From>> for Handshaking -where - T: AsyncRead + AsyncWrite, - B: Buf, -{ - #[inline] - fn from(codec: Codec>) -> Self { - Handshaking::from(Flush::new(codec)) - } -} From b0d01bb200cbc50c81786875fdb0117aeaf0e969 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Wed, 26 Jan 2022 11:23:42 +0100 Subject: [PATCH 085/178] v0.3.11 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 052d7da44..fdc622d73 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.11 (January 26, 2022) + +* Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596) +* Fix panic when receiving already reset push promise (#597) + # 0.3.10 (January 6, 2022) * Add `Error::is_go_away()` and `Error::is_remote()` methods. diff --git a/Cargo.toml b/Cargo.toml index c8f041837..fd693e185 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.10" +version = "0.3.11" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 8e8e18a76..e9e25e3f6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.10")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.11")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 4dc2b4a16431d3edeaa5394f370b8e48b753a63b Mon Sep 17 00:00:00 2001 From: Oliver Gould Date: Mon, 31 Jan 2022 23:52:42 +0000 Subject: [PATCH 086/178] Avoid time operations that can panic We have reports of runtime panics (linkerd/linkerd2#7748) that sound a lot like rust-lang/rust#86470. 
We don't have any evidence that these panics originate in h2, but there is one use of `Instant::sub` that could panic in this way. Even though this is almost definitely a bug in Rust, it seems most prudent to actively avoid the uses of `Instant` that are prone to this bug. These fixes should ultimately be made in the standard library, but this change lets us avoid this problem while we wait for those fixes. This change replaces uses of `Instant::elapsed` and `Instant::sub` with calls to `Instant::saturating_duration_since` to prevent this class of panic. See also hyperium/hyper#2746 --- src/proto/streams/recv.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 1754ab4dd..3af1af3a1 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -860,7 +860,10 @@ impl Recv { let reset_duration = self.reset_duration; while let Some(stream) = self.pending_reset_expired.pop_if(store, |stream| { let reset_at = stream.reset_at.expect("reset_at must be set if in queue"); - now - reset_at > reset_duration + // rust-lang/rust#86470 tracks a bug in the standard library where `Instant` + // subtraction can panic (because, on some platforms, `Instant` isn't actually + // monotonic). We use a saturating operation to avoid this panic here. + now.saturating_duration_since(reset_at) > reset_duration }) { counts.transition_after(stream, true); } From 47e9f62fb574ba52800a1a1067a32ef9c8ecd831 Mon Sep 17 00:00:00 2001 From: lucas Date: Sun, 6 Feb 2022 18:06:57 +0000 Subject: [PATCH 087/178] There's no such thing as HTTP/2.0 --- Cargo.toml | 2 +- README.md | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index fd693e185..6141dae5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ authors = [ "Carl Lerche ", "Sean McArthur ", ] -description = "An HTTP/2.0 client and server" +description = "An HTTP/2 client and server" documentation = "https://docs.rs/h2" repository = "https://github.com/hyperium/h2" readme = "README.md" diff --git a/README.md b/README.md index 63627b706..2e1599914 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # H2 -A Tokio aware, HTTP/2.0 client & server implementation for Rust. +A Tokio aware, HTTP/2 client & server implementation for Rust. [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2) @@ -12,21 +12,21 @@ More information about this crate can be found in the [crate documentation][dox] ## Features -* Client and server HTTP/2.0 implementation. -* Implements the full HTTP/2.0 specification. +* Client and server HTTP/2 implementation. +* Implements the full HTTP/2 specification. * Passes [h2spec](https://github.com/summerwind/h2spec). * Focus on performance and correctness. * Built on [Tokio](https://tokio.rs). ## Non goals -This crate is intended to only be an implementation of the HTTP/2.0 +This crate is intended to only be an implementation of the HTTP/2 specification. It does not handle: * Managing TCP connections * HTTP 1.0 upgrade * TLS -* Any feature not described by the HTTP/2.0 specification. +* Any feature not described by the HTTP/2 specification. This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features. 
@@ -55,7 +55,7 @@ fn main() { **How does h2 compare to [solicit] or [rust-http2]?** -The h2 library has implemented more of the details of the HTTP/2.0 specification +The h2 library has implemented more of the details of the HTTP/2 specification than any other Rust library. It also passes the [h2spec] set of tests. The h2 library is rapidly approaching "production ready" quality. From 4c31a320beb01584b55cfcb44b96c0f151324af6 Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Fri, 11 Feb 2022 10:22:53 +0100 Subject: [PATCH 088/178] Upgrade dev-dependencies --- Cargo.toml | 16 ++- examples/akamai.rs | 24 +++-- fuzz/Cargo.toml | 2 - src/fuzz_bridge.rs | 2 +- src/hpack/test/fuzz.rs | 208 +++++++++++++++++++------------------ tests/h2-fuzz/Cargo.toml | 2 +- util/genfixture/Cargo.toml | 2 +- 7 files changed, 132 insertions(+), 124 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6141dae5b..cf9d3e437 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,22 +55,20 @@ indexmap = { version = "1.5.2", features = ["std"] } [dev-dependencies] # Fuzzing -quickcheck = { version = "0.4.1", default-features = false } -rand = "0.3.15" +quickcheck = { version = "1.0.3", default-features = false } +rand = "0.8.4" # HPACK fixtures -hex = "0.2.0" -walkdir = "1.0.0" +hex = "0.4.3" +walkdir = "2.3.2" serde = "1.0.0" serde_json = "1.0.0" # Examples tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "net"] } -env_logger = { version = "0.5.3", default-features = false } -rustls = "0.19" -tokio-rustls = "0.22" -webpki = "0.21" -webpki-roots = "0.21" +env_logger = { version = "0.9", default-features = false } +tokio-rustls = "0.23.2" +webpki-roots = "0.22.2" [package.metadata.docs.rs] features = ["stream"] diff --git a/examples/akamai.rs b/examples/akamai.rs index 29d8a9347..e522b37ff 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -3,9 +3,9 @@ use http::{Method, Request}; use tokio::net::TcpStream; use tokio_rustls::TlsConnector; -use rustls::Session; -use webpki::DNSNameRef; +use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName}; +use std::convert::TryFrom; use std::error::Error; use std::net::ToSocketAddrs; @@ -16,9 +16,19 @@ pub async fn main() -> Result<(), Box> { let _ = env_logger::try_init(); let tls_client_config = std::sync::Arc::new({ - let mut c = rustls::ClientConfig::new(); - c.root_store - .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); + let mut root_store = RootCertStore::empty(); + root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| { + OwnedTrustAnchor::from_subject_spki_name_constraints( + ta.subject, + ta.spki, + ta.name_constraints, + ) + })); + + let mut c = tokio_rustls::rustls::ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates(root_store) + .with_no_client_auth(); c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned()); c }); @@ -33,13 +43,13 @@ pub async fn main() -> Result<(), Box> { println!("ADDR: {:?}", addr); let tcp = TcpStream::connect(&addr).await?; - let dns_name = DNSNameRef::try_from_ascii_str("http2.akamai.com").unwrap(); + let dns_name = ServerName::try_from("http2.akamai.com").unwrap(); let connector = TlsConnector::from(tls_client_config); let res = connector.connect(dns_name, tcp).await; let tls = res.unwrap(); { let (_, session) = tls.get_ref(); - let negotiated_protocol = session.get_alpn_protocol(); + let negotiated_protocol = session.alpn_protocol(); assert_eq!( Some(ALPN_H2.as_bytes()), negotiated_protocol.as_ref().map(|x| &**x) diff --git 
a/fuzz/Cargo.toml b/fuzz/Cargo.toml index ca32138e2..aafb60ae7 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -13,12 +13,10 @@ cargo-fuzz = true arbitrary = { version = "1", features = ["derive"] } libfuzzer-sys = { version = "0.4.0", features = ["arbitrary-derive"] } tokio = { version = "1", features = [ "full" ] } -bytes = "0.5.2" h2 = { path = "../", features = [ "unstable" ] } h2-support = { path = "../tests/h2-support" } futures = { version = "0.3", default-features = false, features = ["std"] } http = "0.2" -env_logger = { version = "0.5.3", default-features = false } # Prevent this from interfering with workspaces [workspace] diff --git a/src/fuzz_bridge.rs b/src/fuzz_bridge.rs index 6132deeb4..3ea8b591c 100644 --- a/src/fuzz_bridge.rs +++ b/src/fuzz_bridge.rs @@ -1,7 +1,7 @@ #[cfg(fuzzing)] pub mod fuzz_logic { use crate::hpack; - use bytes::{BufMut, BytesMut}; + use bytes::BytesMut; use http::header::HeaderName; use std::io::Cursor; diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index 1d05a97c5..ad0d47b6b 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -4,7 +4,9 @@ use http::header::{HeaderName, HeaderValue}; use bytes::BytesMut; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; -use rand::{Rng, SeedableRng, StdRng}; +use rand::distributions::Slice; +use rand::rngs::StdRng; +use rand::{thread_rng, Rng, SeedableRng}; use std::io::Cursor; @@ -46,9 +48,9 @@ struct HeaderFrame { } impl FuzzHpack { - fn new(seed: [usize; 4]) -> FuzzHpack { + fn new(seed: [u8; 32]) -> FuzzHpack { // Seed the RNG - let mut rng = StdRng::from_seed(&seed); + let mut rng = StdRng::from_seed(seed); // Generates a bunch of source headers let mut source: Vec>> = vec![]; @@ -58,12 +60,12 @@ impl FuzzHpack { } // Actual test run headers - let num: usize = rng.gen_range(40, 500); + let num: usize = rng.gen_range(40..500); let mut frames: Vec = vec![]; let mut added = 0; - let skew: i32 = rng.gen_range(1, 5); + let skew: i32 = rng.gen_range(1..5); // Rough number of headers to add while added < num { @@ -72,24 +74,24 @@ impl FuzzHpack { headers: vec![], }; - match rng.gen_range(0, 20) { + match rng.gen_range(0..20) { 0 => { // Two resizes - let high = rng.gen_range(128, MAX_CHUNK * 2); - let low = rng.gen_range(0, high); + let high = rng.gen_range(128..MAX_CHUNK * 2); + let low = rng.gen_range(0..high); frame.resizes.extend(&[low, high]); } 1..=3 => { - frame.resizes.push(rng.gen_range(128, MAX_CHUNK * 2)); + frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2)); } _ => {} } let mut is_name_required = true; - for _ in 0..rng.gen_range(1, (num - added) + 1) { - let x: f64 = rng.gen_range(0.0, 1.0); + for _ in 0..rng.gen_range(1..(num - added) + 1) { + let x: f64 = rng.gen_range(0.0..1.0); let x = x.powi(skew); let i = (x * source.len() as f64) as usize; @@ -177,31 +179,31 @@ impl FuzzHpack { } impl Arbitrary for FuzzHpack { - fn arbitrary(g: &mut G) -> Self { - FuzzHpack::new(quickcheck::Rng::gen(g)) + fn arbitrary(_: &mut Gen) -> Self { + FuzzHpack::new(thread_rng().gen()) } } fn gen_header(g: &mut StdRng) -> Header> { use http::{Method, StatusCode}; - if g.gen_weighted_bool(10) { - match g.next_u32() % 5 { + if g.gen_ratio(1, 10) { + match g.gen_range(0u32..5) { 0 => { let value = gen_string(g, 4, 20); Header::Authority(to_shared(value)) } 1 => { - let method = match g.next_u32() % 6 { + let method = match g.gen_range(0u32..6) { 0 => Method::GET, 1 => Method::POST, 2 => Method::PUT, 3 => Method::PATCH, 4 => Method::DELETE, 5 => { - let n: usize = g.gen_range(3, 7); 
+ let n: usize = g.gen_range(3..7); let bytes: Vec = (0..n) - .map(|_| g.choose(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap().clone()) + .map(|_| *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap())) .collect(); Method::from_bytes(&bytes).unwrap() @@ -212,7 +214,7 @@ fn gen_header(g: &mut StdRng) -> Header> { Header::Method(method) } 2 => { - let value = match g.next_u32() % 2 { + let value = match g.gen_range(0u32..2) { 0 => "http", 1 => "https", _ => unreachable!(), @@ -221,7 +223,7 @@ fn gen_header(g: &mut StdRng) -> Header> { Header::Scheme(to_shared(value.to_string())) } 3 => { - let value = match g.next_u32() % 100 { + let value = match g.gen_range(0u32..100) { 0 => "/".to_string(), 1 => "/index.html".to_string(), _ => gen_string(g, 2, 20), @@ -237,14 +239,14 @@ fn gen_header(g: &mut StdRng) -> Header> { _ => unreachable!(), } } else { - let name = if g.gen_weighted_bool(10) { + let name = if g.gen_ratio(1, 10) { None } else { Some(gen_header_name(g)) }; let mut value = gen_header_value(g); - if g.gen_weighted_bool(30) { + if g.gen_ratio(1, 30) { value.set_sensitive(true); } @@ -255,84 +257,86 @@ fn gen_header(g: &mut StdRng) -> Header> { fn gen_header_name(g: &mut StdRng) -> HeaderName { use http::header; - if g.gen_weighted_bool(2) { - g.choose(&[ - header::ACCEPT, - header::ACCEPT_CHARSET, - header::ACCEPT_ENCODING, - header::ACCEPT_LANGUAGE, - header::ACCEPT_RANGES, - header::ACCESS_CONTROL_ALLOW_CREDENTIALS, - header::ACCESS_CONTROL_ALLOW_HEADERS, - header::ACCESS_CONTROL_ALLOW_METHODS, - header::ACCESS_CONTROL_ALLOW_ORIGIN, - header::ACCESS_CONTROL_EXPOSE_HEADERS, - header::ACCESS_CONTROL_MAX_AGE, - header::ACCESS_CONTROL_REQUEST_HEADERS, - header::ACCESS_CONTROL_REQUEST_METHOD, - header::AGE, - header::ALLOW, - header::ALT_SVC, - header::AUTHORIZATION, - header::CACHE_CONTROL, - header::CONNECTION, - header::CONTENT_DISPOSITION, - header::CONTENT_ENCODING, - header::CONTENT_LANGUAGE, - header::CONTENT_LENGTH, - header::CONTENT_LOCATION, - header::CONTENT_RANGE, - header::CONTENT_SECURITY_POLICY, - header::CONTENT_SECURITY_POLICY_REPORT_ONLY, - header::CONTENT_TYPE, - header::COOKIE, - header::DNT, - header::DATE, - header::ETAG, - header::EXPECT, - header::EXPIRES, - header::FORWARDED, - header::FROM, - header::HOST, - header::IF_MATCH, - header::IF_MODIFIED_SINCE, - header::IF_NONE_MATCH, - header::IF_RANGE, - header::IF_UNMODIFIED_SINCE, - header::LAST_MODIFIED, - header::LINK, - header::LOCATION, - header::MAX_FORWARDS, - header::ORIGIN, - header::PRAGMA, - header::PROXY_AUTHENTICATE, - header::PROXY_AUTHORIZATION, - header::PUBLIC_KEY_PINS, - header::PUBLIC_KEY_PINS_REPORT_ONLY, - header::RANGE, - header::REFERER, - header::REFERRER_POLICY, - header::REFRESH, - header::RETRY_AFTER, - header::SERVER, - header::SET_COOKIE, - header::STRICT_TRANSPORT_SECURITY, - header::TE, - header::TRAILER, - header::TRANSFER_ENCODING, - header::USER_AGENT, - header::UPGRADE, - header::UPGRADE_INSECURE_REQUESTS, - header::VARY, - header::VIA, - header::WARNING, - header::WWW_AUTHENTICATE, - header::X_CONTENT_TYPE_OPTIONS, - header::X_DNS_PREFETCH_CONTROL, - header::X_FRAME_OPTIONS, - header::X_XSS_PROTECTION, - ]) - .unwrap() + if g.gen_ratio(1, 2) { + g.sample( + Slice::new(&[ + header::ACCEPT, + header::ACCEPT_CHARSET, + header::ACCEPT_ENCODING, + header::ACCEPT_LANGUAGE, + header::ACCEPT_RANGES, + header::ACCESS_CONTROL_ALLOW_CREDENTIALS, + header::ACCESS_CONTROL_ALLOW_HEADERS, + header::ACCESS_CONTROL_ALLOW_METHODS, + header::ACCESS_CONTROL_ALLOW_ORIGIN, + 
header::ACCESS_CONTROL_EXPOSE_HEADERS, + header::ACCESS_CONTROL_MAX_AGE, + header::ACCESS_CONTROL_REQUEST_HEADERS, + header::ACCESS_CONTROL_REQUEST_METHOD, + header::AGE, + header::ALLOW, + header::ALT_SVC, + header::AUTHORIZATION, + header::CACHE_CONTROL, + header::CONNECTION, + header::CONTENT_DISPOSITION, + header::CONTENT_ENCODING, + header::CONTENT_LANGUAGE, + header::CONTENT_LENGTH, + header::CONTENT_LOCATION, + header::CONTENT_RANGE, + header::CONTENT_SECURITY_POLICY, + header::CONTENT_SECURITY_POLICY_REPORT_ONLY, + header::CONTENT_TYPE, + header::COOKIE, + header::DNT, + header::DATE, + header::ETAG, + header::EXPECT, + header::EXPIRES, + header::FORWARDED, + header::FROM, + header::HOST, + header::IF_MATCH, + header::IF_MODIFIED_SINCE, + header::IF_NONE_MATCH, + header::IF_RANGE, + header::IF_UNMODIFIED_SINCE, + header::LAST_MODIFIED, + header::LINK, + header::LOCATION, + header::MAX_FORWARDS, + header::ORIGIN, + header::PRAGMA, + header::PROXY_AUTHENTICATE, + header::PROXY_AUTHORIZATION, + header::PUBLIC_KEY_PINS, + header::PUBLIC_KEY_PINS_REPORT_ONLY, + header::RANGE, + header::REFERER, + header::REFERRER_POLICY, + header::REFRESH, + header::RETRY_AFTER, + header::SERVER, + header::SET_COOKIE, + header::STRICT_TRANSPORT_SECURITY, + header::TE, + header::TRAILER, + header::TRANSFER_ENCODING, + header::USER_AGENT, + header::UPGRADE, + header::UPGRADE_INSECURE_REQUESTS, + header::VARY, + header::VIA, + header::WARNING, + header::WWW_AUTHENTICATE, + header::X_CONTENT_TYPE_OPTIONS, + header::X_DNS_PREFETCH_CONTROL, + header::X_FRAME_OPTIONS, + header::X_XSS_PROTECTION, + ]) + .unwrap(), + ) .clone() } else { let value = gen_string(g, 1, 25); @@ -349,9 +353,7 @@ fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String { let bytes: Vec<_> = (min..max) .map(|_| { // Chars to pick from - g.choose(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----") - .unwrap() - .clone() + *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----").unwrap()) }) .collect(); diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index 524627f31..dadb62c92 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -8,7 +8,7 @@ edition = "2018" [dependencies] h2 = { path = "../.." 
} -env_logger = { version = "0.5.3", default-features = false } +env_logger = { version = "0.9", default-features = false } futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" http = "0.2" diff --git a/util/genfixture/Cargo.toml b/util/genfixture/Cargo.toml index 694a99496..cce7eb1b1 100644 --- a/util/genfixture/Cargo.toml +++ b/util/genfixture/Cargo.toml @@ -6,4 +6,4 @@ publish = false edition = "2018" [dependencies] -walkdir = "1.0.0" +walkdir = "2.3.2" From 7bb14625ba7c4450c549ff0c2d06ad76f59170dd Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 23 Feb 2022 16:54:44 -0800 Subject: [PATCH 089/178] Bump MSRV to 1.49, since Tokio uses it --- .github/workflows/CI.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 775ce32e2..874af81fe 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -90,7 +90,7 @@ jobs: strategy: matrix: rust: - - 1.46 # never go past Hyper's own MSRV + - 1.49 # never go past Hyper's own MSRV os: - ubuntu-latest From 85549fca19a24e971009fc11a8477429e86c1fd1 Mon Sep 17 00:00:00 2001 From: hikaricai <13061980190@163.com> Date: Thu, 24 Feb 2022 09:09:04 +0800 Subject: [PATCH 090/178] fix header parsing: consume buf only if header name and value are both decoded Decoding error when processing continuation header which contains normal header name at boundary --- src/hpack/decoder.rs | 101 ++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 94 insertions(+), 7 deletions(-) diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index e4b34d1fc..988b48db1 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -142,6 +142,12 @@ struct Table { max_size: usize, } +struct StringMarker { + offset: usize, + len: usize, + string: Option, +} + // ===== impl Decoder ===== impl Decoder { @@ -279,10 +285,13 @@ impl Decoder { // First, read the header name if table_idx == 0 { + let old_pos = buf.position(); + let name_marker = self.try_decode_string(buf)?; + let value_marker = self.try_decode_string(buf)?; + buf.set_position(old_pos); // Read the name as a literal - let name = self.decode_string(buf)?; - let value = self.decode_string(buf)?; - + let name = name_marker.consume(buf); + let value = value_marker.consume(buf); Header::new(name, value) } else { let e = self.table.get(table_idx)?; @@ -292,7 +301,11 @@ impl Decoder { } } - fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { + fn try_decode_string( + &mut self, + buf: &mut Cursor<&mut BytesMut>, + ) -> Result { + let old_pos = buf.position(); const HUFF_FLAG: u8 = 0b1000_0000; // The first bit in the first byte contains the huffman encoded flag. 
@@ -309,17 +322,34 @@ impl Decoder { return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); } + let offset = (buf.position() - old_pos) as usize; if huff { let ret = { let raw = &buf.chunk()[..len]; - huffman::decode(raw, &mut self.buffer).map(BytesMut::freeze) + huffman::decode(raw, &mut self.buffer).map(|buf| StringMarker { + offset, + len, + string: Some(BytesMut::freeze(buf)), + }) }; buf.advance(len); - return ret; + ret + } else { + buf.advance(len); + Ok(StringMarker { + offset, + len, + string: None, + }) } + } - Ok(take(buf, len)) + fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result { + let old_pos = buf.position(); + let marker = self.try_decode_string(buf)?; + buf.set_position(old_pos); + Ok(marker.consume(buf)) } } @@ -433,6 +463,19 @@ fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes { head.freeze() } +impl StringMarker { + fn consume(self, buf: &mut Cursor<&mut BytesMut>) -> Bytes { + buf.advance(self.offset); + match self.string { + Some(string) => { + buf.advance(self.len); + string + } + None => take(buf, self.len), + } + } +} + fn consume(buf: &mut Cursor<&mut BytesMut>) { // remove bytes from the internal BytesMut when they have been successfully // decoded. This is a more permanent cursor position, which will be @@ -850,4 +893,48 @@ mod test { huffman::encode(src, &mut buf); buf } + + #[test] + fn test_decode_continuation_header_with_non_huff_encoded_name() { + let mut de = Decoder::new(0); + let value = huff_encode(b"bar"); + let mut buf = BytesMut::new(); + // header name is non_huff encoded + buf.extend(&[0b01000000, 0x00 | 3]); + buf.extend(b"foo"); + // header value is partial + buf.extend(&[0x80 | 3]); + buf.extend(&value[0..1]); + + let mut res = vec![]; + let e = de + .decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap_err(); + // decode error because the header value is partial + assert_eq!(e, DecoderError::NeedMore(NeedMore::StringUnderflow)); + + // extend buf with the remaining header value + buf.extend(&value[1..]); + let _ = de + .decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap(); + + assert_eq!(res.len(), 1); + assert_eq!(de.table.size(), 0); + + match res[0] { + Header::Field { + ref name, + ref value, + } => { + assert_eq!(name, "foo"); + assert_eq!(value, "bar"); + } + _ => panic!(), + } + } } From b8eab381c053ccf3ebf99d3ef1c0fd27f5e11d89 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Tue, 8 Mar 2022 15:54:31 -0800 Subject: [PATCH 091/178] tracing: remove I/O type names from handshake spans (#608) ## Motivation Currently, the `tracing` spans for the client and server handshakes contain the name of the I/O type. 
In some cases, where nested I/O types are in use, these names can be quite long; for example, in Linkerd, we see log lines like this: ``` 2022-03-07T23:38:15.322506670Z [ 10533.916262s] DEBUG ThreadId(01) inbound:accept{client.addr=192.168.1.9:1227}:server{port=4143}:direct:gateway{dst=server.echo.svc.cluster.local:8080}:server_handshake{io=hyper::common::io::rewind::Rewind, linkerd_io::prefixed::PrefixedIo>>>, linkerd_io::either::EitherIo, linkerd_io::prefixed::PrefixedIo>>>>, linkerd_transport_metrics::sensor::Sensor>, linkerd_io::sensor::SensorIo, linkerd_io::prefixed::PrefixedIo>>>, linkerd_io::either::EitherIo, linkerd_io::prefixed::PrefixedIo>>>, linkerd_transport_metrics::sensor::Sensor>>>}:FramedWrite::buffer{frame=Settings { flags: (0x0), initial_window_size: 65535, max_frame_size: 16384 }}: h2::codec::framed_write: send frame=Settings { flags: (0x0), initial_window_size: 65535, max_frame_size: 16384 } ``` which is kinda not great. ## Solution This branch removes the IO type's type name from the spans for the server and client handshakes. In practice, these are not particularly useful, because a given server or client instance is parameterized over the IO types and will only serve connections of that type. --- src/client.rs | 2 +- src/server.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index d4ec3b906..e75cd3507 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1154,7 +1154,7 @@ where let builder = Builder::new(); builder .handshake(io) - .instrument(tracing::trace_span!("client_handshake", io = %std::any::type_name::())) + .instrument(tracing::trace_span!("client_handshake")) .await } diff --git a/src/server.rs b/src/server.rs index f82b05011..16a50da4b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -367,7 +367,7 @@ where B: Buf + 'static, { fn handshake2(io: T, builder: Builder) -> Handshake { - let span = tracing::trace_span!("server_handshake", io = %std::any::type_name::()); + let span = tracing::trace_span!("server_handshake"); let entered = span.enter(); // Create the codec. From 3383ef71e2dabe27fe979e8b2c82b61b51eca9f5 Mon Sep 17 00:00:00 2001 From: Eliza Weisman Date: Wed, 9 Mar 2022 09:22:49 -0800 Subject: [PATCH 092/178] v0.3.12 # 0.3.12 (March 9, 2022) * Avoid time operations that can panic (#599) * Bump MSRV to Rust 1.49 (#606) * Fix header decoding error when a header name is contained at a continuation header boundary (#589) * Remove I/O type names from handshake `tracing` spans (#608) --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fdc622d73..7cff904f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.3.12 (March 9, 2022) + +* Avoid time operations that can panic (#599) +* Bump MSRV to Rust 1.49 (#606) +* Fix header decoding error when a header name is contained at a continuation + header boundary (#589) +* Remove I/O type names from handshake `tracing` spans (#608) + # 0.3.11 (January 26, 2022) * Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596) diff --git a/Cargo.toml b/Cargo.toml index cf9d3e437..f7d06bf55 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.11" +version = "0.3.12" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index e9e25e3f6..925b10c93 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! 
[`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.11")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.12")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From a54d9265b7a6dfc78f67c38db62bfbeae790c1f8 Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Fri, 11 Feb 2022 09:43:40 +0100 Subject: [PATCH 093/178] Upgrade tokio-util to 0.7 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index f7d06bf55..0333389ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,7 @@ members = [ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } -tokio-util = { version = "0.6", features = ["codec"] } +tokio-util = { version = "0.7.1", features = ["codec"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" From 3a0c622f0c9ec8f53df8f6481ba7873dc06be9cf Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 31 Mar 2022 15:12:45 -0700 Subject: [PATCH 094/178] v0.3.13 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7cff904f9..7b00632f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.13 (March 31, 2022) + +* Update private internal `tokio-util` dependency. + # 0.3.12 (March 9, 2022) * Avoid time operations that can panic (#599) diff --git a/Cargo.toml b/Cargo.toml index 0333389ae..bc1388051 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.12" +version = "0.3.13" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 925b10c93..be42b100e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.12")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.13")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From dc7aa8e0f2200614da0e4352532ef7aaaeac695b Mon Sep 17 00:00:00 2001 From: David Koloski Date: Wed, 18 May 2022 14:23:52 -0400 Subject: [PATCH 095/178] tests: move unexported macro doctest into unit test (#616) Nightly has begun running doctests for unexported macros as of https://github.com/rust-lang/rust/pull/96630, which caused a doctest for test_unpack_octets_4 which was previously ignored to be run. This broke the CI because macros that are not exported with `#[macro_export]` cannot be used from external crates (and thus cannot be doctested). This change ignores the doctest and copies the relevant code into a unit test. Co-authored-by: David Koloski --- src/frame/mod.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/frame/mod.rs b/src/frame/mod.rs index 5a682b634..570a162a8 100644 --- a/src/frame/mod.rs +++ b/src/frame/mod.rs @@ -11,7 +11,8 @@ use std::fmt; /// /// # Examples /// -/// ```rust +/// ```ignore +/// # // We ignore this doctest because the macro is not exported. /// let buf: [u8; 4] = [0, 0, 0, 1]; /// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); /// ``` @@ -25,6 +26,15 @@ macro_rules! 
unpack_octets_4 { }; } +#[cfg(test)] +mod tests { + #[test] + fn test_unpack_octets_4() { + let buf: [u8; 4] = [0, 0, 0, 1]; + assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); + } +} + mod data; mod go_away; mod head; From f6aa3be6719270cd7b4094ee1940751b5f4ec88e Mon Sep 17 00:00:00 2001 From: Bruce Guenter Date: Wed, 18 May 2022 17:23:40 -0600 Subject: [PATCH 096/178] Add `Error::is_reset` function (#618) --- src/error.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/error.rs b/src/error.rs index 6c8f6ed80..d45827e36 100644 --- a/src/error.rs +++ b/src/error.rs @@ -59,10 +59,7 @@ impl Error { /// Returns true if the error is an io::Error pub fn is_io(&self) -> bool { - match self.kind { - Kind::Io(_) => true, - _ => false, - } + matches!(self.kind, Kind::Io(..)) } /// Returns the error if the error is an io::Error @@ -92,6 +89,11 @@ impl Error { matches!(self.kind, Kind::GoAway(..)) } + /// Returns true if the error is from a `RST_STREAM`. + pub fn is_reset(&self) -> bool { + matches!(self.kind, Kind::Reset(..)) + } + /// Returns true if the error was received in a frame from the remote. /// /// Such as from a received `RST_STREAM` or `GOAWAY` frame. From e4cf88c1a19a7f7823dfaa9eb4c6a19a24f4ead8 Mon Sep 17 00:00:00 2001 From: Ryan Russell Date: Wed, 1 Jun 2022 17:21:06 -0500 Subject: [PATCH 097/178] Fix Typo `received` (#620) Signed-off-by: Ryan Russell --- src/client.rs | 4 ++-- src/server.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client.rs b/src/client.rs index e75cd3507..a6c649811 100644 --- a/src/client.rs +++ b/src/client.rs @@ -526,7 +526,7 @@ where /// /// This setting is configured by the server peer by sending the /// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value recieved from the + /// This method returns the currently acknowledged value received from the /// remote. /// /// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4 @@ -1280,7 +1280,7 @@ where /// /// This limit is configured by the server peer by sending the /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value recieved from the + /// This method returns the currently acknowledged value received from the /// remote. /// /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 diff --git a/src/server.rs b/src/server.rs index 16a50da4b..9f56f184a 100644 --- a/src/server.rs +++ b/src/server.rs @@ -554,7 +554,7 @@ where /// /// This limit is configured by the client peer by sending the /// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame. - /// This method returns the currently acknowledged value recieved from the + /// This method returns the currently acknowledged value received from the /// remote. 
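An editorial aside on the `Error::is_reset` accessor added in #618 above: combined with the existing `is_io` and `is_go_away` checks, callers can tell stream-level resets apart from connection-level failures. A minimal sketch (the `classify` helper is illustrative only, not part of this patch series):

```rust
// Rough triage of an h2::Error; the order of the checks is arbitrary.
fn classify(err: &h2::Error) -> &'static str {
    if err.is_io() {
        "underlying I/O error"
    } else if err.is_go_away() {
        "connection-level GOAWAY"
    } else if err.is_reset() {
        "stream-level RST_STREAM"
    } else {
        "other protocol or user error"
    }
}
```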
/// /// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2 From fd4040d90db3a69b327990f04e29887753cca730 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 18 Jul 2022 16:07:28 -0700 Subject: [PATCH 098/178] Bump MSRV to 1.56 (#626) --- .github/workflows/CI.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 874af81fe..f433f9082 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -90,7 +90,7 @@ jobs: strategy: matrix: rust: - - 1.49 # never go past Hyper's own MSRV + - 1.56 # never go past Hyper's own MSRV os: - ubuntu-latest From 756384f4cdd62bce3af7aa53a156ba2cc557b5ec Mon Sep 17 00:00:00 2001 From: Miguel Guarniz Date: Mon, 18 Jul 2022 19:23:52 -0400 Subject: [PATCH 099/178] Replace internal PollExt trait with Poll inherent methods (#625) Signed-off-by: Miguel Guarniz --- src/lib.rs | 41 ------------------------------------ src/proto/streams/streams.rs | 3 +-- src/share.rs | 7 +++--- 3 files changed, 4 insertions(+), 47 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index be42b100e..c5f01f3fb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -133,44 +133,3 @@ pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream #[cfg(feature = "unstable")] pub use codec::{Codec, SendError, UserError}; - -use std::task::Poll; - -// TODO: Get rid of this trait once https://github.com/rust-lang/rust/pull/63512 -// is stabilized. -trait PollExt { - /// Changes the success value of this `Poll` with the closure provided. - fn map_ok_(self, f: F) -> Poll>> - where - F: FnOnce(T) -> U; - /// Changes the error value of this `Poll` with the closure provided. - fn map_err_(self, f: F) -> Poll>> - where - F: FnOnce(E) -> U; -} - -impl PollExt for Poll>> { - fn map_ok_(self, f: F) -> Poll>> - where - F: FnOnce(T) -> U, - { - match self { - Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(f(t)))), - Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } - - fn map_err_(self, f: F) -> Poll>> - where - F: FnOnce(E) -> U, - { - match self { - Poll::Ready(Some(Ok(t))) => Poll::Ready(Some(Ok(t))), - Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(f(e)))), - Poll::Ready(None) => Poll::Ready(None), - Poll::Pending => Poll::Pending, - } - } -} diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 3e7ae97d9..f11ee085f 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -12,7 +12,6 @@ use http::{HeaderMap, Request, Response}; use std::task::{Context, Poll, Waker}; use tokio::io::AsyncWrite; -use crate::PollExt; use std::sync::{Arc, Mutex}; use std::{fmt, io}; @@ -1282,7 +1281,7 @@ impl OpaqueStreamRef { me.actions .recv .poll_pushed(cx, &mut stream) - .map_ok_(|(h, key)| { + .map_ok(|(h, key)| { me.refs += 1; let opaque_ref = OpaqueStreamRef::new(self.inner.clone(), &mut me.store.resolve(key)); diff --git a/src/share.rs b/src/share.rs index 2a4ff1cdd..f39d2a8fd 100644 --- a/src/share.rs +++ b/src/share.rs @@ -5,7 +5,6 @@ use crate::proto::{self, WindowSize}; use bytes::{Buf, Bytes}; use http::HeaderMap; -use crate::PollExt; use std::fmt; #[cfg(feature = "stream")] use std::pin::Pin; @@ -307,8 +306,8 @@ impl SendStream { pub fn poll_capacity(&mut self, cx: &mut Context) -> Poll>> { self.inner .poll_capacity(cx) - .map_ok_(|w| w as usize) - .map_err_(Into::into) + .map_ok(|w| w as usize) + .map_err(Into::into) } /// Sends a single data frame to the remote peer. 
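A note on the `PollExt` removal in #625 above: the removed TODO describes the trait as a stopgap until rust-lang/rust#63512 landed, and with the MSRV now at 1.56 the inherent `Poll::map_ok`/`Poll::map_err` methods cover the same ground. A small sketch of the replacement methods, independent of h2's internals:

```rust
use std::task::Poll;

// Poll<Option<Result<T, E>>> has inherent map_ok/map_err methods, which is
// what the removed PollExt::map_ok_/map_err_ helpers used to provide.
fn widen(
    p: Poll<Option<Result<u32, &'static str>>>,
) -> Poll<Option<Result<usize, &'static str>>> {
    p.map_ok(|n| n as usize)
}

fn main() {
    assert_eq!(widen(Poll::Ready(Some(Ok(7)))), Poll::Ready(Some(Ok(7))));
    assert_eq!(widen(Poll::Ready(None)), Poll::Ready(None));
    assert!(widen(Poll::Pending).is_pending());
}
```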
@@ -403,7 +402,7 @@ impl RecvStream { /// Poll for the next data frame. pub fn poll_data(&mut self, cx: &mut Context<'_>) -> Poll>> { - self.inner.inner.poll_data(cx).map_err_(Into::into) + self.inner.inner.poll_data(cx).map_err(Into::into) } #[doc(hidden)] From b0f54d80f24112d198e630e13767501d47ae7290 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Er=C3=A8be=20-=20Romain=20Gerard?= Date: Mon, 15 Aug 2022 23:08:56 +0200 Subject: [PATCH 100/178] Use RST_STREAM(NO_ERROR) in case server early respond (#633) (#634) Http2 Server are allowed to early respond without fully consuming client input stream, but must respond with an error code of NO_ERROR when sending RST_STREAM. Nginx treat any other error code as fatal if not done so Commit change error code from CANCEL to NO_ERROR, when the server is early responding to the client https://github.com/hyperium/h2/issues/633 https://trac.nginx.org/nginx/ticket/2376 --- src/proto/streams/streams.rs | 14 +++++++++++++- tests/h2-tests/tests/server.rs | 4 +++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index f11ee085f..aee64ca6a 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1461,9 +1461,21 @@ fn drop_stream_ref(inner: &Mutex, key: store::Key) { fn maybe_cancel(stream: &mut store::Ptr, actions: &mut Actions, counts: &mut Counts) { if stream.is_canceled_interest() { + // Server is allowed to early respond without fully consuming the client input stream + // But per the RFC, must send a RST_STREAM(NO_ERROR) in such cases. https://www.rfc-editor.org/rfc/rfc7540#section-8.1 + // Some other http2 implementation may interpret other error code as fatal if not respected (i.e: nginx https://trac.nginx.org/nginx/ticket/2376) + let reason = if counts.peer().is_server() + && stream.state.is_send_closed() + && stream.state.is_recv_streaming() + { + Reason::NO_ERROR + } else { + Reason::CANCEL + }; + actions .send - .schedule_implicit_reset(stream, Reason::CANCEL, counts, &mut actions.task); + .schedule_implicit_reset(stream, reason, counts, &mut actions.task); actions.recv.enqueue_reset_expiration(stream, counts); } } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index b3bf1a286..948ad1630 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -566,7 +566,9 @@ async fn sends_reset_cancel_when_req_body_is_dropped() { client .recv_frame(frames::headers(1).response(200).eos()) .await; - client.recv_frame(frames::reset(1).cancel()).await; + client + .recv_frame(frames::reset(1).reason(Reason::NO_ERROR)) + .await; }; let srv = async move { From 88b078925416d9d220b69d66192f7be63457d6b8 Mon Sep 17 00:00:00 2001 From: Lucio Franco Date: Tue, 16 Aug 2022 14:57:14 -0400 Subject: [PATCH 101/178] v0.3.14 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b00632f3..57b4fa280 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.14 (August 16, 2022) + +* Add `Error::is_reset` function. +* Bump MSRV to Rust 1.56. +* Return `RST_STREAM(NO_ERROR)` when the server early responds. + # 0.3.13 (March 31, 2022) * Update private internal `tokio-util` dependency. diff --git a/Cargo.toml b/Cargo.toml index bc1388051..177a20cc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. 
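An illustrative aside on #633/#634 above, where an early response now resets the unread request body with `NO_ERROR` instead of `CANCEL`: the sketch below shows a server that answers before reading the request body. The bind address, the Tokio runtime setup, and the unwraps are assumptions for illustration, not code from this patch series:

```rust
// Minimal h2 server that responds without polling the request body.
use h2::server;
use http::{Response, StatusCode};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:5928").await?;
    loop {
        let (socket, _peer) = listener.accept().await?;
        tokio::spawn(async move {
            let mut conn = server::handshake(socket).await.unwrap();
            while let Some(accepted) = conn.accept().await {
                // The request body (RecvStream) is dropped without being read.
                let (_request, mut respond) = accepted.unwrap();
                let response = Response::builder()
                    .status(StatusCode::OK)
                    .body(())
                    .unwrap();
                // end_of_stream = true closes the send side; with this patch
                // the unread request body is then reset with NO_ERROR rather
                // than CANCEL, which peers such as nginx accept as a clean
                // early response.
                respond.send_response(response, true).unwrap();
            }
        });
    }
}
```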
# - Create git tag -version = "0.3.13" +version = "0.3.14" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index c5f01f3fb..8f27156d6 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.13")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.14")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 756e2524ac53c0cf9810efcba0b92171b7d80e42 Mon Sep 17 00:00:00 2001 From: David Koloski Date: Tue, 6 Sep 2022 10:58:20 -0400 Subject: [PATCH 102/178] Remove `B: Buf` bound on `SendStream`'s parameter (#614) Co-authored-by: David Koloski --- src/share.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/share.rs b/src/share.rs index f39d2a8fd..ef520be33 100644 --- a/src/share.rs +++ b/src/share.rs @@ -94,7 +94,7 @@ use std::task::{Context, Poll}; /// [`send_trailers`]: #method.send_trailers /// [`send_reset`]: #method.send_reset #[derive(Debug)] -pub struct SendStream { +pub struct SendStream { inner: proto::StreamRef, } From 79dff0c2d9a90d7f867bc46bb09fb4e98df8b1ae Mon Sep 17 00:00:00 2001 From: Eric Rosenberg Date: Fri, 21 Oct 2022 12:35:15 -0700 Subject: [PATCH 103/178] add accessor for StreamId u32 (#639) --- src/share.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/share.rs b/src/share.rs index ef520be33..f4e3cdeb0 100644 --- a/src/share.rs +++ b/src/share.rs @@ -108,9 +108,15 @@ pub struct SendStream { /// new stream. /// /// [Section 5.1.1]: https://tools.ietf.org/html/rfc7540#section-5.1.1 -#[derive(Debug, Clone, Eq, PartialEq, Hash)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)] pub struct StreamId(u32); +impl From for u32 { + fn from(src: StreamId) -> Self { + src.0 + } +} + /// Receives the body stream and trailers from the remote peer. /// /// A `RecvStream` is provided by [`client::ResponseFuture`] and @@ -382,6 +388,18 @@ impl StreamId { pub(crate) fn from_internal(id: crate::frame::StreamId) -> Self { StreamId(id.into()) } + + /// Returns the `u32` corresponding to this `StreamId` + /// + /// # Note + /// + /// This is the same as the `From` implementation, but + /// included as an inherent method because that implementation doesn't + /// appear in rustdocs, as well as a way to force the type instead of + /// relying on inference. + pub fn as_u32(&self) -> u32 { + (*self).into() + } } // ===== impl RecvStream ===== From fcbef502f41cf05c06f1d7b4939fa288d13f7fbc Mon Sep 17 00:00:00 2001 From: Eric Rosenberg Date: Fri, 21 Oct 2022 14:13:47 -0700 Subject: [PATCH 104/178] v0.3.15 (#642) --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 57b4fa280..09c99aac3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.15 (October 21, 2022) + +* Remove `B: Buf` bound on `SendStream`'s parameter +* add accessor for `StreamId` u32 + # 0.3.14 (August 16, 2022) * Add `Error::is_reset` function. diff --git a/Cargo.toml b/Cargo.toml index 177a20cc1..8e904875e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. 
# - Create git tag -version = "0.3.14" +version = "0.3.15" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 8f27156d6..376d15c9a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.14")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.15")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] From 294000c0745c64009151a1ab39978cd6f02dfd68 Mon Sep 17 00:00:00 2001 From: Vitaly Shukela Date: Sat, 29 Oct 2022 21:22:48 +0300 Subject: [PATCH 105/178] Fix docs of enable_push (#646) Remove redundant and misleading phrase in client::Builder::enable_push documentation. Resolves #645 --- src/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index a6c649811..411afa5e0 100644 --- a/src/client.rs +++ b/src/client.rs @@ -986,8 +986,8 @@ impl Builder { /// Enables or disables server push promises. /// - /// This value is included in the initial SETTINGS handshake. When set, the - /// server MUST NOT send a push promise. Setting this value to value to + /// This value is included in the initial SETTINGS handshake. + /// Setting this value to value to /// false in the initial SETTINGS handshake guarantees that the remote server /// will never send a push promise. /// From af47a086b6bb5d2d216301bf9673dee6e95ab4c3 Mon Sep 17 00:00:00 2001 From: silence-coding <32766901+silence-coding@users.noreply.github.com> Date: Fri, 2 Dec 2022 23:07:57 +0800 Subject: [PATCH 106/178] fix issue#648: drop frame if stream is released (#651) Co-authored-by: p00512853 --- src/proto/streams/recv.rs | 10 ++++++++++ src/proto/streams/stream.rs | 4 ++++ src/proto/streams/streams.rs | 3 ++- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 3af1af3a1..21c575a1a 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -600,6 +600,16 @@ impl Recv { } } + // Received a frame, but no one cared about it. fix issue#648 + if !stream.is_recv { + tracing::trace!( + "recv_data; frame ignored on stream release {:?} for some time", + stream.id, + ); + self.release_connection_capacity(sz, &mut None); + return Ok(()); + } + // Update stream level flow control stream.recv_flow.send_data(sz); diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 36d515bad..de7f4f641 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -99,6 +99,9 @@ pub(super) struct Stream { /// Frames pending for this stream to read pub pending_recv: buffer::Deque, + /// When the RecvStream drop occurs, no data should be received. + pub is_recv: bool, + /// Task tracking receiving frames pub recv_task: Option, @@ -180,6 +183,7 @@ impl Stream { reset_at: None, next_reset_expire: None, pending_recv: buffer::Deque::new(), + is_recv: true, recv_task: None, pending_push_promises: store::Queue::new(), content_length: ContentLength::Omitted, diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index aee64ca6a..4bd671b07 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1345,12 +1345,13 @@ impl OpaqueStreamRef { .release_capacity(capacity, &mut stream, &mut me.actions.task) } + /// Clear the receive queue and set the status to no longer receive data frames. 
pub(crate) fn clear_recv_buffer(&mut self) { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut stream = me.store.resolve(self.key); - + stream.is_recv = false; me.actions.recv.clear_recv_buffer(&mut stream); } From c1ce37e1678af6186b2a12574e5bd1ef1ad39c26 Mon Sep 17 00:00:00 2001 From: gtsiam Date: Mon, 12 Dec 2022 19:18:35 +0200 Subject: [PATCH 107/178] Remove unnecessary Unpin + 'static bounds on body (#649) --- src/client.rs | 10 +++++----- src/proto/streams/streams.rs | 5 +---- src/server.rs | 8 ++++---- tests/h2-support/src/client_ext.rs | 2 +- tests/h2-support/src/prelude.rs | 2 +- 5 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/client.rs b/src/client.rs index 411afa5e0..5dd0b0f87 100644 --- a/src/client.rs +++ b/src/client.rs @@ -341,7 +341,7 @@ pub(crate) struct Peer; impl SendRequest where - B: Buf + 'static, + B: Buf, { /// Returns `Ready` when the connection can initialize a new HTTP/2 /// stream. @@ -584,7 +584,7 @@ where impl Future for ReadySendRequest where - B: Buf + 'static, + B: Buf, { type Output = Result, crate::Error>; @@ -1100,7 +1100,7 @@ impl Builder { ) -> impl Future, Connection), crate::Error>> where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { Connection::handshake2(io, self.clone()) } @@ -1177,7 +1177,7 @@ where impl Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { async fn handshake2( mut io: T, @@ -1306,7 +1306,7 @@ where impl Future for Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { type Output = Result<(), crate::Error>; diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 4bd671b07..62c55524c 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1229,10 +1229,7 @@ impl StreamRef { .map_err(From::from) } - pub fn clone_to_opaque(&self) -> OpaqueStreamRef - where - B: 'static, - { + pub fn clone_to_opaque(&self) -> OpaqueStreamRef { self.opaque.clone() } diff --git a/src/server.rs b/src/server.rs index 9f56f184a..6e216a40a 100644 --- a/src/server.rs +++ b/src/server.rs @@ -364,7 +364,7 @@ where impl Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { fn handshake2(io: T, builder: Builder) -> Handshake { let span = tracing::trace_span!("server_handshake"); @@ -582,7 +582,7 @@ where impl futures_core::Stream for Connection where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { type Item = Result<(Request, SendResponse), crate::Error>; @@ -1007,7 +1007,7 @@ impl Builder { pub fn handshake(&self, io: T) -> Handshake where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { Connection::handshake2(io, self.clone()) } @@ -1262,7 +1262,7 @@ where impl Future for Handshake where T: AsyncRead + AsyncWrite + Unpin, - B: Buf + 'static, + B: Buf, { type Output = Result, crate::Error>; diff --git a/tests/h2-support/src/client_ext.rs b/tests/h2-support/src/client_ext.rs index a9ab71d99..eebbae98b 100644 --- a/tests/h2-support/src/client_ext.rs +++ b/tests/h2-support/src/client_ext.rs @@ -11,7 +11,7 @@ pub trait SendRequestExt { impl SendRequestExt for SendRequest where - B: Buf + Unpin + 'static, + B: Buf, { fn get(&mut self, uri: &str) -> ResponseFuture { let req = Request::builder() diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index 86ef3249e..d34f1b996 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -90,7 +90,7 @@ pub trait ClientExt { impl ClientExt for 
client::Connection where T: AsyncRead + AsyncWrite + Unpin + 'static, - B: Buf + Unpin + 'static, + B: Buf, { fn run<'a, F: Future + Unpin + 'a>( &'a mut self, From 07d20b19abfd3bf57bd80976089e3c24a3166bca Mon Sep 17 00:00:00 2001 From: gtsiam Date: Mon, 12 Dec 2022 22:13:48 +0200 Subject: [PATCH 108/178] Fix all clippy warnings (#652) --- examples/akamai.rs | 5 +- src/codec/framed_read.rs | 10 +-- src/frame/data.rs | 8 +- src/frame/headers.rs | 13 +-- src/frame/settings.rs | 6 +- src/hpack/decoder.rs | 29 +++---- src/hpack/encoder.rs | 8 +- src/hpack/header.rs | 26 +++--- src/hpack/huffman/mod.rs | 9 +- src/hpack/table.rs | 2 +- src/hpack/test/fixture.rs | 10 +-- src/hpack/test/fuzz.rs | 2 +- src/lib.rs | 1 + src/proto/connection.rs | 2 +- src/proto/error.rs | 4 +- src/proto/ping_pong.rs | 5 +- src/proto/streams/flow_control.rs | 3 +- src/proto/streams/mod.rs | 1 + src/proto/streams/prioritize.rs | 68 ++++++++------- src/proto/streams/recv.rs | 78 +++++++++--------- src/proto/streams/send.rs | 110 +++++++++++++------------ src/proto/streams/state.rs | 69 +++++++--------- src/proto/streams/store.rs | 12 ++- src/proto/streams/stream.rs | 11 +-- src/proto/streams/streams.rs | 24 +++--- src/server.rs | 2 +- src/share.rs | 4 +- tests/h2-support/src/frames.rs | 4 +- tests/h2-support/src/mock.rs | 4 +- tests/h2-support/src/prelude.rs | 2 +- tests/h2-support/src/util.rs | 4 +- tests/h2-tests/tests/client_request.rs | 9 +- tests/h2-tests/tests/hammer.rs | 2 +- tests/h2-tests/tests/ping_pong.rs | 6 +- tests/h2-tests/tests/push_promise.rs | 2 +- tests/h2-tests/tests/server.rs | 4 +- tests/h2-tests/tests/stream_states.rs | 2 +- util/genfixture/src/main.rs | 12 +-- util/genhuff/src/main.rs | 10 +-- 39 files changed, 283 insertions(+), 300 deletions(-) diff --git a/examples/akamai.rs b/examples/akamai.rs index e522b37ff..1d0b17baf 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -50,10 +50,7 @@ pub async fn main() -> Result<(), Box> { { let (_, session) = tls.get_ref(); let negotiated_protocol = session.alpn_protocol(); - assert_eq!( - Some(ALPN_H2.as_bytes()), - negotiated_protocol.as_ref().map(|x| &**x) - ); + assert_eq!(Some(ALPN_H2.as_bytes()), negotiated_protocol); } println!("Starting client handshake"); diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 7c3bbb3ba..a874d7732 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -109,7 +109,7 @@ fn decode_frame( if partial_inout.is_some() && head.kind() != Kind::Continuation { proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind()); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } let kind = head.kind(); @@ -231,7 +231,7 @@ fn decode_frame( if head.stream_id() == 0 { // Invalid stream identifier proto_err!(conn: "invalid stream ID 0"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) { @@ -257,14 +257,14 @@ fn decode_frame( Some(partial) => partial, None => { proto_err!(conn: "received unexpected CONTINUATION frame"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; // The stream identifiers must match if partial.frame.stream_id() != head.stream_id() { proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID"); - return 
Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } // Extend the buf @@ -287,7 +287,7 @@ fn decode_frame( // the attacker to go away. if partial.buf.len() + bytes.len() > max_header_list_size { proto_err!(conn: "CONTINUATION frame header block size over ignorable limit"); - return Err(Error::library_go_away(Reason::COMPRESSION_ERROR).into()); + return Err(Error::library_go_away(Reason::COMPRESSION_ERROR)); } } partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]); diff --git a/src/frame/data.rs b/src/frame/data.rs index e253d5e23..d0cdf5f69 100644 --- a/src/frame/data.rs +++ b/src/frame/data.rs @@ -16,7 +16,7 @@ pub struct Data { pad_len: Option, } -#[derive(Copy, Clone, Eq, PartialEq)] +#[derive(Copy, Clone, Default, Eq, PartialEq)] struct DataFlags(u8); const END_STREAM: u8 = 0x1; @@ -211,12 +211,6 @@ impl DataFlags { } } -impl Default for DataFlags { - fn default() -> Self { - DataFlags(0) - } -} - impl From for u8 { fn from(src: DataFlags) -> u8 { src.0 diff --git a/src/frame/headers.rs b/src/frame/headers.rs index bcb905013..9d5c8cefe 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -309,17 +309,20 @@ impl fmt::Debug for Headers { // ===== util ===== -pub fn parse_u64(src: &[u8]) -> Result { +#[derive(Debug, PartialEq, Eq)] +pub struct ParseU64Error; + +pub fn parse_u64(src: &[u8]) -> Result { if src.len() > 19 { // At danger for overflow... - return Err(()); + return Err(ParseU64Error); } let mut ret = 0; for &d in src { if d < b'0' || d > b'9' { - return Err(()); + return Err(ParseU64Error); } ret *= 10; @@ -333,7 +336,7 @@ pub fn parse_u64(src: &[u8]) -> Result { #[derive(Debug)] pub enum PushPromiseHeaderError { - InvalidContentLength(Result), + InvalidContentLength(Result), NotSafeAndCacheable, } @@ -381,7 +384,7 @@ impl PushPromise { fn safe_and_cacheable(method: &Method) -> bool { // Cacheable: https://httpwg.org/specs/rfc7231.html#cacheable.methods // Safe: https://httpwg.org/specs/rfc7231.html#safe.methods - return method == Method::GET || method == Method::HEAD; + method == Method::GET || method == Method::HEAD } pub fn fields(&self) -> &HeaderMap { diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 080d0f4e5..0c913f059 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -182,10 +182,10 @@ impl Settings { } } Some(MaxFrameSize(val)) => { - if val < DEFAULT_MAX_FRAME_SIZE || val > MAX_MAX_FRAME_SIZE { - return Err(Error::InvalidSettingValue); - } else { + if DEFAULT_MAX_FRAME_SIZE <= val && val <= MAX_MAX_FRAME_SIZE { settings.max_frame_size = Some(val); + } else { + return Err(Error::InvalidSettingValue); } } Some(MaxHeaderListSize(val)) => { diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index 988b48db1..b45c37927 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -852,8 +852,7 @@ mod test { fn test_decode_empty() { let mut de = Decoder::new(0); let mut buf = BytesMut::new(); - let empty = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); - assert_eq!(empty, ()); + let _: () = de.decode(&mut Cursor::new(&mut buf), |_| {}).unwrap(); } #[test] @@ -861,17 +860,16 @@ mod test { let mut de = Decoder::new(0); let mut buf = BytesMut::new(); - buf.extend(&[0b01000000, 0x80 | 2]); + buf.extend([0b01000000, 0x80 | 2]); buf.extend(huff_encode(b"foo")); - buf.extend(&[0x80 | 3]); + buf.extend([0x80 | 3]); buf.extend(huff_encode(b"bar")); let mut res = vec![]; - let _ = de - .decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - 
.unwrap(); + de.decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap(); assert_eq!(res.len(), 1); assert_eq!(de.table.size(), 0); @@ -900,10 +898,10 @@ mod test { let value = huff_encode(b"bar"); let mut buf = BytesMut::new(); // header name is non_huff encoded - buf.extend(&[0b01000000, 0x00 | 3]); + buf.extend([0b01000000, 3]); buf.extend(b"foo"); // header value is partial - buf.extend(&[0x80 | 3]); + buf.extend([0x80 | 3]); buf.extend(&value[0..1]); let mut res = vec![]; @@ -917,11 +915,10 @@ mod test { // extend buf with the remaining header value buf.extend(&value[1..]); - let _ = de - .decode(&mut Cursor::new(&mut buf), |h| { - res.push(h); - }) - .unwrap(); + de.decode(&mut Cursor::new(&mut buf), |h| { + res.push(h); + }) + .unwrap(); assert_eq!(res.len(), 1); assert_eq!(de.table.size(), 0); diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index 76b373830..d121a2aaf 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -118,12 +118,12 @@ impl Encoder { encode_int(idx, 7, 0x80, dst); } Index::Name(idx, _) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst); } Index::Inserted(_) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); assert!(!header.is_sensitive()); @@ -133,7 +133,7 @@ impl Encoder { encode_str(header.value_slice(), dst); } Index::InsertedValue(idx, _) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); assert!(!header.is_sensitive()); @@ -141,7 +141,7 @@ impl Encoder { encode_str(header.value_slice(), dst); } Index::NotIndexed(_) => { - let header = self.table.resolve(&index); + let header = self.table.resolve(index); encode_not_indexed2( header.name().as_slice(), diff --git a/src/hpack/header.rs b/src/hpack/header.rs index e6df555ab..0b5d1fded 100644 --- a/src/hpack/header.rs +++ b/src/hpack/header.rs @@ -190,18 +190,18 @@ impl Header { use http::header; match *self { - Header::Field { ref name, .. } => match *name { + Header::Field { ref name, .. } => matches!( + *name, header::AGE - | header::AUTHORIZATION - | header::CONTENT_LENGTH - | header::ETAG - | header::IF_MODIFIED_SINCE - | header::IF_NONE_MATCH - | header::LOCATION - | header::COOKIE - | header::SET_COOKIE => true, - _ => false, - }, + | header::AUTHORIZATION + | header::CONTENT_LENGTH + | header::ETAG + | header::IF_MODIFIED_SINCE + | header::IF_NONE_MATCH + | header::LOCATION + | header::COOKIE + | header::SET_COOKIE + ), Header::Path(..) 
=> true, _ => false, } @@ -231,10 +231,10 @@ impl<'a> Name<'a> { match self { Name::Field(name) => Ok(Header::Field { name: name.clone(), - value: HeaderValue::from_bytes(&*value)?, + value: HeaderValue::from_bytes(&value)?, }), Name::Authority => Ok(Header::Authority(BytesStr::try_from(value)?)), - Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), + Name::Method => Ok(Header::Method(Method::from_bytes(&value)?)), Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)), Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)), Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)), diff --git a/src/hpack/huffman/mod.rs b/src/hpack/huffman/mod.rs index 07b3fd925..86c97eb58 100644 --- a/src/hpack/huffman/mod.rs +++ b/src/hpack/huffman/mod.rs @@ -112,7 +112,7 @@ mod test { #[test] fn decode_single_byte() { assert_eq!("o", decode(&[0b00111111]).unwrap()); - assert_eq!("0", decode(&[0x0 + 7]).unwrap()); + assert_eq!("0", decode(&[7]).unwrap()); assert_eq!("A", decode(&[(0x21 << 2) + 3]).unwrap()); } @@ -138,7 +138,7 @@ mod test { dst.clear(); encode(b"0", &mut dst); - assert_eq!(&dst[..], &[0x0 + 7]); + assert_eq!(&dst[..], &[7]); dst.clear(); encode(b"A", &mut dst); @@ -147,7 +147,7 @@ mod test { #[test] fn encode_decode_str() { - const DATA: &'static [&'static str] = &[ + const DATA: &[&str] = &[ "hello world", ":method", ":scheme", @@ -184,8 +184,7 @@ mod test { #[test] fn encode_decode_u8() { - const DATA: &'static [&'static [u8]] = - &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; + const DATA: &[&[u8]] = &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; for s in DATA { let mut dst = BytesMut::with_capacity(s.len()); diff --git a/src/hpack/table.rs b/src/hpack/table.rs index 0124f216d..a1a780451 100644 --- a/src/hpack/table.rs +++ b/src/hpack/table.rs @@ -404,7 +404,7 @@ impl Table { // Find the associated position probe_loop!(probe < self.indices.len(), { - debug_assert!(!self.indices[probe].is_none()); + debug_assert!(self.indices[probe].is_some()); let mut pos = self.indices[probe].unwrap(); diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 3428c3958..0d33ca2de 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -52,8 +52,8 @@ fn test_story(story: Value) { Case { seqno: case.get("seqno").unwrap().as_u64().unwrap(), - wire: wire, - expect: expect, + wire, + expect, header_table_size: size, } }) @@ -142,10 +142,10 @@ fn key_str(e: &Header) -> &str { fn value_str(e: &Header) -> &str { match *e { Header::Field { ref value, .. 
} => value.to_str().unwrap(), - Header::Authority(ref v) => &**v, + Header::Authority(ref v) => v, Header::Method(ref m) => m.as_str(), - Header::Scheme(ref v) => &**v, - Header::Path(ref v) => &**v, + Header::Scheme(ref v) => v, + Header::Path(ref v) => v, Header::Protocol(ref v) => v.as_str(), Header::Status(ref v) => v.as_str(), } diff --git a/src/hpack/test/fuzz.rs b/src/hpack/test/fuzz.rs index ad0d47b6b..af9e8ea23 100644 --- a/src/hpack/test/fuzz.rs +++ b/src/hpack/test/fuzz.rs @@ -80,7 +80,7 @@ impl FuzzHpack { let high = rng.gen_range(128..MAX_CHUNK * 2); let low = rng.gen_range(0..high); - frame.resizes.extend(&[low, high]); + frame.resizes.extend([low, high]); } 1..=3 => { frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2)); diff --git a/src/lib.rs b/src/lib.rs index 376d15c9a..e7b95035f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -81,6 +81,7 @@ #![doc(html_root_url = "https://docs.rs/h2/0.3.15")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] +#![allow(clippy::type_complexity, clippy::manual_range_contains)] macro_rules! proto_err { (conn: $($msg:tt)+) => { diff --git a/src/proto/connection.rs b/src/proto/connection.rs index cd011a1d5..59883cf33 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -215,7 +215,7 @@ where }); match (ours, theirs) { - (Reason::NO_ERROR, Reason::NO_ERROR) => return Ok(()), + (Reason::NO_ERROR, Reason::NO_ERROR) => Ok(()), (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)), // If both sides reported an error, give their // error back to th user. We assume our error diff --git a/src/proto/error.rs b/src/proto/error.rs index 197237263..2c00c7ea6 100644 --- a/src/proto/error.rs +++ b/src/proto/error.rs @@ -13,7 +13,7 @@ pub enum Error { Io(io::ErrorKind, Option), } -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum Initiator { User, Library, @@ -70,7 +70,7 @@ impl fmt::Display for Error { impl From for Error { fn from(src: io::ErrorKind) -> Self { - Error::Io(src.into(), None) + Error::Io(src, None) } } diff --git a/src/proto/ping_pong.rs b/src/proto/ping_pong.rs index 844c5fbb9..59023e26a 100644 --- a/src/proto/ping_pong.rs +++ b/src/proto/ping_pong.rs @@ -200,10 +200,7 @@ impl PingPong { impl ReceivedPing { pub(crate) fn is_shutdown(&self) -> bool { - match *self { - ReceivedPing::Shutdown => true, - _ => false, - } + matches!(*self, Self::Shutdown) } } diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index 4a47f08dd..b1b2745eb 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -19,6 +19,7 @@ const UNCLAIMED_NUMERATOR: i32 = 1; const UNCLAIMED_DENOMINATOR: i32 = 2; #[test] +#[allow(clippy::assertions_on_constants)] fn sanity_unclaimed_ratio() { assert!(UNCLAIMED_NUMERATOR < UNCLAIMED_DENOMINATOR); assert!(UNCLAIMED_NUMERATOR >= 0); @@ -188,7 +189,7 @@ impl FlowControl { /// /// This type tries to centralize the knowledge of addition and subtraction /// to this capacity, instead of having integer casts throughout the source. 
-#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd)] pub struct Window(i32); impl Window { diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index de2a2c85a..0ff8131c1 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -7,6 +7,7 @@ mod send; mod state; mod store; mod stream; +#[allow(clippy::module_inception)] mod streams; pub(crate) use self::prioritize::Prioritized; diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index c2904aca9..329e55022 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -7,9 +7,11 @@ use crate::codec::UserError; use crate::codec::UserError::*; use bytes::buf::{Buf, Take}; -use std::io; -use std::task::{Context, Poll, Waker}; -use std::{cmp, fmt, mem}; +use std::{ + cmp::{self, Ordering}, + fmt, io, mem, + task::{Context, Poll, Waker}, +}; /// # Warning /// @@ -235,39 +237,43 @@ impl Prioritize { // If it were less, then we could never send out the buffered data. let capacity = (capacity as usize) + stream.buffered_send_data; - if capacity == stream.requested_send_capacity as usize { - // Nothing to do - } else if capacity < stream.requested_send_capacity as usize { - // Update the target requested capacity - stream.requested_send_capacity = capacity as WindowSize; + match capacity.cmp(&(stream.requested_send_capacity as usize)) { + Ordering::Equal => { + // Nothing to do + } + Ordering::Less => { + // Update the target requested capacity + stream.requested_send_capacity = capacity as WindowSize; - // Currently available capacity assigned to the stream - let available = stream.send_flow.available().as_size(); + // Currently available capacity assigned to the stream + let available = stream.send_flow.available().as_size(); - // If the stream has more assigned capacity than requested, reclaim - // some for the connection - if available as usize > capacity { - let diff = available - capacity as WindowSize; + // If the stream has more assigned capacity than requested, reclaim + // some for the connection + if available as usize > capacity { + let diff = available - capacity as WindowSize; - stream.send_flow.claim_capacity(diff); + stream.send_flow.claim_capacity(diff); - self.assign_connection_capacity(diff, stream, counts); - } - } else { - // If trying to *add* capacity, but the stream send side is closed, - // there's nothing to be done. - if stream.state.is_send_closed() { - return; + self.assign_connection_capacity(diff, stream, counts); + } } + Ordering::Greater => { + // If trying to *add* capacity, but the stream send side is closed, + // there's nothing to be done. + if stream.state.is_send_closed() { + return; + } - // Update the target requested capacity - stream.requested_send_capacity = - cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; + // Update the target requested capacity + stream.requested_send_capacity = + cmp::min(capacity, WindowSize::MAX as usize) as WindowSize; - // Try to assign additional capacity to the stream. If none is - // currently available, the stream will be queued to receive some - // when more becomes available. - self.try_assign_capacity(stream); + // Try to assign additional capacity to the stream. If none is + // currently available, the stream will be queued to receive some + // when more becomes available. 
+ self.try_assign_capacity(stream); + } } } @@ -372,11 +378,11 @@ impl Prioritize { continue; } - counts.transition(stream, |_, mut stream| { + counts.transition(stream, |_, stream| { // Try to assign capacity to the stream. This will also re-queue the // stream if there isn't enough connection level capacity to fulfill // the capacity request. - self.try_assign_capacity(&mut stream); + self.try_assign_capacity(stream); }) } } diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 21c575a1a..497efc9bc 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -2,12 +2,12 @@ use super::*; use crate::codec::UserError; use crate::frame::{self, PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; use crate::proto::{self, Error}; -use std::task::Context; use http::{HeaderMap, Request, Response}; +use std::cmp::Ordering; use std::io; -use std::task::{Poll, Waker}; +use std::task::{Context, Poll, Waker}; use std::time::{Duration, Instant}; #[derive(Debug)] @@ -178,7 +178,7 @@ impl Recv { if let Some(content_length) = frame.fields().get(header::CONTENT_LENGTH) { let content_length = match frame::parse_u64(content_length.as_bytes()) { Ok(v) => v, - Err(()) => { + Err(_) => { proto_err!(stream: "could not parse content-length; stream={:?}", stream.id); return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } @@ -221,11 +221,12 @@ impl Recv { let stream_id = frame.stream_id(); let (pseudo, fields) = frame.into_parts(); - if pseudo.protocol.is_some() { - if counts.peer().is_server() && !self.is_extended_connect_protocol_enabled { - proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); - return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); - } + if pseudo.protocol.is_some() + && counts.peer().is_server() + && !self.is_extended_connect_protocol_enabled + { + proto_err!(stream: "cannot use :protocol if extended connect protocol is disabled; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } if !pseudo.is_informational() { @@ -487,28 +488,32 @@ impl Recv { // flow-controlled frames until it receives WINDOW_UPDATE frames that // cause the flow-control window to become positive. - if target < old_sz { - // We must decrease the (local) window on every open stream. - let dec = old_sz - target; - tracing::trace!("decrementing all windows; dec={}", dec); - - store.for_each(|mut stream| { - stream.recv_flow.dec_recv_window(dec); - }) - } else if target > old_sz { - // We must increase the (local) window on every open stream. - let inc = target - old_sz; - tracing::trace!("incrementing all windows; inc={}", inc); - store.try_for_each(|mut stream| { - // XXX: Shouldn't the peer have already noticed our - // overflow and sent us a GOAWAY? - stream - .recv_flow - .inc_window(inc) - .map_err(proto::Error::library_go_away)?; - stream.recv_flow.assign_capacity(inc); - Ok::<_, proto::Error>(()) - })?; + match target.cmp(&old_sz) { + Ordering::Less => { + // We must decrease the (local) window on every open stream. + let dec = old_sz - target; + tracing::trace!("decrementing all windows; dec={}", dec); + + store.for_each(|mut stream| { + stream.recv_flow.dec_recv_window(dec); + }) + } + Ordering::Greater => { + // We must increase the (local) window on every open stream. 
+ let inc = target - old_sz; + tracing::trace!("incrementing all windows; inc={}", inc); + store.try_for_each(|mut stream| { + // XXX: Shouldn't the peer have already noticed our + // overflow and sent us a GOAWAY? + stream + .recv_flow + .inc_window(inc) + .map_err(proto::Error::library_go_away)?; + stream.recv_flow.assign_capacity(inc); + Ok::<_, proto::Error>(()) + })?; + } + Ordering::Equal => (), } } @@ -556,7 +561,7 @@ impl Recv { "recv_data; frame ignored on locally reset {:?} for some time", stream.id, ); - return Ok(self.ignore_data(sz)?); + return self.ignore_data(sz); } // Ensure that there is enough capacity on the connection before acting @@ -596,7 +601,7 @@ impl Recv { if stream.state.recv_close().is_err() { proto_err!(conn: "recv_data: failed to transition to closed state; stream={:?}", stream.id); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } } @@ -766,7 +771,7 @@ impl Recv { } pub(super) fn clear_recv_buffer(&mut self, stream: &mut Stream) { - while let Some(_) = stream.pending_recv.pop_front(&mut self.buffer) { + while stream.pending_recv.pop_front(&mut self.buffer).is_some() { // drop it } } @@ -1089,12 +1094,7 @@ impl Recv { impl Open { pub fn is_push_promise(&self) -> bool { - use self::Open::*; - - match *self { - PushPromise => true, - _ => false, - } + matches!(*self, Self::PushPromise) } } diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 2c5a38c80..38896a304 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -7,11 +7,11 @@ use crate::frame::{self, Reason}; use crate::proto::{Error, Initiator}; use bytes::Buf; -use http; -use std::task::{Context, Poll, Waker}; use tokio::io::AsyncWrite; +use std::cmp::Ordering; use std::io; +use std::task::{Context, Poll, Waker}; /// Manages state transitions related to outbound frames. #[derive(Debug)] @@ -456,57 +456,61 @@ impl Send { let old_val = self.init_window_sz; self.init_window_sz = val; - if val < old_val { - // We must decrease the (remote) window on every open stream. - let dec = old_val - val; - tracing::trace!("decrementing all windows; dec={}", dec); - - let mut total_reclaimed = 0; - store.for_each(|mut stream| { - let stream = &mut *stream; - - stream.send_flow.dec_send_window(dec); - - // It's possible that decreasing the window causes - // `window_size` (the stream-specific window) to fall below - // `available` (the portion of the connection-level window - // that we have allocated to the stream). - // In this case, we should take that excess allocation away - // and reassign it to other streams. - let window_size = stream.send_flow.window_size(); - let available = stream.send_flow.available().as_size(); - let reclaimed = if available > window_size { - // Drop down to `window_size`. - let reclaim = available - window_size; - stream.send_flow.claim_capacity(reclaim); - total_reclaimed += reclaim; - reclaim - } else { - 0 - }; - - tracing::trace!( - "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", - stream.id, - dec, - reclaimed, - stream.send_flow - ); - - // TODO: Should this notify the producer when the capacity - // of a stream is reduced? Maybe it should if the capacity - // is reduced to zero, allowing the producer to stop work. 
- }); - - self.prioritize - .assign_connection_capacity(total_reclaimed, store, counts); - } else if val > old_val { - let inc = val - old_val; - - store.try_for_each(|mut stream| { - self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) - .map_err(Error::library_go_away) - })?; + match val.cmp(&old_val) { + Ordering::Less => { + // We must decrease the (remote) window on every open stream. + let dec = old_val - val; + tracing::trace!("decrementing all windows; dec={}", dec); + + let mut total_reclaimed = 0; + store.for_each(|mut stream| { + let stream = &mut *stream; + + stream.send_flow.dec_send_window(dec); + + // It's possible that decreasing the window causes + // `window_size` (the stream-specific window) to fall below + // `available` (the portion of the connection-level window + // that we have allocated to the stream). + // In this case, we should take that excess allocation away + // and reassign it to other streams. + let window_size = stream.send_flow.window_size(); + let available = stream.send_flow.available().as_size(); + let reclaimed = if available > window_size { + // Drop down to `window_size`. + let reclaim = available - window_size; + stream.send_flow.claim_capacity(reclaim); + total_reclaimed += reclaim; + reclaim + } else { + 0 + }; + + tracing::trace!( + "decremented stream window; id={:?}; decr={}; reclaimed={}; flow={:?}", + stream.id, + dec, + reclaimed, + stream.send_flow + ); + + // TODO: Should this notify the producer when the capacity + // of a stream is reduced? Maybe it should if the capacity + // is reduced to zero, allowing the producer to stop work. + }); + + self.prioritize + .assign_connection_capacity(total_reclaimed, store, counts); + } + Ordering::Greater => { + let inc = val - old_val; + + store.try_for_each(|mut stream| { + self.recv_stream_window_update(inc, buffer, &mut stream, counts, task) + .map_err(Error::library_go_away) + })?; + } + Ordering::Equal => (), } } diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 9931d41b1..1233e2352 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -343,10 +343,7 @@ impl State { } pub fn is_scheduled_reset(&self) -> bool { - match self.inner { - Closed(Cause::ScheduledLibraryReset(..)) => true, - _ => false, - } + matches!(self.inner, Closed(Cause::ScheduledLibraryReset(..))) } pub fn is_local_reset(&self) -> bool { @@ -367,65 +364,57 @@ impl State { } pub fn is_send_streaming(&self) -> bool { - match self.inner { + matches!( + self.inner, Open { - local: Streaming, .. - } => true, - HalfClosedRemote(Streaming) => true, - _ => false, - } + local: Streaming, + .. + } | HalfClosedRemote(Streaming) + ) } /// Returns true when the stream is in a state to receive headers pub fn is_recv_headers(&self) -> bool { - match self.inner { - Idle => true, - Open { + matches!( + self.inner, + Idle | Open { remote: AwaitingHeaders, .. - } => true, - HalfClosedLocal(AwaitingHeaders) => true, - ReservedRemote => true, - _ => false, - } + } | HalfClosedLocal(AwaitingHeaders) + | ReservedRemote + ) } pub fn is_recv_streaming(&self) -> bool { - match self.inner { + matches!( + self.inner, Open { - remote: Streaming, .. - } => true, - HalfClosedLocal(Streaming) => true, - _ => false, - } + remote: Streaming, + .. + } | HalfClosedLocal(Streaming) + ) } pub fn is_closed(&self) -> bool { - match self.inner { - Closed(_) => true, - _ => false, - } + matches!(self.inner, Closed(_)) } pub fn is_recv_closed(&self) -> bool { - match self.inner { - Closed(..) 
| HalfClosedRemote(..) | ReservedLocal => true, - _ => false, - } + matches!( + self.inner, + Closed(..) | HalfClosedRemote(..) | ReservedLocal + ) } pub fn is_send_closed(&self) -> bool { - match self.inner { - Closed(..) | HalfClosedLocal(..) | ReservedRemote => true, - _ => false, - } + matches!( + self.inner, + Closed(..) | HalfClosedLocal(..) | ReservedRemote + ) } pub fn is_idle(&self) -> bool { - match self.inner { - Idle => true, - _ => false, - } + matches!(self.inner, Idle) } pub fn ensure_recv_open(&self) -> Result { diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index 3e34b7cb2..d33a01cce 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -1,7 +1,5 @@ use super::*; -use slab; - use indexmap::{self, IndexMap}; use std::convert::Infallible; @@ -302,15 +300,15 @@ where let mut stream = store.resolve(idxs.head); if idxs.head == idxs.tail { - assert!(N::next(&*stream).is_none()); + assert!(N::next(&stream).is_none()); self.indices = None; } else { - idxs.head = N::take_next(&mut *stream).unwrap(); + idxs.head = N::take_next(&mut stream).unwrap(); self.indices = Some(idxs); } - debug_assert!(N::is_queued(&*stream)); - N::set_queued(&mut *stream, false); + debug_assert!(N::is_queued(&stream)); + N::set_queued(&mut stream, false); return Some(stream); } @@ -347,7 +345,7 @@ impl<'a> Ptr<'a> { } pub fn store_mut(&mut self) -> &mut Store { - &mut self.store + self.store } /// Remove the stream from the store diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index de7f4f641..68a29828c 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -252,7 +252,7 @@ impl Stream { // The stream is not in any queue !self.is_pending_send && !self.is_pending_send_capacity && !self.is_pending_accept && !self.is_pending_window_update && - !self.is_pending_open && !self.reset_at.is_some() + !self.is_pending_open && self.reset_at.is_none() } /// Returns true when the consumer of the stream has dropped all handles @@ -379,7 +379,7 @@ impl store::Next for NextSend { if val { // ensure that stream is not queued for being opened // if it's being put into queue for sending data - debug_assert_eq!(stream.is_pending_open, false); + debug_assert!(!stream.is_pending_open); } stream.is_pending_send = val; } @@ -450,7 +450,7 @@ impl store::Next for NextOpen { if val { // ensure that stream is not queued for being sent // if it's being put into queue for opening the stream - debug_assert_eq!(stream.is_pending_send, false); + debug_assert!(!stream.is_pending_send); } stream.is_pending_open = val; } @@ -486,9 +486,6 @@ impl store::Next for NextResetExpire { impl ContentLength { pub fn is_head(&self) -> bool { - match *self { - ContentLength::Head => true, - _ => false, - } + matches!(*self, Self::Head) } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 62c55524c..01bdcdd70 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -312,29 +312,29 @@ impl DynStreams<'_, B> { pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_headers(self.peer, &self.send_buffer, frame) + me.recv_headers(self.peer, self.send_buffer, frame) } pub fn recv_data(&mut self, frame: frame::Data) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_data(self.peer, &self.send_buffer, frame) + me.recv_data(self.peer, self.send_buffer, frame) } pub fn recv_reset(&mut self, frame: frame::Reset) -> Result<(), Error> 
{ let mut me = self.inner.lock().unwrap(); - me.recv_reset(&self.send_buffer, frame) + me.recv_reset(self.send_buffer, frame) } /// Notify all streams that a connection-level error happened. pub fn handle_error(&mut self, err: proto::Error) -> StreamId { let mut me = self.inner.lock().unwrap(); - me.handle_error(&self.send_buffer, err) + me.handle_error(self.send_buffer, err) } pub fn recv_go_away(&mut self, frame: &frame::GoAway) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_go_away(&self.send_buffer, frame) + me.recv_go_away(self.send_buffer, frame) } pub fn last_processed_id(&self) -> StreamId { @@ -343,22 +343,22 @@ impl DynStreams<'_, B> { pub fn recv_window_update(&mut self, frame: frame::WindowUpdate) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_window_update(&self.send_buffer, frame) + me.recv_window_update(self.send_buffer, frame) } pub fn recv_push_promise(&mut self, frame: frame::PushPromise) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); - me.recv_push_promise(&self.send_buffer, frame) + me.recv_push_promise(self.send_buffer, frame) } pub fn recv_eof(&mut self, clear_pending_accept: bool) -> Result<(), ()> { let mut me = self.inner.lock().map_err(|_| ())?; - me.recv_eof(&self.send_buffer, clear_pending_accept) + me.recv_eof(self.send_buffer, clear_pending_accept) } pub fn send_reset(&mut self, id: StreamId, reason: Reason) { let mut me = self.inner.lock().unwrap(); - me.send_reset(&self.send_buffer, id, reason) + me.send_reset(self.send_buffer, id, reason) } pub fn send_go_away(&mut self, last_processed_id: StreamId) { @@ -725,7 +725,7 @@ impl Inner { } None => { proto_err!(conn: "recv_push_promise: initiating stream is in an invalid state"); - return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into()); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } }; @@ -1146,7 +1146,7 @@ impl StreamRef { let mut child_stream = me.store.resolve(child_key); child_stream.unlink(); child_stream.remove(); - return Err(err.into()); + return Err(err); } me.refs += 1; @@ -1390,7 +1390,7 @@ impl Clone for OpaqueStreamRef { OpaqueStreamRef { inner: self.inner.clone(), - key: self.key.clone(), + key: self.key, } } } diff --git a/src/server.rs b/src/server.rs index 6e216a40a..e4098e080 100644 --- a/src/server.rs +++ b/src/server.rs @@ -413,7 +413,7 @@ where ) -> Poll, SendResponse), crate::Error>>> { // Always try to advance the internal state. Getting Pending also is // needed to allow this function to return Pending. - if let Poll::Ready(_) = self.poll_closed(cx)? { + if self.poll_closed(cx)?.is_ready() { // If the socket is closed, don't return anything // TODO: drop any pending streams return Poll::Ready(None); diff --git a/src/share.rs b/src/share.rs index f4e3cdeb0..26b428797 100644 --- a/src/share.rs +++ b/src/share.rs @@ -556,8 +556,8 @@ impl PingPong { pub fn send_ping(&mut self, ping: Ping) -> Result<(), crate::Error> { // Passing a `Ping` here is just to be forwards-compatible with // eventually allowing choosing a ping payload. For now, we can - // just drop it. - drop(ping); + // just ignore it. 
+ let _ = ping; self.inner.send_ping().map_err(|err| match err { Some(err) => err.into(), diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index f2c07bacb..862e0c629 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -9,8 +9,8 @@ use h2::{ frame::{self, Frame, StreamId}, }; -pub const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; -pub const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; +pub const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; +pub const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; // ==== helper functions to easily construct h2 Frames ==== diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index cc314cd06..18d084841 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -56,7 +56,7 @@ struct Inner { closed: bool, } -const PREFACE: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; +const PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; /// Create a new mock and handle pub fn new() -> (Mock, Handle) { @@ -148,7 +148,7 @@ impl Handle { poll_fn(move |cx| { while buf.has_remaining() { let res = Pin::new(self.codec.get_mut()) - .poll_write(cx, &mut buf.chunk()) + .poll_write(cx, buf.chunk()) .map_err(|e| panic!("write err={:?}", e)); let n = ready!(res).unwrap(); diff --git a/tests/h2-support/src/prelude.rs b/tests/h2-support/src/prelude.rs index d34f1b996..c40a518da 100644 --- a/tests/h2-support/src/prelude.rs +++ b/tests/h2-support/src/prelude.rs @@ -103,7 +103,7 @@ where // Connection is done... b.await } - Right((v, _)) => return v, + Right((v, _)) => v, Left((Err(e), _)) => panic!("err: {:?}", e), } }) diff --git a/tests/h2-support/src/util.rs b/tests/h2-support/src/util.rs index 1150d5925..aa7fb2c54 100644 --- a/tests/h2-support/src/util.rs +++ b/tests/h2-support/src/util.rs @@ -36,7 +36,7 @@ pub async fn yield_once() { pub fn wait_for_capacity(stream: h2::SendStream, target: usize) -> WaitForCapacity { WaitForCapacity { stream: Some(stream), - target: target, + target, } } @@ -66,7 +66,7 @@ impl Future for WaitForCapacity { assert_ne!(act, 0); if act >= self.target { - return Poll::Ready(self.stream.take().unwrap().into()); + return Poll::Ready(self.stream.take().unwrap()); } } } diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 9635bcc6c..07b291f42 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -371,7 +371,7 @@ async fn send_request_poll_ready_when_connection_error() { resp2.await.expect_err("req2"); })); - while let Some(_) = unordered.next().await {} + while unordered.next().await.is_some() {} }; join(srv, h2).await; @@ -489,9 +489,8 @@ async fn http_2_request_without_scheme_or_authority() { client .send_request(request, true) .expect_err("should be UserError"); - let ret = h2.await.expect("h2"); + let _: () = h2.await.expect("h2"); drop(client); - ret }; join(srv, h2).await; @@ -1452,8 +1451,8 @@ async fn extended_connect_request() { join(srv, h2).await; } -const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; -const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; +const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; +const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; trait MockH2 { fn handshake(&mut self) -> &mut Self; diff --git a/tests/h2-tests/tests/hammer.rs b/tests/h2-tests/tests/hammer.rs index 9a200537a..a5cba3dfa 100644 --- a/tests/h2-tests/tests/hammer.rs +++ 
b/tests/h2-tests/tests/hammer.rs @@ -58,7 +58,7 @@ impl Server { } fn addr(&self) -> SocketAddr { - self.addr.clone() + self.addr } fn request_count(&self) -> usize { diff --git a/tests/h2-tests/tests/ping_pong.rs b/tests/h2-tests/tests/ping_pong.rs index a57f35c17..0f93578cc 100644 --- a/tests/h2-tests/tests/ping_pong.rs +++ b/tests/h2-tests/tests/ping_pong.rs @@ -11,9 +11,8 @@ async fn recv_single_ping() { // Create the handshake let h2 = async move { - let (client, conn) = client::handshake(m).await.unwrap(); - let c = conn.await.unwrap(); - (client, c) + let (_client, conn) = client::handshake(m).await.unwrap(); + let _: () = conn.await.unwrap(); }; let mock = async move { @@ -146,6 +145,7 @@ async fn user_notifies_when_connection_closes() { srv }; + #[allow(clippy::async_yields_async)] let client = async move { let (_client, mut conn) = client::handshake(io).await.expect("client handshake"); // yield once so we can ack server settings diff --git a/tests/h2-tests/tests/push_promise.rs b/tests/h2-tests/tests/push_promise.rs index f52f781d5..94c1154ef 100644 --- a/tests/h2-tests/tests/push_promise.rs +++ b/tests/h2-tests/tests/push_promise.rs @@ -223,7 +223,7 @@ async fn pending_push_promises_reset_when_dropped() { assert_eq!(resp.status(), StatusCode::OK); }; - let _ = conn.drive(req).await; + conn.drive(req).await; conn.await.expect("client"); drop(client); }; diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 948ad1630..cc573f903 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -5,8 +5,8 @@ use futures::StreamExt; use h2_support::prelude::*; use tokio::io::AsyncWriteExt; -const SETTINGS: &'static [u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; -const SETTINGS_ACK: &'static [u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; +const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; +const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; #[tokio::test] async fn read_preface_in_multiple_frames() { diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index f2b2efc1e..9f348d5f2 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -786,7 +786,7 @@ async fn rst_while_closing() { // Enqueue trailers frame. 
let _ = stream.send_trailers(HeaderMap::new()); // Signal the server mock to send RST_FRAME - let _ = tx.send(()).unwrap(); + let _: () = tx.send(()).unwrap(); drop(stream); yield_once().await; // yield once to allow the server mock to be polled diff --git a/util/genfixture/src/main.rs b/util/genfixture/src/main.rs index a6d730761..9dc7b00f9 100644 --- a/util/genfixture/src/main.rs +++ b/util/genfixture/src/main.rs @@ -10,7 +10,7 @@ fn main() { let path = args.get(1).expect("usage: genfixture [PATH]"); let path = Path::new(path); - let mut tests = HashMap::new(); + let mut tests: HashMap> = HashMap::new(); for entry in WalkDir::new(path) { let entry = entry.unwrap(); @@ -28,21 +28,21 @@ fn main() { let fixture_path = path.split("fixtures/hpack/").last().unwrap(); // Now, split that into the group and the name - let module = fixture_path.split("/").next().unwrap(); + let module = fixture_path.split('/').next().unwrap(); tests .entry(module.to_string()) - .or_insert(vec![]) + .or_default() .push(fixture_path.to_string()); } let mut one = false; for (module, tests) in tests { - let module = module.replace("-", "_"); + let module = module.replace('-', "_"); if one { - println!(""); + println!(); } one = true; @@ -51,7 +51,7 @@ fn main() { println!(" {} => {{", module); for test in tests { - let ident = test.split("/").nth(1).unwrap().split(".").next().unwrap(); + let ident = test.split('/').nth(1).unwrap().split('.').next().unwrap(); println!(" ({}, {:?});", ident, test); } diff --git a/util/genhuff/src/main.rs b/util/genhuff/src/main.rs index 2d5b0ba75..6418fab8b 100644 --- a/util/genhuff/src/main.rs +++ b/util/genhuff/src/main.rs @@ -112,8 +112,8 @@ impl Node { }; start.transitions.borrow_mut().push(Transition { - target: target, - byte: byte, + target, + byte, maybe_eos: self.maybe_eos, }); @@ -238,7 +238,7 @@ pub fn main() { let (encode, decode) = load_table(); println!("// !!! DO NOT EDIT !!! Generated by util/genhuff/src/main.rs"); - println!(""); + println!(); println!("// (num-bits, bits)"); println!("pub const ENCODE_TABLE: [(usize, u64); 257] = ["); @@ -247,7 +247,7 @@ pub fn main() { } println!("];"); - println!(""); + println!(); println!("// (next-state, byte, flags)"); println!("pub const DECODE_TABLE: [[(usize, u8, u8); 16]; 256] = ["); @@ -256,7 +256,7 @@ pub fn main() { println!("];"); } -const TABLE: &'static str = r##" +const TABLE: &str = r##" ( 0) |11111111|11000 1ff8 [13] ( 1) |11111111|11111111|1011000 7fffd8 [23] ( 2) |11111111|11111111|11111110|0010 fffffe2 [28] From 68e44034b57607c964a257f4a2d77ecae38cf87e Mon Sep 17 00:00:00 2001 From: John Howard Date: Tue, 10 Jan 2023 11:29:05 -0800 Subject: [PATCH 109/178] Consistently indicate `malformed` in logs (#658) --- src/server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server.rs b/src/server.rs index e4098e080..2de95374b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1478,7 +1478,7 @@ impl proto::Peer for Peer { // A :scheme is required, except CONNECT. 
if let Some(scheme) = pseudo.scheme { if is_connect && !has_protocol { - malformed!(":scheme in CONNECT"); + malformed!("malformed headers: :scheme in CONNECT"); } let maybe_scheme = scheme.parse(); let scheme = maybe_scheme.or_else(|why| { @@ -1501,7 +1501,7 @@ impl proto::Peer for Peer { if let Some(path) = pseudo.path { if is_connect && !has_protocol { - malformed!(":path in CONNECT"); + malformed!("malformed headers: :path in CONNECT"); } // This cannot be empty From b84c2442ba9ec3728d11fbb7bd018e83e977dd55 Mon Sep 17 00:00:00 2001 From: Folke B Date: Fri, 20 Jan 2023 16:44:22 +0100 Subject: [PATCH 110/178] Add Protocol extension to Request on Extended CONNECT (#655) This exposes the :protocol pseudo header as Request extension. Fixes #347 --- src/server.rs | 9 +++++++-- tests/h2-tests/tests/server.rs | 7 ++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/server.rs b/src/server.rs index 2de95374b..bb3d3cf86 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1451,8 +1451,13 @@ impl proto::Peer for Peer { } let has_protocol = pseudo.protocol.is_some(); - if !is_connect && has_protocol { - malformed!("malformed headers: :protocol on non-CONNECT request"); + if has_protocol { + if is_connect { + // Assert that we have the right type. + b = b.extension::(pseudo.protocol.unwrap()); + } else { + malformed!("malformed headers: :protocol on non-CONNECT request"); + } } if pseudo.status.is_some() { diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index cc573f903..8aea1fd59 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1214,7 +1214,12 @@ async fn extended_connect_protocol_enabled_during_handshake() { let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); - let (_req, mut stream) = srv.next().await.unwrap().unwrap(); + let (req, mut stream) = srv.next().await.unwrap().unwrap(); + + assert_eq!( + req.extensions().get::(), + Some(&crate::ext::Protocol::from_static("the-bread-protocol")) + ); let rsp = Response::new(()); stream.send_response(rsp, false).unwrap(); From 73bea23e9b6967cc9699918b8965e0fd87e8ae53 Mon Sep 17 00:00:00 2001 From: Chekov Date: Tue, 14 Feb 2023 07:39:40 +0800 Subject: [PATCH 111/178] fix: panic in when there is connection window available, but not stream (#657) We met the panic in our production environment, so handle this panic condition before panic. The stack backtrace: Co-authored-by: winters.zc --- src/proto/streams/flow_control.rs | 15 +++++++++------ src/proto/streams/prioritize.rs | 8 ++++++++ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index b1b2745eb..73a7754db 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -173,12 +173,15 @@ impl FlowControl { self.available ); - // Ensure that the argument is correct - assert!(self.window_size >= sz as usize); - - // Update values - self.window_size -= sz; - self.available -= sz; + // If send size is zero it's meaningless to update flow control window + if sz > 0 { + // Ensure that the argument is correct + assert!(self.window_size >= sz as usize); + + // Update values + self.window_size -= sz; + self.available -= sz; + } } } diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 329e55022..f89a772f0 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -744,6 +744,14 @@ impl Prioritize { // capacity at this point. 
debug_assert!(len <= self.flow.window_size()); + // Check if the stream level window the peer knows is available. In some + // scenarios, maybe the window we know is available but the window which + // peer knows is not. + if len > 0 && len > stream.send_flow.window_size() { + stream.pending_send.push_front(buffer, frame.into()); + continue; + } + tracing::trace!(len, "sending data frame"); // Update the flow control From 732319039ff6184076b339f6870746b6c7fed86f Mon Sep 17 00:00:00 2001 From: Vadim Egorov <96079228+vadim-eg@users.noreply.github.com> Date: Mon, 20 Feb 2023 14:55:05 -0800 Subject: [PATCH 112/178] Avoid spurious wakeups when stream capacity is not available (#661) Fixes #628 Sometimes `poll_capacity` returns `Ready(Some(0))` - in which case caller will have no way to wait for the stream capacity to become available. The previous attempt on the fix has addressed only a part of the problem. The root cause - in a nutshell - is the race condition between the application tasks that performs stream I/O and the task that serves the underlying HTTP/2 connection. The application thread that is about to send data calls `reserve_capacity/poll_capacity`, is provided with some send capacity and proceeds to `send_data`. Meanwhile the service thread may send some buffered data and/or receive some window updates - either way the stream's effective allocated send capacity may not change, but, since the capacity still available, `send_capacity_inc` flag may be set. The sending task calls `send_data` and uses the entire allocated capacity, leaving the flag set. Next time `poll_capacity` returns `Ready(Some(0))`. This change sets the flag and dispatches the wakeup event only in cases when the effective capacity reported by `poll_capacity` actually increases. --- src/proto/streams/prioritize.rs | 20 +++------ src/proto/streams/send.rs | 7 +--- src/proto/streams/stream.rs | 56 +++++++++++++++++++------ tests/h2-tests/tests/flow_control.rs | 61 ++++++++++++++++++++++++++++ 4 files changed, 111 insertions(+), 33 deletions(-) diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index f89a772f0..88204ddcc 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -323,9 +323,11 @@ impl Prioritize { /// connection pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { let available = stream.send_flow.available().as_size(); - stream.send_flow.claim_capacity(available); - // Re-assign all capacity to the connection - self.assign_connection_capacity(available, stream, counts); + if available > 0 { + stream.send_flow.claim_capacity(available); + // Re-assign all capacity to the connection + self.assign_connection_capacity(available, stream, counts); + } } /// Reclaim just reserved capacity, not buffered capacity, and re-assign @@ -756,17 +758,7 @@ impl Prioritize { // Update the flow control tracing::trace_span!("updating stream flow").in_scope(|| { - stream.send_flow.send_data(len); - - // Decrement the stream's buffered data counter - debug_assert!(stream.buffered_send_data >= len as usize); - stream.buffered_send_data -= len as usize; - stream.requested_send_capacity -= len; - - // If the capacity was limited because of the - // max_send_buffer_size, then consider waking - // the send task again... 
- stream.notify_if_can_buffer_more(self.max_buffer_size); + stream.send_data(len, self.max_buffer_size); // Assign the capacity back to the connection that // was just consumed from the stream in the previous diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 38896a304..20aba38d4 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -333,12 +333,7 @@ impl Send { /// Current available stream send capacity pub fn capacity(&self, stream: &mut store::Ptr) -> WindowSize { - let available = stream.send_flow.available().as_size() as usize; - let buffered = stream.buffered_send_data; - - available - .min(self.prioritize.max_buffer_size()) - .saturating_sub(buffered) as WindowSize + stream.capacity(self.prioritize.max_buffer_size()) } pub fn poll_reset( diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 68a29828c..2888d744b 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -264,35 +264,65 @@ impl Stream { self.ref_count == 0 && !self.state.is_closed() } + /// Current available stream send capacity + pub fn capacity(&self, max_buffer_size: usize) -> WindowSize { + let available = self.send_flow.available().as_size() as usize; + let buffered = self.buffered_send_data; + + available.min(max_buffer_size).saturating_sub(buffered) as WindowSize + } + pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { + let prev_capacity = self.capacity(max_buffer_size); debug_assert!(capacity > 0); self.send_flow.assign_capacity(capacity); tracing::trace!( - " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={}", + " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", self.send_flow.available(), self.buffered_send_data, self.id, - max_buffer_size + max_buffer_size, + prev_capacity, ); - self.notify_if_can_buffer_more(max_buffer_size); + if prev_capacity < self.capacity(max_buffer_size) { + self.notify_capacity(); + } } - /// If the capacity was limited because of the max_send_buffer_size, - /// then consider waking the send task again... - pub fn notify_if_can_buffer_more(&mut self, max_buffer_size: usize) { - let available = self.send_flow.available().as_size() as usize; - let buffered = self.buffered_send_data; + pub fn send_data(&mut self, len: WindowSize, max_buffer_size: usize) { + let prev_capacity = self.capacity(max_buffer_size); + + self.send_flow.send_data(len); - // Only notify if the capacity exceeds the amount of buffered data - if available.min(max_buffer_size) > buffered { - self.send_capacity_inc = true; - tracing::trace!(" notifying task"); - self.notify_send(); + // Decrement the stream's buffered data counter + debug_assert!(self.buffered_send_data >= len as usize); + self.buffered_send_data -= len as usize; + self.requested_send_capacity -= len; + + tracing::trace!( + " sent stream data; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", + self.send_flow.available(), + self.buffered_send_data, + self.id, + max_buffer_size, + prev_capacity, + ); + + if prev_capacity < self.capacity(max_buffer_size) { + self.notify_capacity(); } } + /// If the capacity was limited because of the max_send_buffer_size, + /// then consider waking the send task again... + pub fn notify_capacity(&mut self) { + self.send_capacity_inc = true; + tracing::trace!(" notifying task"); + self.notify_send(); + } + /// Returns `Err` when the decrement cannot be completed due to overflow. 
pub fn dec_content_length(&mut self, len: usize) -> Result<(), ()> { match self.content_length { diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 92e7a532f..5caa2ec3a 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1797,3 +1797,64 @@ async fn max_send_buffer_size_poll_capacity_wakes_task() { join(srv, client).await; } + +#[tokio::test] +async fn poll_capacity_wakeup_after_window_update() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv + .assert_client_handshake_with_settings(frames::settings().initial_window_size(10)) + .await; + assert_default_settings!(settings); + srv.recv_frame(frames::headers(1).request("POST", "https://www.example.com/")) + .await; + srv.send_frame(frames::headers(1).response(200)).await; + srv.recv_frame(frames::data(1, &b"abcde"[..])).await; + srv.send_frame(frames::window_update(1, 5)).await; + srv.send_frame(frames::window_update(1, 5)).await; + srv.recv_frame(frames::data(1, &b"abcde"[..])).await; + srv.recv_frame(frames::data(1, &b""[..]).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::Builder::new() + .max_send_buffer_size(5) + .handshake::<_, Bytes>(io) + .await + .unwrap(); + let request = Request::builder() + .method(Method::POST) + .uri("https://www.example.com/") + .body(()) + .unwrap(); + + let (response, mut stream) = client.send_request(request, false).unwrap(); + + let response = h2.drive(response).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + stream.send_data("abcde".into(), false).unwrap(); + + stream.reserve_capacity(10); + assert_eq!(stream.capacity(), 0); + + let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; + h2.drive(idle_ms(10)).await; + stream.send_data("abcde".into(), false).unwrap(); + + stream.reserve_capacity(5); + assert_eq!(stream.capacity(), 0); + + // This will panic if there is a bug causing h2 to return Ok(0) from poll_capacity. + let mut stream = h2.drive(util::wait_for_capacity(stream, 5)).await; + + stream.send_data("".into(), true).unwrap(); + + // Wait for the connection to close + h2.await.unwrap(); + }; + + join(srv, h2).await; +} From 96caf4fca32fad92037e082b6b74220274faaf16 Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Wed, 22 Feb 2023 17:09:20 +0100 Subject: [PATCH 113/178] Add a message for EOF-related broken pipe errors (#615) It's quite confusing from production logs when all I get is "broken pipe" and I don't know which path was taken for that error to be logged. --- src/proto/streams/state.rs | 8 +++++++- src/proto/streams/streams.rs | 8 +++++++- tests/h2-tests/tests/client_request.rs | 9 ++++++--- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 1233e2352..db37831f8 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -303,7 +303,13 @@ impl State { Closed(..) 
=> {} ref state => { tracing::trace!("recv_eof; state={:?}", state); - self.inner = Closed(Cause::Error(io::ErrorKind::BrokenPipe.into())); + self.inner = Closed(Cause::Error( + io::Error::new( + io::ErrorKind::BrokenPipe, + "stream closed because of a broken pipe", + ) + .into(), + )); } } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 01bdcdd70..e1a2e3fe7 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -806,7 +806,13 @@ impl Inner { let send_buffer = &mut *send_buffer; if actions.conn_error.is_none() { - actions.conn_error = Some(io::Error::from(io::ErrorKind::BrokenPipe).into()); + actions.conn_error = Some( + io::Error::new( + io::ErrorKind::BrokenPipe, + "connection closed because of a broken pipe", + ) + .into(), + ); } tracing::trace!("Streams::recv_eof"); diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 07b291f42..aff39f5c1 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -574,7 +574,7 @@ async fn connection_close_notifies_response_future() { .0 .await; let err = res.expect_err("response"); - assert_eq!(err.to_string(), "broken pipe"); + assert_eq!(err.to_string(), "stream closed because of a broken pipe"); }; join(async move { conn.await.expect("conn") }, req).await; }; @@ -613,7 +613,7 @@ async fn connection_close_notifies_client_poll_ready() { .0 .await; let err = res.expect_err("response"); - assert_eq!(err.to_string(), "broken pipe"); + assert_eq!(err.to_string(), "stream closed because of a broken pipe"); }; conn.drive(req).await; @@ -621,7 +621,10 @@ async fn connection_close_notifies_client_poll_ready() { let err = poll_fn(move |cx| client.poll_ready(cx)) .await .expect_err("poll_ready"); - assert_eq!(err.to_string(), "broken pipe"); + assert_eq!( + err.to_string(), + "connection closed because of a broken pipe" + ); }; join(srv, client).await; From b9dcd39915420ab1d9f4a8ad0f96c86af8f86558 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 27 Feb 2023 12:03:15 -0500 Subject: [PATCH 114/178] v0.3.16 --- CHANGELOG.md | 8 ++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09c99aac3..17abf81db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +# 0.3.16 (February 27, 2023) + +* Set `Protocol` extension on requests when received Extended CONNECT requests. +* Remove `B: Unpin + 'static` bound requiremented of bufs +* Fix releasing of frames when stream is finished, reducing memory usage. +* Fix panic when trying to send data and connection window is available, but stream window is not. +* Fix spurious wakeups when stream capacity is not available. + # 0.3.15 (October 21, 2022) * Remove `B: Buf` bound on `SendStream`'s parameter diff --git a/Cargo.toml b/Cargo.toml index 8e904875e..be87f0303 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.15" +version = "0.3.16" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index e7b95035f..3af8b1a32 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.15")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.16")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 45b9bccdfcb26cfe9907123a1e975a22eb84c44f Mon Sep 17 00:00:00 2001 From: Alex Touchet Date: Tue, 28 Feb 2023 08:15:12 -0800 Subject: [PATCH 115/178] chore: set rust-version in Cargo.toml (#664) --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index be87f0303..64573e1f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] edition = "2018" +rust-version = "1.56" [features] # Enables `futures::Stream` implementations for various types. From d3d50ef8123f0a1b6d16faa2d9edc01418da7c00 Mon Sep 17 00:00:00 2001 From: Constantin Nickel Date: Wed, 12 Apr 2023 14:30:32 +0200 Subject: [PATCH 116/178] chore: Replace unmaintained/outdated GitHub Actions --- .github/workflows/CI.yml | 48 ++++++++++------------------------------ 1 file changed, 12 insertions(+), 36 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index f433f9082..1ae9a54b3 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -13,21 +13,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 - name: Install Rust - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@stable with: - profile: minimal - toolchain: stable - override: true components: rustfmt - - name: cargo fmt --check - uses: actions-rs/cargo@v1 - with: - command: fmt - args: --all -- --check + - run: cargo fmt --all --check test: name: Test @@ -43,38 +36,26 @@ jobs: - stable steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 - name: Install Rust (${{ matrix.rust }}) - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: - profile: minimal toolchain: ${{ matrix.rust }} - override: true - name: Install libssl-dev run: sudo apt-get update && sudo apt-get install libssl-dev - name: Build without unstable flag - uses: actions-rs/cargo@v1 - with: - command: build + run: cargo build - name: Check with unstable flag - uses: actions-rs/cargo@v1 - with: - command: check - args: --features unstable + run: cargo check --features unstable - name: Run lib tests and doc tests - uses: actions-rs/cargo@v1 - with: - command: test + run: cargo test - name: Run integration tests - uses: actions-rs/cargo@v1 - with: - command: test - args: -p h2-tests + run: cargo test -p h2-tests - name: Run h2spec run: ./ci/h2spec.sh @@ -99,16 +80,11 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v1 + uses: actions/checkout@v3 - name: Install Rust (${{ matrix.rust }}) - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: - profile: minimal toolchain: ${{ matrix.rust }} - override: true - - name: Check - uses: actions-rs/cargo@v1 - with: - command: check + - run: cargo check From 481c31d5283bf32b90c83388c037494548934971 Mon Sep 17 00:00:00 2001 From: Constantin Nickel Date: Wed, 12 Apr 2023 14:40:20 +0200 Subject: [PATCH 117/178] chore: Use Cargo metadata for the MSRV build job --- .github/workflows/CI.yml | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git 
a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 1ae9a54b3..2cff15cff 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -66,25 +66,24 @@ jobs: if: matrix.rust == 'nightly' msrv: - name: Check MSRV (${{ matrix.rust }}) + name: Check MSRV needs: [style] - strategy: - matrix: - rust: - - 1.56 # never go past Hyper's own MSRV - - os: - - ubuntu-latest - runs-on: ${{ matrix.os }} + runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v3 - - name: Install Rust (${{ matrix.rust }}) + - name: Get MSRV from package metadata + id: metadata + run: | + cargo metadata --no-deps --format-version 1 | + jq -r '"msrv=" + (.packages[] | select(.name == "h2")).rust_version' >> $GITHUB_OUTPUT + + - name: Install Rust (${{ steps.metadata.outputs.msrv }}) uses: dtolnay/rust-toolchain@master with: - toolchain: ${{ matrix.rust }} + toolchain: ${{ steps.metadata.outputs.msrv }} - run: cargo check From 8088ca658d90a3874fb6b136b85776424265295b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 13 Apr 2023 09:11:55 -0400 Subject: [PATCH 118/178] feat: add Error::is_library method --- src/error.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/error.rs b/src/error.rs index d45827e36..1b1438e48 100644 --- a/src/error.rs +++ b/src/error.rs @@ -103,6 +103,16 @@ impl Error { Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote) ) } + + /// Returns true if the error was created by `h2. + /// + /// Such as noticing some protocol error and sending a GOAWAY or RST_STREAM. + pub fn is_library(&self) -> bool { + matches!( + self.kind, + Kind::GoAway(_, _, Initiator::Library) | Kind::Reset(_, _, Initiator::Library) + ) + } } impl From for Error { From 5bc8e72e5fcbd8ae2d3d9bc78a1c0ef0040bcc39 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 12 Apr 2023 12:23:56 -0400 Subject: [PATCH 119/178] fix: limit the amount of pending-accept reset streams Streams that have been received by the peer, but not accepted by the user, can also receive a RST_STREAM. This is a legitimate pattern: one could send a request and then shortly after, realize it is not needed, sending a CANCEL. However, since those streams are now "closed", they don't count towards the max concurrent streams. So, they will sit in the accept queue, using memory. In most cases, the user is calling `accept` in a loop, and they can accept requests that have been reset fast enough that this isn't an issue in practice. But if the peer is able to flood the network faster than the server accept loop can run (simply accepting, not processing requests; that tends to happen in a separate task), the memory could grow. So, this introduces a maximum count for streams in the pending-accept but remotely-reset state. If the maximum is reached, a GOAWAY frame with the error code of ENHANCE_YOUR_CALM is sent, and the connection marks itself as errored. 
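For illustration, a minimal sketch (not part of this patch) of the accept-loop pattern described
above: requests are pulled off the connection promptly and handed to their own tasks, so
remotely-reset streams do not linger in the pending-accept queue. The bind address, the empty
response body, and the `expect`-style error handling are placeholders, not code from this change.

```
// Sketch only: a typical h2 server accept loop.
use h2::server;
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:5928").await?;

    loop {
        let (socket, _peer) = listener.accept().await?;

        tokio::spawn(async move {
            let mut conn = server::handshake(socket).await.expect("handshake");

            // Keep accepting promptly; streams the peer has already reset
            // still occupy the pending-accept queue until taken here.
            while let Some(result) = conn.accept().await {
                let (request, mut respond) = match result {
                    Ok(pair) => pair,
                    Err(_) => break,
                };

                // Do the real work in a separate task so this loop stays fast.
                tokio::spawn(async move {
                    drop(request);
                    let response = http::Response::new(());
                    let _ = respond.send_response(response, true);
                });
            }
        });
    }
}
```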
ref CVE-2023-26964 ref GHSA-f8vr-r385-rh5r Closes https://github.com/hyperium/hyper/issues/2877 --- src/proto/connection.rs | 8 +++++ src/proto/streams/counts.rs | 51 ++++++++++++++++++++++----- src/proto/streams/mod.rs | 4 +++ src/proto/streams/recv.rs | 30 ++++++++++++++-- src/proto/streams/state.rs | 7 ++++ src/proto/streams/streams.rs | 8 ++++- src/server.rs | 7 ++++ tests/h2-support/src/frames.rs | 4 +++ tests/h2-tests/tests/stream_states.rs | 41 ++++++++++++++++++++- 9 files changed, 148 insertions(+), 12 deletions(-) diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 59883cf33..1fec23102 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -14,6 +14,8 @@ use std::task::{Context, Poll}; use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; +const DEFAULT_MAX_REMOTE_RESET_STREAMS: usize = 20; + /// An H2 connection #[derive(Debug)] pub(crate) struct Connection @@ -118,6 +120,7 @@ where .unwrap_or(false), local_reset_duration: config.reset_stream_duration, local_reset_max: config.reset_stream_max, + remote_reset_max: DEFAULT_MAX_REMOTE_RESET_STREAMS, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, remote_max_initiated: config .settings @@ -172,6 +175,11 @@ where self.inner.streams.max_recv_streams() } + #[cfg(feature = "unstable")] + pub fn num_wired_streams(&self) -> usize { + self.inner.streams.num_wired_streams() + } + /// Returns `Ready` when the connection is ready to receive a frame. /// /// Returns `Error` as this may raise errors that are caused by delayed diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index 70dfc7851..6a5aa9ccd 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -21,10 +21,16 @@ pub(super) struct Counts { num_recv_streams: usize, /// Maximum number of pending locally reset streams - max_reset_streams: usize, + max_local_reset_streams: usize, /// Current number of pending locally reset streams - num_reset_streams: usize, + num_local_reset_streams: usize, + + /// Max number of "pending accept" streams that were remotely reset + max_remote_reset_streams: usize, + + /// Current number of "pending accept" streams that were remotely reset + num_remote_reset_streams: usize, } impl Counts { @@ -36,8 +42,10 @@ impl Counts { num_send_streams: 0, max_recv_streams: config.remote_max_initiated.unwrap_or(usize::MAX), num_recv_streams: 0, - max_reset_streams: config.local_reset_max, - num_reset_streams: 0, + max_local_reset_streams: config.local_reset_max, + num_local_reset_streams: 0, + max_remote_reset_streams: config.remote_reset_max, + num_remote_reset_streams: 0, } } @@ -90,7 +98,7 @@ impl Counts { /// Returns true if the number of pending reset streams can be incremented. pub fn can_inc_num_reset_streams(&self) -> bool { - self.max_reset_streams > self.num_reset_streams + self.max_local_reset_streams > self.num_local_reset_streams } /// Increments the number of pending reset streams. @@ -101,7 +109,34 @@ impl Counts { pub fn inc_num_reset_streams(&mut self) { assert!(self.can_inc_num_reset_streams()); - self.num_reset_streams += 1; + self.num_local_reset_streams += 1; + } + + pub(crate) fn max_remote_reset_streams(&self) -> usize { + self.max_remote_reset_streams + } + + /// Returns true if the number of pending REMOTE reset streams can be + /// incremented. + pub(crate) fn can_inc_num_remote_reset_streams(&self) -> bool { + self.max_remote_reset_streams > self.num_remote_reset_streams + } + + /// Increments the number of pending REMOTE reset streams. 
+ /// + /// # Panics + /// + /// Panics on failure as this should have been validated before hand. + pub(crate) fn inc_num_remote_reset_streams(&mut self) { + assert!(self.can_inc_num_remote_reset_streams()); + + self.num_remote_reset_streams += 1; + } + + pub(crate) fn dec_num_remote_reset_streams(&mut self) { + assert!(self.num_remote_reset_streams > 0); + + self.num_remote_reset_streams -= 1; } pub fn apply_remote_settings(&mut self, settings: &frame::Settings) { @@ -194,8 +229,8 @@ impl Counts { } fn dec_num_reset_streams(&mut self) { - assert!(self.num_reset_streams > 0); - self.num_reset_streams -= 1; + assert!(self.num_local_reset_streams > 0); + self.num_local_reset_streams -= 1; } } diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index 0ff8131c1..fbe32c7b0 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -60,6 +60,10 @@ pub struct Config { /// Maximum number of locally reset streams to keep at a time pub local_reset_max: usize, + /// Maximum number of remotely reset "pending accept" streams to keep at a + /// time. Going over this number results in a connection error. + pub remote_reset_max: usize, + /// Initial window size of remote initiated streams pub remote_init_window_sz: WindowSize, diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 497efc9bc..0fe2bdd57 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -741,12 +741,39 @@ impl Recv { } /// Handle remote sending an explicit RST_STREAM. - pub fn recv_reset(&mut self, frame: frame::Reset, stream: &mut Stream) { + pub fn recv_reset( + &mut self, + frame: frame::Reset, + stream: &mut Stream, + counts: &mut Counts, + ) -> Result<(), Error> { + // Reseting a stream that the user hasn't accepted is possible, + // but should be done with care. These streams will continue + // to take up memory in the accept queue, but will no longer be + // counted as "concurrent" streams. + // + // So, we have a separate limit for these. + // + // See https://github.com/hyperium/hyper/issues/2877 + if stream.is_pending_accept { + if counts.can_inc_num_remote_reset_streams() { + counts.inc_num_remote_reset_streams(); + } else { + tracing::warn!( + "recv_reset; remotely-reset pending-accept streams reached limit ({:?})", + counts.max_remote_reset_streams(), + ); + return Err(Error::library_go_away(Reason::ENHANCE_YOUR_CALM)); + } + } + // Notify the stream stream.state.recv_reset(frame, stream.is_pending_send); stream.notify_send(); stream.notify_recv(); + + Ok(()) } /// Handle a connection-level error @@ -1033,7 +1060,6 @@ impl Recv { cx: &Context, stream: &mut Stream, ) -> Poll>> { - // TODO: Return error when the stream is reset match stream.pending_recv.pop_front(&mut self.buffer) { Some(Event::Data(payload)) => Poll::Ready(Some(Ok(payload))), Some(event) => { diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index db37831f8..b9612addc 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -360,6 +360,13 @@ impl State { } } + pub fn is_remote_reset(&self) -> bool { + match self.inner { + Closed(Cause::Error(ref e)) => e.is_local(), + _ => false, + } + } + /// Returns true if the stream is already reset. 
pub fn is_reset(&self) -> bool { match self.inner { diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index e1a2e3fe7..dbaebfa7a 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -140,6 +140,12 @@ where // TODO: ideally, OpaqueStreamRefs::new would do this, but we're holding // the lock, so it can't. me.refs += 1; + + // Pending-accepted remotely-reset streams are counted. + if stream.state.is_remote_reset() { + me.counts.dec_num_remote_reset_streams(); + } + StreamRef { opaque: OpaqueStreamRef::new(self.inner.clone(), stream), send_buffer: self.send_buffer.clone(), @@ -601,7 +607,7 @@ impl Inner { let actions = &mut self.actions; self.counts.transition(stream, |counts, stream| { - actions.recv.recv_reset(frame, stream); + actions.recv.recv_reset(frame, stream, counts)?; actions.send.handle_error(send_buffer, stream, counts); assert!(stream.state.is_closed()); Ok(()) diff --git a/src/server.rs b/src/server.rs index bb3d3cf86..6f2455e0b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -576,6 +576,13 @@ where pub fn max_concurrent_recv_streams(&self) -> usize { self.connection.max_recv_streams() } + + // Could disappear at anytime. + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn num_wired_streams(&self) -> usize { + self.connection.num_wired_streams() + } } #[cfg(feature = "stream")] diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index 862e0c629..bc4e2e708 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -297,6 +297,10 @@ impl Mock { self.reason(frame::Reason::FRAME_SIZE_ERROR) } + pub fn calm(self) -> Self { + self.reason(frame::Reason::ENHANCE_YOUR_CALM) + } + pub fn no_error(self) -> Self { self.reason(frame::Reason::NO_ERROR) } diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 9f348d5f2..610d3a530 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -1,6 +1,6 @@ #![deny(warnings)] -use futures::future::{join, join3, lazy, try_join}; +use futures::future::{join, join3, lazy, poll_fn, try_join}; use futures::{FutureExt, StreamExt, TryStreamExt}; use h2_support::prelude::*; use h2_support::util::yield_once; @@ -194,6 +194,45 @@ async fn closed_streams_are_released() { join(srv, h2).await; } +#[tokio::test] +async fn reset_streams_dont_grow_memory_continuously() { + //h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + const N: u32 = 50; + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + for n in (1..(N * 2)).step_by(2) { + client + .send_frame(frames::headers(n).request("GET", "https://a.b/").eos()) + .await; + client.send_frame(frames::reset(n).protocol_error()).await; + } + tokio::time::timeout( + std::time::Duration::from_secs(1), + client.recv_frame(frames::go_away(41).calm()), + ) + .await + .expect("client goaway"); + }; + + let srv = async move { + let mut srv = server::Builder::new() + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + poll_fn(|cx| srv.poll_closed(cx)) + .await + .expect_err("server should error"); + // specifically, not 50; + assert_eq!(21, srv.num_wired_streams()); + }; + join(srv, client).await; +} + #[tokio::test] async fn errors_if_recv_frame_exceeds_max_frame_size() { h2_support::trace_init!(); From d3f37e9fbad6608ba74419c355eb1892bd55d977 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 12 Apr 2023 21:13:41 -0400 Subject: [PATCH 
120/178] feat: add `max_pending_accept_reset_streams(n)` options The new option is available to both client and server `Builder`s. --- src/client.rs | 49 +++++++++++++++++++++++++++ src/proto/connection.rs | 5 ++- src/proto/mod.rs | 1 + src/server.rs | 49 +++++++++++++++++++++++++++ tests/h2-tests/tests/stream_states.rs | 4 ++- 5 files changed, 104 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index 5dd0b0f87..4147e8a46 100644 --- a/src/client.rs +++ b/src/client.rs @@ -326,6 +326,10 @@ pub struct Builder { /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, + /// Maximum number of remotely reset streams to allow in the pending + /// accept queue. + pending_accept_reset_stream_max: usize, + /// Initial `Settings` frame to send as part of the handshake. settings: Settings, @@ -634,6 +638,7 @@ impl Builder { max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, + pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX, initial_target_connection_window_size: None, initial_max_send_streams: usize::MAX, settings: Default::default(), @@ -966,6 +971,49 @@ impl Builder { self } + /// Sets the maximum number of pending-accept remotely-reset streams. + /// + /// Streams that have been received by the peer, but not accepted by the + /// user, can also receive a RST_STREAM. This is a legitimate pattern: one + /// could send a request and then shortly after, realize it is not needed, + /// sending a CANCEL. + /// + /// However, since those streams are now "closed", they don't count towards + /// the max concurrent streams. So, they will sit in the accept queue, + /// using memory. + /// + /// When the number of remotely-reset streams sitting in the pending-accept + /// queue reaches this maximum value, a connection error with the code of + /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the + /// `Future`. + /// + /// The default value is currently 20, but could change. + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use h2::client::*; + /// # use bytes::Bytes; + /// # + /// # async fn doc(my_io: T) + /// # -> Result<((SendRequest, Connection)), h2::Error> + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2 + /// // handshake. + /// let client_fut = Builder::new() + /// .max_pending_accept_reset_streams(100) + /// .handshake(my_io); + /// # client_fut.await + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self { + self.pending_accept_reset_stream_max = max; + self + } + /// Sets the maximum send buffer size per stream. 
/// /// Once a stream has buffered up to (or over) the maximum, the stream's @@ -1209,6 +1257,7 @@ where max_send_buffer_size: builder.max_send_buffer_size, reset_stream_duration: builder.reset_stream_duration, reset_stream_max: builder.reset_stream_max, + remote_reset_stream_max: builder.pending_accept_reset_stream_max, settings: builder.settings.clone(), }, ); diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 1fec23102..619973df8 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -14,8 +14,6 @@ use std::task::{Context, Poll}; use std::time::Duration; use tokio::io::{AsyncRead, AsyncWrite}; -const DEFAULT_MAX_REMOTE_RESET_STREAMS: usize = 20; - /// An H2 connection #[derive(Debug)] pub(crate) struct Connection @@ -82,6 +80,7 @@ pub(crate) struct Config { pub max_send_buffer_size: usize, pub reset_stream_duration: Duration, pub reset_stream_max: usize, + pub remote_reset_stream_max: usize, pub settings: frame::Settings, } @@ -120,7 +119,7 @@ where .unwrap_or(false), local_reset_duration: config.reset_stream_duration, local_reset_max: config.reset_stream_max, - remote_reset_max: DEFAULT_MAX_REMOTE_RESET_STREAMS, + remote_reset_max: config.remote_reset_stream_max, remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, remote_max_initiated: config .settings diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 5ec7bf992..d71ee9c42 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -31,6 +31,7 @@ pub type WindowSize = u32; // Constants pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; +pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff --git a/src/server.rs b/src/server.rs index 6f2455e0b..f1f4cf470 100644 --- a/src/server.rs +++ b/src/server.rs @@ -240,6 +240,10 @@ pub struct Builder { /// Maximum number of locally reset streams to keep at a time. reset_stream_max: usize, + /// Maximum number of remotely reset streams to allow in the pending + /// accept queue. + pending_accept_reset_stream_max: usize, + /// Initial `Settings` frame to send as part of the handshake. settings: Settings, @@ -642,6 +646,7 @@ impl Builder { Builder { reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, + pending_accept_reset_stream_max: proto::DEFAULT_REMOTE_RESET_STREAM_MAX, settings: Settings::default(), initial_target_connection_window_size: None, max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, @@ -882,6 +887,49 @@ impl Builder { self } + /// Sets the maximum number of pending-accept remotely-reset streams. + /// + /// Streams that have been received by the peer, but not accepted by the + /// user, can also receive a RST_STREAM. This is a legitimate pattern: one + /// could send a request and then shortly after, realize it is not needed, + /// sending a CANCEL. + /// + /// However, since those streams are now "closed", they don't count towards + /// the max concurrent streams. So, they will sit in the accept queue, + /// using memory. + /// + /// When the number of remotely-reset streams sitting in the pending-accept + /// queue reaches this maximum value, a connection error with the code of + /// `ENHANCE_YOUR_CALM` will be sent to the peer, and returned by the + /// `Future`. + /// + /// The default value is currently 20, but could change. 
+ /// + /// # Examples + /// + /// + /// ``` + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use h2::server::*; + /// # + /// # fn doc(my_io: T) + /// # -> Handshake + /// # { + /// // `server_fut` is a future representing the completion of the HTTP/2 + /// // handshake. + /// let server_fut = Builder::new() + /// .max_pending_accept_reset_streams(100) + /// .handshake(my_io); + /// # server_fut + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn max_pending_accept_reset_streams(&mut self, max: usize) -> &mut Self { + self.pending_accept_reset_stream_max = max; + self + } + /// Sets the maximum send buffer size per stream. /// /// Once a stream has buffered up to (or over) the maximum, the stream's @@ -1312,6 +1360,7 @@ where max_send_buffer_size: self.builder.max_send_buffer_size, reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, + remote_reset_stream_max: self.builder.pending_accept_reset_stream_max, settings: self.builder.settings.clone(), }, ); diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 610d3a530..5d86f7b47 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -200,6 +200,7 @@ async fn reset_streams_dont_grow_memory_continuously() { let (io, mut client) = mock::new(); const N: u32 = 50; + const MAX: usize = 20; let client = async move { let settings = client.assert_server_handshake().await; @@ -212,7 +213,7 @@ async fn reset_streams_dont_grow_memory_continuously() { } tokio::time::timeout( std::time::Duration::from_secs(1), - client.recv_frame(frames::go_away(41).calm()), + client.recv_frame(frames::go_away((MAX * 2 + 1) as u32).calm()), ) .await .expect("client goaway"); @@ -220,6 +221,7 @@ async fn reset_streams_dont_grow_memory_continuously() { let srv = async move { let mut srv = server::Builder::new() + .max_pending_accept_reset_streams(MAX) .handshake::<_, Bytes>(io) .await .expect("handshake"); From af4bcacf6d3770e9e3dc10fdc631fc8c0bdd472b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 13 Apr 2023 10:22:58 -0400 Subject: [PATCH 121/178] v0.3.17 --- CHANGELOG.md | 9 +++++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17abf81db..3c857790c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,12 @@ +# 0.3.17 (April 13, 2023) + +* Add `Error::is_library()` method to check if the originated inside `h2`. +* Add `max_pending_accept_reset_streams(usize)` option to client and server + builders. +* Fix theoretical memory growth when receiving too many HEADERS and then + RST_STREAM frames faster than an application can accept them off the queue. + (CVE-2023-26964) + # 0.3.16 (February 27, 2023) * Set `Protocol` extension on requests when received Extended CONNECT requests. diff --git a/Cargo.toml b/Cargo.toml index 64573e1f4..2dce90226 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.16" +version = "0.3.17" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 3af8b1a32..70321ab98 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.16")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.17")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 1c6fa285afe436ca2a1f8abd38a6389353f360b6 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 17 Apr 2023 14:08:02 -0400 Subject: [PATCH 122/178] fix: pending-accept remotely-reset streams pattern was checking is_local --- src/proto/streams/state.rs | 2 +- tests/h2-tests/tests/server.rs | 34 ++++++++++++++++++++++ tests/h2-tests/tests/stream_states.rs | 41 +++++++++++++++++++++++++++ 3 files changed, 76 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index b9612addc..76638fc87 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -362,7 +362,7 @@ impl State { pub fn is_remote_reset(&self) -> bool { match self.inner { - Closed(Cause::Error(ref e)) => e.is_local(), + Closed(Cause::Error(ref e)) => !e.is_local(), _ => false, } } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 8aea1fd59..c8c1c9d1c 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -879,6 +879,40 @@ async fn too_big_headers_sends_reset_after_431_if_not_eos() { join(client, srv).await; } +#[tokio::test] +async fn pending_accept_recv_illegal_content_length_data() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + client + .send_frame( + frames::headers(1) + .request("POST", "https://a.b") + .field("content-length", "1"), + ) + .await; + client + .send_frame(frames::data(1, &b"hello"[..]).eos()) + .await; + client.recv_frame(frames::reset(1).protocol_error()).await; + idle_ms(10).await; + }; + + let srv = async move { + let mut srv = server::Builder::new() + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let _req = srv.next().await.expect("req").expect("is_ok"); + }; + + join(client, srv).await; +} + #[tokio::test] async fn poll_reset() { h2_support::trace_init!(); diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 5d86f7b47..138328efa 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -235,6 +235,47 @@ async fn reset_streams_dont_grow_memory_continuously() { join(srv, client).await; } +#[tokio::test] +async fn pending_accept_reset_streams_decrement_too() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + // If it didn't decrement internally, this would eventually get + // the count over MAX. 
+ const M: usize = 2; + const N: usize = 5; + const MAX: usize = 6; + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + let mut id = 1; + for _ in 0..M { + for _ in 0..N { + client + .send_frame(frames::headers(id).request("GET", "https://a.b/").eos()) + .await; + client.send_frame(frames::reset(id).protocol_error()).await; + id += 2; + } + tokio::time::sleep(std::time::Duration::from_millis(50)).await; + } + }; + + let srv = async move { + let mut srv = server::Builder::new() + .max_pending_accept_reset_streams(MAX) + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + while let Some(Ok(_)) = srv.accept().await {} + + poll_fn(|cx| srv.poll_closed(cx)).await.expect("server"); + }; + join(srv, client).await; +} + #[tokio::test] async fn errors_if_recv_frame_exceeds_max_frame_size() { h2_support::trace_init!(); From 1b9f0704ff24d5f7939d16162082c5a764a0bfaa Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 17 Apr 2023 14:40:41 -0400 Subject: [PATCH 123/178] v0.3.18 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c857790c..31852daff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.3.18 (April 17, 2023) + +* Fix panic because of opposite check in `is_remote_local()`. + # 0.3.17 (April 13, 2023) * Add `Error::is_library()` method to check if the originated inside `h2`. diff --git a/Cargo.toml b/Cargo.toml index 2dce90226..767961d0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.17" +version = "0.3.18" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 70321ab98..420e0fee1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.17")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.18")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 072f7ee918d4d155e04320bd1785cd3f5fe6583d Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 14 Apr 2023 19:20:38 +0200 Subject: [PATCH 124/178] Serialize debug_data when present in GOAWAY frames --- src/frame/go_away.rs | 13 +++++++++++-- src/proto/connection.rs | 23 ++++++++++++++++++++++ src/proto/go_away.rs | 4 ---- src/server.rs | 6 ++++++ tests/h2-support/src/frames.rs | 7 +++++++ tests/h2-tests/tests/server.rs | 35 ++++++++++++++++++++++++++++++++++ 6 files changed, 82 insertions(+), 6 deletions(-) diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs index 91d9c4c6b..4ab28d514 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -8,7 +8,6 @@ use crate::frame::{self, Error, Head, Kind, Reason, StreamId}; pub struct GoAway { last_stream_id: StreamId, error_code: Reason, - #[allow(unused)] debug_data: Bytes, } @@ -21,6 +20,15 @@ impl GoAway { } } + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn with_debug_data(self, debug_data: impl Into) -> Self { + Self { + debug_data: debug_data.into(), + ..self + } + } + pub fn last_stream_id(&self) -> StreamId { self.last_stream_id } @@ -52,9 +60,10 @@ impl GoAway { pub fn encode(&self, dst: &mut B) { tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code); let head = Head::new(Kind::GoAway, 0, StreamId::zero()); - head.encode(8, dst); + head.encode(8 + self.debug_data.len(), dst); dst.put_u32(self.last_stream_id.into()); dst.put_u32(self.error_code.into()); + dst.put(self.debug_data.slice(..)); } } diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 619973df8..7ea124e44 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -398,6 +398,18 @@ where self.go_away.go_away_now(frame); } + #[doc(hidden)] + #[cfg(feature = "unstable")] + fn go_away_now_debug_data(&mut self) { + let last_processed_id = self.streams.last_processed_id(); + + let frame = frame::GoAway::new(last_processed_id, Reason::NO_ERROR) + .with_debug_data("something went wrong"); + + self.streams.send_go_away(last_processed_id); + self.go_away.go_away(frame); + } + fn go_away_from_user(&mut self, e: Reason) { let last_processed_id = self.streams.last_processed_id(); let frame = frame::GoAway::new(last_processed_id, e); @@ -576,6 +588,17 @@ where // for a pong before proceeding. self.inner.ping_pong.ping_shutdown(); } + + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn go_away_debug_data(&mut self) { + if self.inner.go_away.is_going_away() { + return; + } + + self.inner.as_dyn().go_away_now_debug_data(); + self.inner.ping_pong.ping_shutdown(); + } } impl Drop for Connection diff --git a/src/proto/go_away.rs b/src/proto/go_away.rs index 759427878..d52252cd7 100644 --- a/src/proto/go_away.rs +++ b/src/proto/go_away.rs @@ -26,10 +26,6 @@ pub(super) struct GoAway { /// were a `frame::GoAway`, it might appear like we eventually wanted to /// serialize it. We **only** want to be able to look up these fields at a /// later time. -/// -/// (Technically, `frame::GoAway` should gain an opaque_debug_data field as -/// well, and we wouldn't want to save that here to accidentally dump in logs, -/// or waste struct space.) 
#[derive(Debug)] pub(crate) struct GoingAway { /// Stores the highest stream ID of a GOAWAY that has been sent. diff --git a/src/server.rs b/src/server.rs index f1f4cf470..032c0d17a 100644 --- a/src/server.rs +++ b/src/server.rs @@ -544,6 +544,12 @@ where self.connection.go_away_gracefully(); } + #[doc(hidden)] + #[cfg(feature = "unstable")] + pub fn debug_data_shutdown(&mut self) { + self.connection.go_away_debug_data(); + } + /// Takes a `PingPong` instance from the connection. /// /// # Note diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index bc4e2e708..4ee20dd77 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -305,6 +305,13 @@ impl Mock { self.reason(frame::Reason::NO_ERROR) } + pub fn data(self, debug_data: I) -> Self + where + I: Into, + { + Mock(self.0.with_debug_data(debug_data.into())) + } + pub fn reason(self, reason: frame::Reason) -> Self { Mock(frame::GoAway::new(self.0.last_stream_id(), reason)) } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index c8c1c9d1c..78f4891a1 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -705,6 +705,41 @@ async fn graceful_shutdown() { join(client, srv).await; } +#[tokio::test] +async fn go_away_sends_debug_data() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + client + .send_frame(frames::headers(1).request("POST", "https://example.com/")) + .await; + client + .recv_frame(frames::go_away(1).no_error().data("something went wrong")) + .await; + }; + + let src = async move { + let mut srv = server::handshake(io).await.expect("handshake"); + let (_req, _tx) = srv.next().await.unwrap().expect("server receives request"); + + srv.debug_data_shutdown(); + + let srv_fut = async move { + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + srv_fut.await + }; + + join(client, src).await; +} + #[tokio::test] async fn goaway_even_if_client_sent_goaway() { h2_support::trace_init!(); From b0e5470ae557845e5b1f5c304f59f21b1f47f5d8 Mon Sep 17 00:00:00 2001 From: Martijn Gribnau Date: Fri, 28 Apr 2023 01:44:00 +0200 Subject: [PATCH 125/178] Fix markdown code element in error::is_library --- src/error.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/error.rs b/src/error.rs index 1b1438e48..eb2b2acbc 100644 --- a/src/error.rs +++ b/src/error.rs @@ -104,7 +104,7 @@ impl Error { ) } - /// Returns true if the error was created by `h2. + /// Returns true if the error was created by `h2`. /// /// Such as noticing some protocol error and sending a GOAWAY or RST_STREAM. 
pub fn is_library(&self) -> bool { From 70eade5b4bf70c3c117ffc21b14bdfbd9a308567 Mon Sep 17 00:00:00 2001 From: Rasmus Larsen Date: Fri, 28 Apr 2023 16:25:56 +0200 Subject: [PATCH 126/178] Add too_many_resets debug_data to the GOAWAY we send (#678) Closes hyperium/hyper#3211 --- src/frame/go_away.rs | 9 +++---- src/proto/connection.rs | 27 ++++----------------- src/proto/error.rs | 4 +++ src/proto/streams/recv.rs | 5 +++- src/server.rs | 6 ----- tests/h2-support/src/frames.rs | 12 +++++++-- tests/h2-tests/tests/server.rs | 35 --------------------------- tests/h2-tests/tests/stream_states.rs | 7 +++++- 8 files changed, 33 insertions(+), 72 deletions(-) diff --git a/src/frame/go_away.rs b/src/frame/go_away.rs index 4ab28d514..99330e981 100644 --- a/src/frame/go_away.rs +++ b/src/frame/go_away.rs @@ -20,12 +20,11 @@ impl GoAway { } } - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn with_debug_data(self, debug_data: impl Into) -> Self { + pub fn with_debug_data(last_stream_id: StreamId, reason: Reason, debug_data: Bytes) -> Self { Self { - debug_data: debug_data.into(), - ..self + last_stream_id, + error_code: reason, + debug_data, } } diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 7ea124e44..727643a65 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -398,16 +398,10 @@ where self.go_away.go_away_now(frame); } - #[doc(hidden)] - #[cfg(feature = "unstable")] - fn go_away_now_debug_data(&mut self) { + fn go_away_now_data(&mut self, e: Reason, data: Bytes) { let last_processed_id = self.streams.last_processed_id(); - - let frame = frame::GoAway::new(last_processed_id, Reason::NO_ERROR) - .with_debug_data("something went wrong"); - - self.streams.send_go_away(last_processed_id); - self.go_away.go_away(frame); + let frame = frame::GoAway::with_debug_data(last_processed_id, e, data); + self.go_away.go_away_now(frame); } fn go_away_from_user(&mut self, e: Reason) { @@ -430,7 +424,7 @@ where // error. This is handled by setting a GOAWAY frame followed by // terminating the connection. Err(Error::GoAway(debug_data, reason, initiator)) => { - let e = Error::GoAway(debug_data, reason, initiator); + let e = Error::GoAway(debug_data.clone(), reason, initiator); tracing::debug!(error = ?e, "Connection::poll; connection error"); // We may have already sent a GOAWAY for this error, @@ -447,7 +441,7 @@ where // Reset all active streams self.streams.handle_error(e); - self.go_away_now(reason); + self.go_away_now_data(reason, debug_data); Ok(()) } // Attempting to read a frame resulted in a stream level error. @@ -588,17 +582,6 @@ where // for a pong before proceeding. 
self.inner.ping_pong.ping_shutdown(); } - - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn go_away_debug_data(&mut self) { - if self.inner.go_away.is_going_away() { - return; - } - - self.inner.as_dyn().go_away_now_debug_data(); - self.inner.ping_pong.ping_shutdown(); - } } impl Drop for Connection diff --git a/src/proto/error.rs b/src/proto/error.rs index 2c00c7ea6..ad023317e 100644 --- a/src/proto/error.rs +++ b/src/proto/error.rs @@ -40,6 +40,10 @@ impl Error { Self::GoAway(Bytes::new(), reason, Initiator::Library) } + pub(crate) fn library_go_away_data(reason: Reason, debug_data: impl Into) -> Self { + Self::GoAway(debug_data.into(), reason, Initiator::Library) + } + pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self { Self::Reset(stream_id, reason, Initiator::Remote) } diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 0fe2bdd57..cfc357082 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -763,7 +763,10 @@ impl Recv { "recv_reset; remotely-reset pending-accept streams reached limit ({:?})", counts.max_remote_reset_streams(), ); - return Err(Error::library_go_away(Reason::ENHANCE_YOUR_CALM)); + return Err(Error::library_go_away_data( + Reason::ENHANCE_YOUR_CALM, + "too_many_resets", + )); } } diff --git a/src/server.rs b/src/server.rs index 032c0d17a..f1f4cf470 100644 --- a/src/server.rs +++ b/src/server.rs @@ -544,12 +544,6 @@ where self.connection.go_away_gracefully(); } - #[doc(hidden)] - #[cfg(feature = "unstable")] - pub fn debug_data_shutdown(&mut self) { - self.connection.go_away_debug_data(); - } - /// Takes a `PingPong` instance from the connection. /// /// # Note diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index 4ee20dd77..d302d3ce5 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -309,11 +309,19 @@ impl Mock { where I: Into, { - Mock(self.0.with_debug_data(debug_data.into())) + Mock(frame::GoAway::with_debug_data( + self.0.last_stream_id(), + self.0.reason(), + debug_data.into(), + )) } pub fn reason(self, reason: frame::Reason) -> Self { - Mock(frame::GoAway::new(self.0.last_stream_id(), reason)) + Mock(frame::GoAway::with_debug_data( + self.0.last_stream_id(), + reason, + self.0.debug_data().clone(), + )) } } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 78f4891a1..c8c1c9d1c 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -705,41 +705,6 @@ async fn graceful_shutdown() { join(client, srv).await; } -#[tokio::test] -async fn go_away_sends_debug_data() { - h2_support::trace_init!(); - - let (io, mut client) = mock::new(); - - let client = async move { - let settings = client.assert_server_handshake().await; - assert_default_settings!(settings); - client - .send_frame(frames::headers(1).request("POST", "https://example.com/")) - .await; - client - .recv_frame(frames::go_away(1).no_error().data("something went wrong")) - .await; - }; - - let src = async move { - let mut srv = server::handshake(io).await.expect("handshake"); - let (_req, _tx) = srv.next().await.unwrap().expect("server receives request"); - - srv.debug_data_shutdown(); - - let srv_fut = async move { - poll_fn(move |cx| srv.poll_closed(cx)) - .await - .expect("server"); - }; - - srv_fut.await - }; - - join(client, src).await; -} - #[tokio::test] async fn goaway_even_if_client_sent_goaway() { h2_support::trace_init!(); diff --git a/tests/h2-tests/tests/stream_states.rs 
b/tests/h2-tests/tests/stream_states.rs index 138328efa..c28066d2c 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -211,9 +211,14 @@ async fn reset_streams_dont_grow_memory_continuously() { .await; client.send_frame(frames::reset(n).protocol_error()).await; } + tokio::time::timeout( std::time::Duration::from_secs(1), - client.recv_frame(frames::go_away((MAX * 2 + 1) as u32).calm()), + client.recv_frame( + frames::go_away((MAX * 2 + 1) as u32) + .data("too_many_resets") + .calm(), + ), ) .await .expect("client goaway"); From 7a77f93ca3a6fea028267a9066c8b976c49203b5 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Wed, 10 May 2023 09:58:02 +0200 Subject: [PATCH 127/178] Rename is_local_reset to is_local_error It also returns true for I/O errors and local GO_AWAYs. --- src/proto/streams/recv.rs | 4 ++-- src/proto/streams/state.rs | 2 +- src/proto/streams/streams.rs | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index cfc357082..8c7267a9d 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -537,7 +537,7 @@ impl Recv { let sz = sz as WindowSize; - let is_ignoring_frame = stream.state.is_local_reset(); + let is_ignoring_frame = stream.state.is_local_error(); if !is_ignoring_frame && !stream.state.is_recv_streaming() { // TODO: There are cases where this can be a stream error of @@ -853,7 +853,7 @@ impl Recv { /// Add a locally reset stream to queue to be eventually reaped. pub fn enqueue_reset_expiration(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { - if !stream.state.is_local_reset() || stream.is_pending_reset_expiration() { + if !stream.state.is_local_error() || stream.is_pending_reset_expiration() { return; } diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 76638fc87..72edbae77 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -352,7 +352,7 @@ impl State { matches!(self.inner, Closed(Cause::ScheduledLibraryReset(..))) } - pub fn is_local_reset(&self) -> bool { + pub fn is_local_error(&self) -> bool { match self.inner { Closed(Cause::Error(ref e)) => e.is_local(), Closed(Cause::ScheduledLibraryReset(..)) => true, diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index dbaebfa7a..dfc5c768b 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -448,7 +448,7 @@ impl Inner { let stream = self.store.resolve(key); - if stream.state.is_local_reset() { + if stream.state.is_local_error() { // Locally reset streams must ignore frames "for some time". // This is because the remote may have sent trailers before // receiving the RST_STREAM frame. From 3d558a6ed0b2cbaeb11805a7e6ecd53e38b508d6 Mon Sep 17 00:00:00 2001 From: Anthony Ramine Date: Wed, 10 May 2023 10:00:39 +0200 Subject: [PATCH 128/178] Ignore Error::GoAway in State::is_remote_reset When Streams::recv_go_away is called, Recv::handle_error is called on every stream whose stream id is past the GO_AWAY's last stream id, and those streams may have been pending-accepting. If the stream had not encountered an error before, Recv::handle_error then sets its state to State::Closed(Error::GoAway(_, _, Initiator::Remote)) which makes State::is_remote_reset return true in Streams::next_incoming, which leads to Counts::dec_remote_reset_streams being called even though Counts::inc_remote_reset_streams was never called for that stream, causing a panic about the counter being 0. 
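To make the counter mismatch concrete, here is a minimal standalone sketch (the enums and functions below are simplified stand-ins for the h2 internals, not the real types): the old check treated any closure that was not locally initiated as a "remote reset", so a pending-accept stream torn down by a remote GOAWAY would be decremented from a counter that was never incremented for it, while the new check only matches a genuine remote RST_STREAM.

```rust
#[derive(Debug, Clone, Copy, PartialEq)]
enum Initiator {
    Library,
    Remote,
}

#[derive(Debug, Clone, Copy)]
enum Cause {
    /// Closed by a RST_STREAM frame.
    Reset(Initiator),
    /// Closed because a GOAWAY covered this stream's id.
    GoAway(Initiator),
}

/// Old check: "remotely reset" meant "any error that was not locally initiated".
fn is_remote_reset_old(cause: &Cause) -> bool {
    match cause {
        Cause::Reset(i) | Cause::GoAway(i) => *i == Initiator::Remote,
    }
}

/// New check: only an actual RST_STREAM from the peer counts.
fn is_remote_reset_new(cause: &Cause) -> bool {
    matches!(cause, Cause::Reset(Initiator::Remote))
}

fn main() {
    // A pending-accept stream torn down by a remote GOAWAY: the remote-reset
    // counter was never incremented for it, so decrementing it on accept
    // would hit the "counter is 0" panic described above.
    let closed_by_goaway = Cause::GoAway(Initiator::Remote);
    assert!(is_remote_reset_old(&closed_by_goaway)); // old behavior: would decrement
    assert!(!is_remote_reset_new(&closed_by_goaway)); // fixed: counter untouched

    // A genuine remote RST_STREAM is still recognized.
    let reset_by_peer = Cause::Reset(Initiator::Remote);
    assert!(is_remote_reset_old(&reset_by_peer));
    assert!(is_remote_reset_new(&reset_by_peer));

    // A locally initiated reset never counted as remote under either check.
    let reset_locally = Cause::Reset(Initiator::Library);
    assert!(!is_remote_reset_old(&reset_locally));
    assert!(!is_remote_reset_new(&reset_locally));
}
```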
--- src/proto/streams/state.rs | 2 +- tests/h2-tests/tests/stream_states.rs | 47 +++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 1 deletion(-) diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 72edbae77..6f89b34c5 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -362,7 +362,7 @@ impl State { pub fn is_remote_reset(&self) -> bool { match self.inner { - Closed(Cause::Error(ref e)) => !e.is_local(), + Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) => true, _ => false, } } diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index c28066d2c..423129630 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -240,6 +240,53 @@ async fn reset_streams_dont_grow_memory_continuously() { join(srv, client).await; } +#[tokio::test] +async fn go_away_with_pending_accepting() { + // h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let (sent_go_away_tx, sent_go_away_rx) = oneshot::channel(); + let (recv_go_away_tx, recv_go_away_rx) = oneshot::channel(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_default_settings!(settings); + + client + .send_frame(frames::headers(1).request("GET", "https://baguette/").eos()) + .await; + + client + .send_frame(frames::headers(3).request("GET", "https://campagne/").eos()) + .await; + client.send_frame(frames::go_away(1).protocol_error()).await; + + sent_go_away_tx.send(()).unwrap(); + + recv_go_away_rx.await.unwrap(); + }; + + let srv = async move { + let mut srv = server::Builder::new() + .max_pending_accept_reset_streams(1) + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let (_req_1, _send_response_1) = srv.accept().await.unwrap().unwrap(); + + poll_fn(|cx| srv.poll_closed(cx)) + .drive(sent_go_away_rx) + .await + .unwrap(); + + let (_req_2, _send_response_2) = srv.accept().await.unwrap().unwrap(); + + recv_go_away_tx.send(()).unwrap(); + }; + join(srv, client).await; +} + #[tokio::test] async fn pending_accept_reset_streams_decrement_too() { h2_support::trace_init!(); From f126229cf436b3609236582d80a5c25cc944dd4b Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 12 May 2023 14:38:36 -0400 Subject: [PATCH 129/178] v0.3.19 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31852daff..875cc70b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.3.19 (May 12, 2023) + +* Fix counting reset streams when triggered by a GOAWAY. +* Send `too_many_resets` in opaque debug data of GOAWAY when too many resets received. + # 0.3.18 (April 17, 2023) * Fix panic because of opposite check in `is_remote_local()`. diff --git a/Cargo.toml b/Cargo.toml index 767961d0a..747ae7638 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.18" +version = "0.3.19" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 420e0fee1..830147113 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.18")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.19")] #![deny(missing_debug_implementations, missing_docs)] #![cfg_attr(test, deny(warnings))] #![allow(clippy::type_complexity, clippy::manual_range_contains)] From 04e6398bfe0cd9cb9590bc198c0921ac6441aea9 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Tue, 23 May 2023 16:57:04 -0400 Subject: [PATCH 130/178] fix: panicked when a reset stream would decrement twice --- src/proto/streams/recv.rs | 9 --------- tests/h2-tests/tests/stream_states.rs | 12 ++++++------ 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 8c7267a9d..ec4db1c79 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -859,15 +859,6 @@ impl Recv { tracing::trace!("enqueue_reset_expiration; {:?}", stream.id); - if !counts.can_inc_num_reset_streams() { - // try to evict 1 stream if possible - // if max allow is 0, this won't be able to evict, - // and then we'll just bail after - if let Some(evicted) = self.pending_reset_expired.pop(stream.store_mut()) { - counts.transition_after(evicted, true); - } - } - if counts.can_inc_num_reset_streams() { counts.inc_num_reset_streams(); self.pending_reset_expired.push(stream); diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 423129630..16d113132 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -750,14 +750,14 @@ async fn rst_stream_max() { srv.recv_frame(frames::reset(1).cancel()).await; srv.recv_frame(frames::reset(3).cancel()).await; // sending frame after canceled! - // newer streams trump older streams - // 3 is still being ignored - srv.send_frame(frames::data(3, vec![0; 16]).eos()).await; + // olders streams trump newer streams + // 1 is still being ignored + srv.send_frame(frames::data(1, vec![0; 16]).eos()).await; // ping pong to be sure of no goaway srv.ping_pong([1; 8]).await; - // 1 has been evicted, will get a reset - srv.send_frame(frames::data(1, vec![0; 16]).eos()).await; - srv.recv_frame(frames::reset(1).stream_closed()).await; + // 3 has been evicted, will get a reset + srv.send_frame(frames::data(3, vec![0; 16]).eos()).await; + srv.recv_frame(frames::reset(3).stream_closed()).await; }; let client = async move { From 66c36c4edb04d8f75ca66b9199546308fe089c0d Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Tue, 23 May 2023 15:58:30 +0000 Subject: [PATCH 131/178] fix panic on receiving invalid headers frame by making the `take_request` function return a Result Signed-off-by: Michael Rodler Reviewed-by: Daniele Ahmed --- src/proto/streams/recv.rs | 11 ++++++----- src/proto/streams/streams.rs | 2 +- src/server.rs | 17 ++++++++++++----- tests/h2-tests/tests/server.rs | 32 ++++++++++++++++++++++++++++++++ 4 files changed, 51 insertions(+), 11 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index ec4db1c79..cd96dce2c 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -251,14 +251,15 @@ impl Recv { } /// Called by the server to get the request - /// - /// TODO: Should this fn return `Result`? 
- pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> { + pub fn take_request(&mut self, stream: &mut store::Ptr) -> Result, proto::Error> { use super::peer::PollMessage::*; match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Server(request))) => request, - _ => panic!(), + Some(Event::Headers(Server(request))) => Ok(request), + _ => { + proto_err!(stream: "received invalid request; stream={:?}", stream.id); + Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)) + } } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index dfc5c768b..d64e00970 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1178,7 +1178,7 @@ impl StreamRef { /// # Panics /// /// This function panics if the request isn't present. - pub fn take_request(&self) -> Request<()> { + pub fn take_request(&self) -> Result, proto::Error> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; diff --git a/src/server.rs b/src/server.rs index f1f4cf470..148cad517 100644 --- a/src/server.rs +++ b/src/server.rs @@ -425,13 +425,20 @@ where if let Some(inner) = self.connection.next_incoming() { tracing::trace!("received incoming"); - let (head, _) = inner.take_request().into_parts(); - let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); + match inner.take_request() { + Ok(req) => { + let (head, _) = req.into_parts(); + let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); - let request = Request::from_parts(head, body); - let respond = SendResponse { inner }; + let request = Request::from_parts(head, body); + let respond = SendResponse { inner }; - return Poll::Ready(Some(Ok((request, respond)))); + return Poll::Ready(Some(Ok((request, respond)))); + } + Err(e) => { + return Poll::Ready(Some(Err(e.into()))); + } + } } Poll::Pending diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index c8c1c9d1c..2637011ff 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1378,3 +1378,35 @@ async fn reject_non_authority_target_on_connect_request() { join(client, srv).await; } + +#[tokio::test] +async fn reject_response_headers_in_request() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + + let client = async move { + let _ = client.assert_server_handshake().await; + + client.send_frame(frames::headers(1).response(128)).await; + + // TODO: is CANCEL the right error code to expect here? 
+ client.recv_frame(frames::reset(1).cancel()).await; + }; + + let srv = async move { + let builder = server::Builder::new(); + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + let res = srv.next().await; + tracing::warn!("{:?}", res); + assert!(res.is_some()); + assert!(res.unwrap().is_err()); + + poll_fn(move |cx| srv.poll_closed(cx)) + .await + .expect("server"); + }; + + join(client, srv).await; +} From 97bc3e36cf299e4e064653ced3352fc82a9cea70 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Thu, 8 Jun 2023 19:19:55 +0200 Subject: [PATCH 132/178] hammer test requires a new tokio feature Signed-off-by: Michael Rodler Reviewed-by: Daniele Ahmed --- tests/h2-tests/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/h2-tests/Cargo.toml b/tests/h2-tests/Cargo.toml index 33436f3c4..6afdf9053 100644 --- a/tests/h2-tests/Cargo.toml +++ b/tests/h2-tests/Cargo.toml @@ -11,4 +11,4 @@ edition = "2018" h2-support = { path = "../h2-support" } tracing = "0.1.13" futures = { version = "0.3", default-features = false, features = ["alloc"] } -tokio = { version = "1", features = ["macros", "net", "rt", "io-util"] } +tokio = { version = "1", features = ["macros", "net", "rt", "io-util", "rt-multi-thread"] } From 972fb6f19ff195f9ea3920b40c862c60b898e791 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 12 Jun 2023 13:49:01 -0400 Subject: [PATCH 133/178] chore: add funding file --- .github/FUNDING.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000..00642f837 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: seanmonstar From 864430c5dd453b70c29bb3d058e81876858380f4 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Fri, 9 Jun 2023 17:07:18 +0000 Subject: [PATCH 134/178] Enabled clippy in CI and ran `clippy --fix` Signed-off-by: Michael Rodler --- .github/workflows/CI.yml | 7 +++++++ src/frame/data.rs | 2 +- src/hpack/table.rs | 6 +++--- src/hpack/test/fixture.rs | 2 +- src/lib.rs | 9 +++++++-- src/proto/streams/state.rs | 8 ++++---- 6 files changed, 23 insertions(+), 11 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 2cff15cff..e90c68af7 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -65,6 +65,13 @@ jobs: run: cargo clean; cargo update -Zminimal-versions; cargo check if: matrix.rust == 'nightly' + clippy_check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Run Clippy + run: cargo clippy --all-targets --all-features + msrv: name: Check MSRV needs: [style] diff --git a/src/frame/data.rs b/src/frame/data.rs index d0cdf5f69..5ed3c31b5 100644 --- a/src/frame/data.rs +++ b/src/frame/data.rs @@ -148,7 +148,7 @@ impl Data { /// /// Panics if `dst` cannot contain the data frame. 
pub(crate) fn encode_chunk(&mut self, dst: &mut U) { - let len = self.data.remaining() as usize; + let len = self.data.remaining(); assert!(dst.remaining_mut() >= len); diff --git a/src/hpack/table.rs b/src/hpack/table.rs index a1a780451..3e45f413b 100644 --- a/src/hpack/table.rs +++ b/src/hpack/table.rs @@ -319,7 +319,7 @@ impl Table { let mut probe = probe + 1; probe_loop!(probe < self.indices.len(), { - let pos = &mut self.indices[probe as usize]; + let pos = &mut self.indices[probe]; prev = match mem::replace(pos, Some(prev)) { Some(p) => p, @@ -656,12 +656,12 @@ fn to_raw_capacity(n: usize) -> usize { #[inline] fn desired_pos(mask: usize, hash: HashValue) -> usize { - (hash.0 & mask) as usize + hash.0 & mask } #[inline] fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { - current.wrapping_sub(desired_pos(mask, hash)) & mask as usize + current.wrapping_sub(desired_pos(mask, hash)) & mask } fn hash_header(header: &Header) -> HashValue { diff --git a/src/hpack/test/fixture.rs b/src/hpack/test/fixture.rs index 0d33ca2de..d3f76e3bf 100644 --- a/src/hpack/test/fixture.rs +++ b/src/hpack/test/fixture.rs @@ -100,7 +100,7 @@ fn test_story(story: Value) { let mut input: Vec<_> = case .expect .iter() - .map(|&(ref name, ref value)| { + .map(|(name, value)| { Header::new(name.clone().into(), value.clone().into()) .unwrap() .into() diff --git a/src/lib.rs b/src/lib.rs index 830147113..7975ea8c2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -79,9 +79,14 @@ //! [`client::handshake`]: client/fn.handshake.html #![doc(html_root_url = "https://docs.rs/h2/0.3.19")] -#![deny(missing_debug_implementations, missing_docs)] -#![cfg_attr(test, deny(warnings))] +#![deny( + missing_debug_implementations, + missing_docs, + clippy::missing_safety_doc, + clippy::undocumented_unsafe_blocks +)] #![allow(clippy::type_complexity, clippy::manual_range_contains)] +#![cfg_attr(test, deny(warnings))] macro_rules! proto_err { (conn: $($msg:tt)+) => { diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 6f89b34c5..1ca8f5afb 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -361,10 +361,10 @@ impl State { } pub fn is_remote_reset(&self) -> bool { - match self.inner { - Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) => true, - _ => false, - } + matches!( + self.inner, + Closed(Cause::Error(Error::Reset(_, _, Initiator::Remote))) + ) } /// Returns true if the stream is already reset. From 478f7b9889e9d8d53756558cca45cebd68aeaea0 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Thu, 22 Jun 2023 18:09:52 +0200 Subject: [PATCH 135/178] Fix for invalid header panic corrected (#695) * Revert "fix panic on receiving invalid headers frame by making the `take_request` function return a Result" This reverts commit 66c36c4edb04d8f75ca66b9199546308fe089c0d. 
* proper fix for the panic in server receiving a request with a :status pseudo-header in the informational range of status codes --------- Signed-off-by: Michael Rodler Co-authored-by: Michael Rodler Co-authored-by: Daniele Ahmed --- src/proto/streams/recv.rs | 31 ++++++++++++++++++++----------- src/proto/streams/streams.rs | 2 +- src/server.rs | 17 +++++------------ tests/h2-tests/tests/server.rs | 19 ++++++++++--------- 4 files changed, 36 insertions(+), 33 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index cd96dce2c..98de1bfa7 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -229,6 +229,11 @@ impl Recv { return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); } + if pseudo.status.is_some() && counts.peer().is_server() { + proto_err!(stream: "cannot use :status header for requests; stream={:?}", stream.id); + return Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR).into()); + } + if !pseudo.is_informational() { let message = counts .peer() @@ -239,27 +244,31 @@ impl Recv { .pending_recv .push_back(&mut self.buffer, Event::Headers(message)); stream.notify_recv(); - } - // Only servers can receive a headers frame that initiates the stream. - // This is verified in `Streams` before calling this function. - if counts.peer().is_server() { - self.pending_accept.push(stream); + // Only servers can receive a headers frame that initiates the stream. + // This is verified in `Streams` before calling this function. + if counts.peer().is_server() { + // Correctness: never push a stream to `pending_accept` without having the + // corresponding headers frame pushed to `stream.pending_recv`. + self.pending_accept.push(stream); + } } Ok(()) } /// Called by the server to get the request - pub fn take_request(&mut self, stream: &mut store::Ptr) -> Result, proto::Error> { + /// + /// # Panics + /// + /// Panics if `stream.pending_recv` has no `Event::Headers` queued. + /// + pub fn take_request(&mut self, stream: &mut store::Ptr) -> Request<()> { use super::peer::PollMessage::*; match stream.pending_recv.pop_front(&mut self.buffer) { - Some(Event::Headers(Server(request))) => Ok(request), - _ => { - proto_err!(stream: "received invalid request; stream={:?}", stream.id); - Err(Error::library_reset(stream.id, Reason::PROTOCOL_ERROR)) - } + Some(Event::Headers(Server(request))) => request, + _ => unreachable!("server stream queue must start with Headers"), } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index d64e00970..dfc5c768b 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1178,7 +1178,7 @@ impl StreamRef { /// # Panics /// /// This function panics if the request isn't present. 
- pub fn take_request(&self) -> Result, proto::Error> { + pub fn take_request(&self) -> Request<()> { let mut me = self.opaque.inner.lock().unwrap(); let me = &mut *me; diff --git a/src/server.rs b/src/server.rs index 148cad517..f1f4cf470 100644 --- a/src/server.rs +++ b/src/server.rs @@ -425,20 +425,13 @@ where if let Some(inner) = self.connection.next_incoming() { tracing::trace!("received incoming"); - match inner.take_request() { - Ok(req) => { - let (head, _) = req.into_parts(); - let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); + let (head, _) = inner.take_request().into_parts(); + let body = RecvStream::new(FlowControl::new(inner.clone_to_opaque())); - let request = Request::from_parts(head, body); - let respond = SendResponse { inner }; + let request = Request::from_parts(head, body); + let respond = SendResponse { inner }; - return Poll::Ready(Some(Ok((request, respond)))); - } - Err(e) => { - return Poll::Ready(Some(Err(e.into()))); - } - } + return Poll::Ready(Some(Ok((request, respond)))); } Poll::Pending diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 2637011ff..0d7bb61cc 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1380,7 +1380,7 @@ async fn reject_non_authority_target_on_connect_request() { } #[tokio::test] -async fn reject_response_headers_in_request() { +async fn reject_informational_status_header_in_request() { h2_support::trace_init!(); let (io, mut client) = mock::new(); @@ -1388,21 +1388,22 @@ async fn reject_response_headers_in_request() { let client = async move { let _ = client.assert_server_handshake().await; - client.send_frame(frames::headers(1).response(128)).await; + let status_code = 128; + assert!(StatusCode::from_u16(status_code) + .unwrap() + .is_informational()); - // TODO: is CANCEL the right error code to expect here? - client.recv_frame(frames::reset(1).cancel()).await; + client + .send_frame(frames::headers(1).response(status_code)) + .await; + + client.recv_frame(frames::reset(1).protocol_error()).await; }; let srv = async move { let builder = server::Builder::new(); let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); - let res = srv.next().await; - tracing::warn!("{:?}", res); - assert!(res.is_some()); - assert!(res.unwrap().is_err()); - poll_fn(move |cx| srv.poll_closed(cx)) .await .expect("server"); From 0189722fd64d3cb5acd9764fdb85bb9a95232ea8 Mon Sep 17 00:00:00 2001 From: Michael Rodler Date: Mon, 26 Jun 2023 14:40:03 +0200 Subject: [PATCH 136/178] Fix for a fuzzer-discovered integer underflow of the flow control window size (#692) Removed the SubAssign, etc. syntactic sugar functions and switched to return Result on over/underflow Whenever possible, switched to returning a library GoAway protocol error. Otherwise we check for over/underflow only with `debug_assert!`, assuming that those code paths do not over/underflow. 
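A minimal standalone sketch of the checked window arithmetic this patch switches to (the `Window` and error types below are simplified stand-ins, not the h2 internals): subtraction and addition go through `checked_sub`/`checked_add` and report over/underflow to the caller, instead of relying on the wrapping or panicking behavior of the removed `SubAssign`/`AddAssign` sugar.

```rust
type WindowSize = u32;

const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32

#[derive(Debug, Clone, Copy)]
struct Window(i32);

#[derive(Debug, PartialEq)]
struct FlowControlError;

impl Window {
    /// Shrink the window, reporting underflow instead of wrapping.
    fn decrease_by(&mut self, delta: WindowSize) -> Result<(), FlowControlError> {
        match self.0.checked_sub(delta as i32) {
            Some(v) => {
                self.0 = v;
                Ok(())
            }
            None => Err(FlowControlError),
        }
    }

    /// Grow the window, reporting overflow instead of wrapping.
    fn increase_by(&mut self, delta: WindowSize) -> Result<(), FlowControlError> {
        match self.0.checked_add(delta as i32) {
            Some(v) => {
                self.0 = v;
                Ok(())
            }
            None => Err(FlowControlError),
        }
    }
}

fn main() {
    // Deltas are assumed to be valid HTTP/2 window sizes (<= 2^31 - 1),
    // so the `as i32` casts above cannot change sign.
    let mut w = Window(0);

    // A single maximal decrement still fits in an i32...
    assert!(w.decrease_by(MAX_WINDOW_SIZE).is_ok());

    // ...but a second one would fall below i32::MIN. The old `-=` sugar would
    // panic in debug builds or silently wrap in release builds; the checked
    // version hands the caller an error it can map to a FLOW_CONTROL_ERROR
    // GOAWAY, as the patch does.
    assert_eq!(w.decrease_by(MAX_WINDOW_SIZE), Err(FlowControlError));

    // Same idea on the way up.
    let mut w = Window(i32::MAX - 1);
    assert_eq!(w.increase_by(2), Err(FlowControlError));
}
```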
Signed-off-by: Michael Rodler Signed-off-by: Daniele Ahmed Co-authored-by: Michael Rodler Co-authored-by: Daniele Ahmed --- src/proto/connection.rs | 4 +- src/proto/mod.rs | 2 +- src/proto/streams/flow_control.rs | 73 +++++++------- src/proto/streams/prioritize.rs | 32 +++++-- src/proto/streams/recv.rs | 49 +++++++--- src/proto/streams/send.rs | 26 ++++- src/proto/streams/stream.rs | 12 ++- src/proto/streams/streams.rs | 2 +- tests/h2-tests/tests/flow_control.rs | 136 +++++++++++++++++++++++++++ 9 files changed, 271 insertions(+), 65 deletions(-) diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 727643a65..637fac358 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -145,7 +145,9 @@ where /// connection flow control pub(crate) fn set_target_window_size(&mut self, size: WindowSize) { - self.inner.streams.set_target_connection_window_size(size); + let _res = self.inner.streams.set_target_connection_window_size(size); + // TODO: proper error handling + debug_assert!(_res.is_ok()); } /// Send a new SETTINGS frame with an updated initial window size. diff --git a/src/proto/mod.rs b/src/proto/mod.rs index d71ee9c42..567d03060 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -30,7 +30,7 @@ pub type PingPayload = [u8; 8]; pub type WindowSize = u32; // Constants -pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; +pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32 pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; diff --git a/src/proto/streams/flow_control.rs b/src/proto/streams/flow_control.rs index 73a7754db..57a935825 100644 --- a/src/proto/streams/flow_control.rs +++ b/src/proto/streams/flow_control.rs @@ -75,12 +75,12 @@ impl FlowControl { self.window_size > self.available } - pub fn claim_capacity(&mut self, capacity: WindowSize) { - self.available -= capacity; + pub fn claim_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> { + self.available.decrease_by(capacity) } - pub fn assign_capacity(&mut self, capacity: WindowSize) { - self.available += capacity; + pub fn assign_capacity(&mut self, capacity: WindowSize) -> Result<(), Reason> { + self.available.increase_by(capacity) } /// If a WINDOW_UPDATE frame should be sent, returns a positive number @@ -136,22 +136,23 @@ impl FlowControl { /// /// This is called after receiving a SETTINGS frame with a lower /// INITIAL_WINDOW_SIZE value. - pub fn dec_send_window(&mut self, sz: WindowSize) { + pub fn dec_send_window(&mut self, sz: WindowSize) -> Result<(), Reason> { tracing::trace!( "dec_window; sz={}; window={}, available={}", sz, self.window_size, self.available ); - // This should not be able to overflow `window_size` from the bottom. - self.window_size -= sz; + // ~~This should not be able to overflow `window_size` from the bottom.~~ wrong. it can. + self.window_size.decrease_by(sz)?; + Ok(()) } /// Decrement the recv-side window size. /// /// This is called after receiving a SETTINGS ACK frame with a lower /// INITIAL_WINDOW_SIZE value. - pub fn dec_recv_window(&mut self, sz: WindowSize) { + pub fn dec_recv_window(&mut self, sz: WindowSize) -> Result<(), Reason> { tracing::trace!( "dec_recv_window; sz={}; window={}, available={}", sz, @@ -159,13 +160,14 @@ impl FlowControl { self.available ); // This should not be able to overflow `window_size` from the bottom. 
- self.window_size -= sz; - self.available -= sz; + self.window_size.decrease_by(sz)?; + self.available.decrease_by(sz)?; + Ok(()) } /// Decrements the window reflecting data has actually been sent. The caller /// must ensure that the window has capacity. - pub fn send_data(&mut self, sz: WindowSize) { + pub fn send_data(&mut self, sz: WindowSize) -> Result<(), Reason> { tracing::trace!( "send_data; sz={}; window={}; available={}", sz, @@ -176,12 +178,13 @@ impl FlowControl { // If send size is zero it's meaningless to update flow control window if sz > 0 { // Ensure that the argument is correct - assert!(self.window_size >= sz as usize); + assert!(self.window_size.0 >= sz as i32); // Update values - self.window_size -= sz; - self.available -= sz; + self.window_size.decrease_by(sz)?; + self.available.decrease_by(sz)?; } + Ok(()) } } @@ -208,6 +211,29 @@ impl Window { assert!(self.0 >= 0, "negative Window"); self.0 as WindowSize } + + pub fn decrease_by(&mut self, other: WindowSize) -> Result<(), Reason> { + if let Some(v) = self.0.checked_sub(other as i32) { + self.0 = v; + Ok(()) + } else { + Err(Reason::FLOW_CONTROL_ERROR) + } + } + + pub fn increase_by(&mut self, other: WindowSize) -> Result<(), Reason> { + let other = self.add(other)?; + self.0 = other.0; + Ok(()) + } + + pub fn add(&self, other: WindowSize) -> Result { + if let Some(v) = self.0.checked_add(other as i32) { + Ok(Self(v)) + } else { + Err(Reason::FLOW_CONTROL_ERROR) + } + } } impl PartialEq for Window { @@ -230,25 +256,6 @@ impl PartialOrd for Window { } } -impl ::std::ops::SubAssign for Window { - fn sub_assign(&mut self, other: WindowSize) { - self.0 -= other as i32; - } -} - -impl ::std::ops::Add for Window { - type Output = Self; - fn add(self, other: WindowSize) -> Self::Output { - Window(self.0 + other as i32) - } -} - -impl ::std::ops::AddAssign for Window { - fn add_assign(&mut self, other: WindowSize) { - self.0 += other as i32; - } -} - impl fmt::Display for Window { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.0, f) diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 88204ddcc..35795fae4 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -87,7 +87,9 @@ impl Prioritize { flow.inc_window(config.remote_init_window_sz) .expect("invalid initial window size"); - flow.assign_capacity(config.remote_init_window_sz); + // TODO: proper error handling + let _res = flow.assign_capacity(config.remote_init_window_sz); + debug_assert!(_res.is_ok()); tracing::trace!("Prioritize::new; flow={:?}", flow); @@ -253,7 +255,9 @@ impl Prioritize { if available as usize > capacity { let diff = available - capacity as WindowSize; - stream.send_flow.claim_capacity(diff); + // TODO: proper error handling + let _res = stream.send_flow.claim_capacity(diff); + debug_assert!(_res.is_ok()); self.assign_connection_capacity(diff, stream, counts); } @@ -324,7 +328,9 @@ impl Prioritize { pub fn reclaim_all_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { let available = stream.send_flow.available().as_size(); if available > 0 { - stream.send_flow.claim_capacity(available); + // TODO: proper error handling + let _res = stream.send_flow.claim_capacity(available); + debug_assert!(_res.is_ok()); // Re-assign all capacity to the connection self.assign_connection_capacity(available, stream, counts); } @@ -337,7 +343,9 @@ impl Prioritize { if stream.requested_send_capacity as usize > stream.buffered_send_data { let reserved = 
stream.requested_send_capacity - stream.buffered_send_data as WindowSize; - stream.send_flow.claim_capacity(reserved); + // TODO: proper error handling + let _res = stream.send_flow.claim_capacity(reserved); + debug_assert!(_res.is_ok()); self.assign_connection_capacity(reserved, stream, counts); } } @@ -363,7 +371,9 @@ impl Prioritize { let span = tracing::trace_span!("assign_connection_capacity", inc); let _e = span.enter(); - self.flow.assign_capacity(inc); + // TODO: proper error handling + let _res = self.flow.assign_capacity(inc); + debug_assert!(_res.is_ok()); // Assign newly acquired capacity to streams pending capacity. while self.flow.available() > 0 { @@ -443,7 +453,9 @@ impl Prioritize { stream.assign_capacity(assign, self.max_buffer_size); // Claim the capacity from the connection - self.flow.claim_capacity(assign); + // TODO: proper error handling + let _res = self.flow.claim_capacity(assign); + debug_assert!(_res.is_ok()); } tracing::trace!( @@ -763,12 +775,16 @@ impl Prioritize { // Assign the capacity back to the connection that // was just consumed from the stream in the previous // line. - self.flow.assign_capacity(len); + // TODO: proper error handling + let _res = self.flow.assign_capacity(len); + debug_assert!(_res.is_ok()); }); let (eos, len) = tracing::trace_span!("updating connection flow") .in_scope(|| { - self.flow.send_data(len); + // TODO: proper error handling + let _res = self.flow.send_data(len); + debug_assert!(_res.is_ok()); // Wrap the frame's data payload to ensure that the // correct amount of data gets written. diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 98de1bfa7..d0db00dd8 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -90,7 +90,7 @@ impl Recv { // settings flow.inc_window(DEFAULT_INITIAL_WINDOW_SIZE) .expect("invalid initial remote window size"); - flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE); + flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE).unwrap(); Recv { init_window_sz: config.local_init_window_sz, @@ -363,7 +363,9 @@ impl Recv { self.in_flight_data -= capacity; // Assign capacity to connection - self.flow.assign_capacity(capacity); + // TODO: proper error handling + let _res = self.flow.assign_capacity(capacity); + debug_assert!(_res.is_ok()); if self.flow.unclaimed_capacity().is_some() { if let Some(task) = task.take() { @@ -391,7 +393,9 @@ impl Recv { stream.in_flight_recv_data -= capacity; // Assign capacity to stream - stream.recv_flow.assign_capacity(capacity); + // TODO: proper error handling + let _res = stream.recv_flow.assign_capacity(capacity); + debug_assert!(_res.is_ok()); if stream.recv_flow.unclaimed_capacity().is_some() { // Queue the stream for sending the WINDOW_UPDATE frame. @@ -437,7 +441,11 @@ impl Recv { /// /// The `task` is an optional parked task for the `Connection` that might /// be blocked on needing more window capacity. - pub fn set_target_connection_window(&mut self, target: WindowSize, task: &mut Option) { + pub fn set_target_connection_window( + &mut self, + target: WindowSize, + task: &mut Option, + ) -> Result<(), Reason> { tracing::trace!( "set_target_connection_window; target={}; available={}, reserved={}", target, @@ -450,11 +458,15 @@ impl Recv { // // Update the flow controller with the difference between the new // target and the current target. - let current = (self.flow.available() + self.in_flight_data).checked_size(); + let current = self + .flow + .available() + .add(self.in_flight_data)? 
+ .checked_size(); if target > current { - self.flow.assign_capacity(target - current); + self.flow.assign_capacity(target - current)?; } else { - self.flow.claim_capacity(current - target); + self.flow.claim_capacity(current - target)?; } // If changing the target capacity means we gained a bunch of capacity, @@ -465,6 +477,7 @@ impl Recv { task.wake(); } } + Ok(()) } pub(crate) fn apply_local_settings( @@ -504,9 +517,13 @@ impl Recv { let dec = old_sz - target; tracing::trace!("decrementing all windows; dec={}", dec); - store.for_each(|mut stream| { - stream.recv_flow.dec_recv_window(dec); - }) + store.try_for_each(|mut stream| { + stream + .recv_flow + .dec_recv_window(dec) + .map_err(proto::Error::library_go_away)?; + Ok::<_, proto::Error>(()) + })?; } Ordering::Greater => { // We must increase the (local) window on every open stream. @@ -519,7 +536,10 @@ impl Recv { .recv_flow .inc_window(inc) .map_err(proto::Error::library_go_away)?; - stream.recv_flow.assign_capacity(inc); + stream + .recv_flow + .assign_capacity(inc) + .map_err(proto::Error::library_go_away)?; Ok::<_, proto::Error>(()) })?; } @@ -626,7 +646,10 @@ impl Recv { } // Update stream level flow control - stream.recv_flow.send_data(sz); + stream + .recv_flow + .send_data(sz) + .map_err(proto::Error::library_go_away)?; // Track the data as in-flight stream.in_flight_recv_data += sz; @@ -667,7 +690,7 @@ impl Recv { } // Update connection level flow control - self.flow.send_data(sz); + self.flow.send_data(sz).map_err(Error::library_go_away)?; // Track the data as in-flight self.in_flight_data += sz; diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index 20aba38d4..dcb5225c7 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -4,7 +4,7 @@ use super::{ }; use crate::codec::UserError; use crate::frame::{self, Reason}; -use crate::proto::{Error, Initiator}; +use crate::proto::{self, Error, Initiator}; use bytes::Buf; use tokio::io::AsyncWrite; @@ -458,10 +458,21 @@ impl Send { tracing::trace!("decrementing all windows; dec={}", dec); let mut total_reclaimed = 0; - store.for_each(|mut stream| { + store.try_for_each(|mut stream| { let stream = &mut *stream; - stream.send_flow.dec_send_window(dec); + tracing::trace!( + "decrementing stream window; id={:?}; decr={}; flow={:?}", + stream.id, + dec, + stream.send_flow + ); + + // TODO: this decrement can underflow based on received frames! + stream + .send_flow + .dec_send_window(dec) + .map_err(proto::Error::library_go_away)?; // It's possible that decreasing the window causes // `window_size` (the stream-specific window) to fall below @@ -474,7 +485,10 @@ impl Send { let reclaimed = if available > window_size { // Drop down to `window_size`. let reclaim = available - window_size; - stream.send_flow.claim_capacity(reclaim); + stream + .send_flow + .claim_capacity(reclaim) + .map_err(proto::Error::library_go_away)?; total_reclaimed += reclaim; reclaim } else { @@ -492,7 +506,9 @@ impl Send { // TODO: Should this notify the producer when the capacity // of a stream is reduced? Maybe it should if the capacity // is reduced to zero, allowing the producer to stop work. 
- }); + + Ok::<_, proto::Error>(()) + })?; self.prioritize .assign_connection_capacity(total_reclaimed, store, counts); diff --git a/src/proto/streams/stream.rs b/src/proto/streams/stream.rs index 2888d744b..43e313647 100644 --- a/src/proto/streams/stream.rs +++ b/src/proto/streams/stream.rs @@ -146,7 +146,9 @@ impl Stream { recv_flow .inc_window(init_recv_window) .expect("invalid initial receive window"); - recv_flow.assign_capacity(init_recv_window); + // TODO: proper error handling? + let _res = recv_flow.assign_capacity(init_recv_window); + debug_assert!(_res.is_ok()); send_flow .inc_window(init_send_window) @@ -275,7 +277,9 @@ impl Stream { pub fn assign_capacity(&mut self, capacity: WindowSize, max_buffer_size: usize) { let prev_capacity = self.capacity(max_buffer_size); debug_assert!(capacity > 0); - self.send_flow.assign_capacity(capacity); + // TODO: proper error handling + let _res = self.send_flow.assign_capacity(capacity); + debug_assert!(_res.is_ok()); tracing::trace!( " assigned capacity to stream; available={}; buffered={}; id={:?}; max_buffer_size={} prev={}", @@ -294,7 +298,9 @@ impl Stream { pub fn send_data(&mut self, len: WindowSize, max_buffer_size: usize) { let prev_capacity = self.capacity(max_buffer_size); - self.send_flow.send_data(len); + // TODO: proper error handling + let _res = self.send_flow.send_data(len); + debug_assert!(_res.is_ok()); // Decrement the stream's buffered data counter debug_assert!(self.buffered_send_data >= len as usize); diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index dfc5c768b..eab362f73 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -118,7 +118,7 @@ where } } - pub fn set_target_connection_window_size(&mut self, size: WindowSize) { + pub fn set_target_connection_window_size(&mut self, size: WindowSize) -> Result<(), Reason> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; diff --git a/tests/h2-tests/tests/flow_control.rs b/tests/h2-tests/tests/flow_control.rs index 5caa2ec3a..dbb933286 100644 --- a/tests/h2-tests/tests/flow_control.rs +++ b/tests/h2-tests/tests/flow_control.rs @@ -1858,3 +1858,139 @@ async fn poll_capacity_wakeup_after_window_update() { join(srv, h2).await; } + +#[tokio::test] +async fn window_size_decremented_past_zero() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + // let _ = client.assert_server_handshake().await; + + // preface + client.write_preface().await; + + // the following http 2 bytes are fuzzer-generated + client.send_bytes(&[0, 0, 0, 4, 0, 0, 0, 0, 0]).await; + client + .send_bytes(&[ + 0, 0, 23, 1, 1, 0, 249, 255, 191, 131, 1, 1, 1, 70, 1, 1, 1, 1, 65, 1, 1, 65, 1, 1, + 65, 1, 1, 1, 1, 1, 1, 190, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[ + 0, 0, 9, 247, 0, 121, 255, 255, 184, 1, 65, 1, 1, 1, 1, 1, 1, 190, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[0, 0, 3, 0, 1, 0, 249, 255, 191, 1, 1, 190]) + .await; + client + .send_bytes(&[0, 0, 2, 50, 107, 0, 0, 0, 1, 0, 0]) + .await; + 
client + .send_bytes(&[0, 0, 5, 2, 0, 0, 0, 0, 1, 128, 0, 55, 0, 0]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 126, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[0, 0, 6, 4, 0, 0, 0, 0, 0, 3, 4, 76, 255, 71, 131]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 39, 184, 171, 74, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 30, 4, 0, 0, 0, 0, 0, 0, 4, 56, 184, 171, 125, 65, 0, 35, 65, 65, 65, 61, + 232, 87, 115, 89, 116, 0, 4, 0, 58, 33, 125, 33, 79, 3, 107, 49, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client.send_bytes(&[0, 0, 0, 4, 0, 0, 0, 0, 0]).await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 126, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 177, 1, 44, 0, 0, 0, 1, 67, 67, 67, 67, 67, 67, 131, 134, 5, 61, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 115, 102, 1, 3, 48, 43, + 101, 64, 31, 37, 99, 99, 97, 97, 97, 97, 49, 97, 54, 97, 97, 97, 97, 49, 97, 54, + 97, 99, 54, 53, 53, 51, 53, 99, 99, 97, 97, 99, 97, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 0, 58, 171, 125, 33, 79, 3, 107, 49, 98, + ]) + .await; + client + .send_bytes(&[0, 0, 6, 4, 0, 0, 0, 0, 0, 0, 4, 87, 115, 89, 116]) + .await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 126, 4, 39, 184, 171, 125, 33, 0, 3, 107, 50, 98, + ]) + .await; + client + .send_bytes(&[ + 0, 0, 129, 1, 44, 0, 0, 0, 1, 67, 67, 67, 67, 67, 67, 131, 134, 5, 18, 67, 67, 61, + 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 48, 54, 53, 55, 114, 1, 4, 97, 49, 51, 116, + 64, 2, 117, 115, 4, 103, 101, 110, 116, 64, 8, 57, 111, 110, 116, 101, 110, 115, + 102, 7, 43, 43, 49, 48, 48, 43, 101, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]) + .await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client.send_bytes(&[0, 0, 0, 0, 0, 0, 0, 0, 1]).await; + client + .send_bytes(&[ + 0, 0, 12, 4, 0, 0, 0, 0, 0, 0, 4, 0, 58, 171, 125, 33, 79, 3, 107, 49, 98, + ]) + .await; + + // TODO: is CANCEL the right error code to expect here? 
+ // client.recv_frame(frames::reset(1).protocol_error()).await; + }; + + let srv = async move { + let builder = server::Builder::new(); + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + + // just keep it open + let res = poll_fn(move |cx| srv.poll_closed(cx)).await; + tracing::debug!("{:?}", res); + }; + + join(client, srv).await; +} From 6a75f232330374d5f329aaae91afc2dee7ed2b1f Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 26 Jun 2023 08:43:36 -0400 Subject: [PATCH 137/178] v0.3.20 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 875cc70b2..9c035533c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.20 (June 26, 2023) + +* Fix panic if a server received a request with a `:status` pseudo header in the 1xx range. +* Fix panic if a reset stream had pending push promises that were more than allowed. +* Fix potential flow control overflow by subtraction, instead returning a connection error. + # 0.3.19 (May 12, 2023) * Fix counting reset streams when triggered by a GOAWAY. diff --git a/Cargo.toml b/Cargo.toml index 747ae7638..1ef688515 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.19" +version = "0.3.20" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 7975ea8c2..1c5f57625 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.19")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.20")] #![deny( missing_debug_implementations, missing_docs, From 46fb80bfd07b4dc6b8ee87b9c2e6415399830910 Mon Sep 17 00:00:00 2001 From: Artem Medvedev Date: Sat, 22 Jul 2023 12:06:56 +0200 Subject: [PATCH 138/178] test: early server response with data (#703) - fix of the test's name after changing it in #634 - additional test that server also sends data frames correctly --- tests/h2-tests/tests/server.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 0d7bb61cc..33e08c19d 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -553,7 +553,7 @@ async fn recv_connection_header() { } #[tokio::test] -async fn sends_reset_cancel_when_req_body_is_dropped() { +async fn sends_reset_no_error_when_req_body_is_dropped() { h2_support::trace_init!(); let (io, mut client) = mock::new(); @@ -563,8 +563,11 @@ async fn sends_reset_cancel_when_req_body_is_dropped() { client .send_frame(frames::headers(1).request("POST", "https://example.com/")) .await; + // server responded with data before consuming POST-request's body, resulting in `RST_STREAM(NO_ERROR)`. 
+ client.recv_frame(frames::headers(1).response(200)).await; + client.recv_frame(frames::data(1, vec![0; 16384])).await; client - .recv_frame(frames::headers(1).response(200).eos()) + .recv_frame(frames::data(1, vec![0; 16384]).eos()) .await; client .recv_frame(frames::reset(1).reason(Reason::NO_ERROR)) @@ -578,7 +581,8 @@ async fn sends_reset_cancel_when_req_body_is_dropped() { assert_eq!(req.method(), &http::Method::POST); let rsp = http::Response::builder().status(200).body(()).unwrap(); - stream.send_response(rsp, true).unwrap(); + let mut tx = stream.send_response(rsp, false).unwrap(); + tx.send_data(vec![0; 16384 * 2].into(), true).unwrap(); } assert!(srv.next().await.is_none()); }; From 633116ef68b4e7b5c4c5699fb5d10b58ef5818ac Mon Sep 17 00:00:00 2001 From: Artem Medvedev Date: Mon, 24 Jul 2023 18:59:27 +0200 Subject: [PATCH 139/178] fix: do not ignore result of `ensure_recv_open` (#687) --- src/proto/streams/recv.rs | 8 +++++++- src/proto/streams/streams.rs | 6 +++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index d0db00dd8..0063942a4 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -318,7 +318,13 @@ impl Recv { Some(Event::Headers(Client(response))) => Poll::Ready(Ok(response)), Some(_) => panic!("poll_response called after response returned"), None => { - stream.state.ensure_recv_open()?; + if !stream.state.ensure_recv_open()? { + proto_err!(stream: "poll_response: stream={:?} is not opened;", stream.id); + return Poll::Ready(Err(Error::library_reset( + stream.id, + Reason::PROTOCOL_ERROR, + ))); + } stream.recv_task = Some(cx.waker().clone()); Poll::Pending diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index eab362f73..02a0f61b6 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -726,7 +726,11 @@ impl Inner { } // The stream must be receive open - stream.state.ensure_recv_open()?; + if !stream.state.ensure_recv_open()? { + proto_err!(conn: "recv_push_promise: initiating stream is not opened"); + return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); + } + stream.key() } None => { From cdcc641902a2c5b058da62980c5b1bede16671a4 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 10:36:52 -0400 Subject: [PATCH 140/178] msrv: bump to 1.63 (#708) * ci: only check h2 package for msrv * msrv: bump to 1.63, tokio requires it --- .github/workflows/CI.yml | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index e90c68af7..1467d9fc7 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -93,4 +93,4 @@ jobs: with: toolchain: ${{ steps.metadata.outputs.msrv }} - - run: cargo check + - run: cargo check -p h2 diff --git a/Cargo.toml b/Cargo.toml index 1ef688515..f97b16bef 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] edition = "2018" -rust-version = "1.56" +rust-version = "1.63" [features] # Enables `futures::Stream` implementations for various types. 
From da9f34bc808a38be2c36f0f8e3a78c0164548419 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 11:08:46 -0400 Subject: [PATCH 141/178] chore: fix up some clippy nags (#709) --- src/hpack/decoder.rs | 6 +++--- src/proto/streams/state.rs | 9 ++------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index b45c37927..960cbb143 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -447,7 +447,7 @@ fn decode_int(buf: &mut B, prefix_size: u8) -> Result(buf: &mut B) -> Option { +fn peek_u8(buf: &B) -> Option { if buf.has_remaining() { Some(buf.chunk()[0]) } else { @@ -835,9 +835,9 @@ mod test { fn test_peek_u8() { let b = 0xff; let mut buf = Cursor::new(vec![b]); - assert_eq!(peek_u8(&mut buf), Some(b)); + assert_eq!(peek_u8(&buf), Some(b)); assert_eq!(buf.get_u8(), b); - assert_eq!(peek_u8(&mut buf), None); + assert_eq!(peek_u8(&buf), None); } #[test] diff --git a/src/proto/streams/state.rs b/src/proto/streams/state.rs index 1ca8f5afb..5256f09cf 100644 --- a/src/proto/streams/state.rs +++ b/src/proto/streams/state.rs @@ -64,8 +64,9 @@ enum Inner { Closed(Cause), } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Copy, Clone, Default)] enum Peer { + #[default] AwaitingHeaders, Streaming, } @@ -466,9 +467,3 @@ impl Default for State { State { inner: Inner::Idle } } } - -impl Default for Peer { - fn default() -> Self { - AwaitingHeaders - } -} From 9bd62a2efc19da072ff6b55c92586b02a714194e Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Mon, 21 Aug 2023 19:09:26 +0200 Subject: [PATCH 142/178] Test that client reacts correctly on rogue HEADERS (#667) --- tests/h2-tests/tests/client_request.rs | 85 ++++++++++++++++++++++++++ 1 file changed, 85 insertions(+) diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index aff39f5c1..258826d1d 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1454,6 +1454,91 @@ async fn extended_connect_request() { join(srv, h2).await; } +#[tokio::test] +async fn rogue_server_odd_headers() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.send_frame(frames::headers(1)).await; + srv.recv_frame(frames::go_away(0).protocol_error()).await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + + let err = h2.await.unwrap_err(); + assert!(err.is_go_away()); + assert_eq!(err.reason(), Some(Reason::PROTOCOL_ERROR)); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn rogue_server_even_headers() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.send_frame(frames::headers(2)).await; + srv.recv_frame(frames::go_away(0).protocol_error()).await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + + let err = h2.await.unwrap_err(); + assert!(err.is_go_away()); + assert_eq!(err.reason(), Some(Reason::PROTOCOL_ERROR)); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn rogue_server_reused_headers() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + + srv.recv_frame( + 
frames::headers(1)
 .request("GET", "https://camembert.fromage")
 .eos(),
 )
 .await;
 srv.send_frame(frames::headers(1).response(200).eos()).await;
 srv.send_frame(frames::headers(1)).await;
 srv.recv_frame(frames::reset(1).stream_closed()).await;
 };
+
+ let h2 = async move {
+ let (mut client, mut h2) = client::handshake(io).await.unwrap();
+
+ h2.drive(async {
+ let request = Request::builder()
+ .method(Method::GET)
+ .uri("https://camembert.fromage")
+ .body(())
+ .unwrap();
+ let _res = client.send_request(request, true).unwrap().0.await.unwrap();
+ })
+ .await;
+
+ h2.await.unwrap();
+ };
+
+ join(srv, h2).await;
+}
+
 const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0];
 const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0];

From ee042922780651349915afb9cfe9b86d6b1084f9 Mon Sep 17 00:00:00 2001
From: Sean McArthur
Date: Mon, 21 Aug 2023 13:20:06 -0400
Subject: [PATCH 143/178] Fix opening new streams over max concurrent (#707)

There was a bug where opening streams over the max concurrent streams
limit was possible if max_concurrent_streams was lowered below the
current number of open streams while new streams were already being
added to the pending_send queue.

There were two mechanisms for streams to end up in that queue.

1. send_headers would push directly onto pending_send when below
   max_concurrent_streams
2. prioritize would pop from pending_open until max_concurrent_streams
   was reached.

For case 1, a settings frame could be received after pushing many
streams onto pending_send and before the socket was ready to write
again. For case 2, the pending_send queue could already hold Headers
frames when the socket went into a Not Ready state; a settings frame
could then be received, and the headers would be written anyway after
the ack.

The fix is therefore also twofold. Fixing case 1 is as simple as
letting Prioritize decide when to transition streams from `pending_open`
to `pending_send`, since only it knows the readiness of the socket and
whether the headers can be written immediately. This is slightly
complicated by the fact that previously SendRequest would block when
streams were added as "pending open". That was addressed by guessing
when to block based on max concurrent streams rather than the stream
state.

The fix for Prioritize was to conservatively pop streams from
pending_open when the socket is immediately available for writing a
headers frame. This required a change to queuing to support pushing on
the front of pending_send, to ensure headers frames don't linger in
pending_send. The alternative was adding a check in pending_send for
whether a new stream would exceed the max concurrent limit; in that
case, headers frames would need to be carefully re-enqueued, which
seemed to impose more complexity to keep stream IDs correctly ordered.
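To make the queuing behavior above concrete, here is a minimal, self-contained Rust sketch of the idea, using plain `VecDeque`s of stream ids instead of h2's internal `store::Queue`; the type, field, and method names below are illustrative only and are not h2 API.

use std::collections::VecDeque;

struct ConcurrencySketch {
    max_send_streams: usize,
    num_send_streams: usize,
    pending_open: VecDeque<u32>, // streams waiting for a concurrency slot
    pending_send: VecDeque<u32>, // streams with frames queued to write
}

impl ConcurrencySketch {
    // Promote at most one stream per call, and only while a slot is free,
    // so a SETTINGS frame that lowers the limit between polls is respected.
    fn promote_one(&mut self) {
        if self.num_send_streams < self.max_send_streams {
            if let Some(id) = self.pending_open.pop_front() {
                self.num_send_streams += 1;
                // Push on the *front* so this stream's HEADERS goes out
                // before other queued frames and cannot linger across a
                // later settings change.
                self.pending_send.push_front(id);
            }
        }
    }
}

fn main() {
    let mut sketch = ConcurrencySketch {
        max_send_streams: 1,
        num_send_streams: 0,
        pending_open: VecDeque::from([3, 5]),
        pending_send: VecDeque::new(),
    };
    sketch.promote_one();
    sketch.promote_one(); // no-op: the limit is already reached
    assert_eq!(sketch.pending_send, VecDeque::from([3]));
    assert_eq!(sketch.pending_open, VecDeque::from([5]));
}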
Closes #704 Closes #706 Co-authored-by: Joe Wilm --- src/client.rs | 6 +- src/proto/streams/counts.rs | 8 +++ src/proto/streams/prioritize.rs | 18 ++++-- src/proto/streams/send.rs | 25 +++++--- src/proto/streams/store.rs | 42 +++++++++++- src/proto/streams/streams.rs | 14 ++-- tests/h2-tests/tests/client_request.rs | 88 ++++++++++++++++++++++++++ tests/h2-tests/tests/server.rs | 4 +- 8 files changed, 179 insertions(+), 26 deletions(-) diff --git a/src/client.rs b/src/client.rs index 4147e8a46..e83ef6a4a 100644 --- a/src/client.rs +++ b/src/client.rs @@ -510,8 +510,10 @@ where self.inner .send_request(request, end_of_stream, self.pending.as_ref()) .map_err(Into::into) - .map(|stream| { - if stream.is_pending_open() { + .map(|(stream, is_full)| { + if stream.is_pending_open() && is_full { + // Only prevent sending another request when the request queue + // is not full. self.pending = Some(stream.clone_to_opaque()); } diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index 6a5aa9ccd..add1312e5 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -49,6 +49,14 @@ impl Counts { } } + /// Returns true when the next opened stream will reach capacity of outbound streams + /// + /// The number of client send streams is incremented in prioritize; send_request has to guess if + /// it should wait before allowing another request to be sent. + pub fn next_send_stream_will_reach_capacity(&self) -> bool { + self.max_send_streams <= (self.num_send_streams + 1) + } + /// Returns the current peer pub fn peer(&self) -> peer::Dyn { self.peer diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 35795fae4..3196049a4 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -520,7 +520,9 @@ impl Prioritize { tracing::trace!("poll_complete"); loop { - self.schedule_pending_open(store, counts); + if let Some(mut stream) = self.pop_pending_open(store, counts) { + self.pending_send.push_front(&mut stream); + } match self.pop_frame(buffer, store, max_frame_len, counts) { Some(frame) => { @@ -874,20 +876,24 @@ impl Prioritize { } } - fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { + fn pop_pending_open<'s>( + &mut self, + store: &'s mut Store, + counts: &mut Counts, + ) -> Option> { tracing::trace!("schedule_pending_open"); // check for any pending open streams - while counts.can_inc_num_send_streams() { + if counts.can_inc_num_send_streams() { if let Some(mut stream) = self.pending_open.pop(store) { tracing::trace!("schedule_pending_open; stream={:?}", stream.id); counts.inc_num_send_streams(&mut stream); - self.pending_send.push(&mut stream); stream.notify_send(); - } else { - return; + return Some(stream); } } + + None } } diff --git a/src/proto/streams/send.rs b/src/proto/streams/send.rs index dcb5225c7..626e61a33 100644 --- a/src/proto/streams/send.rs +++ b/src/proto/streams/send.rs @@ -143,22 +143,27 @@ impl Send { // Update the state stream.state.send_open(end_stream)?; - if counts.peer().is_local_init(frame.stream_id()) { - // If we're waiting on a PushPromise anyway - // handle potentially queueing the stream at that point - if !stream.is_pending_push { - if counts.can_inc_num_send_streams() { - counts.inc_num_send_streams(stream); - } else { - self.prioritize.queue_open(stream); - } - } + let mut pending_open = false; + if counts.peer().is_local_init(frame.stream_id()) && !stream.is_pending_push { + self.prioritize.queue_open(stream); + pending_open = true; } // Queue the 
frame for sending + // + // This call expects that, since new streams are in the open queue, new + // streams won't be pushed on pending_send. self.prioritize .queue_frame(frame.into(), buffer, stream, task); + // Need to notify the connection when pushing onto pending_open since + // queue_frame only notifies for pending_send. + if pending_open { + if let Some(task) = task.take() { + task.wake(); + } + } + Ok(()) } diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index d33a01cce..67b377b12 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -256,7 +256,7 @@ where /// /// If the stream is already contained by the list, return `false`. pub fn push(&mut self, stream: &mut store::Ptr) -> bool { - tracing::trace!("Queue::push"); + tracing::trace!("Queue::push_back"); if N::is_queued(stream) { tracing::trace!(" -> already queued"); @@ -292,6 +292,46 @@ where true } + /// Queue the stream + /// + /// If the stream is already contained by the list, return `false`. + pub fn push_front(&mut self, stream: &mut store::Ptr) -> bool { + tracing::trace!("Queue::push_front"); + + if N::is_queued(stream) { + tracing::trace!(" -> already queued"); + return false; + } + + N::set_queued(stream, true); + + // The next pointer shouldn't be set + debug_assert!(N::next(stream).is_none()); + + // Queue the stream + match self.indices { + Some(ref mut idxs) => { + tracing::trace!(" -> existing entries"); + + // Update the provided stream to point to the head node + let head_key = stream.resolve(idxs.head).key(); + N::set_next(stream, Some(head_key)); + + // Update the head pointer + idxs.head = stream.key(); + } + None => { + tracing::trace!(" -> first entry"); + self.indices = Some(store::Indices { + head: stream.key(), + tail: stream.key(), + }); + } + } + + true + } + pub fn pop<'a, R>(&mut self, store: &'a mut R) -> Option> where R: Resolve, diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 02a0f61b6..274bf4553 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -216,7 +216,7 @@ where mut request: Request<()>, end_of_stream: bool, pending: Option<&OpaqueStreamRef>, - ) -> Result, SendError> { + ) -> Result<(StreamRef, bool), SendError> { use super::stream::ContentLength; use http::Method; @@ -298,10 +298,14 @@ where // the lock, so it can't. 
me.refs += 1; - Ok(StreamRef { - opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), - send_buffer: self.send_buffer.clone(), - }) + let is_full = me.counts.next_send_stream_will_reach_capacity(); + Ok(( + StreamRef { + opaque: OpaqueStreamRef::new(self.inner.clone(), &mut stream), + send_buffer: self.send_buffer.clone(), + }, + is_full, + )) } pub(crate) fn is_extended_connect_protocol_enabled(&self) -> bool { diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 258826d1d..7b4316004 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -239,6 +239,8 @@ async fn request_over_max_concurrent_streams_errors() { // first request is allowed let (resp1, mut stream1) = client.send_request(request, false).unwrap(); + // as long as we let the connection internals tick + client = h2.drive(client.ready()).await.unwrap(); let request = Request::builder() .method(Method::POST) @@ -284,6 +286,90 @@ async fn request_over_max_concurrent_streams_errors() { join(srv, h2).await; } +#[tokio::test] +async fn recv_decrement_max_concurrent_streams_when_requests_queued() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame( + frames::headers(1) + .request("POST", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + + srv.ping_pong([0; 8]).await; + + // limit this server later in life + srv.send_frame(frames::settings().max_concurrent_streams(1)) + .await; + srv.recv_frame(frames::settings_ack()).await; + srv.recv_frame( + frames::headers(3) + .request("POST", "https://example.com/") + .eos(), + ) + .await; + srv.ping_pong([1; 8]).await; + srv.send_frame(frames::headers(3).response(200).eos()).await; + + srv.recv_frame( + frames::headers(5) + .request("POST", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(5).response(200).eos()).await; + }; + + let h2 = async move { + let (mut client, mut h2) = client::handshake(io).await.expect("handshake"); + // we send a simple req here just to drive the connection so we can + // receive the server settings. 
+ let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + // first request is allowed + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + + // first request is allowed + let (resp1, _) = client.send_request(request, true).unwrap(); + + let request = Request::builder() + .method(Method::POST) + .uri("https://example.com/") + .body(()) + .unwrap(); + + // second request is put into pending_open + let (resp2, _) = client.send_request(request, true).unwrap(); + + h2.drive(async move { + resp1.await.expect("req"); + }) + .await; + join(async move { h2.await.unwrap() }, async move { + resp2.await.unwrap() + }) + .await; + }; + + join(srv, h2).await; +} + #[tokio::test] async fn send_request_poll_ready_when_connection_error() { h2_support::trace_init!(); @@ -336,6 +422,8 @@ async fn send_request_poll_ready_when_connection_error() { // first request is allowed let (resp1, _) = client.send_request(request, true).unwrap(); + // as long as we let the connection internals tick + client = h2.drive(client.ready()).await.unwrap(); let request = Request::builder() .method(Method::POST) diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 33e08c19d..6075c7dcf 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -296,10 +296,10 @@ async fn push_request_against_concurrency() { .await; client.recv_frame(frames::data(2, &b""[..]).eos()).await; client - .recv_frame(frames::headers(1).response(200).eos()) + .recv_frame(frames::headers(4).response(200).eos()) .await; client - .recv_frame(frames::headers(4).response(200).eos()) + .recv_frame(frames::headers(1).response(200).eos()) .await; }; From da38b1c49c39ccf7e2e0dca51ebf8505d6905597 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 21 Aug 2023 13:26:55 -0400 Subject: [PATCH 144/178] v0.3.21 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c035533c..9caebdf40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.21 (August 21, 2023) + +* Fix opening of new streams over peer's max concurrent limit. +* Fix `RecvStream` to return data even if it has received a `CANCEL` stream error. +* Update MSRV to 1.63. + # 0.3.20 (June 26, 2023) * Fix panic if a server received a request with a `:status` pseudo header in the 1xx range. diff --git a/Cargo.toml b/Cargo.toml index f97b16bef..c815ff0e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.20" +version = "0.3.21" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index 1c5f57625..a37c8b4c1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! 
[`client::handshake`]: client/fn.handshake.html
-#![doc(html_root_url = "https://docs.rs/h2/0.3.20")]
+#![doc(html_root_url = "https://docs.rs/h2/0.3.21")]
 #![deny(
 missing_debug_implementations,
 missing_docs,

From 62cf7a606f2c681f62e47d3d04df551db4c010f4 Mon Sep 17 00:00:00 2001
From: tottoto
Date: Sat, 16 Sep 2023 09:20:12 +0900
Subject: [PATCH 145/178] Check minimal versions more precisely

---
 .github/workflows/CI.yml | 19 ++++++++++---------
 Cargo.toml | 2 +-
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 1467d9fc7..ee920442e 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -61,10 +61,6 @@ jobs:
 run: ./ci/h2spec.sh
 if: matrix.rust == 'stable'

- - name: Check minimal versions
- run: cargo clean; cargo update -Zminimal-versions; cargo check
- if: matrix.rust == 'nightly'
-
 clippy_check:
 runs-on: ubuntu-latest
 steps:
@@ -83,14 +79,19 @@ jobs:
 uses: actions/checkout@v3

 - name: Get MSRV from package metadata
- id: metadata
- run: |
- cargo metadata --no-deps --format-version 1 |
- jq -r '"msrv=" + (.packages[] | select(.name == "h2")).rust_version' >> $GITHUB_OUTPUT
+ id: msrv
+ run: grep rust-version Cargo.toml | cut -d '"' -f2 | sed 's/^/version=/' >> $GITHUB_OUTPUT

 - name: Install Rust (${{ steps.metadata.outputs.msrv }})
+ id: msrv-toolchain
 uses: dtolnay/rust-toolchain@master
 with:
- toolchain: ${{ steps.metadata.outputs.msrv }}
+ toolchain: ${{ steps.msrv.outputs.version }}

 - run: cargo check -p h2
+
+ - uses: dtolnay/rust-toolchain@nightly
+ - uses: taiki-e/install-action@cargo-hack
+ - uses: taiki-e/install-action@cargo-minimal-versions
+
+ - run: cargo +${{ steps.msrv-toolchain.outputs.name }} minimal-versions check
diff --git a/Cargo.toml b/Cargo.toml
index c815ff0e7..dd351bfa2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -48,7 +48,7 @@ tokio-util = { version = "0.7.1", features = ["codec"] }
 tokio = { version = "1", features = ["io-util"] }
 bytes = "1"
 http = "0.2"
-tracing = { version = "0.1.21", default-features = false, features = ["std"] }
+tracing = { version = "0.1.32", default-features = false, features = ["std"] }
 fnv = "1.0.5"
 slab = "0.4.2"
 indexmap = { version = "1.5.2", features = ["std"] }

From a3f01c19fda400196897e07a1b7f6747e17562c7 Mon Sep 17 00:00:00 2001
From: Xiaoya Wei
Date: Thu, 28 Sep 2023 22:45:42 +0800
Subject: [PATCH 146/178] perf: Improve throughput when vectored IO is not enabled (#712)

As discussed in https://github.com/hyperium/h2/issues/711, the current
implementation of sending data is suboptimal when vectored I/O is not
enabled: a data frame's head is likely to be sent in a separate TCP
segment whose payload is only 9 bytes.

This PR adds a specialized implementation for the non-vectored I/O case.
In short, it sets a larger chain threshold, and also makes sure a data
frame's head is sent along with the beginning of the real data payload.

All existing unit tests passed. I also took a look at the e2e benchmarks
in https://github.com/hyperium/hyper/blob/0.14.x/benches/end_to_end.rs,
but all of the benchmarks there exercise vectored I/O whenever the OS
supports it. There isn't a specific case for non-vectored I/O, so I am
not sure how to proceed with benchmarks for performance evaluation.
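To illustrate the idea, here is a minimal, std-only Rust sketch of copying the start of a large payload in behind the 9-byte frame head, so that the first non-vectored write is never a head-only TCP segment; `fill_write_buf` and `chain_threshold` are made-up names for this sketch, not h2 internals.

// Illustrative only: shows the buffering idea, not h2's actual encoder.
fn fill_write_buf(head: [u8; 9], payload: &[u8], chain_threshold: usize) -> (Vec<u8>, usize) {
    let mut buf = Vec::with_capacity(head.len() + chain_threshold);
    buf.extend_from_slice(&head);
    // Pull just enough of the payload in behind the head so the first
    // write() call carries real data instead of only the 9-byte header.
    let take = payload.len().min(chain_threshold);
    buf.extend_from_slice(&payload[..take]);
    (buf, take)
}

fn main() {
    let head = [0u8; 9]; // stand-in for an encoded DATA frame head
    let payload = vec![0xAAu8; 16 * 1024];
    let (first_write, consumed) = fill_write_buf(head, &payload, 1024);
    assert_eq!(first_write.len(), 9 + 1024);
    // The remaining payload[consumed..] stays chained for later writes.
    assert_eq!(payload.len() - consumed, 16 * 1024 - 1024);
}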
--- Cargo.toml | 2 +- src/codec/framed_write.rs | 84 +++++++++++++++++---------------------- 2 files changed, 37 insertions(+), 49 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dd351bfa2..b11981d26 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -44,7 +44,7 @@ members = [ futures-core = { version = "0.3", default-features = false } futures-sink = { version = "0.3", default-features = false } futures-util = { version = "0.3", default-features = false } -tokio-util = { version = "0.7.1", features = ["codec"] } +tokio-util = { version = "0.7.1", features = ["codec", "io"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" diff --git a/src/codec/framed_write.rs b/src/codec/framed_write.rs index 4b1b4accc..c88af02da 100644 --- a/src/codec/framed_write.rs +++ b/src/codec/framed_write.rs @@ -7,8 +7,9 @@ use bytes::{Buf, BufMut, BytesMut}; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio_util::io::poll_write_buf; -use std::io::{self, Cursor, IoSlice}; +use std::io::{self, Cursor}; // A macro to get around a method needing to borrow &mut self macro_rules! limited_write_buf { @@ -45,8 +46,11 @@ struct Encoder { /// Max frame size, this is specified by the peer max_frame_size: FrameSize, - /// Whether or not the wrapped `AsyncWrite` supports vectored IO. - is_write_vectored: bool, + /// Chain payloads bigger than this. + chain_threshold: usize, + + /// Min buffer required to attempt to write a frame + min_buffer_capacity: usize, } #[derive(Debug)] @@ -61,14 +65,16 @@ enum Next { /// frame that big. const DEFAULT_BUFFER_CAPACITY: usize = 16 * 1_024; -/// Min buffer required to attempt to write a frame -const MIN_BUFFER_CAPACITY: usize = frame::HEADER_LEN + CHAIN_THRESHOLD; - -/// Chain payloads bigger than this. The remote will never advertise a max frame -/// size less than this (well, the spec says the max frame size can't be less -/// than 16kb, so not even close). +/// Chain payloads bigger than this when vectored I/O is enabled. The remote +/// will never advertise a max frame size less than this (well, the spec says +/// the max frame size can't be less than 16kb, so not even close). const CHAIN_THRESHOLD: usize = 256; +/// Chain payloads bigger than this when vectored I/O is **not** enabled. +/// A larger value in this scenario will reduce the number of small and +/// fragmented data being sent, and hereby improve the throughput. +const CHAIN_THRESHOLD_WITHOUT_VECTORED_IO: usize = 1024; + // TODO: Make generic impl FramedWrite where @@ -76,7 +82,11 @@ where B: Buf, { pub fn new(inner: T) -> FramedWrite { - let is_write_vectored = inner.is_write_vectored(); + let chain_threshold = if inner.is_write_vectored() { + CHAIN_THRESHOLD + } else { + CHAIN_THRESHOLD_WITHOUT_VECTORED_IO + }; FramedWrite { inner, encoder: Encoder { @@ -85,7 +95,8 @@ where next: None, last_data_frame: None, max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, - is_write_vectored, + chain_threshold, + min_buffer_capacity: chain_threshold + frame::HEADER_LEN, }, } } @@ -126,23 +137,17 @@ where Some(Next::Data(ref mut frame)) => { tracing::trace!(queued_data_frame = true); let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut()); - ready!(write( - &mut self.inner, - self.encoder.is_write_vectored, - &mut buf, - cx, - ))? + ready!(poll_write_buf(Pin::new(&mut self.inner), cx, &mut buf))? 
} _ => { tracing::trace!(queued_data_frame = false); - ready!(write( - &mut self.inner, - self.encoder.is_write_vectored, - &mut self.encoder.buf, + ready!(poll_write_buf( + Pin::new(&mut self.inner), cx, + &mut self.encoder.buf ))? } - } + }; } match self.encoder.unset_frame() { @@ -165,30 +170,6 @@ where } } -fn write( - writer: &mut T, - is_write_vectored: bool, - buf: &mut B, - cx: &mut Context<'_>, -) -> Poll> -where - T: AsyncWrite + Unpin, - B: Buf, -{ - // TODO(eliza): when tokio-util 0.5.1 is released, this - // could just use `poll_write_buf`... - const MAX_IOVS: usize = 64; - let n = if is_write_vectored { - let mut bufs = [IoSlice::new(&[]); MAX_IOVS]; - let cnt = buf.chunks_vectored(&mut bufs); - ready!(Pin::new(writer).poll_write_vectored(cx, &bufs[..cnt]))? - } else { - ready!(Pin::new(writer).poll_write(cx, buf.chunk()))? - }; - buf.advance(n); - Ok(()).into() -} - #[must_use] enum ControlFlow { Continue, @@ -240,12 +221,17 @@ where return Err(PayloadTooBig); } - if len >= CHAIN_THRESHOLD { + if len >= self.chain_threshold { let head = v.head(); // Encode the frame head to the buffer head.encode(len, self.buf.get_mut()); + if self.buf.get_ref().remaining() < self.chain_threshold { + let extra_bytes = self.chain_threshold - self.buf.remaining(); + self.buf.get_mut().put(v.payload_mut().take(extra_bytes)); + } + // Save the data frame self.next = Some(Next::Data(v)); } else { @@ -305,7 +291,9 @@ where } fn has_capacity(&self) -> bool { - self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY + self.next.is_none() + && (self.buf.get_ref().capacity() - self.buf.get_ref().len() + >= self.min_buffer_capacity) } fn is_empty(&self) -> bool { From 1f247de691ed7db8c10d3d21a0235af9a1757dd9 Mon Sep 17 00:00:00 2001 From: Dirkjan Ochtman Date: Mon, 9 Oct 2023 13:50:35 +0200 Subject: [PATCH 147/178] Update indexmap to version 2 (#698) * Update indexmap to version 2 * Update webpki-roots dev-dep to 0.25 * Update tokio-rustls dev-dep to 0.24 * Update env_logger dev-dep to 0.10 * Remove combined minimal-versions + MSRV check for now --- .github/workflows/CI.yml | 2 -- Cargo.toml | 8 ++++---- examples/akamai.rs | 2 +- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index ee920442e..4c1990673 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -93,5 +93,3 @@ jobs: - uses: dtolnay/rust-toolchain@nightly - uses: taiki-e/install-action@cargo-hack - uses: taiki-e/install-action@cargo-minimal-versions - - - run: cargo +${{ steps.msrv-toolchain.outputs.name }} minimal-versions check diff --git a/Cargo.toml b/Cargo.toml index b11981d26..70156498e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -51,7 +51,7 @@ http = "0.2" tracing = { version = "0.1.32", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" -indexmap = { version = "1.5.2", features = ["std"] } +indexmap = { version = "2", features = ["std"] } [dev-dependencies] @@ -67,9 +67,9 @@ serde_json = "1.0.0" # Examples tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync", "net"] } -env_logger = { version = "0.9", default-features = false } -tokio-rustls = "0.23.2" -webpki-roots = "0.22.2" +env_logger = { version = "0.10", default-features = false } +tokio-rustls = "0.24" +webpki-roots = "0.25" [package.metadata.docs.rs] features = ["stream"] diff --git a/examples/akamai.rs b/examples/akamai.rs index 1d0b17baf..788bf3005 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -17,7 +17,7 @@ 
pub async fn main() -> Result<(), Box> { let tls_client_config = std::sync::Arc::new({ let mut root_store = RootCertStore::empty(); - root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| { + root_store.add_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.iter().map(|ta| { OwnedTrustAnchor::from_subject_spki_name_constraints( ta.subject, ta.spki, From cbe7744c79a838ea91185500d8ede331eccc82c8 Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 22:32:33 +0900 Subject: [PATCH 148/178] chore(ci): update to actions/checkout@v4 (#716) --- .github/workflows/CI.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 4c1990673..f115c83bc 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust uses: dtolnay/rust-toolchain@stable @@ -36,7 +36,7 @@ jobs: - stable steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Install Rust (${{ matrix.rust }}) uses: dtolnay/rust-toolchain@master @@ -64,7 +64,7 @@ jobs: clippy_check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run Clippy run: cargo clippy --all-targets --all-features @@ -76,7 +76,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Get MSRV from package metadata id: msrv From 05cf352ee35b15da6b3c6c68f18068e750dd9198 Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 19:29:03 +0900 Subject: [PATCH 149/178] chore(ci): add minimal versions checking on stable rust --- .github/workflows/CI.yml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index f115c83bc..14e4a3149 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -90,6 +90,13 @@ jobs: - run: cargo check -p h2 + minimal-versions: + runs-on: ubuntu-latest + needs: [style] + steps: + - uses: actions/checkout@v4 - uses: dtolnay/rust-toolchain@nightly + - uses: dtolnay/rust-toolchain@stable - uses: taiki-e/install-action@cargo-hack - uses: taiki-e/install-action@cargo-minimal-versions + - run: cargo minimal-versions --ignore-private check From 3cdef9692ef75e4b1e42242bec8b29181847053e Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 19:30:51 +0900 Subject: [PATCH 150/178] fix(test): mark h2-support as private crate --- tests/h2-support/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index f178178eb..522d904cb 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -2,6 +2,7 @@ name = "h2-support" version = "0.1.0" authors = ["Carl Lerche "] +publish = false edition = "2018" [dependencies] From d03c54a80dad60a4f23e110eee227d24a413b21e Mon Sep 17 00:00:00 2001 From: tottoto Date: Mon, 16 Oct 2023 19:31:24 +0900 Subject: [PATCH 151/178] chore(dependencies): update tracing minimal version to 0.1.35 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 70156498e..a567bf538 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ tokio-util = { version = "0.7.1", features = ["codec", "io"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" http = "0.2" -tracing = { version = "0.1.32", default-features = false, features = ["std"] } +tracing 
= { version = "0.1.35", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" indexmap = { version = "2", features = ["std"] } From 4aa7b163425648926454564aa4116ed6f20f9fee Mon Sep 17 00:00:00 2001 From: Protryon Date: Wed, 18 Oct 2023 09:29:40 -0700 Subject: [PATCH 152/178] Fix documentation for max_send_buffer_size (#718) --- src/client.rs | 2 +- src/server.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index e83ef6a4a..b329121ab 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1023,7 +1023,7 @@ impl Builder { /// stream have been written to the connection, the send buffer capacity /// will be freed up again. /// - /// The default is currently ~400MB, but may change. + /// The default is currently ~400KB, but may change. /// /// # Panics /// diff --git a/src/server.rs b/src/server.rs index f1f4cf470..bb20adc5d 100644 --- a/src/server.rs +++ b/src/server.rs @@ -937,7 +937,7 @@ impl Builder { /// stream have been written to the connection, the send buffer capacity /// will be freed up again. /// - /// The default is currently ~400MB, but may change. + /// The default is currently ~400KB, but may change. /// /// # Panics /// From 56651e6e513597d105c5df37a5f5937e2ba50be6 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 30 Oct 2023 11:09:40 -0400 Subject: [PATCH 153/178] fix lint about unused import --- src/frame/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/frame/mod.rs b/src/frame/mod.rs index 570a162a8..0e8e7035c 100644 --- a/src/frame/mod.rs +++ b/src/frame/mod.rs @@ -69,7 +69,7 @@ pub use crate::hpack::BytesStr; pub use self::settings::{ DEFAULT_INITIAL_WINDOW_SIZE, DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, - MAX_INITIAL_WINDOW_SIZE, MAX_MAX_FRAME_SIZE, + MAX_MAX_FRAME_SIZE, }; pub type FrameSize = u32; From ef743ecb2243786c0573b9fe726290878359689b Mon Sep 17 00:00:00 2001 From: 4JX <4JXcYvmyu3Hz8fV@protonmail.com> Date: Mon, 30 Oct 2023 16:35:36 +0100 Subject: [PATCH 154/178] Add a setter for header_table_size (#638) --- src/client.rs | 33 +++++++++++++++++++++++++ src/codec/framed_read.rs | 6 +++++ src/codec/mod.rs | 5 ++++ src/frame/settings.rs | 2 -- src/proto/settings.rs | 4 +++ tests/h2-support/src/frames.rs | 5 ++++ tests/h2-tests/tests/client_request.rs | 34 ++++++++++++++++++++++++++ 7 files changed, 87 insertions(+), 2 deletions(-) diff --git a/src/client.rs b/src/client.rs index b329121ab..35cfc1414 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1072,6 +1072,39 @@ impl Builder { self } + /// Sets the header table size. + /// + /// This setting informs the peer of the maximum size of the header compression + /// table used to encode header blocks, in octets. The encoder may select any value + /// equal to or less than the header table size specified by the sender. + /// + /// The default value is 4,096. + /// + /// # Examples + /// + /// ``` + /// # use tokio::io::{AsyncRead, AsyncWrite}; + /// # use h2::client::*; + /// # use bytes::Bytes; + /// # + /// # async fn doc(my_io: T) + /// # -> Result<((SendRequest, Connection)), h2::Error> + /// # { + /// // `client_fut` is a future representing the completion of the HTTP/2 + /// // handshake. 
+ /// let client_fut = Builder::new() + /// .header_table_size(1_000_000) + /// .handshake(my_io); + /// # client_fut.await + /// # } + /// # + /// # pub fn main() {} + /// ``` + pub fn header_table_size(&mut self, size: u32) -> &mut Self { + self.settings.set_header_table_size(Some(size)); + self + } + /// Sets the first stream ID to something other than 1. #[cfg(feature = "unstable")] pub fn initial_stream_id(&mut self, stream_id: u32) -> &mut Self { diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index a874d7732..3b0030d93 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -88,6 +88,12 @@ impl FramedRead { pub fn set_max_header_list_size(&mut self, val: usize) { self.max_header_list_size = val; } + + /// Update the header table size setting. + #[inline] + pub fn set_header_table_size(&mut self, val: usize) { + self.hpack.queue_size_update(val); + } } /// Decodes a frame. diff --git a/src/codec/mod.rs b/src/codec/mod.rs index 359adf6e4..6cbdc1e18 100644 --- a/src/codec/mod.rs +++ b/src/codec/mod.rs @@ -95,6 +95,11 @@ impl Codec { self.framed_write().set_header_table_size(val) } + /// Set the decoder header table size size. + pub fn set_recv_header_table_size(&mut self, val: usize) { + self.inner.set_header_table_size(val) + } + /// Set the max header list size that can be received. pub fn set_max_recv_header_list_size(&mut self, val: usize) { self.inner.set_max_header_list_size(val); diff --git a/src/frame/settings.rs b/src/frame/settings.rs index 0c913f059..484498a9d 100644 --- a/src/frame/settings.rs +++ b/src/frame/settings.rs @@ -121,11 +121,9 @@ impl Settings { self.header_table_size } - /* pub fn set_header_table_size(&mut self, size: Option) { self.header_table_size = size; } - */ pub fn load(head: Head, payload: &[u8]) -> Result { use self::Setting::*; diff --git a/src/proto/settings.rs b/src/proto/settings.rs index 6cc617209..28065cc68 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -60,6 +60,10 @@ impl Settings { codec.set_max_recv_header_list_size(max as usize); } + if let Some(val) = local.header_table_size() { + codec.set_recv_header_table_size(val as usize); + } + streams.apply_local_settings(local)?; self.local = Local::Synced; Ok(()) diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index d302d3ce5..a76dd3b60 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -391,6 +391,11 @@ impl Mock { self.0.set_enable_connect_protocol(Some(val)); self } + + pub fn header_table_size(mut self, val: u32) -> Self { + self.0.set_header_table_size(Some(val)); + self + } } impl From> for frame::Settings { diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 7b4316004..88c7df464 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1627,6 +1627,40 @@ async fn rogue_server_reused_headers() { join(srv, h2).await; } +#[tokio::test] +async fn client_builder_header_table_size() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + let mut settings = frame::Settings::default(); + + settings.set_header_table_size(Some(10000)); + + let srv = async move { + let recv_settings = srv.assert_client_handshake().await; + assert_frame_eq(recv_settings, settings); + + srv.recv_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .eos(), + ) + .await; + srv.send_frame(frames::headers(1).response(200).eos()).await; + }; + + let mut builder = client::Builder::new(); + 
builder.header_table_size(10000); + + let h2 = async move { + let (mut client, mut h2) = builder.handshake::<_, Bytes>(io).await.unwrap(); + let request = Request::get("https://example.com/").body(()).unwrap(); + let (response, _) = client.send_request(request, true).unwrap(); + h2.drive(response).await.unwrap(); + }; + + join(srv, h2).await; +} + const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From c7ca62f69b3b16d66f088ed2684f4534a8034c76 Mon Sep 17 00:00:00 2001 From: vuittont60 <81072379+vuittont60@users.noreply.github.com> Date: Tue, 7 Nov 2023 19:15:22 +0800 Subject: [PATCH 155/178] docs: fix typos (#724) --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 10e74bf29..8af0abcc7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ ## Getting Help ## If you have a question about the h2 library or have encountered problems using it, you may -[file an issue][issue] or ask ask a question on the [Tokio Gitter][gitter]. +[file an issue][issue] or ask a question on the [Tokio Gitter][gitter]. ## Submitting a Pull Request ## @@ -15,7 +15,7 @@ Do you have an improvement? 2. We will try to respond to your issue promptly. 3. Fork this repo, develop and test your code changes. See the project's [README](README.md) for further information about working in this repository. 4. Submit a pull request against this repo's `master` branch. -6. Your branch may be merged once all configured checks pass, including: +5. Your branch may be merged once all configured checks pass, including: - Code review has been completed. - The branch has passed tests in CI. From 0f412d8b9c8d309966197873ad1d065adc23c794 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 09:00:16 -0500 Subject: [PATCH 156/178] v0.3.22 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- src/lib.rs | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9caebdf40..00d69725a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.3.22 (November 15, 2023) + +* Add `header_table_size(usize)` option to client and server builders. +* Improve throughput when vectored IO is not available. +* Update indexmap to 2. + # 0.3.21 (August 21, 2023) * Fix opening of new streams over peer's max concurrent limit. diff --git a/Cargo.toml b/Cargo.toml index a567bf538..c413e56ce 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.3.21" +version = "0.3.22" license = "MIT" authors = [ "Carl Lerche ", diff --git a/src/lib.rs b/src/lib.rs index a37c8b4c1..a1fde6eb4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -78,7 +78,7 @@ //! [`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.21")] +#![doc(html_root_url = "https://docs.rs/h2/0.3.22")] #![deny( missing_debug_implementations, missing_docs, From dfe9eb75b0f0250be55c5471f7837b5ff984aa69 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 09:38:16 -0500 Subject: [PATCH 157/178] start 0.4 dev --- Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index c413e56ce..1048dbe4e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. 
# - Create git tag -version = "0.3.22" +version = "0.4.0-dev" license = "MIT" authors = [ "Carl Lerche ", @@ -21,6 +21,8 @@ exclude = ["fixtures/**", "ci/**"] edition = "2018" rust-version = "1.63" +publish = false + [features] # Enables `futures::Stream` implementations for various types. stream = [] From 8867e955aeb4764412d6a510c9683b72b9d10e05 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 10:24:25 -0500 Subject: [PATCH 158/178] remove private from root crate --- Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 1048dbe4e..53f9043c2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,8 +21,6 @@ exclude = ["fixtures/**", "ci/**"] edition = "2018" rust-version = "1.63" -publish = false - [features] # Enables `futures::Stream` implementations for various types. stream = [] From 1ca1dc6f384cdec12eebd2757ba648b869e507f9 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 11:06:53 -0500 Subject: [PATCH 159/178] update http to 1.0 --- Cargo.toml | 2 +- fuzz/Cargo.toml | 2 +- tests/h2-fuzz/Cargo.toml | 2 +- tests/h2-support/Cargo.toml | 2 +- tests/h2-tests/tests/server.rs | 3 ++- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 53f9043c2..b8d4c0278 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,7 +47,7 @@ futures-util = { version = "0.3", default-features = false } tokio-util = { version = "0.7.1", features = ["codec", "io"] } tokio = { version = "1", features = ["io-util"] } bytes = "1" -http = "0.2" +http = "1" tracing = { version = "0.1.35", default-features = false, features = ["std"] } fnv = "1.0.5" slab = "0.4.2" diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index aafb60ae7..922eca238 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1", features = [ "full" ] } h2 = { path = "../", features = [ "unstable" ] } h2-support = { path = "../tests/h2-support" } futures = { version = "0.3", default-features = false, features = ["std"] } -http = "0.2" +http = "1" # Prevent this from interfering with workspaces [workspace] diff --git a/tests/h2-fuzz/Cargo.toml b/tests/h2-fuzz/Cargo.toml index dadb62c92..b0f9599e9 100644 --- a/tests/h2-fuzz/Cargo.toml +++ b/tests/h2-fuzz/Cargo.toml @@ -11,5 +11,5 @@ h2 = { path = "../.." 
} env_logger = { version = "0.9", default-features = false } futures = { version = "0.3", default-features = false, features = ["std"] } honggfuzz = "0.5" -http = "0.2" +http = "1" tokio = { version = "1", features = [ "full" ] } diff --git a/tests/h2-support/Cargo.toml b/tests/h2-support/Cargo.toml index 522d904cb..970648d5a 100644 --- a/tests/h2-support/Cargo.toml +++ b/tests/h2-support/Cargo.toml @@ -14,6 +14,6 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt"] } tracing-tree = "0.2" futures = { version = "0.3", default-features = false } -http = "0.2" +http = "1" tokio = { version = "1", features = ["time"] } tokio-test = "0.4" diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 6075c7dcf..dd97e94d2 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1126,6 +1126,7 @@ async fn request_without_authority() { #[tokio::test] async fn serve_when_request_in_response_extensions() { + use std::sync::Arc; h2_support::trace_init!(); let (io, mut client) = mock::new(); @@ -1149,7 +1150,7 @@ async fn serve_when_request_in_response_extensions() { let (req, mut stream) = srv.next().await.unwrap().unwrap(); let mut rsp = http::Response::new(()); - rsp.extensions_mut().insert(req); + rsp.extensions_mut().insert(Arc::new(req)); stream.send_response(rsp, true).unwrap(); assert!(srv.next().await.is_none()); From 9defea85856523c6354aa4f0511353ac9f9b2b5c Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 11:34:31 -0500 Subject: [PATCH 160/178] remove deprecated Server::poll_close (#727) --- src/server.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/server.rs b/src/server.rs index bb20adc5d..e72df7699 100644 --- a/src/server.rs +++ b/src/server.rs @@ -508,12 +508,6 @@ where self.connection.poll(cx).map_err(Into::into) } - #[doc(hidden)] - #[deprecated(note = "renamed to poll_closed")] - pub fn poll_close(&mut self, cx: &mut Context) -> Poll> { - self.poll_closed(cx) - } - /// Sets the connection to a GOAWAY state. /// /// Does not terminate the connection. Must continue being polled to close From 122091a296fa393e2c32d67bd53468601e86fc6c Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 15 Nov 2023 11:38:39 -0500 Subject: [PATCH 161/178] v0.4.0 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- README.md | 2 +- src/lib.rs | 3 +-- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 00d69725a..86220a112 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.4.0 (November 15, 2023) + +* Update to `http` 1.0. +* Remove deprecated `Server::poll_close()`. + # 0.3.22 (November 15, 2023) * Add `header_table_size(usize)` option to client and server builders. diff --git a/Cargo.toml b/Cargo.toml index b8d4c0278..11c6df0bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,7 @@ name = "h2" # - html_root_url. # - Update CHANGELOG.md. # - Create git tag -version = "0.4.0-dev" +version = "0.4.0" license = "MIT" authors = [ "Carl Lerche ", diff --git a/README.md b/README.md index 2e1599914..f83357d5d 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ To use `h2`, first add this to your `Cargo.toml`: ```toml [dependencies] -h2 = "0.3" +h2 = "0.4" ``` Next, add this to your crate: diff --git a/src/lib.rs b/src/lib.rs index a1fde6eb4..fd7782f8e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -11,7 +11,7 @@ //! //! ```toml //! [dependencies] -//! h2 = "0.3" +//! h2 = "0.4" //! ``` //! //! # Layout @@ -78,7 +78,6 @@ //! 
[`server::handshake`]: server/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html -#![doc(html_root_url = "https://docs.rs/h2/0.3.22")] #![deny( missing_debug_implementations, missing_docs, From 756345ecd8e0c3f890c862c72cc3e3f61a621dce Mon Sep 17 00:00:00 2001 From: dswij Date: Tue, 9 Jan 2024 05:25:40 +0800 Subject: [PATCH 162/178] fix: streams awaiting capacity lockout (#730) This PR changes the the assign-capacity queue to prioritize streams that are send-ready. This is necessary to prevent a lockout when streams aren't able to proceed while waiting for connection capacity, but there is none. Closes https://github.com/hyperium/hyper/issues/3338 --- src/error.rs | 1 + src/proto/streams/prioritize.rs | 11 ++- tests/h2-tests/tests/prioritization.rs | 97 +++++++++++++++++++++++++- 3 files changed, 106 insertions(+), 3 deletions(-) diff --git a/src/error.rs b/src/error.rs index eb2b2acbc..96a471bcb 100644 --- a/src/error.rs +++ b/src/error.rs @@ -25,6 +25,7 @@ pub struct Error { #[derive(Debug)] enum Kind { /// A RST_STREAM frame was received or sent. + #[allow(dead_code)] Reset(StreamId, Reason, Initiator), /// A GO_AWAY frame was received or sent. diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 3196049a4..999bb0759 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -184,7 +184,15 @@ impl Prioritize { stream.requested_send_capacity = cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize; - self.try_assign_capacity(stream); + // `try_assign_capacity` will queue the stream to `pending_capacity` if the capcaity + // cannot be assigned at the time it is called. + // + // Streams over the max concurrent count will still call `send_data` so we should be + // careful not to put it into `pending_capacity` as it will starve the connection + // capacity for other streams + if !stream.is_pending_open { + self.try_assign_capacity(stream); + } } if frame.is_end_stream() { @@ -522,6 +530,7 @@ impl Prioritize { loop { if let Some(mut stream) = self.pop_pending_open(store, counts) { self.pending_send.push_front(&mut stream); + self.try_assign_capacity(&mut stream); } match self.pop_frame(buffer, store, max_frame_len, counts) { diff --git a/tests/h2-tests/tests/prioritization.rs b/tests/h2-tests/tests/prioritization.rs index 7c2681068..11d2c2ccf 100644 --- a/tests/h2-tests/tests/prioritization.rs +++ b/tests/h2-tests/tests/prioritization.rs @@ -1,5 +1,6 @@ -use futures::future::join; -use futures::{FutureExt, StreamExt}; +use futures::future::{join, select}; +use futures::{pin_mut, FutureExt, StreamExt}; + use h2_support::prelude::*; use h2_support::DEFAULT_WINDOW_SIZE; use std::task::Context; @@ -408,3 +409,95 @@ async fn send_data_receive_window_update() { join(mock, h2).await; } + +#[tokio::test] +async fn stream_count_over_max_stream_limit_does_not_starve_capacity() { + use tokio::sync::oneshot; + + h2_support::trace_init!(); + + let (io, mut srv) = mock::new(); + + let (tx, rx) = oneshot::channel(); + + let srv = async move { + let _ = srv + .assert_client_handshake_with_settings( + frames::settings() + // super tiny server + .max_concurrent_streams(1), + ) + .await; + srv.recv_frame(frames::headers(1).request("POST", "http://example.com/")) + .await; + + srv.recv_frame(frames::data(1, vec![0; 16384])).await; + srv.recv_frame(frames::data(1, vec![0; 16384])).await; + srv.recv_frame(frames::data(1, vec![0; 16384])).await; + srv.recv_frame(frames::data(1, vec![0; 16383]).eos()).await; + 
srv.send_frame(frames::headers(1).response(200).eos()).await; + + // All of these connection capacities should be assigned to stream 3 + srv.send_frame(frames::window_update(0, 16384)).await; + srv.send_frame(frames::window_update(0, 16384)).await; + srv.send_frame(frames::window_update(0, 16384)).await; + srv.send_frame(frames::window_update(0, 16383)).await; + + // StreamId(3) should be able to send all of its request with the conn capacity + srv.recv_frame(frames::headers(3).request("POST", "http://example.com/")) + .await; + srv.recv_frame(frames::data(3, vec![0; 16384])).await; + srv.recv_frame(frames::data(3, vec![0; 16384])).await; + srv.recv_frame(frames::data(3, vec![0; 16384])).await; + srv.recv_frame(frames::data(3, vec![0; 16383]).eos()).await; + srv.send_frame(frames::headers(3).response(200).eos()).await; + + // Then all the future stream is guaranteed to be send-able by induction + tx.send(()).unwrap(); + }; + + fn request() -> Request<()> { + Request::builder() + .method(Method::POST) + .uri("http://example.com/") + .body(()) + .unwrap() + } + + let client = async move { + let (mut client, mut conn) = client::Builder::new() + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let (req1, mut send1) = client.send_request(request(), false).unwrap(); + let (req2, mut send2) = client.send_request(request(), false).unwrap(); + + // Use up the connection window. + send1.send_data(vec![0; 65535].into(), true).unwrap(); + // Queue up for more connection window. + send2.send_data(vec![0; 65535].into(), true).unwrap(); + + // Queue up more pending open streams + for _ in 0..5 { + let (_, mut send) = client.send_request(request(), false).unwrap(); + send.send_data(vec![0; 65535].into(), true).unwrap(); + } + + let response = conn.drive(req1).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let response = conn.drive(req2).await.unwrap(); + assert_eq!(response.status(), StatusCode::OK); + + let _ = rx.await; + }; + + let task = join(srv, client); + pin_mut!(task); + + let t = tokio::time::sleep(Duration::from_secs(5)).map(|_| panic!("time out")); + pin_mut!(t); + + select(task, t).await; +} From ee1f75a915a040a81ec2f0fc7c66ff87ee0ed7bc Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Mon, 8 Jan 2024 16:31:24 -0500 Subject: [PATCH 163/178] v0.4.1 --- CHANGELOG.md | 4 ++++ Cargo.toml | 4 +--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 86220a112..d16601b14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.4.1 (January 8, 2024) + +* Fix assigning connection capacity which could starve streams in some instances. + # 0.4.0 (November 15, 2023) * Update to `http` 1.0. diff --git a/Cargo.toml b/Cargo.toml index 11c6df0bd..be5332405 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,9 @@ [package] name = "h2" # When releasing to crates.io: -# - Update doc URL. -# - html_root_url. # - Update CHANGELOG.md. 
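The lockout fixed above lives in h2's flow-control capacity assignment. For readers following along, this is the client-side API that exercises that path — a minimal sketch assuming an already-established `SendStream<Bytes>` and a payload to send; it is an illustration, not code taken from the patch.

use bytes::Bytes;
use h2::SendStream;

// Send a payload in chunks, asking the connection for capacity as we go
// (explicit flow control: reserve_capacity / poll_capacity / send_data).
async fn send_with_flow_control(
    stream: &mut SendStream<Bytes>,
    mut data: Bytes,
) -> Result<(), h2::Error> {
    while !data.is_empty() {
        // Express interest in sending the rest of the payload.
        stream.reserve_capacity(data.len());
        // Wait for the connection to assign some of that capacity to this stream.
        match futures::future::poll_fn(|cx| stream.poll_capacity(cx)).await {
            Some(Ok(assigned)) => {
                let chunk = data.split_to(assigned.min(data.len()));
                let end_of_stream = data.is_empty();
                stream.send_data(chunk, end_of_stream)?;
            }
            Some(Err(e)) => return Err(e),
            // The stream was reset or released before capacity arrived.
            None => return Ok(()),
        }
    }
    Ok(())
}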
# - Create git tag -version = "0.4.0" +version = "0.4.1" license = "MIT" authors = [ "Carl Lerche ", From 66a1ed8cefe11718f9e524821b4590493b51cbc0 Mon Sep 17 00:00:00 2001 From: Yusuke Tanaka Date: Sat, 13 Jan 2024 18:22:22 +0900 Subject: [PATCH 164/178] doc: clarify that the default value of initial_max_send_streams is 100 --- src/client.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index 35cfc1414..c3afd7a99 100644 --- a/src/client.rs +++ b/src/client.rs @@ -312,9 +312,11 @@ pub struct Builder { reset_stream_duration: Duration, /// Initial maximum number of locally initiated (send) streams. - /// After receiving a Settings frame from the remote peer, + /// After receiving a SETTINGS frame from the remote peer, /// the connection will overwrite this value with the /// MAX_CONCURRENT_STREAMS specified in the frame. + /// If no value is advertised by the remote peer in the initial SETTINGS + /// frame, it will be set to usize::MAX. initial_max_send_streams: usize, /// Initial target window size for new connections. @@ -844,8 +846,10 @@ impl Builder { /// Sets the initial maximum of locally initiated (send) streams. /// /// The initial settings will be overwritten by the remote peer when - /// the Settings frame is received. The new value will be set to the - /// `max_concurrent_streams()` from the frame. + /// the SETTINGS frame is received. The new value will be set to the + /// `max_concurrent_streams()` from the frame. If no value is advertised in + /// the initial SETTINGS frame from the remote peer as part of + /// [HTTP/2 Connection Preface], `usize::MAX` will be set. /// /// This setting prevents the caller from exceeding this number of /// streams that are counted towards the concurrency limit. @@ -855,7 +859,10 @@ impl Builder { /// /// See [Section 5.1.2] in the HTTP/2 spec for more details. /// - /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 + /// The default value is `usize::MAX`. + /// + /// [HTTP/2 Connection Preface]: https://httpwg.org/specs/rfc9113.html#preface + /// [Section 5.1.2]: https://httpwg.org/specs/rfc9113.html#rfc.section.5.1.2 /// /// # Examples /// From d2f09fb757e5be928a6720da416dbe8bd37aa208 Mon Sep 17 00:00:00 2001 From: Yusuke Tanaka Date: Sat, 13 Jan 2024 18:49:46 +0900 Subject: [PATCH 165/178] fix: set MAX_CONCURRENT_STREAMS to usize::MAX if no value is advertised initially --- src/proto/settings.rs | 18 ++- src/proto/streams/counts.rs | 8 +- src/proto/streams/streams.rs | 8 +- tests/h2-tests/tests/client_request.rs | 161 +++++++++++++++++++++++++ 4 files changed, 188 insertions(+), 7 deletions(-) diff --git a/src/proto/settings.rs b/src/proto/settings.rs index 28065cc68..72ba11fa0 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -12,6 +12,9 @@ pub(crate) struct Settings { /// the socket first then the settings applied **before** receiving any /// further frames. remote: Option, + /// Whether the connection has received the initial SETTINGS frame from the + /// remote peer. + has_received_remote_initial_settings: bool, } #[derive(Debug)] @@ -32,6 +35,7 @@ impl Settings { // the handshake process. local: Local::WaitingAck(local), remote: None, + has_received_remote_initial_settings: false, } } @@ -96,6 +100,15 @@ impl Settings { } } + /// Sets `true` to `self.has_received_remote_initial_settings`. + /// Returns `true` if this method is called for the first time. + /// (i.e. 
it is the initial SETTINGS frame from the remote peer) + fn mark_remote_initial_settings_as_received(&mut self) -> bool { + let has_received = self.has_received_remote_initial_settings; + self.has_received_remote_initial_settings = true; + !has_received + } + pub(crate) fn poll_send( &mut self, cx: &mut Context, @@ -108,7 +121,7 @@ impl Settings { C: Buf, P: Peer, { - if let Some(settings) = &self.remote { + if let Some(settings) = self.remote.clone() { if !dst.poll_ready(cx)?.is_ready() { return Poll::Pending; } @@ -121,7 +134,8 @@ impl Settings { tracing::trace!("ACK sent; applying settings"); - streams.apply_remote_settings(settings)?; + let is_initial = self.mark_remote_initial_settings_as_received(); + streams.apply_remote_settings(&settings, is_initial)?; if let Some(val) = settings.header_table_size() { dst.set_send_header_table_size(val as usize); diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index add1312e5..f8810333d 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -147,9 +147,11 @@ impl Counts { self.num_remote_reset_streams -= 1; } - pub fn apply_remote_settings(&mut self, settings: &frame::Settings) { - if let Some(val) = settings.max_concurrent_streams() { - self.max_send_streams = val as usize; + pub fn apply_remote_settings(&mut self, settings: &frame::Settings, is_initial: bool) { + match settings.max_concurrent_streams() { + Some(val) => self.max_send_streams = val as usize, + None if is_initial => self.max_send_streams = usize::MAX, + None => {} } } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index 274bf4553..b6194ca0e 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -186,14 +186,18 @@ where me.poll_complete(&self.send_buffer, cx, dst) } - pub fn apply_remote_settings(&mut self, frame: &frame::Settings) -> Result<(), Error> { + pub fn apply_remote_settings( + &mut self, + frame: &frame::Settings, + is_initial: bool, + ) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); let me = &mut *me; let mut send_buffer = self.send_buffer.inner.lock().unwrap(); let send_buffer = &mut *send_buffer; - me.counts.apply_remote_settings(frame); + me.counts.apply_remote_settings(frame, is_initial); me.actions.send.apply_remote_settings( frame, diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 88c7df464..13bc5f225 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1661,6 +1661,167 @@ async fn client_builder_header_table_size() { join(srv, h2).await; } +#[tokio::test] +async fn configured_max_concurrent_send_streams_and_update_it_based_on_empty_settings_frame() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + // Send empty SETTINGS frame (no MAX_CONCURRENT_STREAMS is provided) + srv.send_frame(frames::settings()).await; + }; + + let h2 = async move { + let (_client, h2) = client::Builder::new() + // Configure the initial value to 2024 + .initial_max_send_streams(2024) + .handshake::<_, bytes::Bytes>(io) + .await + .unwrap(); + let mut h2 = std::pin::pin!(h2); + // It should be pre-configured value before it receives the initial + // SETTINGS frame from the server + assert_eq!(h2.max_concurrent_send_streams(), 2024); + h2.as_mut().await.unwrap(); + // If the server's initial SETTINGS frame does not include + // MAX_CONCURRENT_STREAMS, this should be updated to usize::MAX. 
+ assert_eq!(h2.max_concurrent_send_streams(), usize::MAX); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn configured_max_concurrent_send_streams_and_update_it_based_on_non_empty_settings_frame() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + // Send SETTINGS frame with MAX_CONCURRENT_STREAMS set to 42 + srv.send_frame(frames::settings().max_concurrent_streams(42)) + .await; + }; + + let h2 = async move { + let (_client, h2) = client::Builder::new() + // Configure the initial value to 2024 + .initial_max_send_streams(2024) + .handshake::<_, bytes::Bytes>(io) + .await + .unwrap(); + let mut h2 = std::pin::pin!(h2); + // It should be pre-configured value before it receives the initial + // SETTINGS frame from the server + assert_eq!(h2.max_concurrent_send_streams(), 2024); + h2.as_mut().await.unwrap(); + // Now the client has received the initial SETTINGS frame from the + // server, which should update the value accordingly + assert_eq!(h2.max_concurrent_send_streams(), 42); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn receive_settings_frame_twice_with_second_one_empty() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + // Send the initial SETTINGS frame with MAX_CONCURRENT_STREAMS set to 42 + srv.send_frame(frames::settings().max_concurrent_streams(42)) + .await; + + // Handle the client's connection preface + srv.read_preface().await.unwrap(); + match srv.next().await { + Some(frame) => match frame.unwrap() { + h2::frame::Frame::Settings(_) => { + let ack = frame::Settings::ack(); + srv.send(ack.into()).await.unwrap(); + } + frame => { + panic!("unexpected frame: {:?}", frame); + } + }, + None => { + panic!("unexpected EOF"); + } + } + + // Should receive the ack for the server's initial SETTINGS frame + let frame = assert_settings!(srv.next().await.unwrap().unwrap()); + assert!(frame.is_ack()); + + // Send another SETTINGS frame with no MAX_CONCURRENT_STREAMS + // This should not update the max_concurrent_send_streams value that + // the client manages. 
+ srv.send_frame(frames::settings()).await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + let mut h2 = std::pin::pin!(h2); + assert_eq!(h2.max_concurrent_send_streams(), usize::MAX); + h2.as_mut().await.unwrap(); + // Even though the second SETTINGS frame contained no value for + // MAX_CONCURRENT_STREAMS, update to usize::MAX should not happen + assert_eq!(h2.max_concurrent_send_streams(), 42); + }; + + join(srv, h2).await; +} + +#[tokio::test] +async fn receive_settings_frame_twice_with_second_one_non_empty() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + // Send the initial SETTINGS frame with MAX_CONCURRENT_STREAMS set to 42 + srv.send_frame(frames::settings().max_concurrent_streams(42)) + .await; + + // Handle the client's connection preface + srv.read_preface().await.unwrap(); + match srv.next().await { + Some(frame) => match frame.unwrap() { + h2::frame::Frame::Settings(_) => { + let ack = frame::Settings::ack(); + srv.send(ack.into()).await.unwrap(); + } + frame => { + panic!("unexpected frame: {:?}", frame); + } + }, + None => { + panic!("unexpected EOF"); + } + } + + // Should receive the ack for the server's initial SETTINGS frame + let frame = assert_settings!(srv.next().await.unwrap().unwrap()); + assert!(frame.is_ack()); + + // Send another SETTINGS frame with no MAX_CONCURRENT_STREAMS + // This should not update the max_concurrent_send_streams value that + // the client manages. + srv.send_frame(frames::settings().max_concurrent_streams(2024)) + .await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + let mut h2 = std::pin::pin!(h2); + assert_eq!(h2.max_concurrent_send_streams(), usize::MAX); + h2.as_mut().await.unwrap(); + // The most-recently advertised value should be used + assert_eq!(h2.max_concurrent_send_streams(), 2024); + }; + + join(srv, h2).await; +} + const SETTINGS: &[u8] = &[0, 0, 0, 4, 0, 0, 0, 0, 0]; const SETTINGS_ACK: &[u8] = &[0, 0, 0, 4, 1, 0, 0, 0, 0]; From 59570e11ccddbec85f67a0c7aa353f7730c68854 Mon Sep 17 00:00:00 2001 From: Noah Kennedy Date: Wed, 10 Jan 2024 13:37:11 -0600 Subject: [PATCH 166/178] streams: limit error resets for misbehaving connections This change causes GOAWAYs to be issued to misbehaving connections which for one reason or another cause us to emit lots of error resets. Error resets are not generally expected from valid implementations anyways. The threshold after which we issue GOAWAYs is tunable, and will default to 1024. --- src/client.rs | 25 +++++++++++++++++++++++++ src/proto/connection.rs | 2 ++ src/proto/mod.rs | 1 + src/proto/streams/counts.rs | 32 ++++++++++++++++++++++++++++++++ src/proto/streams/mod.rs | 6 ++++++ src/proto/streams/streams.rs | 22 ++++++++++++++++++---- src/server.rs | 29 +++++++++++++++++++++++++++++ 7 files changed, 113 insertions(+), 4 deletions(-) diff --git a/src/client.rs b/src/client.rs index c3afd7a99..0dbc5fc97 100644 --- a/src/client.rs +++ b/src/client.rs @@ -338,6 +338,12 @@ pub struct Builder { /// The stream ID of the first (lowest) stream. Subsequent streams will use /// monotonically increasing stream IDs. stream_id: StreamId, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. 
+ local_max_error_reset_streams: Option, } #[derive(Debug)] @@ -647,6 +653,7 @@ impl Builder { initial_max_send_streams: usize::MAX, settings: Default::default(), stream_id: 1.into(), + local_max_error_reset_streams: Some(proto::DEFAULT_LOCAL_RESET_COUNT_MAX), } } @@ -980,6 +987,23 @@ impl Builder { self } + /// Sets the maximum number of local resets due to protocol errors made by the remote end. + /// + /// Invalid frames and many other protocol errors will lead to resets being generated for those streams. + /// Too many of these often indicate a malicious client, and there are attacks which can abuse this to DOS servers. + /// This limit protects against these DOS attacks by limiting the amount of resets we can be forced to generate. + /// + /// When the number of local resets exceeds this threshold, the client will close the connection. + /// + /// If you really want to disable this, supply [`Option::None`] here. + /// Disabling this is not recommended and may expose you to DOS attacks. + /// + /// The default value is currently 1024, but could change. + pub fn max_local_error_reset_streams(&mut self, max: Option) -> &mut Self { + self.local_max_error_reset_streams = max; + self + } + /// Sets the maximum number of pending-accept remotely-reset streams. /// /// Streams that have been received by the peer, but not accepted by the @@ -1300,6 +1324,7 @@ where reset_stream_duration: builder.reset_stream_duration, reset_stream_max: builder.reset_stream_max, remote_reset_stream_max: builder.pending_accept_reset_stream_max, + local_error_reset_streams_max: builder.local_max_error_reset_streams, settings: builder.settings.clone(), }, ); diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 637fac358..5d6b9d2b1 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -81,6 +81,7 @@ pub(crate) struct Config { pub reset_stream_duration: Duration, pub reset_stream_max: usize, pub remote_reset_stream_max: usize, + pub local_error_reset_streams_max: Option, pub settings: frame::Settings, } @@ -125,6 +126,7 @@ where .settings .max_concurrent_streams() .map(|max| max as usize), + local_max_error_reset_streams: config.local_error_reset_streams_max, } } let streams = Streams::new(streams_config(&config)); diff --git a/src/proto/mod.rs b/src/proto/mod.rs index 567d03060..560927598 100644 --- a/src/proto/mod.rs +++ b/src/proto/mod.rs @@ -32,6 +32,7 @@ pub type WindowSize = u32; // Constants pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; // i32::MAX as u32 pub const DEFAULT_REMOTE_RESET_STREAM_MAX: usize = 20; +pub const DEFAULT_LOCAL_RESET_COUNT_MAX: usize = 1024; pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400; diff --git a/src/proto/streams/counts.rs b/src/proto/streams/counts.rs index f8810333d..fdb07f1cd 100644 --- a/src/proto/streams/counts.rs +++ b/src/proto/streams/counts.rs @@ -31,6 +31,16 @@ pub(super) struct Counts { /// Current number of "pending accept" streams that were remotely reset num_remote_reset_streams: usize, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + max_local_error_reset_streams: Option, + + /// Total number of locally reset streams due to protocol error across the + /// lifetime of the connection. 
+ num_local_error_reset_streams: usize, } impl Counts { @@ -46,6 +56,8 @@ impl Counts { num_local_reset_streams: 0, max_remote_reset_streams: config.remote_reset_max, num_remote_reset_streams: 0, + max_local_error_reset_streams: config.local_max_error_reset_streams, + num_local_error_reset_streams: 0, } } @@ -66,6 +78,26 @@ impl Counts { self.num_send_streams != 0 || self.num_recv_streams != 0 } + /// Returns true if we can issue another local reset due to protocol error. + pub fn can_inc_num_local_error_resets(&self) -> bool { + if let Some(max) = self.max_local_error_reset_streams { + max > self.num_local_error_reset_streams + } else { + true + } + } + + pub fn inc_num_local_error_resets(&mut self) { + assert!(self.can_inc_num_local_error_resets()); + + // Increment the number of remote initiated streams + self.num_local_error_reset_streams += 1; + } + + pub(crate) fn max_local_error_resets(&self) -> Option { + self.max_local_error_reset_streams + } + /// Returns true if the receive stream concurrency can be incremented pub fn can_inc_num_recv_streams(&self) -> bool { self.max_recv_streams > self.num_recv_streams diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index fbe32c7b0..b347442af 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -69,4 +69,10 @@ pub struct Config { /// Maximum number of remote initiated streams pub remote_max_initiated: Option, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + pub local_max_error_reset_streams: Option, } diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index b6194ca0e..f4b12c7bb 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -1546,10 +1546,24 @@ impl Actions { ) -> Result<(), Error> { if let Err(Error::Reset(stream_id, reason, initiator)) = res { debug_assert_eq!(stream_id, stream.id); - // Reset the stream. - self.send - .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); - Ok(()) + + if counts.can_inc_num_local_error_resets() { + counts.inc_num_local_error_resets(); + + // Reset the stream. + self.send + .send_reset(reason, initiator, buffer, stream, counts, &mut self.task); + Ok(()) + } else { + tracing::warn!( + "reset_on_recv_stream_err; locally-reset streams reached limit ({:?})", + counts.max_local_error_resets().unwrap(), + ); + Err(Error::library_go_away_data( + Reason::ENHANCE_YOUR_CALM, + "too_many_internal_resets", + )) + } } else { res } diff --git a/src/server.rs b/src/server.rs index e72df7699..d809c0e85 100644 --- a/src/server.rs +++ b/src/server.rs @@ -252,6 +252,12 @@ pub struct Builder { /// Maximum amount of bytes to "buffer" for writing per stream. max_send_buffer_size: usize, + + /// Maximum number of locally reset streams due to protocol error across + /// the lifetime of the connection. + /// + /// When this gets exceeded, we issue GOAWAYs. + local_max_error_reset_streams: Option, } /// Send a response back to the client @@ -644,6 +650,8 @@ impl Builder { settings: Settings::default(), initial_target_connection_window_size: None, max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE, + + local_max_error_reset_streams: Some(proto::DEFAULT_LOCAL_RESET_COUNT_MAX), } } @@ -881,6 +889,24 @@ impl Builder { self } + /// Sets the maximum number of local resets due to protocol errors made by the remote end. 
+ /// + /// Invalid frames and many other protocol errors will lead to resets being generated for those streams. + /// Too many of these often indicate a malicious client, and there are attacks which can abuse this to DOS servers. + /// This limit protects against these DOS attacks by limiting the amount of resets we can be forced to generate. + /// + /// When the number of local resets exceeds this threshold, the server will issue GOAWAYs with an error code of + /// `ENHANCE_YOUR_CALM` to the client. + /// + /// If you really want to disable this, supply [`Option::None`] here. + /// Disabling this is not recommended and may expose you to DOS attacks. + /// + /// The default value is currently 1024, but could change. + pub fn max_local_error_reset_streams(&mut self, max: Option) -> &mut Self { + self.local_max_error_reset_streams = max; + self + } + /// Sets the maximum number of pending-accept remotely-reset streams. /// /// Streams that have been received by the peer, but not accepted by the @@ -1355,6 +1381,9 @@ where reset_stream_duration: self.builder.reset_stream_duration, reset_stream_max: self.builder.reset_stream_max, remote_reset_stream_max: self.builder.pending_accept_reset_stream_max, + local_error_reset_streams_max: self + .builder + .local_max_error_reset_streams, settings: self.builder.settings.clone(), }, ); From 5f5360673877ac29ad2d83fafd3f2b221709ab5a Mon Sep 17 00:00:00 2001 From: Noah Kennedy Date: Wed, 17 Jan 2024 13:57:29 -0600 Subject: [PATCH 167/178] v0.4.2 --- CHANGELOG.md | 5 +++++ Cargo.toml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d16601b14..307021dcb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,8 @@ +# 0.4.2 (January 17th, 2024) + +* Limit error resets for misbehaving connections. +* Fix selecting MAX_CONCURRENT_STREAMS value if no value is advertised initially. + # 0.4.1 (January 8, 2024) * Fix assigning connection capacity which could starve streams in some instances. diff --git a/Cargo.toml b/Cargo.toml index be5332405..cbb4bafd6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "h2" # When releasing to crates.io: # - Update CHANGELOG.md. # - Create git tag -version = "0.4.1" +version = "0.4.2" license = "MIT" authors = [ "Carl Lerche ", From 560bdb6c22c8689a68091b23432eef22c5dcc747 Mon Sep 17 00:00:00 2001 From: dswij Date: Fri, 19 Jan 2024 17:31:48 +0800 Subject: [PATCH 168/178] tests: add test for peer unexpectedly dropping connection --- tests/h2-support/src/mock.rs | 16 ++++++ tests/h2-tests/tests/client_request.rs | 67 +++++++++++--------------- tests/h2-tests/tests/server.rs | 37 ++++++++++++++ 3 files changed, 82 insertions(+), 38 deletions(-) diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index 18d084841..60539d0a0 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -54,6 +54,9 @@ struct Inner { /// True when the pipe is closed. 
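Taken together with the client-side option above, a server can tune or disable this protection through its builder. A minimal sketch of that usage follows — the `256` limit and the `TcpStream` I/O type are illustrative choices for this note; the 1024 default and the GOAWAY `ENHANCE_YOUR_CALM` behavior are the ones described in this patch.

use tokio::net::TcpStream;

// Serve one connection, allowing at most 256 locally generated error resets
// before the peer is sent GOAWAY (ENHANCE_YOUR_CALM). Passing `None` instead
// would disable the protection, which the docs above advise against.
async fn serve(socket: TcpStream) -> Result<(), h2::Error> {
    let mut connection = h2::server::Builder::new()
        .max_local_error_reset_streams(Some(256))
        .handshake::<_, bytes::Bytes>(socket)
        .await?;

    while let Some(result) = connection.accept().await {
        let (_request, mut respond) = result?;
        let response = http::Response::new(());
        respond.send_response(response, true)?;
    }
    Ok(())
}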
closed: bool, + + /// Trigger an `UnexpectedEof` error on read + unexpected_eof: bool, } const PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"; @@ -73,6 +76,7 @@ pub fn new_with_write_capacity(cap: usize) -> (Mock, Handle) { tx_rem: cap, tx_rem_task: None, closed: false, + unexpected_eof: false, })); let mock = Mock { @@ -96,6 +100,11 @@ impl Handle { &mut self.codec } + pub fn close_without_notify(&mut self) { + let mut me = self.codec.get_mut().inner.lock().unwrap(); + me.unexpected_eof = true; + } + /// Send a frame pub async fn send(&mut self, item: SendFrame) -> Result<(), SendError> { // Queue the frame @@ -348,6 +357,13 @@ impl AsyncRead for Mock { let mut me = self.pipe.inner.lock().unwrap(); + if me.unexpected_eof { + return Poll::Ready(Err(io::Error::new( + io::ErrorKind::UnexpectedEof, + "Simulate an unexpected eof error", + ))); + } + if me.rx.is_empty() { if me.closed { return Poll::Ready(Ok(())); diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 13bc5f225..3d285ce2c 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -2,6 +2,7 @@ use futures::future::{join, ready, select, Either}; use futures::stream::FuturesUnordered; use futures::StreamExt; use h2_support::prelude::*; +use std::io; use std::pin::Pin; use std::task::Context; @@ -1773,52 +1774,42 @@ async fn receive_settings_frame_twice_with_second_one_empty() { } #[tokio::test] -async fn receive_settings_frame_twice_with_second_one_non_empty() { +async fn server_drop_connection_unexpectedly_return_unexpected_eof_err() { h2_support::trace_init!(); let (io, mut srv) = mock::new(); let srv = async move { - // Send the initial SETTINGS frame with MAX_CONCURRENT_STREAMS set to 42 - srv.send_frame(frames::settings().max_concurrent_streams(42)) - .await; - - // Handle the client's connection preface - srv.read_preface().await.unwrap(); - match srv.next().await { - Some(frame) => match frame.unwrap() { - h2::frame::Frame::Settings(_) => { - let ack = frame::Settings::ack(); - srv.send(ack.into()).await.unwrap(); - } - frame => { - panic!("unexpected frame: {:?}", frame); - } - }, - None => { - panic!("unexpected EOF"); - } - } - - // Should receive the ack for the server's initial SETTINGS frame - let frame = assert_settings!(srv.next().await.unwrap().unwrap()); - assert!(frame.is_ack()); - - // Send another SETTINGS frame with no MAX_CONCURRENT_STREAMS - // This should not update the max_concurrent_send_streams value that - // the client manages. 
- srv.send_frame(frames::settings().max_concurrent_streams(2024)) - .await; + let settings = srv.assert_client_handshake().await; + assert_default_settings!(settings); + srv.recv_frame( + frames::headers(1) + .request("GET", "https://http2.akamai.com/") + .eos(), + ) + .await; + srv.close_without_notify(); }; let h2 = async move { - let (_client, h2) = client::handshake(io).await.unwrap(); - let mut h2 = std::pin::pin!(h2); - assert_eq!(h2.max_concurrent_send_streams(), usize::MAX); - h2.as_mut().await.unwrap(); - // The most-recently advertised value should be used - assert_eq!(h2.max_concurrent_send_streams(), 2024); + let (mut client, h2) = client::handshake(io).await.unwrap(); + tokio::spawn(async move { + let request = Request::builder() + .uri("https://http2.akamai.com/") + .body(()) + .unwrap(); + let _res = client + .send_request(request, true) + .unwrap() + .0 + .await + .expect("request"); + }); + let err = h2.await.expect_err("should receive UnexpectedEof"); + assert_eq!( + err.get_io().expect("should be UnexpectedEof").kind(), + io::ErrorKind::UnexpectedEof, + ); }; - join(srv, h2).await; } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index dd97e94d2..831f18823 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1416,3 +1416,40 @@ async fn reject_informational_status_header_in_request() { join(client, srv).await; } + +#[tokio::test] +async fn client_drop_connection_without_close_notify() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + let client = async move { + let _recv_settings = client.assert_server_handshake().await; + client + .send_frame(frames::headers(1).request("GET", "https://example.com/")) + .await; + client.send_frame(frames::data(1, &b"hello"[..])).await; + client.recv_frame(frames::headers(1).response(200)).await; + + client.close_without_notify(); // Client closed without notify causing UnexpectedEof + }; + + let mut builder = server::Builder::new(); + builder.max_concurrent_streams(1); + + let h2 = async move { + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + let (req, mut stream) = srv.next().await.unwrap().unwrap(); + + assert_eq!(req.method(), &http::Method::GET); + + let rsp = http::Response::builder().status(200).body(()).unwrap(); + stream.send_response(rsp, false).unwrap(); + + // Step the conn state forward and hitting the EOF + // But we have no outstanding request from client to be satisfied, so we should not return + // an error + let _ = poll_fn(|cx| srv.poll_closed(cx)).await.unwrap(); + }; + + join(client, h2).await; +} From 4ce59557b56943eb8b5d7dc45f97eda3f0ae0104 Mon Sep 17 00:00:00 2001 From: dswij Date: Fri, 19 Jan 2024 20:15:16 +0800 Subject: [PATCH 169/178] feat: not returning UnexpectedEof when client drop without close_notify --- src/proto/connection.rs | 20 +++++++++-- src/proto/streams/buffer.rs | 4 +++ src/proto/streams/streams.rs | 13 +++++++ tests/h2-tests/tests/client_request.rs | 50 ++++++++++++++++++++++++++ 4 files changed, 84 insertions(+), 3 deletions(-) diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 5d6b9d2b1..8627375ae 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -461,13 +461,27 @@ where // active streams must be reset. // // TODO: Are I/O errors recoverable? 
- Err(Error::Io(e, inner)) => { - tracing::debug!(error = ?e, "Connection::poll; IO error"); - let e = Error::Io(e, inner); + Err(Error::Io(kind, inner)) => { + tracing::debug!(error = ?kind, "Connection::poll; IO error"); + let e = Error::Io(kind, inner); // Reset all active streams self.streams.handle_error(e.clone()); + // Some client implementations drop the connections without notifying its peer + // Attempting to read after the client dropped the connection results in UnexpectedEof + // If as a server, we don't have anything more to send, just close the connection + // without error + // + // See https://github.com/hyperium/hyper/issues/3427 + if self.streams.is_server() + && self.streams.is_buffer_empty() + && matches!(kind, io::ErrorKind::UnexpectedEof) + { + *self.state = State::Closed(Reason::NO_ERROR, Initiator::Library); + return Ok(()); + } + // Return the error Err(e) } diff --git a/src/proto/streams/buffer.rs b/src/proto/streams/buffer.rs index 2648a410e..02d265061 100644 --- a/src/proto/streams/buffer.rs +++ b/src/proto/streams/buffer.rs @@ -29,6 +29,10 @@ impl Buffer { pub fn new() -> Self { Buffer { slab: Slab::new() } } + + pub fn is_empty(&self) -> bool { + self.slab.is_empty() + } } impl Deque { diff --git a/src/proto/streams/streams.rs b/src/proto/streams/streams.rs index f4b12c7bb..fa8e6843b 100644 --- a/src/proto/streams/streams.rs +++ b/src/proto/streams/streams.rs @@ -323,6 +323,14 @@ where } impl DynStreams<'_, B> { + pub fn is_buffer_empty(&self) -> bool { + self.send_buffer.is_empty() + } + + pub fn is_server(&self) -> bool { + self.peer.is_server() + } + pub fn recv_headers(&mut self, frame: frame::Headers) -> Result<(), Error> { let mut me = self.inner.lock().unwrap(); @@ -1509,6 +1517,11 @@ impl SendBuffer { let inner = Mutex::new(Buffer::new()); SendBuffer { inner } } + + pub fn is_empty(&self) -> bool { + let buf = self.inner.lock().unwrap(); + buf.is_empty() + } } // ===== impl Actions ===== diff --git a/tests/h2-tests/tests/client_request.rs b/tests/h2-tests/tests/client_request.rs index 3d285ce2c..7bd223e3c 100644 --- a/tests/h2-tests/tests/client_request.rs +++ b/tests/h2-tests/tests/client_request.rs @@ -1773,6 +1773,56 @@ async fn receive_settings_frame_twice_with_second_one_empty() { join(srv, h2).await; } +#[tokio::test] +async fn receive_settings_frame_twice_with_second_one_non_empty() { + h2_support::trace_init!(); + let (io, mut srv) = mock::new(); + + let srv = async move { + // Send the initial SETTINGS frame with MAX_CONCURRENT_STREAMS set to 42 + srv.send_frame(frames::settings().max_concurrent_streams(42)) + .await; + + // Handle the client's connection preface + srv.read_preface().await.unwrap(); + match srv.next().await { + Some(frame) => match frame.unwrap() { + h2::frame::Frame::Settings(_) => { + let ack = frame::Settings::ack(); + srv.send(ack.into()).await.unwrap(); + } + frame => { + panic!("unexpected frame: {:?}", frame); + } + }, + None => { + panic!("unexpected EOF"); + } + } + + // Should receive the ack for the server's initial SETTINGS frame + let frame = assert_settings!(srv.next().await.unwrap().unwrap()); + assert!(frame.is_ack()); + + // Send another SETTINGS frame with no MAX_CONCURRENT_STREAMS + // This should not update the max_concurrent_send_streams value that + // the client manages. 
+ srv.send_frame(frames::settings().max_concurrent_streams(2024)) + .await; + }; + + let h2 = async move { + let (_client, h2) = client::handshake(io).await.unwrap(); + let mut h2 = std::pin::pin!(h2); + assert_eq!(h2.max_concurrent_send_streams(), usize::MAX); + h2.as_mut().await.unwrap(); + // The most-recently advertised value should be used + assert_eq!(h2.max_concurrent_send_streams(), 2024); + }; + + join(srv, h2).await; +} + #[tokio::test] async fn server_drop_connection_unexpectedly_return_unexpected_eof_err() { h2_support::trace_init!(); From 0077d3dcb4266c18d2b569f0257caff24778335b Mon Sep 17 00:00:00 2001 From: dswij Date: Fri, 2 Feb 2024 21:38:20 +0800 Subject: [PATCH 170/178] fix: stream flow control insufficient size before ack (#746) * fix: stream flow control insufficient size before ack * test: client sending data before initial settings ack --- src/proto/connection.rs | 4 --- src/proto/streams/mod.rs | 3 --- src/proto/streams/recv.rs | 2 +- tests/h2-tests/tests/server.rs | 45 ++++++++++++++++++++++++++++++++++ 4 files changed, 46 insertions(+), 8 deletions(-) diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 8627375ae..6b970c1a3 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -106,10 +106,6 @@ where pub fn new(codec: Codec>, config: Config) -> Connection { fn streams_config(config: &Config) -> streams::Config { streams::Config { - local_init_window_sz: config - .settings - .initial_window_size() - .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), initial_max_send_streams: config.initial_max_send_streams, local_max_buffer_size: config.max_send_buffer_size, local_next_stream_id: config.next_stream_id, diff --git a/src/proto/streams/mod.rs b/src/proto/streams/mod.rs index b347442af..c4a832342 100644 --- a/src/proto/streams/mod.rs +++ b/src/proto/streams/mod.rs @@ -33,9 +33,6 @@ use std::time::Duration; #[derive(Debug)] pub struct Config { - /// Initial window size of locally initiated streams - pub local_init_window_sz: WindowSize, - /// Initial maximum number of locally initiated streams. 
/// After receiving a Settings frame from the remote peer, /// the connection will overwrite this value with the diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 0063942a4..76d197221 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -93,7 +93,7 @@ impl Recv { flow.assign_capacity(DEFAULT_INITIAL_WINDOW_SIZE).unwrap(); Recv { - init_window_sz: config.local_init_window_sz, + init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, flow, in_flight_data: 0 as WindowSize, next_stream_id: Ok(next_stream_id.into()), diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 831f18823..4dcb556ed 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1453,3 +1453,48 @@ async fn client_drop_connection_without_close_notify() { join(client, h2).await; } + +#[tokio::test] +async fn init_window_size_smaller_than_default_should_use_default_before_ack() { + h2_support::trace_init!(); + + let (io, mut client) = mock::new(); + let client = async move { + // Client can send in some data before ACK; + // Server needs to make sure the Recv stream has default window size + // as per https://datatracker.ietf.org/doc/html/rfc9113#name-initial-flow-control-window + client.write_preface().await; + client + .send(frame::Settings::default().into()) + .await + .unwrap(); + client.next().await.expect("unexpected EOF").unwrap(); + client + .send_frame(frames::headers(1).request("GET", "https://example.com/")) + .await; + client.send_frame(frames::data(1, &b"hello"[..])).await; + client.send(frame::Settings::ack().into()).await.unwrap(); + client.next().await; + client + .recv_frame(frames::headers(1).response(200).eos()) + .await; + }; + + let mut builder = server::Builder::new(); + builder.max_concurrent_streams(1); + builder.initial_window_size(1); + let h2 = async move { + let mut srv = builder.handshake::<_, Bytes>(io).await.expect("handshake"); + let (req, mut stream) = srv.next().await.unwrap().unwrap(); + + assert_eq!(req.method(), &http::Method::GET); + + let rsp = http::Response::builder().status(200).body(()).unwrap(); + stream.send_response(rsp, true).unwrap(); + + // Drive the state forward + let _ = poll_fn(|cx| srv.poll_closed(cx)).await.unwrap(); + }; + + join(client, h2).await; +} From f87eca063af0011d4645d1ed25deff335389f25e Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 22 Feb 2024 08:58:26 -0500 Subject: [PATCH 171/178] refactor: cleanup new unused warnings (#749) --- .github/workflows/CI.yml | 12 ++++++------ src/client.rs | 2 ++ src/frame/headers.rs | 2 -- src/hpack/decoder.rs | 1 - src/hpack/encoder.rs | 1 - src/proto/connection.rs | 6 +++--- src/proto/peer.rs | 4 +++- src/proto/settings.rs | 1 - src/proto/streams/prioritize.rs | 4 ++-- src/proto/streams/recv.rs | 6 +++--- src/server.rs | 2 ++ tests/h2-support/src/frames.rs | 2 +- tests/h2-support/src/mock.rs | 2 +- tests/h2-support/src/util.rs | 2 -- tests/h2-tests/tests/hammer.rs | 1 - tests/h2-tests/tests/server.rs | 2 +- tests/h2-tests/tests/stream_states.rs | 2 +- 17 files changed, 25 insertions(+), 27 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 14e4a3149..a23753531 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -61,12 +61,12 @@ jobs: run: ./ci/h2spec.sh if: matrix.rust == 'stable' - clippy_check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Run Clippy - run: cargo clippy --all-targets --all-features + #clippy_check: + # runs-on: ubuntu-latest + # steps: 
+ # - uses: actions/checkout@v4 + # - name: Run Clippy + # run: cargo clippy --all-targets --all-features msrv: name: Check MSRV diff --git a/src/client.rs b/src/client.rs index 0dbc5fc97..25b151f53 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1638,9 +1638,11 @@ impl proto::Peer for Peer { proto::DynPeer::Client } + /* fn is_server() -> bool { false } + */ fn convert_poll_message( pseudo: Pseudo, diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 9d5c8cefe..1e6bc9b74 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -974,8 +974,6 @@ fn decoded_header_size(name: usize, value: usize) -> usize { mod test { use std::iter::FromIterator; - use http::HeaderValue; - use super::*; use crate::frame; use crate::hpack::{huffman, Encoder}; diff --git a/src/hpack/decoder.rs b/src/hpack/decoder.rs index 960cbb143..e48976c36 100644 --- a/src/hpack/decoder.rs +++ b/src/hpack/decoder.rs @@ -829,7 +829,6 @@ pub fn get_static(idx: usize) -> Header { #[cfg(test)] mod test { use super::*; - use crate::hpack::Header; #[test] fn test_peek_u8() { diff --git a/src/hpack/encoder.rs b/src/hpack/encoder.rs index d121a2aaf..bd49056f6 100644 --- a/src/hpack/encoder.rs +++ b/src/hpack/encoder.rs @@ -299,7 +299,6 @@ fn position(buf: &BytesMut) -> usize { #[cfg(test)] mod test { use super::*; - use crate::hpack::Header; use http::*; #[test] diff --git a/src/proto/connection.rs b/src/proto/connection.rs index 6b970c1a3..5969bb841 100644 --- a/src/proto/connection.rs +++ b/src/proto/connection.rs @@ -1,18 +1,18 @@ use crate::codec::UserError; use crate::frame::{Reason, StreamId}; -use crate::{client, frame, server}; +use crate::{client, server}; use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; use crate::proto::*; -use bytes::{Buf, Bytes}; +use bytes::Bytes; use futures_core::Stream; use std::io; use std::marker::PhantomData; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; -use tokio::io::{AsyncRead, AsyncWrite}; +use tokio::io::AsyncRead; /// An H2 connection #[derive(Debug)] diff --git a/src/proto/peer.rs b/src/proto/peer.rs index d62d9e24e..cbe7fb289 100644 --- a/src/proto/peer.rs +++ b/src/proto/peer.rs @@ -14,7 +14,7 @@ pub(crate) trait Peer { fn r#dyn() -> Dyn; - fn is_server() -> bool; + //fn is_server() -> bool; fn convert_poll_message( pseudo: Pseudo, @@ -22,10 +22,12 @@ pub(crate) trait Peer { stream_id: StreamId, ) -> Result; + /* fn is_local_init(id: StreamId) -> bool { assert!(!id.is_zero()); Self::is_server() == id.is_server_initiated() } + */ } /// A dynamic representation of `Peer`. 
diff --git a/src/proto/settings.rs b/src/proto/settings.rs index 72ba11fa0..d6155fc3d 100644 --- a/src/proto/settings.rs +++ b/src/proto/settings.rs @@ -1,6 +1,5 @@ use crate::codec::UserError; use crate::error::Reason; -use crate::frame; use crate::proto::*; use std::task::{Context, Poll}; diff --git a/src/proto/streams/prioritize.rs b/src/proto/streams/prioritize.rs index 999bb0759..14b37e223 100644 --- a/src/proto/streams/prioritize.rs +++ b/src/proto/streams/prioritize.rs @@ -1,12 +1,12 @@ use super::store::Resolve; use super::*; -use crate::frame::{Reason, StreamId}; +use crate::frame::Reason; use crate::codec::UserError; use crate::codec::UserError::*; -use bytes::buf::{Buf, Take}; +use bytes::buf::Take; use std::{ cmp::{self, Ordering}, fmt, io, mem, diff --git a/src/proto/streams/recv.rs b/src/proto/streams/recv.rs index 76d197221..46cb87cd0 100644 --- a/src/proto/streams/recv.rs +++ b/src/proto/streams/recv.rs @@ -1,14 +1,14 @@ use super::*; use crate::codec::UserError; -use crate::frame::{self, PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; -use crate::proto::{self, Error}; +use crate::frame::{PushPromiseHeaderError, Reason, DEFAULT_INITIAL_WINDOW_SIZE}; +use crate::proto; use http::{HeaderMap, Request, Response}; use std::cmp::Ordering; use std::io; use std::task::{Context, Poll, Waker}; -use std::time::{Duration, Instant}; +use std::time::Instant; #[derive(Debug)] pub(super) struct Recv { diff --git a/src/server.rs b/src/server.rs index d809c0e85..4f8722269 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1495,9 +1495,11 @@ impl proto::Peer for Peer { const NAME: &'static str = "Server"; + /* fn is_server() -> bool { true } + */ fn r#dyn() -> proto::DynPeer { proto::DynPeer::Server diff --git a/tests/h2-support/src/frames.rs b/tests/h2-support/src/frames.rs index a76dd3b60..858bf770b 100644 --- a/tests/h2-support/src/frames.rs +++ b/tests/h2-support/src/frames.rs @@ -2,7 +2,7 @@ use std::convert::TryInto; use std::fmt; use bytes::Bytes; -use http::{self, HeaderMap, StatusCode}; +use http::{HeaderMap, StatusCode}; use h2::{ ext::Protocol, diff --git a/tests/h2-support/src/mock.rs b/tests/h2-support/src/mock.rs index 60539d0a0..9ec5ba379 100644 --- a/tests/h2-support/src/mock.rs +++ b/tests/h2-support/src/mock.rs @@ -2,7 +2,7 @@ use crate::SendFrame; use h2::frame::{self, Frame}; use h2::proto::Error; -use h2::{self, SendError}; +use h2::SendError; use futures::future::poll_fn; use futures::{ready, Stream, StreamExt}; diff --git a/tests/h2-support/src/util.rs b/tests/h2-support/src/util.rs index aa7fb2c54..02b6450d0 100644 --- a/tests/h2-support/src/util.rs +++ b/tests/h2-support/src/util.rs @@ -1,5 +1,3 @@ -use h2; - use bytes::{BufMut, Bytes}; use futures::ready; use std::future::Future; diff --git a/tests/h2-tests/tests/hammer.rs b/tests/h2-tests/tests/hammer.rs index a5cba3dfa..4b5d04341 100644 --- a/tests/h2-tests/tests/hammer.rs +++ b/tests/h2-tests/tests/hammer.rs @@ -8,7 +8,6 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, - thread, }; use tokio::net::{TcpListener, TcpStream}; diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 4dcb556ed..7990c86a9 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -1,6 +1,6 @@ #![deny(warnings)] -use futures::future::{join, poll_fn}; +use futures::future::join; use futures::StreamExt; use h2_support::prelude::*; use tokio::io::AsyncWriteExt; diff --git a/tests/h2-tests/tests/stream_states.rs b/tests/h2-tests/tests/stream_states.rs index 
16d113132..05a96a0f5 100644 --- a/tests/h2-tests/tests/stream_states.rs +++ b/tests/h2-tests/tests/stream_states.rs @@ -1,6 +1,6 @@ #![deny(warnings)] -use futures::future::{join, join3, lazy, poll_fn, try_join}; +use futures::future::{join, join3, lazy, try_join}; use futures::{FutureExt, StreamExt, TryStreamExt}; use h2_support::prelude::*; use h2_support::util::yield_once; From e5db9a309a34e97c3edbfdb3d23d3afa256696c0 Mon Sep 17 00:00:00 2001 From: Simon Date: Thu, 22 Feb 2024 20:47:11 +0100 Subject: [PATCH 172/178] chore: set rust `edition` to `2021` (#751) --- Cargo.toml | 2 +- examples/akamai.rs | 1 - src/frame/headers.rs | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cbb4bafd6..50098342e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,7 +16,7 @@ readme = "README.md" keywords = ["http", "async", "non-blocking"] categories = ["asynchronous", "web-programming", "network-programming"] exclude = ["fixtures/**", "ci/**"] -edition = "2018" +edition = "2021" rust-version = "1.63" [features] diff --git a/examples/akamai.rs b/examples/akamai.rs index 788bf3005..8d87b778e 100644 --- a/examples/akamai.rs +++ b/examples/akamai.rs @@ -5,7 +5,6 @@ use tokio_rustls::TlsConnector; use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName}; -use std::convert::TryFrom; use std::error::Error; use std::net::ToSocketAddrs; diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 1e6bc9b74..7f4ab8477 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -972,8 +972,6 @@ fn decoded_header_size(name: usize, value: usize) -> usize { #[cfg(test)] mod test { - use std::iter::FromIterator; - use super::*; use crate::frame; use crate::hpack::{huffman, Encoder}; From a8af2358647cd2a68f851dfe3228cc3116654223 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Thu, 22 Feb 2024 16:11:07 -0500 Subject: [PATCH 173/178] perf: optimize header list size calculations (#752) This speeds up loading blocks in cases where we have many headers already. --- src/frame/headers.rs | 23 ++++++++++++++++++----- src/proto/streams/store.rs | 1 + 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/frame/headers.rs b/src/frame/headers.rs index 7f4ab8477..e9b163e56 100644 --- a/src/frame/headers.rs +++ b/src/frame/headers.rs @@ -12,6 +12,7 @@ use std::fmt; use std::io::Cursor; type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>; + /// Header frame /// /// This could be either a request or a response. @@ -87,6 +88,9 @@ struct HeaderBlock { /// The decoded header fields fields: HeaderMap, + /// Precomputed size of all of our header fields, for perf reasons + field_size: usize, + /// Set to true if decoding went over the max header list size. 
is_over_size: bool, @@ -115,6 +119,7 @@ impl Headers { stream_id, stream_dep: None, header_block: HeaderBlock { + field_size: calculate_headermap_size(&fields), fields, is_over_size: false, pseudo, @@ -131,6 +136,7 @@ impl Headers { stream_id, stream_dep: None, header_block: HeaderBlock { + field_size: calculate_headermap_size(&fields), fields, is_over_size: false, pseudo: Pseudo::default(), @@ -196,6 +202,7 @@ impl Headers { stream_dep, header_block: HeaderBlock { fields: HeaderMap::new(), + field_size: 0, is_over_size: false, pseudo: Pseudo::default(), }, @@ -350,6 +357,7 @@ impl PushPromise { PushPromise { flags: PushPromiseFlag::default(), header_block: HeaderBlock { + field_size: calculate_headermap_size(&fields), fields, is_over_size: false, pseudo, @@ -441,6 +449,7 @@ impl PushPromise { flags, header_block: HeaderBlock { fields: HeaderMap::new(), + field_size: 0, is_over_size: false, pseudo: Pseudo::default(), }, @@ -892,6 +901,8 @@ impl HeaderBlock { headers_size += decoded_header_size(name.as_str().len(), value.len()); if headers_size < max_header_list_size { + self.field_size += + decoded_header_size(name.as_str().len(), value.len()); self.fields.append(name, value); } else if !self.is_over_size { tracing::trace!("load_hpack; header list size over max"); @@ -958,14 +969,16 @@ impl HeaderBlock { + pseudo_size!(status) + pseudo_size!(authority) + pseudo_size!(path) - + self - .fields - .iter() - .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) - .sum::() + + self.field_size } } +fn calculate_headermap_size(map: &HeaderMap) -> usize { + map.iter() + .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) + .sum::() +} + fn decoded_header_size(name: usize, value: usize) -> usize { name + value + 32 } diff --git a/src/proto/streams/store.rs b/src/proto/streams/store.rs index 67b377b12..35fd6f25e 100644 --- a/src/proto/streams/store.rs +++ b/src/proto/streams/store.rs @@ -127,6 +127,7 @@ impl Store { } } + #[allow(clippy::blocks_in_conditions)] pub(crate) fn for_each(&mut self, mut f: F) where F: FnMut(Ptr), From 07fc8245ff8b58f581e0d3c5035b54eb14b69192 Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Fri, 15 Mar 2024 11:21:33 -0400 Subject: [PATCH 174/178] v0.4.3 --- CHANGELOG.md | 6 ++++++ Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 307021dcb..07bf533cd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,9 @@ +# 0.4.3 (March 15, 2024) + +* Fix flow control limits to not apply until receiving SETTINGS ack. +* Fix not returning an error if IO ended without `close_notify`. +* Improve performance of decoding many headers. + # 0.4.2 (January 17th, 2024) * Limit error resets for misbehaving connections. diff --git a/Cargo.toml b/Cargo.toml index 50098342e..3e4b7fe6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "h2" # When releasing to crates.io: # - Update CHANGELOG.md. # - Create git tag -version = "0.4.2" +version = "0.4.3" license = "MIT" authors = [ "Carl Lerche ", From e76bd740334795a1ebb32dd38756bbedfda09b9f Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 3 Apr 2024 13:32:18 -0400 Subject: [PATCH 175/178] fix: limit number of CONTINUATION frames allowed Calculate the amount of allowed CONTINUATION frames based on other settings. max_header_list_size / max_frame_size That is about how many CONTINUATION frames would be needed to send headers up to the max allowed size. 
We then multiply by that by a small amount, to allow for implementations that don't perfectly pack into the minimum frames *needed*. In practice, *much* more than that would be a very inefficient peer, or a peer trying to waste resources. See https://seanmonstar.com/blog/hyper-http2-continuation-flood/ for more info. --- src/codec/framed_read.rs | 53 +++++++++++++++++++++++++++++++--- tests/h2-tests/tests/server.rs | 49 +++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 4 deletions(-) diff --git a/src/codec/framed_read.rs b/src/codec/framed_read.rs index 3b0030d93..9270a8635 100644 --- a/src/codec/framed_read.rs +++ b/src/codec/framed_read.rs @@ -30,6 +30,8 @@ pub struct FramedRead { max_header_list_size: usize, + max_continuation_frames: usize, + partial: Option, } @@ -41,6 +43,8 @@ struct Partial { /// Partial header payload buf: BytesMut, + + continuation_frames_count: usize, } #[derive(Debug)] @@ -51,10 +55,14 @@ enum Continuable { impl FramedRead { pub fn new(inner: InnerFramedRead) -> FramedRead { + let max_header_list_size = DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE; + let max_continuation_frames = + calc_max_continuation_frames(max_header_list_size, inner.decoder().max_frame_length()); FramedRead { inner, hpack: hpack::Decoder::new(DEFAULT_SETTINGS_HEADER_TABLE_SIZE), - max_header_list_size: DEFAULT_SETTINGS_MAX_HEADER_LIST_SIZE, + max_header_list_size, + max_continuation_frames, partial: None, } } @@ -68,7 +76,6 @@ impl FramedRead { } /// Returns the current max frame size setting - #[cfg(feature = "unstable")] #[inline] pub fn max_frame_size(&self) -> usize { self.inner.decoder().max_frame_length() @@ -80,13 +87,17 @@ impl FramedRead { #[inline] pub fn set_max_frame_size(&mut self, val: usize) { assert!(DEFAULT_MAX_FRAME_SIZE as usize <= val && val <= MAX_MAX_FRAME_SIZE as usize); - self.inner.decoder_mut().set_max_frame_length(val) + self.inner.decoder_mut().set_max_frame_length(val); + // Update max CONTINUATION frames too, since its based on this + self.max_continuation_frames = calc_max_continuation_frames(self.max_header_list_size, val); } /// Update the max header list size setting. #[inline] pub fn set_max_header_list_size(&mut self, val: usize) { self.max_header_list_size = val; + // Update max CONTINUATION frames too, since its based on this + self.max_continuation_frames = calc_max_continuation_frames(val, self.max_frame_size()); } /// Update the header table size setting. @@ -96,12 +107,22 @@ impl FramedRead { } } +fn calc_max_continuation_frames(header_max: usize, frame_max: usize) -> usize { + // At least this many frames needed to use max header list size + let min_frames_for_list = (header_max / frame_max).max(1); + // Some padding for imperfectly packed frames + // 25% without floats + let padding = min_frames_for_list >> 2; + min_frames_for_list.saturating_add(padding).max(5) +} + /// Decodes a frame. /// /// This method is intentionally de-generified and outlined because it is very large. 
fn decode_frame( hpack: &mut hpack::Decoder, max_header_list_size: usize, + max_continuation_frames: usize, partial_inout: &mut Option, mut bytes: BytesMut, ) -> Result, Error> { @@ -169,6 +190,7 @@ fn decode_frame( *partial_inout = Some(Partial { frame: Continuable::$frame(frame), buf: payload, + continuation_frames_count: 0, }); return Ok(None); @@ -273,6 +295,22 @@ fn decode_frame( return Err(Error::library_go_away(Reason::PROTOCOL_ERROR)); } + // Check for CONTINUATION flood + if is_end_headers { + partial.continuation_frames_count = 0; + } else { + let cnt = partial.continuation_frames_count + 1; + if cnt > max_continuation_frames { + tracing::debug!("too_many_continuations, max = {}", max_continuation_frames); + return Err(Error::library_go_away_data( + Reason::ENHANCE_YOUR_CALM, + "too_many_continuations", + )); + } else { + partial.continuation_frames_count = cnt; + } + } + // Extend the buf if partial.buf.is_empty() { partial.buf = bytes.split_off(frame::HEADER_LEN); @@ -354,9 +392,16 @@ where ref mut hpack, max_header_list_size, ref mut partial, + max_continuation_frames, .. } = *self; - if let Some(frame) = decode_frame(hpack, max_header_list_size, partial, bytes)? { + if let Some(frame) = decode_frame( + hpack, + max_header_list_size, + max_continuation_frames, + partial, + bytes, + )? { tracing::debug!(?frame, "received"); return Poll::Ready(Some(Ok(frame))); } diff --git a/tests/h2-tests/tests/server.rs b/tests/h2-tests/tests/server.rs index 7990c86a9..a4b983a0a 100644 --- a/tests/h2-tests/tests/server.rs +++ b/tests/h2-tests/tests/server.rs @@ -883,6 +883,55 @@ async fn too_big_headers_sends_reset_after_431_if_not_eos() { join(client, srv).await; } +#[tokio::test] +async fn too_many_continuation_frames_sends_goaway() { + h2_support::trace_init!(); + let (io, mut client) = mock::new(); + + let client = async move { + let settings = client.assert_server_handshake().await; + assert_frame_eq(settings, frames::settings().max_header_list_size(1024 * 32)); + + // the mock impl automatically splits into CONTINUATION frames if the + // headers are too big for one frame. So without a max header list size + // set, we'll send a bunch of headers that will eventually get nuked. 
+ client + .send_frame( + frames::headers(1) + .request("GET", "https://example.com/") + .field("a".repeat(10_000), "b".repeat(10_000)) + .field("c".repeat(10_000), "d".repeat(10_000)) + .field("e".repeat(10_000), "f".repeat(10_000)) + .field("g".repeat(10_000), "h".repeat(10_000)) + .field("i".repeat(10_000), "j".repeat(10_000)) + .field("k".repeat(10_000), "l".repeat(10_000)) + .field("m".repeat(10_000), "n".repeat(10_000)) + .field("o".repeat(10_000), "p".repeat(10_000)) + .field("y".repeat(10_000), "z".repeat(10_000)), + ) + .await; + client + .recv_frame(frames::go_away(0).calm().data("too_many_continuations")) + .await; + }; + + let srv = async move { + let mut srv = server::Builder::new() + // should mean ~3 continuation + .max_header_list_size(1024 * 32) + .handshake::<_, Bytes>(io) + .await + .expect("handshake"); + + let err = srv.next().await.unwrap().expect_err("server"); + assert!(err.is_go_away()); + assert!(err.is_library()); + assert_eq!(err.reason(), Some(Reason::ENHANCE_YOUR_CALM)); + }; + + join(client, srv).await; +} + #[tokio::test] async fn pending_accept_recv_illegal_content_length_data() { h2_support::trace_init!(); From 51fe05acbdb61645a45e13a91b99d39a7fedda1f Mon Sep 17 00:00:00 2001 From: Sean McArthur Date: Wed, 3 Apr 2024 14:01:07 -0400 Subject: [PATCH 176/178] v0.4.4 --- CHANGELOG.md | 4 ++++ Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07bf533cd..3b9663dbf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,7 @@ +# 0.4.4 (April 3, 2024) + +* Limit number of CONTINUATION frames for misbehaving connections. + # 0.4.3 (March 15, 2024) * Fix flow control limits to not apply until receiving SETTINGS ack. diff --git a/Cargo.toml b/Cargo.toml index 3e4b7fe6d..452d2e813 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,7 +3,7 @@ name = "h2" # When releasing to crates.io: # - Update CHANGELOG.md. # - Create git tag -version = "0.4.3" +version = "0.4.4" license = "MIT" authors = [ "Carl Lerche ", From e2168def328e20553f18dc9bd6108dd83f340010 Mon Sep 17 00:00:00 2001 From: Trung Dinh Date: Tue, 9 Apr 2024 08:40:59 -0700 Subject: [PATCH 177/178] chore: add simple h2 benchmark (#762) This PR adds a simple benchmark to measure perf improvement changes. E.g., a potential fix for this issue: https://github.com/hyperium/h2/issues/531 The benchmark is simple: have a client send `100_000` requests to a server and wait for a response. Output: ``` cargo bench H2 running in current-thread runtime at 127.0.0.1:5928: Overall: 353ms. Fastest: 91ms Slowest: 315ms Avg : 249ms H2 running in multi-thread runtime at 127.0.0.1:5929: Overall: 533ms. Fastest: 88ms Slowest: 511ms Avg : 456ms ``` --- Cargo.toml | 4 ++ benches/main.rs | 148 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 benches/main.rs diff --git a/Cargo.toml b/Cargo.toml index 452d2e813..c76b9ecf9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,3 +71,7 @@ webpki-roots = "0.25" [package.metadata.docs.rs] features = ["stream"] + +[[bench]] +name = "main" +harness = false diff --git a/benches/main.rs b/benches/main.rs new file mode 100644 index 000000000..b1e64edf4 --- /dev/null +++ b/benches/main.rs @@ -0,0 +1,148 @@ +use bytes::Bytes; +use h2::{ + client, + server::{self, SendResponse}, + RecvStream, +}; +use http::Request; + +use std::{ + error::Error, + time::{Duration, Instant}, +}; + +use tokio::net::{TcpListener, TcpStream}; + +const NUM_REQUESTS_TO_SEND: usize = 100_000; + +// The actual server. 
+async fn server(addr: &str) -> Result<(), Box> { + let listener = TcpListener::bind(addr).await?; + + loop { + if let Ok((socket, _peer_addr)) = listener.accept().await { + tokio::spawn(async move { + if let Err(e) = serve(socket).await { + println!(" -> err={:?}", e); + } + }); + } + } +} + +async fn serve(socket: TcpStream) -> Result<(), Box> { + let mut connection = server::handshake(socket).await?; + while let Some(result) = connection.accept().await { + let (request, respond) = result?; + tokio::spawn(async move { + if let Err(e) = handle_request(request, respond).await { + println!("error while handling request: {}", e); + } + }); + } + Ok(()) +} + +async fn handle_request( + mut request: Request, + mut respond: SendResponse, +) -> Result<(), Box> { + let body = request.body_mut(); + while let Some(data) = body.data().await { + let data = data?; + let _ = body.flow_control().release_capacity(data.len()); + } + let response = http::Response::new(()); + let mut send = respond.send_response(response, false)?; + send.send_data(Bytes::from_static(b"pong"), true)?; + + Ok(()) +} + +// The benchmark +async fn send_requests(addr: &str) -> Result<(), Box> { + let tcp = loop { + let Ok(tcp) = TcpStream::connect(addr).await else { + continue; + }; + break tcp; + }; + let (client, h2) = client::handshake(tcp).await?; + // Spawn a task to run the conn... + tokio::spawn(async move { + if let Err(e) = h2.await { + println!("GOT ERR={:?}", e); + } + }); + + let mut handles = Vec::with_capacity(NUM_REQUESTS_TO_SEND); + for _i in 0..NUM_REQUESTS_TO_SEND { + let mut client = client.clone(); + let task = tokio::spawn(async move { + let request = Request::builder().body(()).unwrap(); + + let instant = Instant::now(); + let (response, _) = client.send_request(request, true).unwrap(); + let response = response.await.unwrap(); + let mut body = response.into_body(); + while let Some(_chunk) = body.data().await {} + instant.elapsed() + }); + handles.push(task); + } + + let instant = Instant::now(); + let mut result = Vec::with_capacity(NUM_REQUESTS_TO_SEND); + for handle in handles { + result.push(handle.await.unwrap()); + } + let mut sum = Duration::new(0, 0); + for r in result.iter() { + sum = sum.checked_add(*r).unwrap(); + } + + println!("Overall: {}ms.", instant.elapsed().as_millis()); + println!("Fastest: {}ms", result.iter().min().unwrap().as_millis()); + println!("Slowest: {}ms", result.iter().max().unwrap().as_millis()); + println!( + "Avg : {}ms", + sum.div_f64(NUM_REQUESTS_TO_SEND as f64).as_millis() + ); + Ok(()) +} + +fn main() { + let _ = env_logger::try_init(); + let addr = "127.0.0.1:5928"; + println!("H2 running in current-thread runtime at {addr}:"); + std::thread::spawn(|| { + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(server(addr)).unwrap(); + }); + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(send_requests(addr)).unwrap(); + + let addr = "127.0.0.1:5929"; + println!("H2 running in multi-thread runtime at {addr}:"); + std::thread::spawn(|| { + let rt = tokio::runtime::Builder::new_multi_thread() + .worker_threads(4) + .enable_all() + .build() + .unwrap(); + rt.block_on(server(addr)).unwrap(); + }); + + let rt = tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .unwrap(); + rt.block_on(send_requests(addr)).unwrap(); +} From 0d66e3cba2da9925dc3f277b9c71b96356789a76 Mon Sep 17 00:00:00 2001 From: Yury Yarashevich Date: Wed, 24 Apr 2024 
03:23:56 +0200 Subject: [PATCH 178/178] readme: Added link to Tokio Discord. (#771) Gitter chat has a notification that community discussions were moved to Discord. The community discussion link has been updated to point directly to Discord. --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8af0abcc7..4b69dc699 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,7 +5,7 @@ ## Getting Help ## If you have a question about the h2 library or have encountered problems using it, you may -[file an issue][issue] or ask a question on the [Tokio Gitter][gitter]. +[file an issue][issue] or ask a question on the [Tokio Discord][discord]. ## Submitting a Pull Request ## @@ -81,4 +81,4 @@ Describe the testing you've done to validate your change. Performance-related changes should include before- and after- benchmark results. [issue]: https://github.com/hyperium/h2/issues/new -[gitter]: https://gitter.im/tokio-rs/tokio +[discord]: https://discord.gg/tokio
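As a quick sanity check of the CONTINUATION limit introduced in [PATCH 175/178], the calculation can be reproduced outside the crate. The sketch below mirrors the `calc_max_continuation_frames` logic shown in that patch; the free-standing function name and the `main` harness are illustrative only, and the 16 KiB frame size is simply the HTTP/2 default `SETTINGS_MAX_FRAME_SIZE` that the server test above relies on.

```rust
// Illustrative copy of the limit calculation from PATCH 175/178; not part of
// the h2 public API.
fn max_continuation_frames(max_header_list_size: usize, max_frame_size: usize) -> usize {
    // At least this many frames are needed to carry a header list of the maximum size.
    let min_frames_for_list = (max_header_list_size / max_frame_size).max(1);
    // Add ~25% slack (integer shift, no floats) for peers that pack frames imperfectly.
    let padding = min_frames_for_list >> 2;
    // Never allow fewer than 5 frames.
    min_frames_for_list.saturating_add(padding).max(5)
}

fn main() {
    // The server test advertises a 32 KiB max header list; with 16 KiB frames the
    // minimum is 2 frames, so the floor of 5 applies.
    assert_eq!(max_continuation_frames(32 * 1024, 16 * 1024), 5);
    // A 1 MiB header list over 16 KiB frames needs 64 frames; with 25% padding
    // the limit becomes 80.
    assert_eq!(max_continuation_frames(1024 * 1024, 16 * 1024), 80);
    println!("limit for 32 KiB header list: {}", max_continuation_frames(32 * 1024, 16 * 1024));
}
```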