From 2db74f37862aabcd91c833067b3157b064d0990e Mon Sep 17 00:00:00 2001 From: Erick Tryzelaar Date: Fri, 20 Feb 2026 23:40:45 +0000 Subject: [PATCH 1/2] Up minimum socket2 to 0.6.0 Now that hyper-util uses `socket2::Socket::set_tcp_nodelay`, which was added in [0.6.0]. It used to be called `socket2::Socket::set_nodelay` in [0.5.10]. It also updates the MSRV to Rust 1.70 since that's the minimum supported version in socket2 0.6.0. [0.5.10]: https://docs.rs/socket2/0.5.10/socket2/struct.Socket.html#method.set_nodelay [0.6.0]: https://docs.rs/socket2/0.6.0/socket2/struct.Socket.html#method.set_tcp_nodelay --- .github/workflows/CI.yml | 2 +- Cargo.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index c69589c6..459b4d59 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -60,7 +60,7 @@ jobs: needs: [style] strategy: matrix: - rust: [ 1.64 ] # keep in sync with 'rust-version' in Cargo.toml + rust: [ 1.70 ] # keep in sync with 'rust-version' in Cargo.toml os: - ubuntu-latest - windows-latest diff --git a/Cargo.toml b/Cargo.toml index f1a8cd7b..8f15ba74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ authors = ["Sean McArthur "] keywords = ["http", "hyper", "hyperium"] categories = ["network-programming", "web-programming::http-client", "web-programming::http-server"] edition = "2021" -rust-version = "1.64" +rust-version = "1.70" [package.metadata.docs.rs] features = ["full"] @@ -29,7 +29,7 @@ ipnet = { version = "2.9", optional = true } libc = { version = "0.2", optional = true } percent-encoding = { version = "2.3", optional = true } pin-project-lite = "0.2.4" -socket2 = { version = ">=0.5.9, <0.7", optional = true, features = ["all"] } +socket2 = { version = ">=0.6.0, <0.7", optional = true, features = ["all"] } tracing = { version = "0.1", default-features = false, features = ["std"], optional = true } tokio = { version = "1", optional = true, default-features 
= false } tower-layer = { version = "0.3", optional = true } From 22c3809f582c188b55fee2daa482356b2f00d886 Mon Sep 17 00:00:00 2001 From: Erick Tryzelaar Date: Thu, 19 Feb 2026 22:31:51 +0000 Subject: [PATCH 2/2] Refactor legacy client to be independent of tokio This patch decouples the functionality of `client::legacy` from tokio, by lifting up a `Client`, `HttpConnection`, and `Resolver` into the `client` module. For backwards compatibility, it reimplements `client::legacy` to be based off of the new code. As best as I can tell it should be backwards compatible, and passes all the tests. There's a couple of areas where I'm not sure if we got quite the right API: * Capturing connection info - This adds a helper closure you can call to get it, which is then used by the legacy client to send the info to the tokio watch. * This extracts the pool info, but there's been a long standing plan to replace it with something better. I'm not sure what that would look like though. * I set up the legacy Client and HttpConnector to wrap the new generic APIs. That adds a bit of code duplication, but as far as I can tell we can't use a type alias due to the `new` method. 
--- Cargo.toml | 11 +- src/client/client.rs | 1664 ++++++++++++++++++++++++++ src/client/connect/config.rs | 182 +++ src/client/connect/dns/mod.rs | 204 ++++ src/client/connect/dns/tokio.rs | 111 ++ src/client/connect/http/mod.rs | 408 +++++++ src/client/connect/http/tokio.rs | 240 ++++ src/client/connect/mod.rs | 406 +++++++ src/client/connect/pool.rs | 1107 +++++++++++++++++ src/client/connect/tcp/mod.rs | 409 +++++++ src/client/connect/tcp/tokio.rs | 76 ++ src/client/legacy/client.rs | 1245 ++----------------- src/client/legacy/connect/capture.rs | 10 +- src/client/legacy/connect/dns.rs | 338 +----- src/client/legacy/connect/http.rs | 1451 +--------------------- src/client/legacy/connect/mod.rs | 380 +----- src/client/legacy/pool.rs | 1118 +---------------- src/client/mod.rs | 9 + src/client/pool/cache.rs | 11 +- src/common/mod.rs | 10 +- src/lib.rs | 2 +- src/rt/mod.rs | 5 +- src/rt/tokio.rs | 11 +- src/server/conn/auto/mod.rs | 2 +- src/service/mod.rs | 4 +- tests/legacy_client.rs | 6 +- 26 files changed, 4943 insertions(+), 4477 deletions(-) create mode 100644 src/client/client.rs create mode 100644 src/client/connect/config.rs create mode 100644 src/client/connect/dns/mod.rs create mode 100644 src/client/connect/dns/tokio.rs create mode 100644 src/client/connect/http/mod.rs create mode 100644 src/client/connect/http/tokio.rs create mode 100644 src/client/connect/mod.rs create mode 100644 src/client/connect/pool.rs create mode 100644 src/client/connect/tcp/mod.rs create mode 100644 src/client/connect/tcp/tokio.rs diff --git a/Cargo.toml b/Cargo.toml index 8f15ba74..8c02c993 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -71,12 +71,13 @@ full = [ "http1", "http2", "tokio", + "tokio-net", "tracing", ] -client = ["hyper/client", "tokio/net", "dep:tracing", "dep:futures-channel", "dep:tower-service"] -client-legacy = ["client", "dep:socket2", "tokio/sync", "dep:libc", "dep:futures-util"] -client-pool = ["client", "dep:futures-util", "dep:tower-layer"] +client = 
["hyper/client", "dep:futures-channel", "dep:futures-util", "dep:socket2", "dep:tracing", "dep:tower-service"] +client-legacy = ["client", "tokio/sync", "dep:libc", "dep:futures-util"] +client-pool = ["client", "dep:futures-util", "dep:tower-layer", "tokio/sync"] client-proxy = ["client", "dep:base64", "dep:ipnet", "dep:percent-encoding"] client-proxy-system = ["dep:system-configuration", "dep:windows-registry"] @@ -89,7 +90,9 @@ service = ["dep:tower-service"] http1 = ["hyper/http1"] http2 = ["hyper/http2"] -tokio = ["dep:tokio", "tokio/rt", "tokio/time"] +tokio = ["dep:tokio", "tokio/time"] +tokio-rt = ["dep:tokio", "tokio/rt"] +tokio-net = ["tokio-rt", "tokio/net"] tracing = ["dep:tracing"] diff --git a/src/client/client.rs b/src/client/client.rs new file mode 100644 index 00000000..c0670e1f --- /dev/null +++ b/src/client/client.rs @@ -0,0 +1,1664 @@ +//! The legacy HTTP Client from 0.14.x +//! +//! This `Client` will eventually be deconstructed into more composable parts. +//! For now, to enable people to use hyper 1.0 quicker, this `Client` exists +//! in much the same way it did in hyper 0.14. + +use crate::client::connect::http::HttpConnect; +use crate::client::connect::pool::{self, Ver}; +use crate::client::connect::{Alpn, Connect, Connected, Connection}; +use crate::common::{lazy as hyper_lazy, timer, Exec, Lazy, SyncWrapper}; +use futures_util::future::{self, Either, FutureExt, TryFutureExt}; +use http::uri::Scheme; +use hyper::client::conn::TrySendError as ConnTrySendError; +use hyper::header::{HeaderValue, HOST}; +use hyper::rt::Timer; +use hyper::{body::Body, Method, Request, Response, Uri, Version}; +use std::error::Error as StdError; +use std::fmt; +use std::future::{poll_fn, Future}; +use std::pin::Pin; +use std::task::{self, Poll}; +use std::time::Duration; +use tracing::{debug, trace, warn}; + +pub(crate) type BoxSendFuture = Pin + Send>>; + +/// A Client to make outgoing HTTP requests. 
+/// +/// `Client` is cheap to clone and cloning is the recommended way to share a `Client`. The +/// underlying connection pool will be reused. +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +pub struct Client { + config: Config, + connector: C, + exec: Exec, + #[cfg(feature = "http1")] + h1_builder: hyper::client::conn::http1::Builder, + #[cfg(feature = "http2")] + h2_builder: hyper::client::conn::http2::Builder, + pool: pool::Pool, PoolKey>, +} + +#[derive(Clone, Copy, Debug)] +struct Config { + retry_canceled_requests: bool, + set_host: bool, + ver: Ver, +} + +/// Client errors +pub struct Error { + kind: ErrorKind, + source: Option>, + #[cfg(any(feature = "http1", feature = "http2"))] + connect_info: Option, +} + +#[derive(Debug)] +enum ErrorKind { + Canceled, + ChannelClosed, + Connect, + UserUnsupportedRequestMethod, + UserUnsupportedVersion, + UserAbsoluteUriRequired, + SendRequest, +} + +macro_rules! e { + ($kind:ident) => { + Error { + kind: ErrorKind::$kind, + source: None, + connect_info: None, + } + }; + ($kind:ident, $src:expr) => { + Error { + kind: ErrorKind::$kind, + source: Some($src.into()), + connect_info: None, + } + }; +} + +// We might change this... :shrug: +type PoolKey = (http::uri::Scheme, http::uri::Authority); + +enum TrySendError { + Retryable { + error: Error, + req: Request, + connection_reused: bool, + }, + Nope(Error), +} + +/// A `Future` that will resolve to an HTTP Response. +/// +/// This is returned by `Client::request` (and `Client::get`). +#[must_use = "futures do nothing unless polled"] +pub struct ResponseFuture { + inner: SyncWrapper< + Pin, Error>> + Send>>, + >, +} + +// ===== impl Client ===== + +impl Client<(), ()> { + /// Create a builder to configure a new `Client`. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(all(feature = "tokio", feature = "http2"))] + /// # fn run () { + /// use std::time::Duration; + /// use hyper_util::client::legacy::Client; + /// use hyper_util::rt::{TokioExecutor, TokioTimer}; + /// + /// let client = Client::builder(TokioExecutor::new()) + /// .pool_timer(TokioTimer::new()) + /// .pool_idle_timeout(Duration::from_secs(30)) + /// .http2_only(true) + /// .build_http(); + /// # let infer: Client<_, http_body_util::Full> = client; + /// # drop(infer); + /// # } + /// # fn main() {} + /// ``` + pub fn builder(executor: E) -> Builder + where + E: hyper::rt::Executor + Send + Sync + Clone + 'static, + { + Builder::new(executor) + } +} + +impl Client +where + C: Connect + Clone + Send + Sync + 'static, + B: Body + Send + 'static + Unpin, + B::Data: Send, + B::Error: Into>, +{ + /// Send a `GET` request to the supplied `Uri`. + /// + /// # Note + /// + /// This requires that the `Body` type have a `Default` implementation. + /// It *should* return an "empty" version of itself, such that + /// `Body::is_end_stream` is `true`. + /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tokio")] + /// # fn run () { + /// use hyper::Uri; + /// use hyper_util::client::legacy::Client; + /// use hyper_util::rt::TokioExecutor; + /// use bytes::Bytes; + /// use http_body_util::Full; + /// + /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); + /// + /// let future = client.get(Uri::from_static("http://httpbin.org/ip")); + /// # } + /// # fn main() {} + /// ``` + pub fn get(&self, uri: Uri) -> ResponseFuture + where + B: Default, + { + let body = B::default(); + if !body.is_end_stream() { + warn!("default Body used for get() does not return true for is_end_stream"); + } + + let mut req = Request::new(body); + *req.uri_mut() = uri; + self.request(req) + } + + /// Send a constructed `Request` using this `Client`. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(feature = "tokio")] + /// # fn run () { + /// use hyper::{Method, Request}; + /// use hyper_util::client::legacy::Client; + /// use http_body_util::Full; + /// use hyper_util::rt::TokioExecutor; + /// use bytes::Bytes; + /// + /// let client: Client<_, Full> = Client::builder(TokioExecutor::new()).build_http(); + /// + /// let req: Request> = Request::builder() + /// .method(Method::POST) + /// .uri("http://httpbin.org/post") + /// .body(Full::from("Hallo!")) + /// .expect("request builder"); + /// + /// let future = client.request(req); + /// # } + /// # fn main() {} + /// ``` + pub fn request(&self, mut req: Request) -> ResponseFuture { + let is_http_connect = req.method() == Method::CONNECT; + match req.version() { + Version::HTTP_11 => (), + Version::HTTP_10 => { + if is_http_connect { + warn!("CONNECT is not allowed for HTTP/1.0"); + return ResponseFuture::new(future::err(e!(UserUnsupportedRequestMethod))); + } + } + Version::HTTP_2 => (), + // completely unsupported HTTP version (like HTTP/0.9)! 
+ other => return ResponseFuture::error_version(other), + }; + + let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { + Ok(s) => s, + Err(err) => { + return ResponseFuture::new(future::err(err)); + } + }; + + ResponseFuture::new(self.clone().send_request(req, pool_key)) + } + + async fn send_request( + self, + mut req: Request, + pool_key: PoolKey, + ) -> Result, Error> { + let uri = req.uri().clone(); + + loop { + req = match self.try_send_request(req, pool_key.clone()).await { + Ok(resp) => return Ok(resp), + Err(TrySendError::Nope(err)) => return Err(err), + Err(TrySendError::Retryable { + mut req, + error, + connection_reused, + }) => { + if !self.config.retry_canceled_requests || !connection_reused { + // if client disabled, don't retry + // a fresh connection means we definitely can't retry + return Err(error); + } + + trace!( + "unstarted request canceled, trying again (reason={:?})", + error + ); + *req.uri_mut() = uri.clone(); + req + } + } + } + } + + async fn try_send_request( + &self, + mut req: Request, + pool_key: PoolKey, + ) -> Result, TrySendError> { + let mut pooled = self + .connection_for(pool_key) + .await + // `connection_for` already retries checkout errors, so if + // it returns an error, there's not much else to retry + .map_err(TrySendError::Nope)?; + + if let Some(on_conn) = req + .extensions_mut() + .get_mut::() + { + on_conn.call(&pooled.conn_info); + } + + if pooled.is_http1() { + if req.version() == Version::HTTP_2 { + warn!("Connection is HTTP/1, but request requires HTTP/2"); + return Err(TrySendError::Nope( + e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()), + )); + } + + if self.config.set_host { + let uri = req.uri().clone(); + req.headers_mut().entry(HOST).or_insert_with(|| { + let hostname = uri.host().expect("authority implies host"); + if let Some(port) = get_non_default_port(&uri) { + let s = format!("{hostname}:{port}"); + HeaderValue::from_maybe_shared(bytes::Bytes::from(s)) + } else 
{ + HeaderValue::from_str(hostname) + } + .expect("uri host is valid header value") + }); + } + + // CONNECT always sends authority-form, so check it first... + if req.method() == Method::CONNECT { + authority_form(req.uri_mut()); + } else if pooled.conn_info.is_proxied { + absolute_form(req.uri_mut()); + } else { + origin_form(req.uri_mut()); + } + } else if req.method() == Method::CONNECT && !pooled.is_http2() { + authority_form(req.uri_mut()); + } + + let mut res = match pooled.try_send_request(req).await { + Ok(res) => res, + Err(mut err) => { + return if let Some(req) = err.take_message() { + Err(TrySendError::Retryable { + connection_reused: pooled.is_reused(), + error: e!(Canceled, err.into_error()) + .with_connect_info(pooled.conn_info.clone()), + req, + }) + } else { + Err(TrySendError::Nope( + e!(SendRequest, err.into_error()) + .with_connect_info(pooled.conn_info.clone()), + )) + }; + } + }; + + // If the Connector included 'extra' info, add to Response... + if let Some(extra) = &pooled.conn_info.extra { + extra.set(res.extensions_mut()); + } + + // If pooled is HTTP/2, we can toss this reference immediately. + // + // when pooled is dropped, it will try to insert back into the + // pool. To delay that, spawn a future that completes once the + // sender is ready again. + // + // This *should* only be once the related `Connection` has polled + // for a new request to start. + // + // It won't be ready if there is a body to stream. 
+ if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { + drop(pooled); + } else { + let on_idle = poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); + self.exec.execute(on_idle); + } + + Ok(res) + } + + async fn connection_for( + &self, + pool_key: PoolKey, + ) -> Result, PoolKey>, Error> { + loop { + match self.one_connection_for(pool_key.clone()).await { + Ok(pooled) => return Ok(pooled), + Err(ClientConnectError::Normal(err)) => return Err(err), + Err(ClientConnectError::CheckoutIsClosed(reason)) => { + if !self.config.retry_canceled_requests { + return Err(e!(Connect, reason)); + } + + trace!( + "unstarted request canceled, trying again (reason={:?})", + reason, + ); + continue; + } + }; + } + } + + async fn one_connection_for( + &self, + pool_key: PoolKey, + ) -> Result, PoolKey>, ClientConnectError> { + // Return a single connection if pooling is not enabled + if !self.pool.is_enabled() { + return self + .connect_to(pool_key) + .await + .map_err(ClientConnectError::Normal); + } + + // This actually races 2 different futures to try to get a ready + // connection the fastest, and to reduce connection churn. + // + // - If the pool has an idle connection waiting, that's used + // immediately. + // - Otherwise, the Connector is asked to start connecting to + // the destination Uri. + // - Meanwhile, the pool Checkout is watching to see if any other + // request finishes and tries to insert an idle connection. + // - If a new connection is started, but the Checkout wins after + // (an idle connection became available first), the started + // connection future is spawned into the runtime to complete, + // and then be inserted into the pool as an idle connection. + let checkout = self.pool.checkout(pool_key.clone()); + let connect = self.connect_to(pool_key); + let is_ver_h2 = self.config.ver == Ver::Http2; + + // The order of the `select` is depended on below... 
+ + match future::select(checkout, connect).await { + // Checkout won, connect future may have been started or not. + // + // If it has, let it finish and insert back into the pool, + // so as to not waste the socket... + Either::Left((Ok(checked_out), connecting)) => { + // This depends on the `select` above having the correct + // order, such that if the checkout future were ready + // immediately, the connect future will never have been + // started. + // + // If it *wasn't* ready yet, then the connect future will + // have been started... + if connecting.started() { + let bg = connecting + .map_err(|err| { + trace!("background connect error: {}", err); + }) + .map(|_pooled| { + // dropping here should just place it in + // the Pool for us... + }); + // An execute error here isn't important, we're just trying + // to prevent a waste of a socket... + self.exec.execute(bg); + } + Ok(checked_out) + } + // Connect won, checkout can just be dropped. + Either::Right((Ok(connected), _checkout)) => Ok(connected), + // Either checkout or connect could get canceled: + // + // 1. Connect is canceled if this is HTTP/2 and there is + // an outstanding HTTP/2 connecting task. + // 2. Checkout is canceled if the pool cannot deliver an + // idle connection reliably. + // + // In both cases, we should just wait for the other future. 
+ Either::Left((Err(err), connecting)) => { + if err.is_canceled() { + connecting.await.map_err(ClientConnectError::Normal) + } else { + Err(ClientConnectError::Normal(e!(Connect, err))) + } + } + Either::Right((Err(err), checkout)) => { + if err.is_canceled() { + checkout.await.map_err(move |err| { + if is_ver_h2 && err.is_canceled() { + ClientConnectError::CheckoutIsClosed(err) + } else { + ClientConnectError::Normal(e!(Connect, err)) + } + }) + } else { + Err(ClientConnectError::Normal(err)) + } + } + } + } + + #[cfg(any(feature = "http1", feature = "http2"))] + fn connect_to( + &self, + pool_key: PoolKey, + ) -> impl Lazy, PoolKey>, Error>> + Send + Unpin + { + let executor = self.exec.clone(); + let pool = self.pool.clone(); + #[cfg(feature = "http1")] + let h1_builder = self.h1_builder.clone(); + #[cfg(feature = "http2")] + let h2_builder = self.h2_builder.clone(); + let ver = self.config.ver; + let is_ver_h2 = ver == Ver::Http2; + let connector = self.connector.clone(); + hyper_lazy(move || { + // Try to take a "connecting lock". + // + // If the pool_key is for HTTP/2, and there is already a + // connection being established, then this can't take a + // second lock. The "connect_to" future is Canceled. + let connecting = match pool.connecting(&pool_key, ver) { + Some(lock) => lock, + None => { + let canceled = e!(Canceled); + // TODO + //crate::Error::new_canceled().with("HTTP/2 connection in progress"); + return Either::Right(future::err(canceled)); + } + }; + let dst = domain_as_uri(pool_key); + Either::Left( + connector + .connect(crate::client::connect::sealed::Internal, dst) + .map_err(|src| e!(Connect, src)) + .and_then(move |io| { + let connected = io.connected(); + // If ALPN is h2 and we aren't http2_only already, + // then we need to convert our pool checkout into + // a single HTTP2 one. 
+ let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { + match connecting.alpn_h2(&pool) { + Some(lock) => { + trace!("ALPN negotiated h2, updating pool"); + lock + } + None => { + // Another connection has already upgraded, + // the pool checkout should finish up for us. + let canceled = e!(Canceled, "ALPN upgraded to HTTP/2"); + return Either::Right(future::err(canceled)); + } + } + } else { + connecting + }; + + #[cfg_attr(not(feature = "http2"), allow(unused))] + let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; + + Either::Left(Box::pin(async move { + let tx = if is_h2 { + #[cfg(feature = "http2")] { + let (mut tx, conn) = + h2_builder.handshake(io).await.map_err(Error::tx)?; + + trace!( + "http2 handshake complete, spawning background dispatcher task" + ); + executor.execute( + conn.map_err(|e| debug!("client connection error: {}", e)) + .map(|_| ()), + ); + + // Wait for 'conn' to ready up before we + // declare this tx as usable + tx.ready().await.map_err(Error::tx)?; + PoolTx::Http2(tx) + } + #[cfg(not(feature = "http2"))] + panic!("http2 feature is not enabled"); + } else { + #[cfg(feature = "http1")] { + // Perform the HTTP/1.1 handshake on the provided I/O stream. + // Uses the h1_builder to establish a connection, returning a sender (tx) for requests + // and a connection task (conn) that manages the connection lifecycle. + let (mut tx, conn) = + h1_builder.handshake(io).await.map_err(Error::tx)?; + + // Log that the HTTP/1.1 handshake has completed successfully. + // This indicates the connection is established and ready for request processing. + trace!( + "http1 handshake complete, spawning background dispatcher task" + ); + // Create a oneshot channel to communicate errors from the connection task. + // err_tx sends errors from the connection task, and err_rx receives them + // to correlate connection failures with request readiness errors. 
+ let (err_tx, err_rx) = futures_channel::oneshot::channel(); + // Spawn the connection task in the background using the executor. + // The task manages the HTTP/1.1 connection, including upgrades (e.g., WebSocket). + // Errors are sent via err_tx to ensure they can be checked if the sender (tx) fails. + executor.execute( + conn.with_upgrades() + .map_err(|e| { + // Log the connection error at debug level for diagnostic purposes. + debug!("client connection error: {:?}", e); + // Log that the error is being sent to the error channel. + trace!("sending connection error to error channel"); + // Send the error via the oneshot channel, ignoring send failures + // (e.g., if the receiver is dropped, which is handled later). + let _ =err_tx.send(e); + }) + .map(|_| ()), + ); + // Log that the client is waiting for the connection to be ready. + // Readiness indicates the sender (tx) can accept a request without blocking. + trace!("waiting for connection to be ready"); + // Check if the sender is ready to accept a request. + // This ensures the connection is fully established before proceeding. + // aka: + // Wait for 'conn' to ready up before we + // declare this tx as usable + match tx.ready().await { + // If ready, the connection is usable for sending requests. + Ok(_) => { + // Log that the connection is ready for use. + trace!("connection is ready"); + // Drop the error receiver, as it’s no longer needed since the sender is ready. + // This prevents waiting for errors that won’t occur in a successful case. + drop(err_rx); + // Wrap the sender in PoolTx::Http1 for use in the connection pool. + PoolTx::Http1(tx) + } + // If the sender fails with a closed channel error, check for a specific connection error. + // This distinguishes between a vague ChannelClosed error and an actual connection failure. + Err(e) if e.is_closed() => { + // Log that the channel is closed, indicating a potential connection issue. 
+ trace!("connection channel closed, checking for connection error"); + // Check the oneshot channel for a specific error from the connection task. + match err_rx.await { + // If an error was received, it’s a specific connection failure. + Ok(err) => { + // Log the specific connection error for diagnostics. + trace!("received connection error: {:?}", err); + // Return the error wrapped in Error::tx to propagate it. + return Err(Error::tx(err)); + } + // If the error channel is closed, no specific error was sent. + // Fall back to the vague ChannelClosed error. + Err(_) => { + // Log that the error channel is closed, indicating no specific error. + trace!("error channel closed, returning the vague ChannelClosed error"); + // Return the original error wrapped in Error::tx. + return Err(Error::tx(e)); + } + } + } + // For other errors (e.g., timeout, I/O issues), propagate them directly. + // These are not ChannelClosed errors and don’t require error channel checks. + Err(e) => { + // Log the specific readiness failure for diagnostics. + trace!("connection readiness failed: {:?}", e); + // Return the error wrapped in Error::tx to propagate it. 
+ return Err(Error::tx(e)); + } + } + } + #[cfg(not(feature = "http1"))] { + panic!("http1 feature is not enabled"); + } + }; + + Ok(pool.pooled( + connecting, + PoolClient { + conn_info: connected, + tx, + }, + )) + })) + }), + ) + }) + } +} + +impl tower_service::Service> for Client +where + C: Connect + Clone + Send + Sync + 'static, + B: Body + Send + 'static + Unpin, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl tower_service::Service> for &'_ Client +where + C: Connect + Clone + Send + Sync + 'static, + B: Body + Send + 'static + Unpin, + B::Data: Send, + B::Error: Into>, +{ + type Response = Response; + type Error = Error; + type Future = ResponseFuture; + + fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn call(&mut self, req: Request) -> Self::Future { + self.request(req) + } +} + +impl Clone for Client { + fn clone(&self) -> Client { + Client { + config: self.config, + exec: self.exec.clone(), + #[cfg(feature = "http1")] + h1_builder: self.h1_builder.clone(), + #[cfg(feature = "http2")] + h2_builder: self.h2_builder.clone(), + connector: self.connector.clone(), + pool: self.pool.clone(), + } + } +} + +impl fmt::Debug for Client { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Client").finish() + } +} + +// ===== impl ResponseFuture ===== + +impl ResponseFuture { + fn new(value: F) -> Self + where + F: Future, Error>> + Send + 'static, + { + Self { + inner: SyncWrapper::new(Box::pin(value)), + } + } + + fn error_version(ver: Version) -> Self { + warn!("Request has unsupported version \"{:?}\"", ver); + ResponseFuture::new(Box::pin(future::err(e!(UserUnsupportedVersion)))) + } +} + +impl fmt::Debug for ResponseFuture { + fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("Future") + } +} + +impl Future for ResponseFuture { + type Output = Result, Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.inner.get_mut().as_mut().poll(cx) + } +} + +// ===== impl PoolClient ===== + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +struct PoolClient { + conn_info: Connected, + tx: PoolTx, +} + +enum PoolTx { + #[cfg(feature = "http1")] + Http1(hyper::client::conn::http1::SendRequest), + #[cfg(feature = "http2")] + Http2(hyper::client::conn::http2::SendRequest), +} + +impl PoolClient { + fn poll_ready( + &mut self, + #[allow(unused_variables)] cx: &mut task::Context<'_>, + ) -> Poll> { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed), + #[cfg(feature = "http2")] + PoolTx::Http2(_) => Poll::Ready(Ok(())), + } + } + + fn is_http1(&self) -> bool { + !self.is_http2() + } + + fn is_http2(&self) -> bool { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(_) => false, + #[cfg(feature = "http2")] + PoolTx::Http2(_) => true, + } + } + + fn is_poisoned(&self) -> bool { + self.conn_info.poisoned.poisoned() + } + + fn is_ready(&self) -> bool { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref tx) => tx.is_ready(), + #[cfg(feature = "http2")] + PoolTx::Http2(ref tx) => tx.is_ready(), + } + } +} + +impl PoolClient { + fn try_send_request( + &mut self, + req: Request, + ) -> impl Future, ConnTrySendError>>> + where + B: Send, + { + #[cfg(all(feature = "http1", feature = "http2"))] + return match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)), + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => Either::Right(tx.try_send_request(req)), + }; + + #[cfg(feature = "http1")] + #[cfg(not(feature = "http2"))] + return match self.tx { + 
#[cfg(feature = "http1")] + PoolTx::Http1(ref mut tx) => tx.try_send_request(req), + }; + + #[cfg(not(feature = "http1"))] + #[cfg(feature = "http2")] + return match self.tx { + #[cfg(feature = "http2")] + PoolTx::Http2(ref mut tx) => tx.try_send_request(req), + }; + } +} + +impl pool::Poolable for PoolClient +where + B: Send + 'static, +{ + fn is_open(&self) -> bool { + !self.is_poisoned() && self.is_ready() + } + + fn reserve(self) -> pool::Reservation { + match self.tx { + #[cfg(feature = "http1")] + PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http1(tx), + }), + #[cfg(feature = "http2")] + PoolTx::Http2(tx) => { + let b = PoolClient { + conn_info: self.conn_info.clone(), + tx: PoolTx::Http2(tx.clone()), + }; + let a = PoolClient { + conn_info: self.conn_info, + tx: PoolTx::Http2(tx), + }; + pool::Reservation::Shared(a, b) + } + } + } + + fn can_share(&self) -> bool { + self.is_http2() + } +} + +enum ClientConnectError { + Normal(Error), + CheckoutIsClosed(pool::Error), +} + +fn origin_form(uri: &mut Uri) { + let path = match uri.path_and_query() { + Some(path) if path.as_str() != "/" => { + let mut parts = ::http::uri::Parts::default(); + parts.path_and_query = Some(path.clone()); + Uri::from_parts(parts).expect("path is valid uri") + } + _none_or_just_slash => { + debug_assert!(Uri::default() == "/"); + Uri::default() + } + }; + *uri = path +} + +fn absolute_form(uri: &mut Uri) { + debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); + debug_assert!( + uri.authority().is_some(), + "absolute_form needs an authority" + ); +} + +fn authority_form(uri: &mut Uri) { + if let Some(path) = uri.path_and_query() { + // `https://hyper.rs` would parse with `/` path, don't + // annoy people about that... 
+ if path != "/" { + warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); + } + } + *uri = match uri.authority() { + Some(auth) => { + let mut parts = ::http::uri::Parts::default(); + parts.authority = Some(auth.clone()); + Uri::from_parts(parts).expect("authority is valid") + } + None => { + unreachable!("authority_form with relative uri"); + } + }; +} + +fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> Result { + let uri_clone = uri.clone(); + match (uri_clone.scheme(), uri_clone.authority()) { + (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), + (None, Some(auth)) if is_http_connect => { + let scheme = match auth.port_u16() { + Some(443) => { + set_scheme(uri, Scheme::HTTPS); + Scheme::HTTPS + } + _ => { + set_scheme(uri, Scheme::HTTP); + Scheme::HTTP + } + }; + Ok((scheme, auth.clone())) + } + _ => { + debug!("Client requires absolute-form URIs, received: {:?}", uri); + Err(e!(UserAbsoluteUriRequired)) + } + } +} + +fn domain_as_uri((scheme, auth): PoolKey) -> Uri { + http::uri::Builder::new() + .scheme(scheme) + .authority(auth) + .path_and_query("/") + .build() + .expect("domain is valid Uri") +} + +fn set_scheme(uri: &mut Uri, scheme: Scheme) { + debug_assert!( + uri.scheme().is_none(), + "set_scheme expects no existing scheme" + ); + let old = std::mem::take(uri); + let mut parts: ::http::uri::Parts = old.into(); + parts.scheme = Some(scheme); + parts.path_and_query = Some("/".parse().expect("slash is a valid path")); + *uri = Uri::from_parts(parts).expect("scheme is valid"); +} + +fn get_non_default_port(uri: &Uri) -> Option> { + match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { + (Some(443), true) => None, + (Some(80), false) => None, + _ => uri.port(), + } +} + +fn is_schema_secure(uri: &Uri) -> bool { + uri.scheme_str() + .map(|scheme_str| matches!(scheme_str, "wss" | "https")) + .unwrap_or_default() +} + +/// A builder to configure a new [`Client`](Client). 
+/// +/// # Example +/// +/// ``` +/// # #[cfg(all(feature = "tokio", feature = "http2"))] +/// # fn run () { +/// use std::time::Duration; +/// use hyper_util::client::legacy::Client; +/// use hyper_util::rt::TokioExecutor; +/// +/// let client = Client::builder(TokioExecutor::new()) +/// .pool_idle_timeout(Duration::from_secs(30)) +/// .http2_only(true) +/// .build_http(); +/// # let infer: Client<_, http_body_util::Full> = client; +/// # drop(infer); +/// # } +/// # fn main() {} +/// ``` +#[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] +#[derive(Clone)] +pub struct Builder { + client_config: Config, + exec: Exec, + #[cfg(feature = "http1")] + h1_builder: hyper::client::conn::http1::Builder, + #[cfg(feature = "http2")] + h2_builder: hyper::client::conn::http2::Builder, + pub(crate) pool_config: pool::Config, + pub(crate) pool_timer: Option, +} + +impl Builder { + /// Construct a new Builder. + pub fn new(executor: E) -> Self + where + E: hyper::rt::Executor + Send + Sync + Clone + 'static, + { + let exec = Exec::new(executor); + Self { + client_config: Config { + retry_canceled_requests: true, + set_host: true, + ver: Ver::Auto, + }, + exec: exec.clone(), + #[cfg(feature = "http1")] + h1_builder: hyper::client::conn::http1::Builder::new(), + #[cfg(feature = "http2")] + h2_builder: hyper::client::conn::http2::Builder::new(exec), + pool_config: pool::Config { + idle_timeout: Some(Duration::from_secs(90)), + max_idle_per_host: usize::MAX, + }, + pool_timer: None, + } + } + /// Set an optional timeout for idle sockets being kept-alive. + /// A `Timer` is required for this to take effect. See `Builder::pool_timer` + /// + /// Pass `None` to disable timeout. + /// + /// Default is 90 seconds. 
+ /// + /// # Example + /// + /// ``` + /// # #[cfg(all(feature = "tokio", feature = "http2"))] + /// # fn run () { + /// use std::time::Duration; + /// use hyper_util::client::legacy::Client; + /// use hyper_util::rt::{TokioExecutor, TokioTimer}; + /// + /// let client = Client::builder(TokioExecutor::new()) + /// .pool_idle_timeout(Duration::from_secs(30)) + /// .pool_timer(TokioTimer::new()) + /// .build_http(); + /// + /// # let infer: Client<_, http_body_util::Full> = client; + /// # } + /// # fn main() {} + /// ``` + pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self + where + D: Into>, + { + self.pool_config.idle_timeout = val.into(); + self + } + + #[doc(hidden)] + #[deprecated(note = "renamed to `pool_max_idle_per_host`")] + pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + /// Sets the maximum idle connection per host allowed in the pool. + /// + /// Default is `usize::MAX` (no limit). + pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { + self.pool_config.max_idle_per_host = max_idle; + self + } + + // HTTP/1 options + + /// Sets the exact size of the read buffer to *always* use. + /// + /// Note that setting this option unsets the `http1_max_buf_size` option. + /// + /// Default is an adaptive read buffer. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { + self.h1_builder.read_buf_exact_size(Some(sz)); + self + } + + /// Set the maximum buffer size for the connection. + /// + /// Default is ~400kb. + /// + /// Note that setting this option unsets the `http1_read_exact_buf_size` option. + /// + /// # Panics + /// + /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. 
+ #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { + self.h1_builder.max_buf_size(max); + self + } + + /// Set whether HTTP/1 connections will accept spaces between header names + /// and the colon that follow them in responses. + /// + /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when + /// parsing. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has + /// to say about it: + /// + /// > No whitespace is allowed between the header field-name and colon. In + /// > the past, differences in the handling of such whitespace have led to + /// > security vulnerabilities in request routing and response handling. A + /// > server MUST reject any received request message that contains + /// > whitespace between a header field-name and colon with a response code + /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a + /// > response message before forwarding the message downstream. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { + self.h1_builder + .allow_spaces_after_header_name_in_responses(val); + self + } + + /// Set whether HTTP/1 connections will accept obsolete line folding for + /// header values. + /// + /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has + /// to say about it: + /// + /// > A server that receives an obs-fold in a request message that is not + /// > within a message/http container MUST either reject the message by + /// > sending a 400 (Bad Request), preferably with a representation + /// > explaining that obsolete line folding is unacceptable, or replace + /// > each received obs-fold with one or more SP octets prior to + /// > interpreting the field value or forwarding the message downstream. + /// + /// > A proxy or gateway that receives an obs-fold in a response message + /// > that is not within a message/http container MUST either discard the + /// > message and replace it with a 502 (Bad Gateway) response, preferably + /// > with a representation explaining that unacceptable line folding was + /// > received, or replace each received obs-fold with one or more SP + /// > octets prior to interpreting the field value or forwarding the + /// > message downstream. + /// + /// > A user agent that receives an obs-fold in a response message that is + /// > not within a message/http container MUST replace each received + /// > obs-fold with one or more SP octets prior to interpreting the field + /// > value. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + /// + /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { + self.h1_builder + .allow_obsolete_multiline_headers_in_responses(val); + self + } + + /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. + /// + /// This mimics the behaviour of major browsers. You probably don't want this. 
+    /// You should only want this if you are implementing a proxy whose main
+    /// purpose is to sit in front of browsers whose users access arbitrary content
+    /// which may be malformed, and they expect everything that works without
+    /// the proxy to keep working with the proxy.
+    ///
+    /// This option will prevent Hyper's client from returning an error encountered
+    /// when parsing a header, except if the error was caused by the character NUL
+    /// (ASCII code 0), as Chrome specifically always rejects those.
+    ///
+    /// The ignorable errors are:
+    /// * empty header names;
+    /// * characters that are not allowed in header names, except for `\0` and `\r`;
+    /// * when `allow_spaces_after_header_name_in_responses` is not enabled,
+    ///   spaces and tabs between the header name and the colon;
+    /// * missing colon between header name and value;
+    /// * characters that are not allowed in header values except for `\0` and `\r`.
+    ///
+    /// If an ignorable error is encountered, the parser tries to find the next
+    /// line in the input to resume parsing the rest of the headers. An error
+    /// will be emitted nonetheless if it finds `\0` or a lone `\r` while
+    /// looking for the next line.
+    #[cfg(feature = "http1")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "http1")))]
+    pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder {
+        self.h1_builder.ignore_invalid_headers_in_responses(val);
+        self
+    }
+
+    /// Set whether HTTP/1 connections should try to use vectored writes,
+    /// or always flatten into a single buffer.
+    ///
+    /// Note that setting this to false may mean more copies of body data,
+    /// but may also improve performance when an IO transport doesn't
+    /// support vectored writes well, such as most TLS implementations.
+    ///
+    /// Setting this to true will force hyper to use queued strategy
+    /// which may eliminate unnecessary cloning on some TLS backends
+    ///
+    /// Default is `auto`.
In this mode hyper will try to guess which + /// mode to use + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { + self.h1_builder.writev(enabled); + self + } + + /// Set whether HTTP/1 connections will write header names as title case at + /// the socket level. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { + self.h1_builder.title_case_headers(val); + self + } + + /// Set whether to support preserving original header cases. + /// + /// Currently, this will record the original cases received, and store them + /// in a private extension on the `Response`. It will also look for and use + /// such an extension in any provided `Request`. + /// + /// Since the relevant extension is still private, there is no way to + /// interact with the original cases. The only effect this can have now is + /// to forward the cases in a proxy-like fashion. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { + self.h1_builder.preserve_header_case(val); + self + } + + /// Set the maximum number of headers. + /// + /// When a response is received, the parser will reserve a buffer to store headers for optimal + /// performance. + /// + /// If client receives more headers than the buffer size, the error "message header too large" + /// is returned. + /// + /// The headers is allocated on the stack by default, which has higher performance. 
After + /// setting this value, headers will be allocated in heap memory, that is, heap memory + /// allocation will occur for each response, and there will be a performance drop of about 5%. + /// + /// Note that this setting does not affect HTTP/2. + /// + /// Default is 100. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http1_max_headers(&mut self, val: usize) -> &mut Self { + self.h1_builder.max_headers(val); + self + } + + /// Set whether HTTP/0.9 responses should be tolerated. + /// + /// Default is false. + #[cfg(feature = "http1")] + #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] + pub fn http09_responses(&mut self, val: bool) -> &mut Self { + self.h1_builder.http09_responses(val); + self + } + + /// Set whether the connection **must** use HTTP/2. + /// + /// The destination must either allow HTTP2 Prior Knowledge, or the + /// `Connect` should be configured to do use ALPN to upgrade to `h2` + /// as part of the connection process. This will not make the `Client` + /// utilize ALPN by itself. + /// + /// Note that setting this to true prevents HTTP/1 from being allowed. + /// + /// Default is false. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_only(&mut self, val: bool) -> &mut Self { + self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; + self + } + + /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. + /// + /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). + /// As of v0.4.0, it is 20. + /// + /// See for more information. 
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_pending_accept_reset_streams( + &mut self, + max: impl Into>, + ) -> &mut Self { + self.h2_builder.max_pending_accept_reset_streams(max.into()); + self + } + + /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 + /// stream-level flow control. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { + self.h2_builder.initial_stream_window_size(sz.into()); + self + } + + /// Sets the max connection-level flow control for HTTP2 + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_connection_window_size( + &mut self, + sz: impl Into>, + ) -> &mut Self { + self.h2_builder.initial_connection_window_size(sz.into()); + self + } + + /// Sets the initial maximum of locally initiated (send) streams. + /// + /// This value will be overwritten by the value included in the initial + /// SETTINGS frame received from the peer as part of a [connection preface]. + /// + /// Passing `None` will do nothing. + /// + /// If not set, hyper will use a default. + /// + /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_initial_max_send_streams( + &mut self, + initial: impl Into>, + ) -> &mut Self { + self.h2_builder.initial_max_send_streams(initial); + self + } + + /// Sets whether to use an adaptive flow control. 
+    ///
+    /// Enabling this will override the limits set in
+    /// `http2_initial_stream_window_size` and
+    /// `http2_initial_connection_window_size`.
+    #[cfg(feature = "http2")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+    pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self {
+        self.h2_builder.adaptive_window(enabled);
+        self
+    }
+
+    /// Sets the maximum frame size to use for HTTP2.
+    ///
+    /// Passing `None` will do nothing.
+    ///
+    /// If not set, hyper will use a default.
+    #[cfg(feature = "http2")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+    pub fn http2_max_frame_size(&mut self, sz: impl Into<Option<u32>>) -> &mut Self {
+        self.h2_builder.max_frame_size(sz);
+        self
+    }
+
+    /// Sets the max size of received header frames for HTTP2.
+    ///
+    /// Default is currently 16KB, but can change.
+    #[cfg(feature = "http2")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+    pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self {
+        self.h2_builder.max_header_list_size(max);
+        self
+    }
+
+    /// Sets an interval at which HTTP2 Ping frames should be sent to keep a
+    /// connection alive.
+    ///
+    /// Pass `None` to disable HTTP2 keep-alive.
+    ///
+    /// Default is currently disabled.
+    #[cfg(feature = "http2")]
+    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
+    pub fn http2_keep_alive_interval(
+        &mut self,
+        interval: impl Into<Option<Duration>>,
+    ) -> &mut Self {
+        self.h2_builder.keep_alive_interval(interval);
+        self
+    }
+
+    /// Sets a timeout for receiving an acknowledgement of the keep-alive ping.
+    ///
+    /// If the ping is not acknowledged within the timeout, the connection will
+    /// be closed. Does nothing if `http2_keep_alive_interval` is disabled.
+    ///
+    /// Default is 20 seconds.
+    ///
+    /// # Cargo Feature
+    ///
+    /// Requires the `tokio` cargo feature to be enabled.
+ #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { + self.h2_builder.keep_alive_timeout(timeout); + self + } + + /// Sets whether HTTP2 keep-alive should apply while the connection is idle. + /// + /// If disabled, keep-alive pings are only sent while there are open + /// request/responses streams. If enabled, pings are also sent when no + /// streams are active. Does nothing if `http2_keep_alive_interval` is + /// disabled. + /// + /// Default is `false`. + /// + /// # Cargo Feature + /// + /// Requires the `tokio` cargo feature to be enabled. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { + self.h2_builder.keep_alive_while_idle(enabled); + self + } + + /// Sets the maximum number of HTTP2 concurrent locally reset streams. + /// + /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more + /// details. + /// + /// The default value is determined by the `h2` crate. + /// + /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_concurrent_reset_streams(max); + self + } + + /// Provide a timer to be used for h2 + /// + /// See the documentation of [`h2::client::Builder::timer`] for more + /// details. 
+ /// + /// [`h2::client::Builder::timer`]: https://docs.rs/h2/client/struct.Builder.html#method.timer + pub fn timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Send + Sync + 'static, + { + #[cfg(feature = "http2")] + self.h2_builder.timer(timer); + #[cfg(not(feature = "http2"))] + let _ = timer; + self + } + + /// Provide a timer to be used for timeouts and intervals in connection pools. + pub fn pool_timer(&mut self, timer: M) -> &mut Self + where + M: Timer + Clone + Send + Sync + 'static, + { + self.pool_timer = Some(timer::Timer::new(timer.clone())); + self + } + + /// Set the maximum write buffer size for each HTTP/2 stream. + /// + /// Default is currently 1MB, but may change. + /// + /// # Panics + /// + /// The value must be no larger than `u32::MAX`. + #[cfg(feature = "http2")] + #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] + pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { + self.h2_builder.max_send_buf_size(max); + self + } + + /// Set whether to retry requests that get disrupted before ever starting + /// to write. + /// + /// This means a request that is queued, and gets given an idle, reused + /// connection, and then encounters an error immediately as the idle + /// connection was found to be unusable. + /// + /// When this is set to `false`, the related `ResponseFuture` would instead + /// resolve to an `Error::Cancel`. + /// + /// Default is `true`. + #[inline] + pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { + self.client_config.retry_canceled_requests = val; + self + } + + /// Set whether to automatically add the `Host` header to requests. + /// + /// If true, and a request does not include a `Host` header, one will be + /// added automatically, derived from the authority of the `Uri`. + /// + /// Default is `true`. 
+ #[inline] + pub fn set_host(&mut self, val: bool) -> &mut Self { + self.client_config.set_host = val; + self + } + + /// Build a client with this configuration and the default `HttpConnector`. + pub fn build_http(&self, mut connector: H) -> Client + where + H: HttpConnect, + B: Body + Send, + B::Data: Send, + { + if self.pool_config.is_enabled() { + connector.set_keepalive(self.pool_config.idle_timeout); + } + self.build(connector) + } + + /// Combine the configuration of this builder with a connector to create a `Client`. + pub fn build(&self, connector: C) -> Client + where + C: Connect + Clone, + B: Body + Send, + B::Data: Send, + { + let exec = self.exec.clone(); + let timer = self.pool_timer.clone(); + Client { + config: self.client_config, + exec: exec.clone(), + #[cfg(feature = "http1")] + h1_builder: self.h1_builder.clone(), + #[cfg(feature = "http2")] + h2_builder: self.h2_builder.clone(), + connector, + pool: pool::Pool::new(self.pool_config, exec, timer), + } + } +} + +impl fmt::Debug for Builder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Builder") + .field("client_config", &self.client_config) + .field("pool_config", &self.pool_config) + .finish() + } +} + +// ==== impl Error ==== + +impl fmt::Debug for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut f = f.debug_tuple("hyper_util::client::Error"); + f.field(&self.kind); + if let Some(ref cause) = self.source { + f.field(cause); + } + f.finish() + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "client error ({:?})", self.kind) + } +} + +impl StdError for Error { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.source.as_ref().map(|e| &**e as _) + } +} + +impl Error { + /// Returns true if this was an error from `Connect`. 
+ pub fn is_connect(&self) -> bool { + matches!(self.kind, ErrorKind::Connect) + } + + /// Returns the info of the client connection on which this error occurred. + #[cfg(any(feature = "http1", feature = "http2"))] + pub fn connect_info(&self) -> Option<&Connected> { + self.connect_info.as_ref() + } + + #[cfg(any(feature = "http1", feature = "http2"))] + fn with_connect_info(self, connect_info: Connected) -> Self { + Self { + connect_info: Some(connect_info), + ..self + } + } + fn is_canceled(&self) -> bool { + matches!(self.kind, ErrorKind::Canceled) + } + + fn tx(src: hyper::Error) -> Self { + e!(SendRequest, src) + } + + fn closed(src: hyper::Error) -> Self { + e!(ChannelClosed, src) + } +} diff --git a/src/client/connect/config.rs b/src/client/connect/config.rs new file mode 100644 index 00000000..a0cb44b3 --- /dev/null +++ b/src/client/connect/config.rs @@ -0,0 +1,182 @@ +use socket2::TcpKeepalive; +use std::net::{Ipv4Addr, Ipv6Addr}; +use std::time::Duration; + +#[derive(Clone)] +pub(crate) struct Config { + pub connect_timeout: Option, + pub enforce_http: bool, + pub happy_eyeballs_timeout: Option, + pub tcp_keepalive_config: TcpKeepaliveConfig, + pub local_address_ipv4: Option, + pub local_address_ipv6: Option, + pub nodelay: bool, + pub reuse_address: bool, + pub send_buffer_size: Option, + pub recv_buffer_size: Option, + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + pub interface: Option, + #[cfg(any( + target_os = "illumos", + target_os = "ios", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + pub interface: Option, + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + pub tcp_user_timeout: Option, +} + +#[derive(Default, Debug, Clone, Copy)] +pub(crate) struct TcpKeepaliveConfig { + pub time: Option, + pub interval: Option, + pub retries: Option, +} + +impl TcpKeepaliveConfig { + pub(crate) fn 
into_tcpkeepalive(self) -> Option { + let mut dirty = false; + let mut ka = TcpKeepalive::new(); + if let Some(time) = self.time { + ka = ka.with_time(time); + dirty = true + } + if let Some(interval) = self.interval { + ka = Self::ka_with_interval(ka, interval, &mut dirty) + }; + if let Some(retries) = self.retries { + ka = Self::ka_with_retries(ka, retries, &mut dirty) + }; + if dirty { + Some(ka) + } else { + None + } + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "visionos", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "tvos", + target_os = "watchos", + target_os = "windows", + ))] + fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_interval(interval) + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "visionos", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "tvos", + target_os = "watchos", + target_os = "windows", + )))] + fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive { + ka + } + + #[cfg(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "visionos", + target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "tvos", + target_os = "watchos", + ))] + fn ka_with_retries(ka: TcpKeepalive, retries: u32, dirty: &mut bool) -> TcpKeepalive { + *dirty = true; + ka.with_retries(retries) + } + + #[cfg(not(any( + target_os = "android", + target_os = "dragonfly", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "visionos", + 
target_os = "linux", + target_os = "macos", + target_os = "netbsd", + target_os = "tvos", + target_os = "watchos", + )))] + fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive { + ka + } +} + +impl Config { + pub(crate) fn tcp_keepalive(&self) -> Option { + self.tcp_keepalive_config.into_tcpkeepalive() + } + + pub(crate) fn tcp_user_timeout(&self) -> Option { + self.tcp_user_timeout + } + + pub(crate) fn connect_timeout(&self) -> Option { + self.connect_timeout + } + + pub(crate) fn happy_eyeballs_timeout(&self) -> Option { + self.happy_eyeballs_timeout + } + + pub(crate) fn local_address_ipv4(&self) -> Option { + self.local_address_ipv4 + } + + pub(crate) fn local_address_ipv6(&self) -> Option { + self.local_address_ipv6 + } + + pub(crate) fn interface(&self) -> &Option { + &self.interface + } + + pub(crate) fn nodelay(&self) -> bool { + self.nodelay + } + + pub(crate) fn reuse_address(&self) -> bool { + self.reuse_address + } + + pub(crate) fn send_buffer_size(&self) -> Option { + self.send_buffer_size + } + + pub(crate) fn recv_buffer_size(&self) -> Option { + self.recv_buffer_size + } +} diff --git a/src/client/connect/dns/mod.rs b/src/client/connect/dns/mod.rs new file mode 100644 index 00000000..87cbfa52 --- /dev/null +++ b/src/client/connect/dns/mod.rs @@ -0,0 +1,204 @@ +//! DNS Resolution used by the `HttpConnector`. +//! +//! This module contains: +//! +//! - The `Name` type used as an argument to custom resolvers. +//! +//! # Resolvers are `Service`s +//! +//! A resolver is just a +//! `Service>`. +//! +//! A simple resolver that ignores the name and always returns a specific +//! address: +//! +//! ```rust,ignore +//! use std::{convert::Infallible, iter, net::SocketAddr}; +//! +//! let resolver = tower::service_fn(|_name| async { +//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) +//! }); +//! 
``` + +use std::error::Error; +use std::fmt; +use std::future::Future; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; +use std::str::FromStr; +use std::task::{Context, Poll}; +use std::vec; + +use tower_service::Service; + +#[cfg(feature = "tokio-rt")] +pub mod tokio; + +/// A domain name to resolve into IP addresses. +#[derive(Clone, Hash, Eq, PartialEq)] +pub struct Name { + host: Box, +} + +impl Name { + pub(crate) fn new(host: Box) -> Name { + Name { host } + } + + /// View the hostname as a string slice. + pub fn as_str(&self) -> &str { + &self.host + } +} + +impl fmt::Debug for Name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Debug::fmt(&self.host, f) + } +} + +impl fmt::Display for Name { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.host, f) + } +} + +impl FromStr for Name { + type Err = InvalidNameError; + + fn from_str(host: &str) -> Result { + // Possibly add validation later + Ok(Name::new(host.into())) + } +} + +/// Error indicating a given string was not a valid domain name. +#[derive(Debug)] +pub struct InvalidNameError(()); + +impl fmt::Display for InvalidNameError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("Not a valid domain name") + } +} + +impl Error for InvalidNameError {} + +/// Iterator over `SocketAddr`s. 
+pub struct SocketAddrs { + iter: vec::IntoIter, +} + +impl SocketAddrs { + pub(crate) fn new(addrs: Vec) -> Self { + SocketAddrs { + iter: addrs.into_iter(), + } + } + + pub(crate) fn try_parse(host: &str, port: u16) -> Option { + if let Ok(addr) = host.parse::() { + let addr = SocketAddrV4::new(addr, port); + return Some(SocketAddrs { + iter: vec![SocketAddr::V4(addr)].into_iter(), + }); + } + if let Ok(addr) = host.parse::() { + let addr = SocketAddrV6::new(addr, port, 0, 0); + return Some(SocketAddrs { + iter: vec![SocketAddr::V6(addr)].into_iter(), + }); + } + None + } + + #[inline] + fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { + SocketAddrs::new(self.iter.filter(predicate).collect()) + } + + pub(crate) fn split_by_preference( + self, + local_addr_ipv4: Option, + local_addr_ipv6: Option, + ) -> (SocketAddrs, SocketAddrs) { + match (local_addr_ipv4, local_addr_ipv6) { + (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), + (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), + _ => { + let preferring_v6 = self + .iter + .as_slice() + .first() + .map(SocketAddr::is_ipv6) + .unwrap_or(false); + + let (preferred, fallback) = self + .iter + .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); + + (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) + } + } + } + + pub(crate) fn is_empty(&self) -> bool { + self.iter.as_slice().is_empty() + } + + pub(crate) fn len(&self) -> usize { + self.iter.as_slice().len() + } +} + +impl Iterator for SocketAddrs { + type Item = SocketAddr; + #[inline] + fn next(&mut self) -> Option { + self.iter.next() + } +} + +/// A trait alias for a `Service` that resolves a `Name` to an iterator of `SocketAddr`s. +pub trait Resolve { + /// The iterator of socket addresses returned by the resolver. + type Addrs: Iterator; + /// The error type returned by the resolver. + type Error: Into>; + /// The future returned by the resolver. 
+ type Future: Future>; + + /// Polls the resolver to determine if it's ready to accept a new query. + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + /// Resolve the given name into an iterator of socket addresses. + fn resolve(&mut self, name: Name) -> Self::Future; +} + +impl Resolve for S +where + S: Service, + S::Response: Iterator, + S::Error: Into>, +{ + type Addrs = S::Response; + type Error = S::Error; + type Future = S::Future; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + Service::poll_ready(self, cx) + } + + fn resolve(&mut self, name: Name) -> Self::Future { + Service::call(self, name) + } +} + +pub(crate) async fn resolve(resolver: &mut R, name: Name) -> Result +where + R: Resolve, +{ + std::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; + resolver.resolve(name).await +} diff --git a/src/client/connect/dns/tokio.rs b/src/client/connect/dns/tokio.rs new file mode 100644 index 00000000..a236c164 --- /dev/null +++ b/src/client/connect/dns/tokio.rs @@ -0,0 +1,111 @@ +//! DNS Resolution used by the `HttpConnector`. +//! +//! This module contains: +//! +//! - A [`GaiResolver`] that is the default resolver for the `HttpConnector`. +//! - The `Name` type used as an argument to custom resolvers. +//! +//! # Resolvers are `Service`s +//! +//! A resolver is just a +//! `Service>`. +//! +//! A simple resolver that ignores the name and always returns a specific +//! address: +//! +//! ```rust,ignore +//! use std::{convert::Infallible, iter, net::SocketAddr}; +//! +//! let resolver = tower::service_fn(|_name| async { +//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) +//! }); +//! ``` +use std::fmt; +use std::future::Future; +use std::io; +use std::net::ToSocketAddrs; +use std::pin::Pin; +use std::task::{self, Poll}; +use tokio::task::JoinHandle; + +pub use super::{Name, Resolve, SocketAddrs}; + +/// A resolver using blocking `getaddrinfo` calls in a threadpool. 
+#[derive(Clone)] +pub struct TokioGaiResolver { + _priv: (), +} + +impl Default for TokioGaiResolver { + fn default() -> Self { + TokioGaiResolver::new() + } +} + +impl TokioGaiResolver { + /// Construct a new `GaiResolver`. + pub fn new() -> Self { + TokioGaiResolver { _priv: () } + } +} + +impl Resolve for TokioGaiResolver { + type Addrs = SocketAddrs; + type Error = io::Error; + type Future = GaiFuture; + + fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn resolve(&mut self, name: Name) -> Self::Future { + let blocking = tokio::task::spawn_blocking(move || { + (name.as_str(), 0) + .to_socket_addrs() + .map(|i| SocketAddrs::new(i.collect::>())) + }); + + GaiFuture { inner: blocking } + } +} + +impl fmt::Debug for TokioGaiResolver { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("GaiResolver") + } +} + +/// A future to resolve a name returned by `GaiResolver`. +pub struct GaiFuture { + inner: JoinHandle>, +} + +impl Future for GaiFuture { + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + Pin::new(&mut self.inner).poll(cx).map(|res| match res { + Ok(Ok(addrs)) => Ok(addrs), + Ok(Err(err)) => Err(err), + Err(join_err) => { + if join_err.is_cancelled() { + Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) + } else { + panic!("gai background task failed: {join_err:?}") + } + } + }) + } +} + +impl fmt::Debug for GaiFuture { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.pad("GaiFuture") + } +} + +impl Drop for GaiFuture { + fn drop(&mut self) { + self.inner.abort(); + } +} diff --git a/src/client/connect/http/mod.rs b/src/client/connect/http/mod.rs new file mode 100644 index 00000000..82b17226 --- /dev/null +++ b/src/client/connect/http/mod.rs @@ -0,0 +1,408 @@ +use crate::client::connect::{ + config::{Config, TcpKeepaliveConfig}, + dns::{self, Resolve}, + tcp::{ConnectError, ConnectingTcp, TcpConnector}, + Connect, +}; 
+use http::uri::{Scheme, Uri}; +use pin_project_lite::pin_project; +use std::fmt; +use std::future::Future; +use std::marker::PhantomData; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{self, ready, Poll}; +use std::time::Duration; +use tower_service::Service; +use tracing::trace; + +#[cfg(feature = "tokio-net")] +pub mod tokio; + +const INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http"; +const INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing"; +const INVALID_MISSING_HOST: &str = "invalid URL, host is missing"; + +/// TBD +pub trait HttpConnect: Connect + Clone { + /// Set whether to enforce using the `http` scheme. + fn enforce_http(&mut self, is_enforced: bool); + + /// Set the TCP keepalive time. + fn set_keepalive(&mut self, time: Option); + + /// Set the TCP keepalive interval. + fn set_keepalive_interval(&mut self, interval: Option); + + /// Set the number of TCP keepalive retries. + fn set_keepalive_retries(&mut self, retries: Option); + + /// Set the TCP nodelay option. + fn set_nodelay(&mut self, nodelay: bool); + + /// Set the TCP send buffer size. + fn set_send_buffer_size(&mut self, size: Option); + + /// Set the TCP receive buffer size. + fn set_recv_buffer_size(&mut self, size: Option); + + /// Set the local address to bind to. + fn set_local_address(&mut self, addr: Option); + + /// Set the local IPv4 and IPv6 addresses to bind to. + fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr); + + /// Set the connect timeout. + fn set_connect_timeout(&mut self, dur: Option); + + /// Set the happy eyeballs timeout. + fn set_happy_eyeballs_timeout(&mut self, dur: Option); + + /// Set whether to reuse the address. 
+ fn set_reuse_address(&mut self, reuse_address: bool); + + #[cfg(any( + target_os = "android", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + /// Set the network interface to bind to. + fn set_interface>(&mut self, interface: I) -> &mut Self; + + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + /// Set the TCP user timeout. + fn set_tcp_user_timeout(&mut self, time: Option); +} + +#[derive(Clone)] +/// A connector for the `http` scheme. +pub struct HttpConnector { + config: Arc, + resolver: R, + connector: S, +} + +impl HttpConnector { + /// Construct a new HttpConnector. + pub fn new(resolver: R, connector: S) -> Self { + HttpConnector { + config: Arc::new(Config { + connect_timeout: None, + enforce_http: true, + happy_eyeballs_timeout: Some(Duration::from_millis(300)), + tcp_keepalive_config: TcpKeepaliveConfig::default(), + local_address_ipv4: None, + local_address_ipv6: None, + nodelay: false, + reuse_address: false, + send_buffer_size: None, + recv_buffer_size: None, + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + interface: None, + #[cfg(any( + target_os = "illumos", + target_os = "ios", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + interface: None, + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + tcp_user_timeout: None, + }), + resolver, + connector, + } + } + + fn config_mut(&mut self) -> &mut Config { + Arc::make_mut(&mut self.config) + } +} + +impl HttpConnect for HttpConnector +where + R: Resolve + Clone + Send + Sync + 'static, + R::Future: Send, + S: TcpConnector, +{ + #[inline] + fn enforce_http(&mut self, is_enforced: bool) { + self.config_mut().enforce_http = is_enforced; + } + + #[inline] + fn 
set_keepalive(&mut self, time: Option) { + self.config_mut().tcp_keepalive_config.time = time; + } + + #[inline] + fn set_keepalive_interval(&mut self, interval: Option) { + self.config_mut().tcp_keepalive_config.interval = interval; + } + + #[inline] + fn set_keepalive_retries(&mut self, retries: Option) { + self.config_mut().tcp_keepalive_config.retries = retries; + } + + #[inline] + fn set_nodelay(&mut self, nodelay: bool) { + self.config_mut().nodelay = nodelay; + } + + #[inline] + fn set_send_buffer_size(&mut self, size: Option) { + self.config_mut().send_buffer_size = size; + } + + #[inline] + fn set_recv_buffer_size(&mut self, size: Option) { + self.config_mut().recv_buffer_size = size; + } + + #[inline] + fn set_local_address(&mut self, addr: Option) { + let (v4, v6) = match addr { + Some(IpAddr::V4(a)) => (Some(a), None), + Some(IpAddr::V6(a)) => (None, Some(a)), + _ => (None, None), + }; + + let cfg = self.config_mut(); + + cfg.local_address_ipv4 = v4; + cfg.local_address_ipv6 = v6; + } + + #[inline] + fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { + let cfg = self.config_mut(); + + cfg.local_address_ipv4 = Some(addr_ipv4); + cfg.local_address_ipv6 = Some(addr_ipv6); + } + + #[inline] + fn set_connect_timeout(&mut self, dur: Option) { + self.config_mut().connect_timeout = dur; + } + + #[inline] + fn set_happy_eyeballs_timeout(&mut self, dur: Option) { + self.config_mut().happy_eyeballs_timeout = dur; + } + + #[inline] + fn set_reuse_address(&mut self, reuse_address: bool) { + self.config_mut().reuse_address = reuse_address; + } + + #[cfg(any( + target_os = "android", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + #[inline] + fn set_interface>(&mut self, interface: I) -> &mut Self { + let interface = interface.into(); + #[cfg(any(target_os = "android", 
target_os = "fuchsia", target_os = "linux"))] + { + self.config_mut().interface = Some(interface); + } + #[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))] + { + let interface = std::ffi::CString::new(interface) + .expect("interface name should not have nulls in it"); + self.config_mut().interface = Some(interface); + } + self + } + + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + #[inline] + fn set_tcp_user_timeout(&mut self, time: Option) { + self.config_mut().tcp_user_timeout = time; + } +} + +impl fmt::Debug for HttpConnector { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("HttpConnector").finish() + } +} + +impl Service for HttpConnector +where + R: Resolve + Clone + Send + Sync + 'static, + R::Future: Send, + S: TcpConnector, + S::TcpStream: From, +{ + type Response = S::Connection; + type Error = ConnectError; + type Future = HttpConnecting; + + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?; + Poll::Ready(Ok(())) + } + + fn call(&mut self, dst: Uri) -> Self::Future { + let mut self_ = self.clone(); + HttpConnecting { + fut: Box::pin(async move { self_.call_async(dst).await }), + _marker: PhantomData, + } + } +} + +fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { + trace!( + "Http::connect; scheme={:?}, host={:?}, port={:?}", + dst.scheme(), + dst.host(), + dst.port(), + ); + + if config.enforce_http { + if dst.scheme() != Some(&Scheme::HTTP) { + return Err(ConnectError { + msg: INVALID_NOT_HTTP, + addr: None, + cause: None, + }); + } + } else if dst.scheme().is_none() { + return Err(ConnectError { + msg: INVALID_MISSING_SCHEME, + addr: None, + cause: None, + }); + } + + let host = match dst.host() { + Some(s) => s, + None => { + return Err(ConnectError { + msg: INVALID_MISSING_HOST, + addr: None, + cause: None, + }); + } + }; + let 
port = match dst.port() { + Some(port) => port.as_u16(), + None => { + if dst.scheme() == Some(&Scheme::HTTPS) { + 443 + } else { + 80 + } + } + }; + + Ok((host, port)) +} + +impl HttpConnector +where + R: Resolve, + S: TcpConnector, + S::TcpStream: From, +{ + async fn call_async(&mut self, dst: Uri) -> Result { + let config = &self.config; + + let (host, port) = get_host_port(config, &dst)?; + let host = host.trim_start_matches('[').trim_end_matches(']'); + + let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { + addrs + } else { + let addrs = dns::resolve(&mut self.resolver, dns::Name::new(host.into())) + .await + .map_err(ConnectError::dns)?; + let addrs = addrs + .map(|mut addr| { + set_port(&mut addr, port, dst.port().is_some()); + addr + }) + .collect(); + dns::SocketAddrs::new(addrs) + }; + + let c = ConnectingTcp::new(addrs, config, self.connector.clone()); + let sock = c.connect(config).await?; + Ok(sock) + } +} + +/// Extra information about the transport when an HttpConnector is used. +#[derive(Clone, Debug)] +pub struct HttpInfo { + pub(crate) remote_addr: SocketAddr, + pub(crate) local_addr: SocketAddr, +} + +impl HttpInfo { + /// The remote address of the connection. + pub fn remote_addr(&self) -> SocketAddr { + self.remote_addr + } + + /// The local address of the connection. + pub fn local_addr(&self) -> SocketAddr { + self.local_addr + } +} + +pin_project! { + #[must_use = "futures do nothing unless polled"] + #[allow(missing_debug_implementations)] + /// Future returned by `HttpConnector::call`. 
+ pub struct HttpConnecting { + + #[pin] + fut: BoxConnecting, + _marker: PhantomData, + } +} + +type ConnectResult = Result<::Connection, ConnectError>; +type BoxConnecting = Pin> + Send>>; + +impl Future for HttpConnecting +where + R: Resolve, + S: TcpConnector, +{ + type Output = ConnectResult; + + fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + self.project().fut.poll(cx) + } +} + +fn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) { + if explicit || addr.port() == 0 { + addr.set_port(host_port) + }; +} diff --git a/src/client/connect/http/tokio.rs b/src/client/connect/http/tokio.rs new file mode 100644 index 00000000..fa46e39e --- /dev/null +++ b/src/client/connect/http/tokio.rs @@ -0,0 +1,240 @@ +//! TBD + +use crate::client::connect::{ + dns::{tokio::TokioGaiResolver, Resolve}, + http::{HttpConnecting, HttpConnector}, + tcp::{tokio::TokioTcpConnector, ConnectError}, +}; +use crate::rt::TokioIo; +use hyper::Uri; +use std::fmt; +use std::net::{Ipv4Addr, Ipv6Addr}; +use std::time::Duration; +use tokio::net::TcpStream; + +use super::HttpConnect; + +/// A connector for the `http` scheme. +/// +/// Performs DNS resolution in a thread pool, and then connects over TCP. +/// +/// # Note +/// +/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes +/// transport information such as the remote socket address used. +#[derive(Clone)] +pub struct TokioHttpConnector { + inner: HttpConnector, +} + +impl TokioHttpConnector { + /// Construct a new TokioHttpConnector. + pub fn new() -> Self { + Self::new_with_resolver(TokioGaiResolver::new()) + } +} + +impl Default for TokioHttpConnector { + fn default() -> Self { + Self::new_with_resolver(R::default()) + } +} + +impl TokioHttpConnector { + /// Construct a new TokioHttpConnector. + /// + /// Takes a [`Resolver`](crate::client::legacy::connect::dns#resolvers-are-services) to handle DNS lookups. 
+ pub fn new_with_resolver(resolver: R) -> Self { + Self { + inner: HttpConnector::new(resolver, TokioTcpConnector::new()), + } + } +} + +impl HttpConnect for TokioHttpConnector +where + R: Resolve + Clone + Send + Sync + 'static, + R::Future: Send, +{ + /// Option to enforce all `Uri`s have the `http` scheme. + /// + /// Enabled by default. + #[inline] + fn enforce_http(&mut self, is_enforced: bool) { + self.inner.enforce_http(is_enforced); + } + + /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration. + /// to remain idle before sending TCP keepalive probes. + /// + /// If `None`, keepalive is disabled. + /// + /// Default is `None`. + #[inline] + fn set_keepalive(&mut self, time: Option) { + self.inner.set_keepalive(time); + } + + /// Set the duration between two successive TCP keepalive retransmissions, + /// if acknowledgement to the previous keepalive transmission is not received. + #[inline] + fn set_keepalive_interval(&mut self, interval: Option) { + self.inner.set_keepalive_interval(interval); + } + + /// Set the number of retransmissions to be carried out before declaring that remote end is not available. + #[inline] + fn set_keepalive_retries(&mut self, retries: Option) { + self.inner.set_keepalive_retries(retries); + } + + /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. + /// + /// Default is `false`. + #[inline] + fn set_nodelay(&mut self, nodelay: bool) { + self.inner.set_nodelay(nodelay); + } + + /// Sets the value of the SO_SNDBUF option on the socket. + #[inline] + fn set_send_buffer_size(&mut self, size: Option) { + self.inner.set_send_buffer_size(size); + } + + /// Sets the value of the SO_RCVBUF option on the socket. + #[inline] + fn set_recv_buffer_size(&mut self, size: Option) { + self.inner.set_recv_buffer_size(size); + } + + /// Set that all sockets are bound to the configured address before connection. + /// + /// If `None`, the sockets will not be bound. + /// + /// Default is `None`. 
+ #[inline] + fn set_local_address(&mut self, addr: Option) { + self.inner.set_local_address(addr); + } + + /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's + /// preferences) before connection. + #[inline] + fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { + self.inner.set_local_addresses(addr_ipv4, addr_ipv6); + } + + /// Set the connect timeout. + /// + /// If a domain resolves to multiple IP addresses, the timeout will be + /// evenly divided across them. + /// + /// Default is `None`. + #[inline] + fn set_connect_timeout(&mut self, dur: Option) { + self.inner.set_connect_timeout(dur); + } + + /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. + /// + /// If hostname resolves to both IPv4 and IPv6 addresses and connection + /// cannot be established using preferred address family before timeout + /// elapses, then connector will in parallel attempt connection using other + /// address family. + /// + /// If `None`, parallel connection attempts are disabled. + /// + /// Default is 300 milliseconds. + /// + /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 + #[inline] + fn set_happy_eyeballs_timeout(&mut self, dur: Option) { + self.inner.set_happy_eyeballs_timeout(dur); + } + + /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. + /// + /// Default is `false`. + #[inline] + fn set_reuse_address(&mut self, reuse_address: bool) { + self.inner.set_reuse_address(reuse_address); + } + + /// Sets the name of the interface to bind sockets produced by this + /// connector. + /// + /// On Linux, this sets the `SO_BINDTODEVICE` option on this socket (see + /// [`man 7 socket`] for details). On macOS (and macOS-derived systems like + /// iOS), illumos, and Solaris, this will instead use the `IP_BOUND_IF` + /// socket option (see [`man 7p ip`]). 
+ /// + /// If a socket is bound to an interface, only packets received from that particular + /// interface are processed by the socket. Note that this only works for some socket + /// types, particularly `AF_INET`` sockets. + /// + /// On Linux it can be used to specify a [VRF], but the binary needs + /// to either have `CAP_NET_RAW` or to be run as root. + /// + /// This function is only available on the following operating systems: + /// - Linux, including Android + /// - Fuchsia + /// - illumos and Solaris + /// - macOS, iOS, visionOS, watchOS, and tvOS + /// + /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt + /// [`man 7 socket`]: https://man7.org/linux/man-pages/man7/socket.7.html + /// [`man 7p ip`]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html + #[cfg(any( + target_os = "android", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + #[inline] + fn set_interface>(&mut self, interface: I) -> &mut Self { + self.inner.set_interface(interface); + self + } + + /// Sets the value of the TCP_USER_TIMEOUT option on the socket. + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + #[inline] + fn set_tcp_user_timeout(&mut self, time: Option) { + self.inner.set_tcp_user_timeout(time); + } +} + +// R: Debug required for now to allow adding it to debug output later... 
+impl fmt::Debug for TokioHttpConnector { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TokioHttpConnector").finish() + } +} + +impl tower_service::Service for TokioHttpConnector +where + R: Resolve + Clone + Send + Sync + 'static, + R::Future: Send, +{ + type Response = TokioIo; + type Error = ConnectError; + type Future = HttpConnecting; + + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, dst: http::Uri) -> Self::Future { + self.inner.call(dst) + } +} diff --git a/src/client/connect/mod.rs b/src/client/connect/mod.rs new file mode 100644 index 00000000..c7d886ad --- /dev/null +++ b/src/client/connect/mod.rs @@ -0,0 +1,406 @@ +use ::http::Extensions; +use core::fmt; +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; + +mod config; +/// TBD +pub mod dns; +/// TBD +pub mod http; +/// TBD +pub mod tcp; + +#[doc(hidden)] +pub(crate) mod pool; + +pub use self::sealed::Connect; + +/// Describes a type returned by a connector. +pub trait Connection { + /// Return metadata describing the connection. + fn connected(&self) -> Connected; +} + +/// A generic callback to be triggered when a connection is established. +#[derive(Clone)] +pub struct CaptureConnection(std::sync::Arc); + +impl CaptureConnection { + /// Create a new `OnConnection` callback. + pub fn new(f: F) -> Self + where + F: Fn(&Connected) + Send + Sync + 'static, + { + Self(std::sync::Arc::new(f)) + } + + /// Execute the callback with the connection information. + pub fn call(&self, c: &Connected) { + (self.0)(c) + } +} + +impl std::fmt::Debug for CaptureConnection { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OnConnection").finish() + } +} + +/// Extra information about the connected transport. +/// +/// This can be used to inform recipients about things like if ALPN +/// was used, or if connected to an HTTP proxy. 
+#[derive(Debug)]
+pub struct Connected {
+    pub(super) alpn: Alpn,
+    pub(super) is_proxied: bool,
+    pub(super) extra: Option<Extra>,
+    pub(super) poisoned: PoisonPill,
+}
+
+#[derive(Clone)]
+pub(crate) struct PoisonPill {
+    poisoned: Arc<AtomicBool>,
+}
+
+impl fmt::Debug for PoisonPill {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // print the address of the pill—this makes debugging issues much easier
+        write!(
+            f,
+            "PoisonPill@{:p} {{ poisoned: {} }}",
+            self.poisoned,
+            self.poisoned.load(Ordering::Relaxed)
+        )
+    }
+}
+
+impl PoisonPill {
+    pub(crate) fn healthy() -> Self {
+        Self {
+            poisoned: Arc::new(AtomicBool::new(false)),
+        }
+    }
+    pub(crate) fn poison(&self) {
+        self.poisoned.store(true, Ordering::Relaxed)
+    }
+
+    pub(crate) fn poisoned(&self) -> bool {
+        self.poisoned.load(Ordering::Relaxed)
+    }
+}
+
+pub(super) struct Extra(Box<dyn ExtraInner>);
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub(super) enum Alpn {
+    H2,
+    None,
+}
+
+impl Connected {
+    /// Create new `Connected` type with empty metadata.
+    pub fn new() -> Connected {
+        Connected {
+            alpn: Alpn::None,
+            is_proxied: false,
+            extra: None,
+            poisoned: PoisonPill::healthy(),
+        }
+    }
+
+    /// Set whether the connected transport is to an HTTP proxy.
+    ///
+    /// This setting will affect if HTTP/1 requests written on the transport
+    /// will have the request-target in absolute-form or origin-form:
+    ///
+    /// - When `proxy(false)`:
+    ///
+    /// ```http
+    /// GET /guide HTTP/1.1
+    /// ```
+    ///
+    /// - When `proxy(true)`:
+    ///
+    /// ```http
+    /// GET http://hyper.rs/guide HTTP/1.1
+    /// ```
+    ///
+    /// Default is `false`.
+    pub fn proxy(mut self, is_proxied: bool) -> Connected {
+        self.is_proxied = is_proxied;
+        self
+    }
+
+    /// Determines if the connected transport is to an HTTP proxy.
+    pub fn is_proxied(&self) -> bool {
+        self.is_proxied
+    }
+
+    /// Set extra connection information to be set in the extensions of every `Response`.
+    pub fn extra<T: Clone + Send + Sync + 'static>(mut self, extra: T) -> Connected {
+        if let Some(prev) = self.extra {
+            self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra))));
+        } else {
+            self.extra = Some(Extra(Box::new(ExtraEnvelope(extra))));
+        }
+        self
+    }
+
+    /// Copies the extra connection information into an `Extensions` map.
+    pub fn get_extras(&self, extensions: &mut Extensions) {
+        if let Some(extra) = &self.extra {
+            extra.set(extensions);
+        }
+    }
+
+    /// Set that the connected transport negotiated HTTP/2 as its next protocol.
+    pub fn negotiated_h2(mut self) -> Connected {
+        self.alpn = Alpn::H2;
+        self
+    }
+
+    /// Determines if the connected transport negotiated HTTP/2 as its next protocol.
+    pub fn is_negotiated_h2(&self) -> bool {
+        self.alpn == Alpn::H2
+    }
+
+    /// Poison this connection
+    ///
+    /// A poisoned connection will not be reused for subsequent requests by the pool
+    pub fn poison(&self) {
+        self.poisoned.poison();
+        tracing::debug!(
+            poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests"
+        );
+    }
+
+    // Don't public expose that `Connected` is `Clone`, unsure if we want to
+    // keep that contract...
+    pub(super) fn clone(&self) -> Connected {
+        Connected {
+            alpn: self.alpn,
+            is_proxied: self.is_proxied,
+            extra: self.extra.clone(),
+            poisoned: self.poisoned.clone(),
+        }
+    }
+}
+
+impl Default for Connected {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+// ===== impl Extra =====
+
+impl Extra {
+    pub(super) fn set(&self, res: &mut Extensions) {
+        self.0.set(res);
+    }
+}
+
+impl Clone for Extra {
+    fn clone(&self) -> Extra {
+        Extra(self.0.clone_box())
+    }
+}
+
+impl fmt::Debug for Extra {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Extra").finish()
+    }
+}
+
+trait ExtraInner: Send + Sync {
+    fn clone_box(&self) -> Box<dyn ExtraInner>;
+    fn set(&self, res: &mut Extensions);
+}
+
+// This indirection allows the `Connected` to have a type-erased "extra" value,
+// while that type still knows its inner extra type. This allows the correct
+// TypeId to be used when inserting into `res.extensions_mut()`.
+#[derive(Clone)]
+struct ExtraEnvelope<T>(T);
+
+impl<T> ExtraInner for ExtraEnvelope<T>
+where
+    T: Clone + Send + Sync + 'static,
+{
+    fn clone_box(&self) -> Box<dyn ExtraInner> {
+        Box::new(self.clone())
+    }
+
+    fn set(&self, res: &mut Extensions) {
+        res.insert(self.0.clone());
+    }
+}
+
+struct ExtraChain<T>(Box<dyn ExtraInner>, T);
+
+impl<T: Clone> Clone for ExtraChain<T> {
+    fn clone(&self) -> Self {
+        ExtraChain(self.0.clone_box(), self.1.clone())
+    }
+}
+
+impl<T> ExtraInner for ExtraChain<T>
+where
+    T: Clone + Send + Sync + 'static,
+{
+    fn clone_box(&self) -> Box<dyn ExtraInner> {
+        Box::new(self.clone())
+    }
+
+    fn set(&self, res: &mut Extensions) {
+        self.0.set(res);
+        res.insert(self.1.clone());
+    }
+}
+
+pub(super) mod sealed {
+    use std::error::Error as StdError;
+    use std::future::Future;
+
+    use ::http::Uri;
+    use hyper::rt::{Read, Write};
+
+    use super::Connection;
+
+    /// Connect to a destination, returning an IO transport.
+    ///
+    /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the
+    /// ready connection.
+ /// + /// # Trait Alias + /// + /// This is really just an *alias* for the `tower::Service` trait, with + /// additional bounds set for convenience *inside* hyper. You don't actually + /// implement this trait, but `tower::Service` instead. + // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot + // fit the `Connect` bounds because of the blanket impl for `Service`. + pub trait Connect: Sealed + Sized { + #[doc(hidden)] + type _Svc: ConnectSvc; + #[doc(hidden)] + fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; + } + + pub trait ConnectSvc { + type Connection: Read + Write + Connection + Unpin + Send + 'static; + type Error: Into>; + type Future: Future> + Unpin + Send + 'static; + + fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future; + } + + impl Connect for S + where + S: tower_service::Service + Send + 'static, + S::Error: Into>, + S::Future: Unpin + Send, + T: Read + Write + Connection + Unpin + Send + 'static, + { + type _Svc = S; + + fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot { + crate::service::Oneshot::new(self, dst) + } + } + + impl ConnectSvc for S + where + S: tower_service::Service + Send + 'static, + S::Error: Into>, + S::Future: Unpin + Send, + T: Read + Write + Connection + Unpin + Send + 'static, + { + type Connection = T; + type Error = S::Error; + type Future = crate::service::Oneshot; + + fn connect(self, _: Internal, dst: Uri) -> Self::Future { + crate::service::Oneshot::new(self, dst) + } + } + + impl Sealed for S + where + S: tower_service::Service + Send, + S::Error: Into>, + S::Future: Unpin + Send, + T: Read + Write + Connection + Unpin + Send + 'static, + { + } + + pub trait Sealed {} + #[allow(missing_debug_implementations)] + pub struct Internal; +} + +#[cfg(test)] +mod tests { + use super::Connected; + + #[derive(Clone, Debug, PartialEq)] + struct Ex1(usize); + + #[derive(Clone, Debug, PartialEq)] + struct Ex2(&'static str); + + #[derive(Clone, Debug, 
PartialEq)]
+    struct Ex3(&'static str);
+
+    #[test]
+    fn test_connected_extra() {
+        let c1 = Connected::new().extra(Ex1(41));
+
+        let mut ex = ::http::Extensions::new();
+
+        assert_eq!(ex.get::<Ex1>(), None);
+
+        c1.extra.as_ref().expect("c1 extra").set(&mut ex);
+
+        assert_eq!(ex.get::<Ex1>(), Some(&Ex1(41)));
+    }
+
+    #[test]
+    fn test_connected_extra_chain() {
+        // If a user composes connectors and at each stage, there's "extra"
+        // info to attach, it shouldn't override the previous extras.
+
+        let c1 = Connected::new()
+            .extra(Ex1(45))
+            .extra(Ex2("zoom"))
+            .extra(Ex3("pew pew"));
+
+        let mut ex1 = ::http::Extensions::new();
+
+        assert_eq!(ex1.get::<Ex1>(), None);
+        assert_eq!(ex1.get::<Ex2>(), None);
+        assert_eq!(ex1.get::<Ex3>(), None);
+
+        c1.extra.as_ref().expect("c1 extra").set(&mut ex1);
+
+        assert_eq!(ex1.get::<Ex1>(), Some(&Ex1(45)));
+        assert_eq!(ex1.get::<Ex2>(), Some(&Ex2("zoom")));
+        assert_eq!(ex1.get::<Ex3>(), Some(&Ex3("pew pew")));
+
+        // Just like extensions, inserting the same type overrides previous type.
+        let c2 = Connected::new()
+            .extra(Ex1(33))
+            .extra(Ex2("hiccup"))
+            .extra(Ex1(99));
+
+        let mut ex2 = ::http::Extensions::new();
+
+        c2.extra.as_ref().expect("c2 extra").set(&mut ex2);
+
+        assert_eq!(ex2.get::<Ex1>(), Some(&Ex1(99)));
+        assert_eq!(ex2.get::<Ex2>(), Some(&Ex2("hiccup")));
+    }
+}
diff --git a/src/client/connect/pool.rs b/src/client/connect/pool.rs
new file mode 100644
index 00000000..734264b5
--- /dev/null
+++ b/src/client/connect/pool.rs
@@ -0,0 +1,1107 @@
+use std::collections::{HashMap, HashSet, VecDeque};
+use std::convert::Infallible;
+use std::error::Error as StdError;
+use std::fmt::{self, Debug};
+use std::future::Future;
+use std::hash::Hash;
+use std::ops::{Deref, DerefMut};
+use std::pin::Pin;
+use std::sync::{Arc, Mutex, Weak};
+use std::task::{self, ready, Poll};
+
+use std::time::{Duration, Instant};
+
+use futures_channel::oneshot;
+use tracing::{debug, trace};
+
+use hyper::rt::Timer as _;
+
+use crate::common::{exec, exec::Exec, timer::Timer};
+
+// FIXME: allow() required due to `impl Trait` leaking types to this lint
+#[allow(missing_debug_implementations)]
+pub struct Pool<T, K: Key> {
+    // If the pool is disabled, this is None.
+    inner: Option<Arc<Mutex<PoolInner<T, K>>>>,
+}
+
+// Before using a pooled connection, make sure the sender is not dead.
+//
+// This is a trait to allow the `client::pool::tests` to work for `i32`.
+//
+// See https://github.com/hyperium/hyper/issues/1429
+pub trait Poolable: Unpin + Send + Sized + 'static {
+    fn is_open(&self) -> bool;
+    /// Reserve this connection.
+    ///
+    /// Allows for HTTP/2 to return a shared reservation.
+    fn reserve(self) -> Reservation<Self>;
+    fn can_share(&self) -> bool;
+}
+
+pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}
+
+impl<T> Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {}
+
+/// A marker to identify what version a pooled connection is.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +#[allow(dead_code)] +pub enum Ver { + Auto, + Http2, +} + +/// When checking out a pooled connection, it might be that the connection +/// only supports a single reservation, or it might be usable for many. +/// +/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be +/// used for multiple requests. +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub enum Reservation { + /// This connection could be used multiple times, the first one will be + /// reinserted into the `idle` pool, and the second will be given to + /// the `Checkout`. + #[cfg(feature = "http2")] + Shared(T, T), + /// This connection requires unique access. It will be returned after + /// use is complete. + Unique(T), +} + +/// Simple type alias in case the key type needs to be adjusted. +// pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc; +struct PoolInner { + // A flag that a connection is being established, and the connection + // should be shared. This prevents making multiple HTTP/2 connections + // to the same host. + connecting: HashSet, + // These are internal Conns sitting in the event loop in the KeepAlive + // state, waiting to receive a new Request to send on the socket. + idle: HashMap>>, + max_idle_per_host: usize, + // These are outstanding Checkouts that are waiting for a socket to be + // able to send a Request one. This is used when "racing" for a new + // connection. + // + // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait + // for the Pool to receive an idle Conn. When a Conn becomes idle, + // this list is checked for any parked Checkouts, and tries to notify + // them that the Conn could be used instead of waiting for a brand new + // connection. + waiters: HashMap>>, + // A oneshot channel is used to allow the interval to be notified when + // the Pool completely drops. 
That way, the interval can cancel immediately. + idle_interval_ref: Option>, + exec: Exec, + timer: Option, + timeout: Option, +} + +// This is because `Weak::new()` *allocates* space for `T`, even if it +// doesn't need it! +struct WeakOpt(Option>); + +#[derive(Clone, Copy, Debug)] +pub struct Config { + pub idle_timeout: Option, + pub max_idle_per_host: usize, +} + +impl Config { + pub fn is_enabled(&self) -> bool { + self.max_idle_per_host > 0 + } +} + +impl Pool { + pub fn new(config: Config, executor: E, timer: Option) -> Pool + where + E: hyper::rt::Executor + Send + Sync + Clone + 'static, + M: hyper::rt::Timer + Send + Sync + Clone + 'static, + { + let exec = Exec::new(executor); + let timer = timer.map(|t| Timer::new(t)); + let inner = if config.is_enabled() { + Some(Arc::new(Mutex::new(PoolInner { + connecting: HashSet::new(), + idle: HashMap::new(), + idle_interval_ref: None, + max_idle_per_host: config.max_idle_per_host, + waiters: HashMap::new(), + exec, + timer, + timeout: config.idle_timeout, + }))) + } else { + None + }; + + Pool { inner } + } + + pub(crate) fn is_enabled(&self) -> bool { + self.inner.is_some() + } + + #[cfg(test)] + pub(super) fn no_timer(&self) { + // Prevent an actual interval from being created for this pool... + { + let mut inner = self.inner.as_ref().unwrap().lock().unwrap(); + assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); + let (tx, _) = oneshot::channel(); + inner.idle_interval_ref = Some(tx); + } + } +} + +impl Pool { + /// Returns a `Checkout` which is a future that resolves if an idle + /// connection becomes available. + pub fn checkout(&self, key: K) -> Checkout { + Checkout { + key, + pool: self.clone(), + waiter: None, + } + } + + /// Ensure that there is only ever 1 connecting task for HTTP/2 + /// connections. This does nothing for HTTP/1. 
+ pub fn connecting(&self, key: &K, ver: Ver) -> Option> { + if ver == Ver::Http2 { + if let Some(ref enabled) = self.inner { + let mut inner = enabled.lock().unwrap(); + return if inner.connecting.insert(key.clone()) { + let connecting = Connecting { + key: key.clone(), + pool: WeakOpt::downgrade(enabled), + }; + Some(connecting) + } else { + trace!("HTTP/2 connecting already in progress for {:?}", key); + None + }; + } + } + + // else + Some(Connecting { + key: key.clone(), + // in HTTP/1's case, there is never a lock, so we don't + // need to do anything in Drop. + pool: WeakOpt::none(), + }) + } + + #[cfg(test)] + fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner> { + self.inner.as_ref().expect("enabled").lock().expect("lock") + } + + /* Used in client/tests.rs... + #[cfg(test)] + pub(super) fn h1_key(&self, s: &str) -> Key { + Arc::new(s.to_string()) + } + + #[cfg(test)] + pub(super) fn idle_count(&self, key: &Key) -> usize { + self + .locked() + .idle + .get(key) + .map(|list| list.len()) + .unwrap_or(0) + } + */ + + pub fn pooled( + &self, + #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, + value: T, + ) -> Pooled { + let (value, pool_ref) = if let Some(ref enabled) = self.inner { + match value.reserve() { + #[cfg(feature = "http2")] + Reservation::Shared(to_insert, to_return) => { + let mut inner = enabled.lock().unwrap(); + inner.put(connecting.key.clone(), to_insert, enabled); + // Do this here instead of Drop for Connecting because we + // already have a lock, no need to lock the mutex twice. + inner.connected(&connecting.key); + // prevent the Drop of Connecting from repeating inner.connected() + connecting.pool = WeakOpt::none(); + + // Shared reservations don't need a reference to the pool, + // since the pool always keeps a copy. 
+ (to_return, WeakOpt::none()) + } + Reservation::Unique(value) => { + // Unique reservations must take a reference to the pool + // since they hope to reinsert once the reservation is + // completed + (value, WeakOpt::downgrade(enabled)) + } + } + } else { + // If pool is not enabled, skip all the things... + + // The Connecting should have had no pool ref + debug_assert!(connecting.pool.upgrade().is_none()); + + (value, WeakOpt::none()) + }; + Pooled { + key: connecting.key.clone(), + is_reused: false, + pool: pool_ref, + value: Some(value), + } + } + + fn reuse(&self, key: &K, value: T) -> Pooled { + debug!("reuse idle connection for {:?}", key); + // TODO: unhack this + // In Pool::pooled(), which is used for inserting brand new connections, + // there's some code that adjusts the pool reference taken depending + // on if the Reservation can be shared or is unique. By the time + // reuse() is called, the reservation has already been made, and + // we just have the final value, without knowledge of if this is + // unique or shared. So, the hack is to just assume Ver::Http2 means + // shared... :( + let mut pool_ref = WeakOpt::none(); + if !value.can_share() { + if let Some(ref enabled) = self.inner { + pool_ref = WeakOpt::downgrade(enabled); + } + } + + Pooled { + is_reused: true, + key: key.clone(), + pool: pool_ref, + value: Some(value), + } + } +} + +/// Pop off this list, looking for a usable connection that hasn't expired. +struct IdlePopper<'a, T, K> { + key: &'a K, + list: &'a mut Vec>, +} + +impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> { + fn pop(self, expiration: &Expiration, now: Instant) -> Option> { + while let Some(entry) = self.list.pop() { + // If the connection has been closed, or is older than our idle + // timeout, simply drop it and keep looking... 
+ if !entry.value.is_open() { + trace!("removing closed connection for {:?}", self.key); + continue; + } + // TODO: Actually, since the `idle` list is pushed to the end always, + // that would imply that if *this* entry is expired, then anything + // "earlier" in the list would *have* to be expired also... Right? + // + // In that case, we could just break out of the loop and drop the + // whole list... + if expiration.expires(entry.idle_at, now) { + trace!("removing expired connection for {:?}", self.key); + continue; + } + + let value = match entry.value.reserve() { + #[cfg(feature = "http2")] + Reservation::Shared(to_reinsert, to_checkout) => { + self.list.push(Idle { + idle_at: now, + value: to_reinsert, + }); + to_checkout + } + Reservation::Unique(unique) => unique, + }; + + return Some(Idle { + idle_at: entry.idle_at, + value, + }); + } + + None + } +} + +impl PoolInner { + fn now(&self) -> Instant { + self.timer.as_ref().map_or_else(Instant::now, |t| t.now()) + } + + fn put(&mut self, key: K, value: T, __pool_ref: &Arc>>) { + if value.can_share() && self.idle.contains_key(&key) { + trace!("put; existing idle HTTP/2 connection for {:?}", key); + return; + } + trace!("put; add idle connection for {:?}", key); + let mut remove_waiters = false; + let mut value = Some(value); + if let Some(waiters) = self.waiters.get_mut(&key) { + while let Some(tx) = waiters.pop_front() { + if !tx.is_canceled() { + let reserved = value.take().expect("value already sent"); + let reserved = match reserved.reserve() { + #[cfg(feature = "http2")] + Reservation::Shared(to_keep, to_send) => { + value = Some(to_keep); + to_send + } + Reservation::Unique(uniq) => uniq, + }; + match tx.send(reserved) { + Ok(()) => { + if value.is_none() { + break; + } else { + continue; + } + } + Err(e) => { + value = Some(e); + } + } + } + + trace!("put; removing canceled waiter for {:?}", key); + } + remove_waiters = waiters.is_empty(); + } + if remove_waiters { + self.waiters.remove(&key); + } + + 
match value { + Some(value) => { + // borrow-check scope... + { + let now = self.now(); + let idle_list = self.idle.entry(key.clone()).or_default(); + if self.max_idle_per_host <= idle_list.len() { + trace!("max idle per host for {:?}, dropping connection", key); + return; + } + + debug!("pooling idle connection for {:?}", key); + idle_list.push(Idle { + value, + idle_at: now, + }); + } + + self.spawn_idle_interval(__pool_ref); + } + None => trace!("put; found waiter for {:?}", key), + } + } + + /// A `Connecting` task is complete. Not necessarily successfully, + /// but the lock is going away, so clean up. + fn connected(&mut self, key: &K) { + let existed = self.connecting.remove(key); + debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); + // cancel any waiters. if there are any, it's because + // this Connecting task didn't complete successfully. + // those waiters would never receive a connection. + self.waiters.remove(key); + } + + fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { + if self.idle_interval_ref.is_some() { + return; + } + let dur = if let Some(dur) = self.timeout { + dur + } else { + return; + }; + if dur == Duration::ZERO { + return; + } + let timer = if let Some(timer) = self.timer.clone() { + timer + } else { + return; + }; + + // While someone might want a shorter duration, and it will be respected + // at checkout time, there's no need to wake up and proactively evict + // faster than this. 
+ const MIN_CHECK: Duration = Duration::from_millis(90); + + let dur = dur.max(MIN_CHECK); + + let (tx, rx) = oneshot::channel(); + self.idle_interval_ref = Some(tx); + + let interval = IdleTask { + timer: timer.clone(), + duration: dur, + pool: WeakOpt::downgrade(pool_ref), + pool_drop_notifier: rx, + }; + + self.exec.execute(interval.run()); + } +} + +impl PoolInner { + /// Any `FutureResponse`s that were created will have made a `Checkout`, + /// and possibly inserted into the pool that it is waiting for an idle + /// connection. If a user ever dropped that future, we need to clean out + /// those parked senders. + fn clean_waiters(&mut self, key: &K) { + let mut remove_waiters = false; + if let Some(waiters) = self.waiters.get_mut(key) { + waiters.retain(|tx| !tx.is_canceled()); + remove_waiters = waiters.is_empty(); + } + if remove_waiters { + self.waiters.remove(key); + } + } +} + +impl PoolInner { + /// This should *only* be called by the IdleTask + fn clear_expired(&mut self) { + let dur = self.timeout.expect("interval assumes timeout"); + + let now = self.now(); + //self.last_idle_check_at = now; + + self.idle.retain(|key, values| { + values.retain(|entry| { + if !entry.value.is_open() { + trace!("idle interval evicting closed for {:?}", key); + return false; + } + + // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. + if now.saturating_duration_since(entry.idle_at) > dur { + trace!("idle interval evicting expired for {:?}", key); + return false; + } + + // Otherwise, keep this value... + true + }); + + // returning false evicts this key/val + !values.is_empty() + }); + } +} + +impl Clone for Pool { + fn clone(&self) -> Pool { + Pool { + inner: self.inner.clone(), + } + } +} + +/// A wrapped poolable value that tries to reinsert to the Pool on Drop. +// Note: The bounds `T: Poolable` is needed for the Drop impl. 
+pub struct Pooled { + value: Option, + is_reused: bool, + key: K, + pool: WeakOpt>>, +} + +impl Pooled { + pub fn is_reused(&self) -> bool { + self.is_reused + } + + pub fn is_pool_enabled(&self) -> bool { + self.pool.0.is_some() + } + + fn as_ref(&self) -> &T { + self.value.as_ref().expect("not dropped") + } + + fn as_mut(&mut self) -> &mut T { + self.value.as_mut().expect("not dropped") + } +} + +impl Deref for Pooled { + type Target = T; + fn deref(&self) -> &T { + self.as_ref() + } +} + +impl DerefMut for Pooled { + fn deref_mut(&mut self) -> &mut T { + self.as_mut() + } +} + +impl Drop for Pooled { + fn drop(&mut self) { + if let Some(value) = self.value.take() { + if !value.is_open() { + // If we *already* know the connection is done here, + // it shouldn't be re-inserted back into the pool. + return; + } + + if let Some(pool) = self.pool.upgrade() { + if let Ok(mut inner) = pool.lock() { + inner.put(self.key.clone(), value, &pool); + } + } else if !value.can_share() { + trace!("pool dropped, dropping pooled ({:?})", self.key); + } + // Ver::Http2 is already in the Pool (or dead), so we wouldn't + // have an actual reference to the Pool. 
+ } + } +} + +impl fmt::Debug for Pooled { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Pooled").field("key", &self.key).finish() + } +} + +struct Idle { + idle_at: Instant, + value: T, +} + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub struct Checkout { + key: K, + pool: Pool, + waiter: Option>, +} + +#[derive(Debug)] +#[non_exhaustive] +pub enum Error { + PoolDisabled, + CheckoutNoLongerWanted, + CheckedOutClosedValue, +} + +impl Error { + pub(crate) fn is_canceled(&self) -> bool { + matches!(self, Error::CheckedOutClosedValue) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match self { + Error::PoolDisabled => "pool is disabled", + Error::CheckedOutClosedValue => "checked out connection was closed", + Error::CheckoutNoLongerWanted => "request was canceled", + }) + } +} + +impl StdError for Error {} + +impl Checkout { + fn poll_waiter( + &mut self, + cx: &mut task::Context<'_>, + ) -> Poll, Error>>> { + if let Some(mut rx) = self.waiter.take() { + match Pin::new(&mut rx).poll(cx) { + Poll::Ready(Ok(value)) => { + if value.is_open() { + Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) + } else { + Poll::Ready(Some(Err(Error::CheckedOutClosedValue))) + } + } + Poll::Pending => { + self.waiter = Some(rx); + Poll::Pending + } + Poll::Ready(Err(_canceled)) => { + Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted))) + } + } + } else { + Poll::Ready(None) + } + } + + fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { + let entry = { + let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); + let expiration = Expiration::new(inner.timeout); + let now = inner.now(); + let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| { + trace!("take? 
{:?}: expiration = {:?}", self.key, expiration.0); + // A block to end the mutable borrow on list, + // so the map below can check is_empty() + { + let popper = IdlePopper { + key: &self.key, + list, + }; + popper.pop(&expiration, now) + } + .map(|e| (e, list.is_empty())) + }); + + let (entry, empty) = if let Some((e, empty)) = maybe_entry { + (Some(e), empty) + } else { + // No entry found means nuke the list for sure. + (None, true) + }; + if empty { + //TODO: This could be done with the HashMap::entry API instead. + inner.idle.remove(&self.key); + } + + if entry.is_none() && self.waiter.is_none() { + let (tx, mut rx) = oneshot::channel(); + trace!("checkout waiting for idle connection: {:?}", self.key); + inner + .waiters + .entry(self.key.clone()) + .or_insert_with(VecDeque::new) + .push_back(tx); + + // register the waker with this oneshot + assert!(Pin::new(&mut rx).poll(cx).is_pending()); + self.waiter = Some(rx); + } + + entry + }; + + entry.map(|e| self.pool.reuse(&self.key, e.value)) + } +} + +impl Future for Checkout { + type Output = Result, Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{ + return Poll::Ready(Ok(pooled)); + } + + if let Some(pooled) = self.checkout(cx) { + Poll::Ready(Ok(pooled)) + } else if !self.pool.is_enabled() { + Poll::Ready(Err(Error::PoolDisabled)) + } else { + // There's a new waiter, already registered in self.checkout() + debug_assert!(self.waiter.is_some()); + Poll::Pending + } + } +} + +impl Drop for Checkout { + fn drop(&mut self) { + if self.waiter.take().is_some() { + trace!("checkout dropped for {:?}", self.key); + if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) { + inner.clean_waiters(&self.key); + } + } + } +} + +// FIXME: allow() required due to `impl Trait` leaking types to this lint +#[allow(missing_debug_implementations)] +pub struct Connecting { + key: K, + pool: WeakOpt>>, +} + +impl Connecting { + pub fn alpn_h2(self, pool: &Pool) -> Option { + debug_assert!( + self.pool.0.is_none(), + "Connecting::alpn_h2 but already Http2" + ); + + pool.connecting(&self.key, Ver::Http2) + } +} + +impl Drop for Connecting { + fn drop(&mut self) { + if let Some(pool) = self.pool.upgrade() { + // No need to panic on drop, that could abort! + if let Ok(mut inner) = pool.lock() { + inner.connected(&self.key); + } + } + } +} + +struct Expiration(Option); + +impl Expiration { + fn new(dur: Option) -> Expiration { + Expiration(dur) + } + + fn expires(&self, instant: Instant, now: Instant) -> bool { + match self.0 { + // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. + Some(timeout) => now.saturating_duration_since(instant) > timeout, + None => false, + } + } +} + +struct IdleTask { + timer: Timer, + duration: Duration, + pool: WeakOpt>>, + // This allows the IdleTask to be notified as soon as the entire + // Pool is fully dropped, and shutdown. This channel is never sent on, + // but Err(Canceled) will be received when the Pool is dropped. 
+ pool_drop_notifier: oneshot::Receiver, +} + +impl IdleTask { + async fn run(self) { + use futures_util::future; + + let mut sleep = self.timer.sleep_until(self.timer.now() + self.duration); + let mut on_pool_drop = self.pool_drop_notifier; + loop { + match future::select(&mut on_pool_drop, &mut sleep).await { + future::Either::Left(_) => { + // pool dropped, bah-bye + break; + } + future::Either::Right(((), _)) => { + if let Some(inner) = self.pool.upgrade() { + if let Ok(mut inner) = inner.lock() { + trace!("idle interval checking for expired"); + inner.clear_expired(); + } + } + + let deadline = self.timer.now() + self.duration; + self.timer.reset(&mut sleep, deadline); + } + } + } + + trace!("pool closed, canceling idle interval"); + } +} + +impl WeakOpt { + fn none() -> Self { + WeakOpt(None) + } + + fn downgrade(arc: &Arc) -> Self { + WeakOpt(Some(Arc::downgrade(arc))) + } + + fn upgrade(&self) -> Option> { + self.0.as_ref().and_then(Weak::upgrade) + } +} + +#[cfg(all(test, feature = "tokio-net"))] +mod tests { + use std::fmt::Debug; + use std::future::Future; + use std::hash::Hash; + use std::pin::Pin; + use std::task::{self, Poll}; + use std::time::Duration; + + use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; + use crate::rt::{TokioExecutor, TokioTimer}; + + use crate::common::timer; + + #[derive(Clone, Debug, PartialEq, Eq, Hash)] + struct KeyImpl(http::uri::Scheme, http::uri::Authority); + + /// Test unique reservations. 
+ #[derive(Debug, PartialEq, Eq)] + struct Uniq(T); + + impl Poolable for Uniq { + fn is_open(&self) -> bool { + true + } + + fn reserve(self) -> Reservation { + Reservation::Unique(self) + } + + fn can_share(&self) -> bool { + false + } + } + + fn c(key: K) -> Connecting { + Connecting { + key, + pool: WeakOpt::none(), + } + } + + fn host_key(s: &str) -> KeyImpl { + KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key")) + } + + fn pool_no_timer() -> Pool { + pool_max_idle_no_timer(usize::MAX) + } + + fn pool_max_idle_no_timer(max_idle: usize) -> Pool { + let pool = Pool::new( + super::Config { + idle_timeout: Some(Duration::from_millis(100)), + max_idle_per_host: max_idle, + }, + TokioExecutor::new(), + Option::::None, + ); + pool.no_timer(); + pool + } + + #[tokio::test] + async fn test_pool_checkout_smoke() { + let pool = pool_no_timer(); + let key = host_key("foo"); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + drop(pooled); + + match pool.checkout(key).await { + Ok(pooled) => assert_eq!(*pooled, Uniq(41)), + Err(_) => panic!("not ready"), + }; + } + + /// Helper to check if the future is ready after polling once. 
+ struct PollOnce<'a, F>(&'a mut F); + + impl Future for PollOnce<'_, F> + where + F: Future> + Unpin, + { + type Output = Option<()>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { + match Pin::new(&mut self.0).poll(cx) { + Poll::Ready(Ok(_)) => Poll::Ready(Some(())), + Poll::Ready(Err(_)) => Poll::Ready(Some(())), + Poll::Pending => Poll::Ready(None), + } + } + } + + #[tokio::test] + async fn test_pool_checkout_returns_none_if_expired() { + let pool = pool_no_timer(); + let key = host_key("foo"); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + drop(pooled); + tokio::time::sleep(pool.locked().timeout.unwrap()).await; + let mut checkout = pool.checkout(key); + let poll_once = PollOnce(&mut checkout); + let is_not_ready = poll_once.await.is_none(); + assert!(is_not_ready); + } + + #[tokio::test] + async fn test_pool_checkout_removes_expired() { + let pool = pool_no_timer(); + let key = host_key("foo"); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(3) + ); + tokio::time::sleep(pool.locked().timeout.unwrap()).await; + + let mut checkout = pool.checkout(key.clone()); + let poll_once = PollOnce(&mut checkout); + // checkout.await should clean out the expired + poll_once.await; + assert!(!pool.locked().idle.contains_key(&key)); + } + + #[test] + fn test_pool_max_idle_per_host() { + let pool = pool_max_idle_no_timer(2); + let key = host_key("foo"); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + // pooled and dropped 3, max_idle should only allow 2 + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(2) + ); + } + + #[tokio::test] + async fn test_pool_timer_removes_expired_realtime() { + test_pool_timer_removes_expired_inner().await + } + + #[tokio::test(start_paused 
= true)] + async fn test_pool_timer_removes_expired_faketime() { + test_pool_timer_removes_expired_inner().await + } + + async fn test_pool_timer_removes_expired_inner() { + let pool = Pool::new( + super::Config { + idle_timeout: Some(Duration::from_millis(10)), + max_idle_per_host: usize::MAX, + }, + TokioExecutor::new(), + Some(TokioTimer::new()), + ); + + let key = host_key("foo"); + + pool.pooled(c(key.clone()), Uniq(41)); + pool.pooled(c(key.clone()), Uniq(5)); + pool.pooled(c(key.clone()), Uniq(99)); + + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(3) + ); + + // Let the timer tick passed the expiration... + tokio::time::sleep(Duration::from_millis(30)).await; + + // But minimum interval is higher, so nothing should have been reaped + assert_eq!( + pool.locked().idle.get(&key).map(|entries| entries.len()), + Some(3) + ); + + // Now wait passed the minimum interval more + tokio::time::sleep(Duration::from_millis(70)).await; + // Yield in case other task hasn't been able to run :shrug: + tokio::task::yield_now().await; + + assert!(!pool.locked().idle.contains_key(&key)); + } + + #[tokio::test] + async fn test_pool_checkout_task_unparked() { + use futures_util::future::join; + use futures_util::FutureExt; + + let pool = pool_no_timer(); + let key = host_key("foo"); + let pooled = pool.pooled(c(key.clone()), Uniq(41)); + + let checkout = join(pool.checkout(key), async { + // the checkout future will park first, + // and then this lazy future will be polled, which will insert + // the pooled back into the pool + // + // this test makes sure that doing so will unpark the checkout + drop(pooled); + }) + .map(|(entry, _)| entry); + + assert_eq!(*checkout.await.unwrap(), Uniq(41)); + } + + #[tokio::test] + async fn test_pool_checkout_drop_cleans_up_waiters() { + let pool = pool_no_timer::, KeyImpl>(); + let key = host_key("foo"); + + let mut checkout1 = pool.checkout(key.clone()); + let mut checkout2 = pool.checkout(key.clone()); + + 
let poll_once1 = PollOnce(&mut checkout1); + let poll_once2 = PollOnce(&mut checkout2); + + // first poll needed to get into Pool's parked + poll_once1.await; + assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); + poll_once2.await; + assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); + + // on drop, clean up Pool + drop(checkout1); + assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); + + drop(checkout2); + assert!(!pool.locked().waiters.contains_key(&key)); + } + + #[derive(Debug)] + struct CanClose { + #[allow(unused)] + val: i32, + closed: bool, + } + + impl Poolable for CanClose { + fn is_open(&self) -> bool { + !self.closed + } + + fn reserve(self) -> Reservation { + Reservation::Unique(self) + } + + fn can_share(&self) -> bool { + false + } + } + + #[test] + fn pooled_drop_if_closed_doesnt_reinsert() { + let pool = pool_no_timer(); + let key = host_key("foo"); + pool.pooled( + c(key.clone()), + CanClose { + val: 57, + closed: true, + }, + ); + + assert!(!pool.locked().idle.contains_key(&key)); + } +} diff --git a/src/client/connect/tcp/mod.rs b/src/client/connect/tcp/mod.rs new file mode 100644 index 00000000..24e624d3 --- /dev/null +++ b/src/client/connect/tcp/mod.rs @@ -0,0 +1,409 @@ +use crate::client::connect::config::Config; +use crate::client::connect::dns; +use crate::client::connect::Connection; +use core::fmt; +use futures_util::future::Either; +use socket2::{Domain, Protocol, Socket, Type}; +use std::error::Error as StdError; +use std::future::Future; +use std::io; +use std::net::SocketAddr; +use std::net::{Ipv4Addr, Ipv6Addr}; +use std::pin::pin; +use std::time::Duration; +use tracing::{debug, trace, warn}; + +#[cfg(feature = "tokio-net")] +/// Tokio-based socket connector. +pub mod tokio; + +/// A builder for tcp connections. +pub trait TcpConnector: Clone + Send + Sync + 'static { + /// The underlying stream type. 
+ type TcpStream: From + Send + Sync + 'static; + + /// The type of connection returned by this builder. + type Connection: hyper::rt::Read + hyper::rt::Write + Connection + Send + Unpin + 'static; + + /// The type of error returned by this builder. + type Error: Into>; + + /// The future type returned by this builder. + type Future: Future> + Send + 'static; + + /// The future type returned by this builder's sleep. + type Sleep: Future + Send + Sync + 'static; + + /// Build a connection from the given socket and connect to the address. + fn connect(&self, socket: Self::TcpStream, addr: SocketAddr) -> Self::Future; + + /// Return a future that sleeps for the given duration. + fn sleep(&self, duration: Duration) -> Self::Sleep; +} + +pub(crate) struct ConnectingTcp { + preferred: ConnectingTcpRemote, + fallback: Option>, +} + +impl ConnectingTcp +where + ::TcpStream: From, +{ + pub(crate) fn new(remote_addrs: dns::SocketAddrs, config: &Config, connector: S) -> Self { + if let Some(fallback_timeout) = config.happy_eyeballs_timeout() { + let (preferred_addrs, fallback_addrs) = remote_addrs + .split_by_preference(config.local_address_ipv4(), config.local_address_ipv6()); + if fallback_addrs.is_empty() { + return ConnectingTcp { + preferred: ConnectingTcpRemote::new( + preferred_addrs, + config.connect_timeout(), + connector, + ), + fallback: None, + }; + } + + ConnectingTcp { + preferred: ConnectingTcpRemote::new( + preferred_addrs, + config.connect_timeout(), + connector.clone(), + ), + fallback: Some(ConnectingTcpFallback { + delay: connector.sleep(fallback_timeout), + remote: ConnectingTcpRemote::new( + fallback_addrs, + config.connect_timeout(), + connector, + ), + }), + } + } else { + ConnectingTcp { + preferred: ConnectingTcpRemote::new( + remote_addrs, + config.connect_timeout(), + connector, + ), + fallback: None, + } + } + } +} + +struct ConnectingTcpFallback { + delay: S::Sleep, + remote: ConnectingTcpRemote, +} + +struct ConnectingTcpRemote { + addrs: 
dns::SocketAddrs, + connect_timeout: Option, + connector: S, +} + +impl ConnectingTcpRemote +where + ::TcpStream: From, +{ + fn new(addrs: dns::SocketAddrs, connect_timeout: Option, connector: S) -> Self { + let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32)); + + Self { + addrs, + connect_timeout, + connector, + } + } + + async fn connect(&mut self, config: &Config) -> Result { + let mut err = None; + for addr in &mut self.addrs { + debug!("connecting to {}", addr); + match connect(&addr, config, self.connect_timeout, &self.connector) { + Ok(fut) => match fut.await { + Ok(tcp) => { + debug!("connected to {}", addr); + return Ok(tcp); + } + Err(mut e) => { + trace!("connect error for {}: {:?}", addr, e); + e.addr = Some(addr); + if err.is_none() { + err = Some(e); + } + } + }, + Err(mut e) => { + trace!("connect error for {}: {:?}", addr, e); + e.addr = Some(addr); + if err.is_none() { + err = Some(e); + } + } + } + } + + match err { + Some(e) => Err(e), + None => Err(ConnectError::new( + "tcp connect error", + std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), + )), + } + } +} + +fn bind_local_address( + socket: &socket2::Socket, + dst_addr: &SocketAddr, + local_addr_ipv4: &Option, + local_addr_ipv6: &Option, +) -> io::Result<()> { + match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { + (SocketAddr::V4(_), Some(addr), _) => { + socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; + } + (SocketAddr::V6(_), _, Some(addr)) => { + socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; + } + _ => { + if cfg!(windows) { + // Windows requires a socket be bound before calling connect + let any: SocketAddr = match *dst_addr { + SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), + SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), + }; + socket.bind(&any.into())?; + } + } + } + + Ok(()) +} + +fn connect( + addr: &SocketAddr, + config: &Config, + connect_timeout: Option, + connector: &S, +) -> 
Result>, ConnectError> +where + S::TcpStream: From, +{ + let domain = Domain::for_address(*addr); + let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) + .map_err(ConnectError::m("tcp open error"))?; + + // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is + // responsible for ensuring O_NONBLOCK is set. + socket + .set_nonblocking(true) + .map_err(ConnectError::m("tcp set_nonblocking error"))?; + + if let Some(tcp_keepalive) = &config.tcp_keepalive() { + if let Err(e) = socket.set_tcp_keepalive(tcp_keepalive) { + warn!("tcp set_keepalive error: {}", e); + } + } + + #[cfg(any( + target_os = "android", + target_os = "fuchsia", + target_os = "illumos", + target_os = "ios", + target_os = "linux", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + if let Some(interface) = &config.interface() { + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + socket + .bind_device(Some(interface.as_bytes())) + .map_err(ConnectError::m("tcp bind interface error"))?; + + #[cfg(any( + target_os = "illumos", + target_os = "ios", + target_os = "macos", + target_os = "solaris", + target_os = "tvos", + target_os = "visionos", + target_os = "watchos", + ))] + { + let idx = unsafe { libc::if_nametoindex(interface.as_ptr()) }; + let idx = std::num::NonZeroU32::new(idx).ok_or_else(|| { + ConnectError::new( + "error converting interface name to index", + io::Error::last_os_error(), + ) + })?; + match addr { + SocketAddr::V4(_) => socket.bind_device_by_index_v4(Some(idx)), + SocketAddr::V6(_) => socket.bind_device_by_index_v6(Some(idx)), + } + .map_err(ConnectError::m("tcp bind interface error"))?; + } + } + + #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] + if let Some(tcp_user_timeout) = &config.tcp_user_timeout() { + if let Err(e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) { + warn!("tcp 
set_tcp_user_timeout error: {}", e); + } + } + + bind_local_address( + &socket, + addr, + &config.local_address_ipv4(), + &config.local_address_ipv6(), + ) + .map_err(ConnectError::m("tcp bind local error"))?; + + if config.reuse_address() { + if let Err(e) = socket.set_reuse_address(true) { + warn!("tcp set_reuse_address error: {}", e); + } + } + + if let Some(size) = config.send_buffer_size() { + if let Err(e) = socket.set_send_buffer_size(size) { + warn!("tcp set_buffer_size error: {}", e); + } + } + + if let Some(size) = config.recv_buffer_size() { + if let Err(e) = socket.set_recv_buffer_size(size) { + warn!("tcp set_recv_buffer_size error: {}", e); + } + } + + if let Err(e) = socket.set_tcp_nodelay(config.nodelay()) { + warn!("tcp set_nodelay error: {}", e); + } + + let connect = connector.connect(socket.into(), *addr); + let sleep = connect_timeout.map(|dur| connector.sleep(dur)); + Ok(async move { + match sleep { + Some(sleep) => match futures_util::future::select(pin!(sleep), pin!(connect)).await { + Either::Left(((), _)) => { + Err(io::Error::new(io::ErrorKind::TimedOut, "connect timeout").into()) + } + Either::Right((Ok(s), _)) => Ok(s), + Either::Right((Err(e), _)) => Err(e.into()), + }, + None => connect.await.map_err(Into::into), + } + .map_err(ConnectError::m("tcp connect error")) + }) +} + +impl ConnectingTcp +where + S::TcpStream: From, +{ + pub(crate) async fn connect(mut self, config: &Config) -> Result { + match self.fallback { + None => self.preferred.connect(config).await, + Some(mut fallback) => { + let preferred_fut = self.preferred.connect(config); + futures_util::pin_mut!(preferred_fut); + + let fallback_fut = fallback.remote.connect(config); + futures_util::pin_mut!(fallback_fut); + + let fallback_delay = fallback.delay; + futures_util::pin_mut!(fallback_delay); + + let (result, future) = + match futures_util::future::select(preferred_fut, fallback_delay).await { + Either::Left((result, _fallback_delay)) => { + (result, 
Either::Right(fallback_fut)) + } + Either::Right(((), preferred_fut)) => { + // Delay is done, start polling both the preferred and the fallback + futures_util::future::select(preferred_fut, fallback_fut) + .await + .factor_first() + } + }; + + if result.is_err() { + // Fallback to the remaining future (could be preferred or fallback) + // if we get an error + future.await + } else { + result + } + } + } + } +} + +/// TBD +pub struct ConnectError { + pub(super) msg: &'static str, + pub(super) addr: Option, + pub(super) cause: Option>, +} + +impl ConnectError { + pub(super) fn new(msg: &'static str, cause: E) -> ConnectError + where + E: Into>, + { + ConnectError { + msg, + addr: None, + cause: Some(cause.into()), + } + } + + pub(super) fn dns(cause: E) -> ConnectError + where + E: Into>, + { + ConnectError::new("dns error", cause) + } + + pub(super) fn m(msg: &'static str) -> impl FnOnce(E) -> ConnectError + where + E: Into>, + { + move |cause| ConnectError::new(msg, cause) + } +} + +impl fmt::Debug for ConnectError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut b = f.debug_tuple("ConnectError"); + b.field(&self.msg); + if let Some(ref addr) = self.addr { + b.field(addr); + } + if let Some(ref cause) = self.cause { + b.field(cause); + } + b.finish() + } +} + +impl fmt::Display for ConnectError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(self.msg) + } +} + +impl StdError for ConnectError { + fn source(&self) -> Option<&(dyn StdError + 'static)> { + self.cause.as_ref().map(|e| &**e as _) + } +} diff --git a/src/client/connect/tcp/tokio.rs b/src/client/connect/tcp/tokio.rs new file mode 100644 index 00000000..5be574fb --- /dev/null +++ b/src/client/connect/tcp/tokio.rs @@ -0,0 +1,76 @@ +use crate::client::connect::{Connected, Connection}; +use crate::rt::TokioIo; +use std::future::Future; +use std::io; +use std::net::SocketAddr; +use std::pin::Pin; +use std::time::Duration; +use tokio::net::{TcpSocket, 
TcpStream}; + +/// A connector that uses `tokio` for TCP connections. +#[derive(Clone, Copy, Debug, Default)] +pub struct TokioTcpConnector { + _priv: (), +} + +impl TokioTcpConnector { + /// Create a new `TokioTcpConnector`. + pub fn new() -> Self { + Self { _priv: () } + } +} + +impl super::TcpConnector for TokioTcpConnector { + type TcpStream = std::net::TcpStream; + type Connection = TokioIo; + type Error = io::Error; + type Future = Pin> + Send>>; + type Sleep = tokio::time::Sleep; + + fn connect(&self, socket: std::net::TcpStream, addr: SocketAddr) -> Self::Future { + let socket = TcpSocket::from_std_stream(socket); + let connect = socket.connect(addr); + Box::pin(async move { + let stream = connect.await?; + Ok(TokioIo::new(stream)) + }) + } + + fn sleep(&self, duration: Duration) -> Self::Sleep { + tokio::time::sleep(duration) + } +} + +impl Connection for tokio::net::TcpStream { + fn connected(&self) -> Connected { + let connected = Connected::new(); + if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { + connected.extra(crate::client::connect::http::HttpInfo { + remote_addr, + local_addr, + }) + } else { + connected + } + } +} + +impl Connection for crate::rt::TokioIo { + fn connected(&self) -> Connected { + self.inner().connected() + } +} + +#[cfg(unix)] +impl Connection for std::os::unix::net::UnixStream { + fn connected(&self) -> Connected { + Connected::new() + } +} + +#[cfg(windows)] +impl Connection for ::tokio::net::windows::named_pipe::NamedPipeClient { + fn connected(&self) -> Connected { + Connected::new() + } +} diff --git a/src/client/legacy/client.rs b/src/client/legacy/client.rs index c9d00800..d8859a81 100644 --- a/src/client/legacy/client.rs +++ b/src/client/legacy/client.rs @@ -6,28 +6,19 @@ use std::error::Error as StdError; use std::fmt; -use std::future::{poll_fn, Future}; -use std::pin::Pin; use std::task::{self, Poll}; use std::time::Duration; -use futures_util::future::{self, Either, FutureExt, 
TryFutureExt}; -use http::uri::Scheme; -use hyper::client::conn::TrySendError as ConnTrySendError; -use hyper::header::{HeaderValue, HOST}; -use hyper::rt::Timer; -use hyper::{body::Body, Method, Request, Response, Uri, Version}; -use tracing::{debug, trace, warn}; +#[cfg(feature = "tokio-net")] +use crate::client::legacy::connect::HttpConnector; -use super::connect::capture::CaptureConnectionExtension; -#[cfg(feature = "tokio")] -use super::connect::HttpConnector; -use super::connect::{Alpn, Connect, Connected, Connection}; -use super::pool::{self, Ver}; +use hyper::rt::Timer; +use hyper::{body::Body, Request, Response, Uri}; -use crate::common::{lazy as hyper_lazy, timer, Exec, Lazy, SyncWrapper}; +use crate::client::client::BoxSendFuture; +use crate::client::connect::{http::HttpConnect as _, Connect}; -type BoxSendFuture = Pin + Send>>; +pub use crate::client::client::{Error, ResponseFuture}; /// A Client to make outgoing HTTP requests. /// @@ -35,83 +26,9 @@ type BoxSendFuture = Pin + Send>>; /// underlying connection pool will be reused. #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] pub struct Client { - config: Config, - connector: C, - exec: Exec, - #[cfg(feature = "http1")] - h1_builder: hyper::client::conn::http1::Builder, - #[cfg(feature = "http2")] - h2_builder: hyper::client::conn::http2::Builder, - pool: pool::Pool, PoolKey>, -} - -#[derive(Clone, Copy, Debug)] -struct Config { - retry_canceled_requests: bool, - set_host: bool, - ver: Ver, -} - -/// Client errors -pub struct Error { - kind: ErrorKind, - source: Option>, - #[cfg(any(feature = "http1", feature = "http2"))] - connect_info: Option, -} - -#[derive(Debug)] -enum ErrorKind { - Canceled, - ChannelClosed, - Connect, - UserUnsupportedRequestMethod, - UserUnsupportedVersion, - UserAbsoluteUriRequired, - SendRequest, -} - -macro_rules! 
e { - ($kind:ident) => { - Error { - kind: ErrorKind::$kind, - source: None, - connect_info: None, - } - }; - ($kind:ident, $src:expr) => { - Error { - kind: ErrorKind::$kind, - source: Some($src.into()), - connect_info: None, - } - }; -} - -// We might change this... :shrug: -type PoolKey = (http::uri::Scheme, http::uri::Authority); - -enum TrySendError { - Retryable { - error: Error, - req: Request, - connection_reused: bool, - }, - Nope(Error), + inner: crate::client::client::Client, } -/// A `Future` that will resolve to an HTTP Response. -/// -/// This is returned by `Client::request` (and `Client::get`). -#[must_use = "futures do nothing unless polled"] -pub struct ResponseFuture { - inner: SyncWrapper< - Pin, Error>> + Send>>, - >, -} - -// ===== impl Client ===== - impl Client<(), ()> { /// Create a builder to configure a new `Client`. /// @@ -138,7 +55,9 @@ impl Client<(), ()> { where E: hyper::rt::Executor + Send + Sync + Clone + 'static, { - Builder::new(executor) + Builder { + inner: crate::client::client::Client::builder(executor), + } } } @@ -178,14 +97,7 @@ where where B: Default, { - let body = B::default(); - if !body.is_end_stream() { - warn!("default Body used for get() does not return true for is_end_stream"); - } - - let mut req = Request::new(body); - *req.uri_mut() = uri; - self.request(req) + self.inner.get(uri) } /// Send a constructed `Request` using this `Client`. @@ -213,456 +125,8 @@ where /// # } /// # fn main() {} /// ``` - pub fn request(&self, mut req: Request) -> ResponseFuture { - let is_http_connect = req.method() == Method::CONNECT; - match req.version() { - Version::HTTP_11 => (), - Version::HTTP_10 => { - if is_http_connect { - warn!("CONNECT is not allowed for HTTP/1.0"); - return ResponseFuture::new(future::err(e!(UserUnsupportedRequestMethod))); - } - } - Version::HTTP_2 => (), - // completely unsupported HTTP version (like HTTP/0.9)! 
- other => return ResponseFuture::error_version(other), - }; - - let pool_key = match extract_domain(req.uri_mut(), is_http_connect) { - Ok(s) => s, - Err(err) => { - return ResponseFuture::new(future::err(err)); - } - }; - - ResponseFuture::new(self.clone().send_request(req, pool_key)) - } - - async fn send_request( - self, - mut req: Request, - pool_key: PoolKey, - ) -> Result, Error> { - let uri = req.uri().clone(); - - loop { - req = match self.try_send_request(req, pool_key.clone()).await { - Ok(resp) => return Ok(resp), - Err(TrySendError::Nope(err)) => return Err(err), - Err(TrySendError::Retryable { - mut req, - error, - connection_reused, - }) => { - if !self.config.retry_canceled_requests || !connection_reused { - // if client disabled, don't retry - // a fresh connection means we definitely can't retry - return Err(error); - } - - trace!( - "unstarted request canceled, trying again (reason={:?})", - error - ); - *req.uri_mut() = uri.clone(); - req - } - } - } - } - - async fn try_send_request( - &self, - mut req: Request, - pool_key: PoolKey, - ) -> Result, TrySendError> { - let mut pooled = self - .connection_for(pool_key) - .await - // `connection_for` already retries checkout errors, so if - // it returns an error, there's not much else to retry - .map_err(TrySendError::Nope)?; - - if let Some(conn) = req.extensions_mut().get_mut::() { - conn.set(&pooled.conn_info); - } - - if pooled.is_http1() { - if req.version() == Version::HTTP_2 { - warn!("Connection is HTTP/1, but request requires HTTP/2"); - return Err(TrySendError::Nope( - e!(UserUnsupportedVersion).with_connect_info(pooled.conn_info.clone()), - )); - } - - if self.config.set_host { - let uri = req.uri().clone(); - req.headers_mut().entry(HOST).or_insert_with(|| { - let hostname = uri.host().expect("authority implies host"); - if let Some(port) = get_non_default_port(&uri) { - let s = format!("{hostname}:{port}"); - HeaderValue::from_maybe_shared(bytes::Bytes::from(s)) - } else { - 
HeaderValue::from_str(hostname) - } - .expect("uri host is valid header value") - }); - } - - // CONNECT always sends authority-form, so check it first... - if req.method() == Method::CONNECT { - authority_form(req.uri_mut()); - } else if pooled.conn_info.is_proxied { - absolute_form(req.uri_mut()); - } else { - origin_form(req.uri_mut()); - } - } else if req.method() == Method::CONNECT && !pooled.is_http2() { - authority_form(req.uri_mut()); - } - - let mut res = match pooled.try_send_request(req).await { - Ok(res) => res, - Err(mut err) => { - return if let Some(req) = err.take_message() { - Err(TrySendError::Retryable { - connection_reused: pooled.is_reused(), - error: e!(Canceled, err.into_error()) - .with_connect_info(pooled.conn_info.clone()), - req, - }) - } else { - Err(TrySendError::Nope( - e!(SendRequest, err.into_error()) - .with_connect_info(pooled.conn_info.clone()), - )) - }; - } - }; - - // If the Connector included 'extra' info, add to Response... - if let Some(extra) = &pooled.conn_info.extra { - extra.set(res.extensions_mut()); - } - - // If pooled is HTTP/2, we can toss this reference immediately. - // - // when pooled is dropped, it will try to insert back into the - // pool. To delay that, spawn a future that completes once the - // sender is ready again. - // - // This *should* only be once the related `Connection` has polled - // for a new request to start. - // - // It won't be ready if there is a body to stream. 
- if pooled.is_http2() || !pooled.is_pool_enabled() || pooled.is_ready() { - drop(pooled); - } else { - let on_idle = poll_fn(move |cx| pooled.poll_ready(cx)).map(|_| ()); - self.exec.execute(on_idle); - } - - Ok(res) - } - - async fn connection_for( - &self, - pool_key: PoolKey, - ) -> Result, PoolKey>, Error> { - loop { - match self.one_connection_for(pool_key.clone()).await { - Ok(pooled) => return Ok(pooled), - Err(ClientConnectError::Normal(err)) => return Err(err), - Err(ClientConnectError::CheckoutIsClosed(reason)) => { - if !self.config.retry_canceled_requests { - return Err(e!(Connect, reason)); - } - - trace!( - "unstarted request canceled, trying again (reason={:?})", - reason, - ); - continue; - } - }; - } - } - - async fn one_connection_for( - &self, - pool_key: PoolKey, - ) -> Result, PoolKey>, ClientConnectError> { - // Return a single connection if pooling is not enabled - if !self.pool.is_enabled() { - return self - .connect_to(pool_key) - .await - .map_err(ClientConnectError::Normal); - } - - // This actually races 2 different futures to try to get a ready - // connection the fastest, and to reduce connection churn. - // - // - If the pool has an idle connection waiting, that's used - // immediately. - // - Otherwise, the Connector is asked to start connecting to - // the destination Uri. - // - Meanwhile, the pool Checkout is watching to see if any other - // request finishes and tries to insert an idle connection. - // - If a new connection is started, but the Checkout wins after - // (an idle connection became available first), the started - // connection future is spawned into the runtime to complete, - // and then be inserted into the pool as an idle connection. - let checkout = self.pool.checkout(pool_key.clone()); - let connect = self.connect_to(pool_key); - let is_ver_h2 = self.config.ver == Ver::Http2; - - // The order of the `select` is depended on below... 
- - match future::select(checkout, connect).await { - // Checkout won, connect future may have been started or not. - // - // If it has, let it finish and insert back into the pool, - // so as to not waste the socket... - Either::Left((Ok(checked_out), connecting)) => { - // This depends on the `select` above having the correct - // order, such that if the checkout future were ready - // immediately, the connect future will never have been - // started. - // - // If it *wasn't* ready yet, then the connect future will - // have been started... - if connecting.started() { - let bg = connecting - .map_err(|err| { - trace!("background connect error: {}", err); - }) - .map(|_pooled| { - // dropping here should just place it in - // the Pool for us... - }); - // An execute error here isn't important, we're just trying - // to prevent a waste of a socket... - self.exec.execute(bg); - } - Ok(checked_out) - } - // Connect won, checkout can just be dropped. - Either::Right((Ok(connected), _checkout)) => Ok(connected), - // Either checkout or connect could get canceled: - // - // 1. Connect is canceled if this is HTTP/2 and there is - // an outstanding HTTP/2 connecting task. - // 2. Checkout is canceled if the pool cannot deliver an - // idle connection reliably. - // - // In both cases, we should just wait for the other future. 
- Either::Left((Err(err), connecting)) => { - if err.is_canceled() { - connecting.await.map_err(ClientConnectError::Normal) - } else { - Err(ClientConnectError::Normal(e!(Connect, err))) - } - } - Either::Right((Err(err), checkout)) => { - if err.is_canceled() { - checkout.await.map_err(move |err| { - if is_ver_h2 && err.is_canceled() { - ClientConnectError::CheckoutIsClosed(err) - } else { - ClientConnectError::Normal(e!(Connect, err)) - } - }) - } else { - Err(ClientConnectError::Normal(err)) - } - } - } - } - - #[cfg(any(feature = "http1", feature = "http2"))] - fn connect_to( - &self, - pool_key: PoolKey, - ) -> impl Lazy, PoolKey>, Error>> + Send + Unpin - { - let executor = self.exec.clone(); - let pool = self.pool.clone(); - #[cfg(feature = "http1")] - let h1_builder = self.h1_builder.clone(); - #[cfg(feature = "http2")] - let h2_builder = self.h2_builder.clone(); - let ver = self.config.ver; - let is_ver_h2 = ver == Ver::Http2; - let connector = self.connector.clone(); - hyper_lazy(move || { - // Try to take a "connecting lock". - // - // If the pool_key is for HTTP/2, and there is already a - // connection being established, then this can't take a - // second lock. The "connect_to" future is Canceled. - let connecting = match pool.connecting(&pool_key, ver) { - Some(lock) => lock, - None => { - let canceled = e!(Canceled); - // TODO - //crate::Error::new_canceled().with("HTTP/2 connection in progress"); - return Either::Right(future::err(canceled)); - } - }; - let dst = domain_as_uri(pool_key); - Either::Left( - connector - .connect(super::connect::sealed::Internal, dst) - .map_err(|src| e!(Connect, src)) - .and_then(move |io| { - let connected = io.connected(); - // If ALPN is h2 and we aren't http2_only already, - // then we need to convert our pool checkout into - // a single HTTP2 one. 
- let connecting = if connected.alpn == Alpn::H2 && !is_ver_h2 { - match connecting.alpn_h2(&pool) { - Some(lock) => { - trace!("ALPN negotiated h2, updating pool"); - lock - } - None => { - // Another connection has already upgraded, - // the pool checkout should finish up for us. - let canceled = e!(Canceled, "ALPN upgraded to HTTP/2"); - return Either::Right(future::err(canceled)); - } - } - } else { - connecting - }; - - #[cfg_attr(not(feature = "http2"), allow(unused))] - let is_h2 = is_ver_h2 || connected.alpn == Alpn::H2; - - Either::Left(Box::pin(async move { - let tx = if is_h2 { - #[cfg(feature = "http2")] { - let (mut tx, conn) = - h2_builder.handshake(io).await.map_err(Error::tx)?; - - trace!( - "http2 handshake complete, spawning background dispatcher task" - ); - executor.execute( - conn.map_err(|e| debug!("client connection error: {}", e)) - .map(|_| ()), - ); - - // Wait for 'conn' to ready up before we - // declare this tx as usable - tx.ready().await.map_err(Error::tx)?; - PoolTx::Http2(tx) - } - #[cfg(not(feature = "http2"))] - panic!("http2 feature is not enabled"); - } else { - #[cfg(feature = "http1")] { - // Perform the HTTP/1.1 handshake on the provided I/O stream. - // Uses the h1_builder to establish a connection, returning a sender (tx) for requests - // and a connection task (conn) that manages the connection lifecycle. - let (mut tx, conn) = - h1_builder.handshake(io).await.map_err(crate::client::legacy::client::Error::tx)?; - - // Log that the HTTP/1.1 handshake has completed successfully. - // This indicates the connection is established and ready for request processing. - trace!( - "http1 handshake complete, spawning background dispatcher task" - ); - // Create a oneshot channel to communicate errors from the connection task. - // err_tx sends errors from the connection task, and err_rx receives them - // to correlate connection failures with request readiness errors. 
- let (err_tx, err_rx) = tokio::sync::oneshot::channel(); - // Spawn the connection task in the background using the executor. - // The task manages the HTTP/1.1 connection, including upgrades (e.g., WebSocket). - // Errors are sent via err_tx to ensure they can be checked if the sender (tx) fails. - executor.execute( - conn.with_upgrades() - .map_err(|e| { - // Log the connection error at debug level for diagnostic purposes. - debug!("client connection error: {:?}", e); - // Log that the error is being sent to the error channel. - trace!("sending connection error to error channel"); - // Send the error via the oneshot channel, ignoring send failures - // (e.g., if the receiver is dropped, which is handled later). - let _ =err_tx.send(e); - }) - .map(|_| ()), - ); - // Log that the client is waiting for the connection to be ready. - // Readiness indicates the sender (tx) can accept a request without blocking. - trace!("waiting for connection to be ready"); - // Check if the sender is ready to accept a request. - // This ensures the connection is fully established before proceeding. - // aka: - // Wait for 'conn' to ready up before we - // declare this tx as usable - match tx.ready().await { - // If ready, the connection is usable for sending requests. - Ok(_) => { - // Log that the connection is ready for use. - trace!("connection is ready"); - // Drop the error receiver, as it’s no longer needed since the sender is ready. - // This prevents waiting for errors that won’t occur in a successful case. - drop(err_rx); - // Wrap the sender in PoolTx::Http1 for use in the connection pool. - PoolTx::Http1(tx) - } - // If the sender fails with a closed channel error, check for a specific connection error. - // This distinguishes between a vague ChannelClosed error and an actual connection failure. - Err(e) if e.is_closed() => { - // Log that the channel is closed, indicating a potential connection issue. 
- trace!("connection channel closed, checking for connection error"); - // Check the oneshot channel for a specific error from the connection task. - match err_rx.await { - // If an error was received, it’s a specific connection failure. - Ok(err) => { - // Log the specific connection error for diagnostics. - trace!("received connection error: {:?}", err); - // Return the error wrapped in Error::tx to propagate it. - return Err(crate::client::legacy::client::Error::tx(err)); - } - // If the error channel is closed, no specific error was sent. - // Fall back to the vague ChannelClosed error. - Err(_) => { - // Log that the error channel is closed, indicating no specific error. - trace!("error channel closed, returning the vague ChannelClosed error"); - // Return the original error wrapped in Error::tx. - return Err(crate::client::legacy::client::Error::tx(e)); - } - } - } - // For other errors (e.g., timeout, I/O issues), propagate them directly. - // These are not ChannelClosed errors and don’t require error channel checks. - Err(e) => { - // Log the specific readiness failure for diagnostics. - trace!("connection readiness failed: {:?}", e); - // Return the error wrapped in Error::tx to propagate it. 
- return Err(crate::client::legacy::client::Error::tx(e)); - } - } - } - #[cfg(not(feature = "http1"))] { - panic!("http1 feature is not enabled"); - } - }; - - Ok(pool.pooled( - connecting, - PoolClient { - conn_info: connected, - tx, - }, - )) - })) - }), - ) - }) + pub fn request(&self, req: Request) -> ResponseFuture { + self.inner.request(req) } } @@ -677,12 +141,12 @@ where type Error = Error; type Future = ResponseFuture; - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + tower_service::Service::poll_ready(&mut self.inner, cx) } fn call(&mut self, req: Request) -> Self::Future { - self.request(req) + tower_service::Service::call(&mut self.inner, req) } } @@ -697,26 +161,21 @@ where type Error = Error; type Future = ResponseFuture; - fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) + fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { + let mut inner = &self.inner; + tower_service::Service::poll_ready(&mut inner, cx) } fn call(&mut self, req: Request) -> Self::Future { - self.request(req) + let mut inner = &self.inner; + tower_service::Service::call(&mut inner, req) } } impl Clone for Client { fn clone(&self) -> Client { Client { - config: self.config, - exec: self.exec.clone(), - #[cfg(feature = "http1")] - h1_builder: self.h1_builder.clone(), - #[cfg(feature = "http2")] - h2_builder: self.h2_builder.clone(), - connector: self.connector.clone(), - pool: self.pool.clone(), + inner: self.inner.clone(), } } } @@ -727,268 +186,6 @@ impl fmt::Debug for Client { } } -// ===== impl ResponseFuture ===== - -impl ResponseFuture { - fn new(value: F) -> Self - where - F: Future, Error>> + Send + 'static, - { - Self { - inner: SyncWrapper::new(Box::pin(value)), - } - } - - fn error_version(ver: Version) -> Self { - warn!("Request has unsupported version \"{:?}\"", ver); - 
ResponseFuture::new(Box::pin(future::err(e!(UserUnsupportedVersion)))) - } -} - -impl fmt::Debug for ResponseFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("Future") - } -} - -impl Future for ResponseFuture { - type Output = Result, Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.inner.get_mut().as_mut().poll(cx) - } -} - -// ===== impl PoolClient ===== - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -struct PoolClient { - conn_info: Connected, - tx: PoolTx, -} - -enum PoolTx { - #[cfg(feature = "http1")] - Http1(hyper::client::conn::http1::SendRequest), - #[cfg(feature = "http2")] - Http2(hyper::client::conn::http2::SendRequest), -} - -impl PoolClient { - fn poll_ready( - &mut self, - #[allow(unused_variables)] cx: &mut task::Context<'_>, - ) -> Poll> { - match self.tx { - #[cfg(feature = "http1")] - PoolTx::Http1(ref mut tx) => tx.poll_ready(cx).map_err(Error::closed), - #[cfg(feature = "http2")] - PoolTx::Http2(_) => Poll::Ready(Ok(())), - } - } - - fn is_http1(&self) -> bool { - !self.is_http2() - } - - fn is_http2(&self) -> bool { - match self.tx { - #[cfg(feature = "http1")] - PoolTx::Http1(_) => false, - #[cfg(feature = "http2")] - PoolTx::Http2(_) => true, - } - } - - fn is_poisoned(&self) -> bool { - self.conn_info.poisoned.poisoned() - } - - fn is_ready(&self) -> bool { - match self.tx { - #[cfg(feature = "http1")] - PoolTx::Http1(ref tx) => tx.is_ready(), - #[cfg(feature = "http2")] - PoolTx::Http2(ref tx) => tx.is_ready(), - } - } -} - -impl PoolClient { - fn try_send_request( - &mut self, - req: Request, - ) -> impl Future, ConnTrySendError>>> - where - B: Send, - { - #[cfg(all(feature = "http1", feature = "http2"))] - return match self.tx { - #[cfg(feature = "http1")] - PoolTx::Http1(ref mut tx) => Either::Left(tx.try_send_request(req)), - #[cfg(feature = "http2")] - PoolTx::Http2(ref mut tx) => 
Either::Right(tx.try_send_request(req)), - }; - - #[cfg(feature = "http1")] - #[cfg(not(feature = "http2"))] - return match self.tx { - #[cfg(feature = "http1")] - PoolTx::Http1(ref mut tx) => tx.try_send_request(req), - }; - - #[cfg(not(feature = "http1"))] - #[cfg(feature = "http2")] - return match self.tx { - #[cfg(feature = "http2")] - PoolTx::Http2(ref mut tx) => tx.try_send_request(req), - }; - } -} - -impl pool::Poolable for PoolClient -where - B: Send + 'static, -{ - fn is_open(&self) -> bool { - !self.is_poisoned() && self.is_ready() - } - - fn reserve(self) -> pool::Reservation { - match self.tx { - #[cfg(feature = "http1")] - PoolTx::Http1(tx) => pool::Reservation::Unique(PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http1(tx), - }), - #[cfg(feature = "http2")] - PoolTx::Http2(tx) => { - let b = PoolClient { - conn_info: self.conn_info.clone(), - tx: PoolTx::Http2(tx.clone()), - }; - let a = PoolClient { - conn_info: self.conn_info, - tx: PoolTx::Http2(tx), - }; - pool::Reservation::Shared(a, b) - } - } - } - - fn can_share(&self) -> bool { - self.is_http2() - } -} - -enum ClientConnectError { - Normal(Error), - CheckoutIsClosed(pool::Error), -} - -fn origin_form(uri: &mut Uri) { - let path = match uri.path_and_query() { - Some(path) if path.as_str() != "/" => { - let mut parts = ::http::uri::Parts::default(); - parts.path_and_query = Some(path.clone()); - Uri::from_parts(parts).expect("path is valid uri") - } - _none_or_just_slash => { - debug_assert!(Uri::default() == "/"); - Uri::default() - } - }; - *uri = path -} - -fn absolute_form(uri: &mut Uri) { - debug_assert!(uri.scheme().is_some(), "absolute_form needs a scheme"); - debug_assert!( - uri.authority().is_some(), - "absolute_form needs an authority" - ); -} - -fn authority_form(uri: &mut Uri) { - if let Some(path) = uri.path_and_query() { - // `https://hyper.rs` would parse with `/` path, don't - // annoy people about that... 
- if path != "/" { - warn!("HTTP/1.1 CONNECT request stripping path: {:?}", path); - } - } - *uri = match uri.authority() { - Some(auth) => { - let mut parts = ::http::uri::Parts::default(); - parts.authority = Some(auth.clone()); - Uri::from_parts(parts).expect("authority is valid") - } - None => { - unreachable!("authority_form with relative uri"); - } - }; -} - -fn extract_domain(uri: &mut Uri, is_http_connect: bool) -> Result { - let uri_clone = uri.clone(); - match (uri_clone.scheme(), uri_clone.authority()) { - (Some(scheme), Some(auth)) => Ok((scheme.clone(), auth.clone())), - (None, Some(auth)) if is_http_connect => { - let scheme = match auth.port_u16() { - Some(443) => { - set_scheme(uri, Scheme::HTTPS); - Scheme::HTTPS - } - _ => { - set_scheme(uri, Scheme::HTTP); - Scheme::HTTP - } - }; - Ok((scheme, auth.clone())) - } - _ => { - debug!("Client requires absolute-form URIs, received: {:?}", uri); - Err(e!(UserAbsoluteUriRequired)) - } - } -} - -fn domain_as_uri((scheme, auth): PoolKey) -> Uri { - http::uri::Builder::new() - .scheme(scheme) - .authority(auth) - .path_and_query("/") - .build() - .expect("domain is valid Uri") -} - -fn set_scheme(uri: &mut Uri, scheme: Scheme) { - debug_assert!( - uri.scheme().is_none(), - "set_scheme expects no existing scheme" - ); - let old = std::mem::take(uri); - let mut parts: ::http::uri::Parts = old.into(); - parts.scheme = Some(scheme); - parts.path_and_query = Some("/".parse().expect("slash is a valid path")); - *uri = Uri::from_parts(parts).expect("scheme is valid"); -} - -fn get_non_default_port(uri: &Uri) -> Option> { - match (uri.port().map(|p| p.as_u16()), is_schema_secure(uri)) { - (Some(443), true) => None, - (Some(80), false) => None, - _ => uri.port(), - } -} - -fn is_schema_secure(uri: &Uri) -> bool { - uri.scheme_str() - .map(|scheme_str| matches!(scheme_str, "wss" | "https")) - .unwrap_or_default() -} - /// A builder to configure a new [`Client`](Client). 
/// /// # Example @@ -1012,14 +209,7 @@ fn is_schema_secure(uri: &Uri) -> bool { #[cfg_attr(docsrs, doc(cfg(any(feature = "http1", feature = "http2"))))] #[derive(Clone)] pub struct Builder { - client_config: Config, - exec: Exec, - #[cfg(feature = "http1")] - h1_builder: hyper::client::conn::http1::Builder, - #[cfg(feature = "http2")] - h2_builder: hyper::client::conn::http2::Builder, - pool_config: pool::Config, - pool_timer: Option, + inner: crate::client::client::Builder, } impl Builder { @@ -1028,62 +218,30 @@ impl Builder { where E: hyper::rt::Executor + Send + Sync + Clone + 'static, { - let exec = Exec::new(executor); Self { - client_config: Config { - retry_canceled_requests: true, - set_host: true, - ver: Ver::Auto, - }, - exec: exec.clone(), - #[cfg(feature = "http1")] - h1_builder: hyper::client::conn::http1::Builder::new(), - #[cfg(feature = "http2")] - h2_builder: hyper::client::conn::http2::Builder::new(exec), - pool_config: pool::Config { - idle_timeout: Some(Duration::from_secs(90)), - max_idle_per_host: usize::MAX, - }, - pool_timer: None, + inner: crate::client::client::Builder::new(executor), } } + /// Set an optional timeout for idle sockets being kept-alive. /// A `Timer` is required for this to take effect. See `Builder::pool_timer` /// /// Pass `None` to disable timeout. /// /// Default is 90 seconds. 
- /// - /// # Example - /// - /// ``` - /// # #[cfg(all(feature = "tokio", feature = "http2"))] - /// # fn run () { - /// use std::time::Duration; - /// use hyper_util::client::legacy::Client; - /// use hyper_util::rt::{TokioExecutor, TokioTimer}; - /// - /// let client = Client::builder(TokioExecutor::new()) - /// .pool_idle_timeout(Duration::from_secs(30)) - /// .pool_timer(TokioTimer::new()) - /// .build_http(); - /// - /// # let infer: Client<_, http_body_util::Full> = client; - /// # } - /// # fn main() {} - /// ``` pub fn pool_idle_timeout(&mut self, val: D) -> &mut Self where D: Into>, { - self.pool_config.idle_timeout = val.into(); + self.inner.pool_idle_timeout(val); self } #[doc(hidden)] #[deprecated(note = "renamed to `pool_max_idle_per_host`")] pub fn max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; + #[allow(deprecated)] + self.inner.max_idle_per_host(max_idle); self } @@ -1091,338 +249,171 @@ impl Builder { /// /// Default is `usize::MAX` (no limit). pub fn pool_max_idle_per_host(&mut self, max_idle: usize) -> &mut Self { - self.pool_config.max_idle_per_host = max_idle; + self.inner.pool_max_idle_per_host(max_idle); self } // HTTP/1 options /// Sets the exact size of the read buffer to *always* use. - /// - /// Note that setting this option unsets the `http1_max_buf_size` option. - /// - /// Default is an adaptive read buffer. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_read_buf_exact_size(&mut self, sz: usize) -> &mut Self { - self.h1_builder.read_buf_exact_size(Some(sz)); + self.inner.http1_read_buf_exact_size(sz); self } /// Set the maximum buffer size for the connection. - /// - /// Default is ~400kb. - /// - /// Note that setting this option unsets the `http1_read_exact_buf_size` option. - /// - /// # Panics - /// - /// The minimum value allowed is 8192. This method panics if the passed `max` is less than the minimum. 
#[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_max_buf_size(&mut self, max: usize) -> &mut Self { - self.h1_builder.max_buf_size(max); + self.inner.http1_max_buf_size(max); self } /// Set whether HTTP/1 connections will accept spaces between header names /// and the colon that follow them in responses. - /// - /// Newline codepoints (`\r` and `\n`) will be transformed to spaces when - /// parsing. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] has - /// to say about it: - /// - /// > No whitespace is allowed between the header field-name and colon. In - /// > the past, differences in the handling of such whitespace have led to - /// > security vulnerabilities in request routing and response handling. A - /// > server MUST reject any received request message that contains - /// > whitespace between a header field-name and colon with a response code - /// > of 400 (Bad Request). A proxy MUST remove any such whitespace from a - /// > response message before forwarding the message downstream. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_allow_spaces_after_header_name_in_responses(&mut self, val: bool) -> &mut Self { - self.h1_builder - .allow_spaces_after_header_name_in_responses(val); + self.inner + .http1_allow_spaces_after_header_name_in_responses(val); self } /// Set whether HTTP/1 connections will accept obsolete line folding for /// header values. - /// - /// You probably don't need this, here is what [RFC 7230 Section 3.2.4.] 
has - /// to say about it: - /// - /// > A server that receives an obs-fold in a request message that is not - /// > within a message/http container MUST either reject the message by - /// > sending a 400 (Bad Request), preferably with a representation - /// > explaining that obsolete line folding is unacceptable, or replace - /// > each received obs-fold with one or more SP octets prior to - /// > interpreting the field value or forwarding the message downstream. - /// - /// > A proxy or gateway that receives an obs-fold in a response message - /// > that is not within a message/http container MUST either discard the - /// > message and replace it with a 502 (Bad Gateway) response, preferably - /// > with a representation explaining that unacceptable line folding was - /// > received, or replace each received obs-fold with one or more SP - /// > octets prior to interpreting the field value or forwarding the - /// > message downstream. - /// - /// > A user agent that receives an obs-fold in a response message that is - /// > not within a message/http container MUST replace each received - /// > obs-fold with one or more SP octets prior to interpreting the field - /// > value. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. - /// - /// [RFC 7230 Section 3.2.4.]: https://tools.ietf.org/html/rfc7230#section-3.2.4 #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_allow_obsolete_multiline_headers_in_responses(&mut self, val: bool) -> &mut Self { - self.h1_builder - .allow_obsolete_multiline_headers_in_responses(val); + self.inner + .http1_allow_obsolete_multiline_headers_in_responses(val); self } /// Sets whether invalid header lines should be silently ignored in HTTP/1 responses. - /// - /// This mimics the behaviour of major browsers. You probably don't want this. 
- /// You should only want this if you are implementing a proxy whose main - /// purpose is to sit in front of browsers whose users access arbitrary content - /// which may be malformed, and they expect everything that works without - /// the proxy to keep working with the proxy. - /// - /// This option will prevent Hyper's client from returning an error encountered - /// when parsing a header, except if the error was caused by the character NUL - /// (ASCII code 0), as Chrome specifically always reject those. - /// - /// The ignorable errors are: - /// * empty header names; - /// * characters that are not allowed in header names, except for `\0` and `\r`; - /// * when `allow_spaces_after_header_name_in_responses` is not enabled, - /// spaces and tabs between the header name and the colon; - /// * missing colon between header name and colon; - /// * characters that are not allowed in header values except for `\0` and `\r`. - /// - /// If an ignorable error is encountered, the parser tries to find the next - /// line in the input to resume parsing the rest of the headers. An error - /// will be emitted nonetheless if it finds `\0` or a lone `\r` while - /// looking for the next line. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_ignore_invalid_headers_in_responses(&mut self, val: bool) -> &mut Builder { - self.h1_builder.ignore_invalid_headers_in_responses(val); + self.inner.http1_ignore_invalid_headers_in_responses(val); self } /// Set whether HTTP/1 connections should try to use vectored writes, /// or always flatten into a single buffer. - /// - /// Note that setting this to false may mean more copies of body data, - /// but may also improve performance when an IO transport doesn't - /// support vectored writes well, such as most TLS implementations. - /// - /// Setting this to true will force hyper to use queued strategy - /// which may eliminate unnecessary cloning on some TLS backends - /// - /// Default is `auto`. 
In this mode hyper will try to guess which - /// mode to use #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_writev(&mut self, enabled: bool) -> &mut Builder { - self.h1_builder.writev(enabled); + self.inner.http1_writev(enabled); self } /// Set whether HTTP/1 connections will write header names as title case at /// the socket level. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_title_case_headers(&mut self, val: bool) -> &mut Self { - self.h1_builder.title_case_headers(val); + self.inner.http1_title_case_headers(val); self } /// Set whether to support preserving original header cases. - /// - /// Currently, this will record the original cases received, and store them - /// in a private extension on the `Response`. It will also look for and use - /// such an extension in any provided `Request`. - /// - /// Since the relevant extension is still private, there is no way to - /// interact with the original cases. The only effect this can have now is - /// to forward the cases in a proxy-like fashion. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_preserve_header_case(&mut self, val: bool) -> &mut Self { - self.h1_builder.preserve_header_case(val); + self.inner.http1_preserve_header_case(val); self } /// Set the maximum number of headers. - /// - /// When a response is received, the parser will reserve a buffer to store headers for optimal - /// performance. - /// - /// If client receives more headers than the buffer size, the error "message header too large" - /// is returned. - /// - /// The headers is allocated on the stack by default, which has higher performance. 
After - /// setting this value, headers will be allocated in heap memory, that is, heap memory - /// allocation will occur for each response, and there will be a performance drop of about 5%. - /// - /// Note that this setting does not affect HTTP/2. - /// - /// Default is 100. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http1_max_headers(&mut self, val: usize) -> &mut Self { - self.h1_builder.max_headers(val); + self.inner.http1_max_headers(val); self } /// Set whether HTTP/0.9 responses should be tolerated. - /// - /// Default is false. #[cfg(feature = "http1")] #[cfg_attr(docsrs, doc(cfg(feature = "http1")))] pub fn http09_responses(&mut self, val: bool) -> &mut Self { - self.h1_builder.http09_responses(val); + self.inner.http09_responses(val); self } /// Set whether the connection **must** use HTTP/2. - /// - /// The destination must either allow HTTP2 Prior Knowledge, or the - /// `Connect` should be configured to do use ALPN to upgrade to `h2` - /// as part of the connection process. This will not make the `Client` - /// utilize ALPN by itself. - /// - /// Note that setting this to true prevents HTTP/1 from being allowed. - /// - /// Default is false. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_only(&mut self, val: bool) -> &mut Self { - self.client_config.ver = if val { Ver::Http2 } else { Ver::Auto }; + self.inner.http2_only(val); self } /// Configures the maximum number of pending reset streams allowed before a GOAWAY will be sent. - /// - /// This will default to the default value set by the [`h2` crate](https://crates.io/crates/h2). - /// As of v0.4.0, it is 20. - /// - /// See for more information. 
#[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_pending_accept_reset_streams( &mut self, max: impl Into>, ) -> &mut Self { - self.h2_builder.max_pending_accept_reset_streams(max.into()); + self.inner.http2_max_pending_accept_reset_streams(max); self } /// Sets the [`SETTINGS_INITIAL_WINDOW_SIZE`][spec] option for HTTP2 /// stream-level flow control. /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. - /// /// [spec]: https://http2.github.io/http2-spec/#SETTINGS_INITIAL_WINDOW_SIZE #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_stream_window_size(&mut self, sz: impl Into>) -> &mut Self { - self.h2_builder.initial_stream_window_size(sz.into()); + self.inner.http2_initial_stream_window_size(sz); self } /// Sets the max connection-level flow control for HTTP2 - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_connection_window_size( &mut self, sz: impl Into>, ) -> &mut Self { - self.h2_builder.initial_connection_window_size(sz.into()); + self.inner.http2_initial_connection_window_size(sz); self } /// Sets the initial maximum of locally initiated (send) streams. - /// - /// This value will be overwritten by the value included in the initial - /// SETTINGS frame received from the peer as part of a [connection preface]. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. 
- /// - /// [connection preface]: https://httpwg.org/specs/rfc9113.html#preface #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_initial_max_send_streams( &mut self, initial: impl Into>, ) -> &mut Self { - self.h2_builder.initial_max_send_streams(initial); + self.inner.http2_initial_max_send_streams(initial); self } /// Sets whether to use an adaptive flow control. - /// - /// Enabling this will override the limits set in - /// `http2_initial_stream_window_size` and - /// `http2_initial_connection_window_size`. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_adaptive_window(&mut self, enabled: bool) -> &mut Self { - self.h2_builder.adaptive_window(enabled); + self.inner.http2_adaptive_window(enabled); self } /// Sets the maximum frame size to use for HTTP2. - /// - /// Passing `None` will do nothing. - /// - /// If not set, hyper will use a default. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_frame_size(&mut self, sz: impl Into>) -> &mut Self { - self.h2_builder.max_frame_size(sz); + self.inner.http2_max_frame_size(sz); self } /// Sets the max size of received header frames for HTTP2. - /// - /// Default is currently 16KB, but can change. 
#[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_header_list_size(&mut self, max: u32) -> &mut Self { - self.h2_builder.max_header_list_size(max); + self.inner.http2_max_header_list_size(max); self } @@ -1443,7 +434,7 @@ impl Builder { &mut self, interval: impl Into>, ) -> &mut Self { - self.h2_builder.keep_alive_interval(interval); + self.inner.http2_keep_alive_interval(interval); self } @@ -1461,7 +452,7 @@ impl Builder { #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_timeout(&mut self, timeout: Duration) -> &mut Self { - self.h2_builder.keep_alive_timeout(timeout); + self.inner.http2_keep_alive_timeout(timeout); self } @@ -1481,37 +472,24 @@ impl Builder { #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_keep_alive_while_idle(&mut self, enabled: bool) -> &mut Self { - self.h2_builder.keep_alive_while_idle(enabled); + self.inner.http2_keep_alive_while_idle(enabled); self } /// Sets the maximum number of HTTP2 concurrent locally reset streams. - /// - /// See the documentation of [`h2::client::Builder::max_concurrent_reset_streams`] for more - /// details. - /// - /// The default value is determined by the `h2` crate. - /// - /// [`h2::client::Builder::max_concurrent_reset_streams`]: https://docs.rs/h2/client/struct.Builder.html#method.max_concurrent_reset_streams #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_concurrent_reset_streams(&mut self, max: usize) -> &mut Self { - self.h2_builder.max_concurrent_reset_streams(max); + self.inner.http2_max_concurrent_reset_streams(max); self } /// Provide a timer to be used for h2 - /// - /// See the documentation of [`h2::client::Builder::timer`] for more - /// details. 
- /// - /// [`h2::client::Builder::timer`]: https://docs.rs/h2/client/struct.Builder.html#method.timer pub fn timer(&mut self, timer: M) -> &mut Self where M: Timer + Send + Sync + 'static, { - #[cfg(feature = "http2")] - self.h2_builder.timer(timer); + self.inner.timer(timer); self } @@ -1520,65 +498,47 @@ impl Builder { where M: Timer + Clone + Send + Sync + 'static, { - self.pool_timer = Some(timer::Timer::new(timer.clone())); + self.inner.pool_timer(timer); self } /// Set the maximum write buffer size for each HTTP/2 stream. - /// - /// Default is currently 1MB, but may change. - /// - /// # Panics - /// - /// The value must be no larger than `u32::MAX`. #[cfg(feature = "http2")] #[cfg_attr(docsrs, doc(cfg(feature = "http2")))] pub fn http2_max_send_buf_size(&mut self, max: usize) -> &mut Self { - self.h2_builder.max_send_buf_size(max); + self.inner.http2_max_send_buf_size(max); self } /// Set whether to retry requests that get disrupted before ever starting /// to write. - /// - /// This means a request that is queued, and gets given an idle, reused - /// connection, and then encounters an error immediately as the idle - /// connection was found to be unusable. - /// - /// When this is set to `false`, the related `ResponseFuture` would instead - /// resolve to an `Error::Cancel`. - /// - /// Default is `true`. #[inline] pub fn retry_canceled_requests(&mut self, val: bool) -> &mut Self { - self.client_config.retry_canceled_requests = val; + self.inner.retry_canceled_requests(val); self } /// Set whether to automatically add the `Host` header to requests. - /// - /// If true, and a request does not include a `Host` header, one will be - /// added automatically, derived from the authority of the `Uri`. - /// - /// Default is `true`. #[inline] pub fn set_host(&mut self, val: bool) -> &mut Self { - self.client_config.set_host = val; + self.inner.set_host(val); self } - /// Build a client with this configuration and the default `HttpConnector`. 
- #[cfg(feature = "tokio")] + /// Build a client with this configuration and the default `TokioHttpConnector`. + #[cfg(feature = "tokio-net")] pub fn build_http(&self) -> Client where B: Body + Send, B::Data: Send, { let mut connector = HttpConnector::new(); - if self.pool_config.is_enabled() { - connector.set_keepalive(self.pool_config.idle_timeout); + if self.inner.pool_config.is_enabled() { + connector.set_keepalive(self.inner.pool_config.idle_timeout); + } + Client { + inner: self.inner.build(connector), } - self.build(connector) } /// Combine the configuration of this builder with a connector to create a `Client`. @@ -1588,83 +548,14 @@ impl Builder { B: Body + Send, B::Data: Send, { - let exec = self.exec.clone(); - let timer = self.pool_timer.clone(); Client { - config: self.client_config, - exec: exec.clone(), - #[cfg(feature = "http1")] - h1_builder: self.h1_builder.clone(), - #[cfg(feature = "http2")] - h2_builder: self.h2_builder.clone(), - connector, - pool: pool::Pool::new(self.pool_config, exec, timer), + inner: self.inner.build(connector), } } } impl fmt::Debug for Builder { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Builder") - .field("client_config", &self.client_config) - .field("pool_config", &self.pool_config) - .finish() - } -} - -// ==== impl Error ==== - -impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut f = f.debug_tuple("hyper_util::client::legacy::Error"); - f.field(&self.kind); - if let Some(ref cause) = self.source { - f.field(cause); - } - f.finish() - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "client error ({:?})", self.kind) - } -} - -impl StdError for Error { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - self.source.as_ref().map(|e| &**e as _) - } -} - -impl Error { - /// Returns true if this was an error from `Connect`. 
- pub fn is_connect(&self) -> bool { - matches!(self.kind, ErrorKind::Connect) - } - - /// Returns the info of the client connection on which this error occurred. - #[cfg(any(feature = "http1", feature = "http2"))] - pub fn connect_info(&self) -> Option<&Connected> { - self.connect_info.as_ref() - } - - #[cfg(any(feature = "http1", feature = "http2"))] - fn with_connect_info(self, connect_info: Connected) -> Self { - Self { - connect_info: Some(connect_info), - ..self - } - } - fn is_canceled(&self) -> bool { - matches!(self.kind, ErrorKind::Canceled) - } - - fn tx(src: hyper::Error) -> Self { - e!(SendRequest, src) - } - - fn closed(src: hyper::Error) -> Self { - e!(ChannelClosed, src) + f.debug_tuple("Builder").field(&self.inner).finish() } } diff --git a/src/client/legacy/connect/capture.rs b/src/client/legacy/connect/capture.rs index 67b2a5a6..d51c1bec 100644 --- a/src/client/legacy/connect/capture.rs +++ b/src/client/legacy/connect/capture.rs @@ -3,7 +3,7 @@ use std::{ops::Deref, sync::Arc}; use http::Request; use tokio::sync::watch; -use super::Connected; +use crate::client::connect::{self, Connected}; /// [`CaptureConnection`] allows callers to capture [`Connected`] information /// @@ -71,7 +71,13 @@ pub struct CaptureConnection { /// ``` pub fn capture_connection(request: &mut Request) -> CaptureConnection { let (tx, rx) = CaptureConnection::new(); - request.extensions_mut().insert(tx); + request + .extensions_mut() + .insert(connect::CaptureConnection::new( + move |connected: &Connected| { + tx.set(connected); + }, + )); rx } diff --git a/src/client/legacy/connect/dns.rs b/src/client/legacy/connect/dns.rs index 13beb542..c85db935 100644 --- a/src/client/legacy/connect/dns.rs +++ b/src/client/legacy/connect/dns.rs @@ -20,341 +20,5 @@ //! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080)))) //! }); //! 
``` -use std::error::Error; -use std::future::Future; -use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs}; -use std::pin::Pin; -use std::str::FromStr; -use std::task::{self, Poll}; -use std::{fmt, io, vec}; -use tokio::task::JoinHandle; -use tower_service::Service; - -pub(super) use self::sealed::Resolve; - -/// A domain name to resolve into IP addresses. -#[derive(Clone, Hash, Eq, PartialEq)] -pub struct Name { - host: Box, -} - -/// A resolver using blocking `getaddrinfo` calls in a threadpool. -#[derive(Clone)] -pub struct GaiResolver { - _priv: (), -} - -/// An iterator of IP addresses returned from `getaddrinfo`. -pub struct GaiAddrs { - inner: SocketAddrs, -} - -/// A future to resolve a name returned by `GaiResolver`. -pub struct GaiFuture { - inner: JoinHandle>, -} - -impl Name { - pub(super) fn new(host: Box) -> Name { - Name { host } - } - - /// View the hostname as a string slice. - pub fn as_str(&self) -> &str { - &self.host - } -} - -impl fmt::Debug for Name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Debug::fmt(&self.host, f) - } -} - -impl fmt::Display for Name { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(&self.host, f) - } -} - -impl FromStr for Name { - type Err = InvalidNameError; - - fn from_str(host: &str) -> Result { - // Possibly add validation later - Ok(Name::new(host.into())) - } -} - -/// Error indicating a given string was not a valid domain name. -#[derive(Debug)] -pub struct InvalidNameError(()); - -impl fmt::Display for InvalidNameError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("Not a valid domain name") - } -} - -impl Error for InvalidNameError {} - -impl GaiResolver { - /// Construct a new `GaiResolver`. 
- pub fn new() -> Self { - GaiResolver { _priv: () } - } -} - -impl Service for GaiResolver { - type Response = GaiAddrs; - type Error = io::Error; - type Future = GaiFuture; - - fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll> { - Poll::Ready(Ok(())) - } - - fn call(&mut self, name: Name) -> Self::Future { - let blocking = tokio::task::spawn_blocking(move || { - (&*name.host, 0) - .to_socket_addrs() - .map(|i| SocketAddrs { iter: i }) - }); - - GaiFuture { inner: blocking } - } -} - -impl fmt::Debug for GaiResolver { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("GaiResolver") - } -} - -impl Future for GaiFuture { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - Pin::new(&mut self.inner).poll(cx).map(|res| match res { - Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }), - Ok(Err(err)) => Err(err), - Err(join_err) => { - if join_err.is_cancelled() { - Err(io::Error::new(io::ErrorKind::Interrupted, join_err)) - } else { - panic!("gai background task failed: {join_err:?}") - } - } - }) - } -} - -impl fmt::Debug for GaiFuture { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("GaiFuture") - } -} - -impl Drop for GaiFuture { - fn drop(&mut self) { - self.inner.abort(); - } -} - -impl Iterator for GaiAddrs { - type Item = SocketAddr; - - fn next(&mut self) -> Option { - self.inner.next() - } -} - -impl fmt::Debug for GaiAddrs { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.pad("GaiAddrs") - } -} - -pub(super) struct SocketAddrs { - iter: vec::IntoIter, -} - -impl SocketAddrs { - pub(super) fn new(addrs: Vec) -> Self { - SocketAddrs { - iter: addrs.into_iter(), - } - } - - pub(super) fn try_parse(host: &str, port: u16) -> Option { - if let Ok(addr) = host.parse::() { - let addr = SocketAddrV4::new(addr, port); - return Some(SocketAddrs { - iter: vec![SocketAddr::V4(addr)].into_iter(), - }); - } - if let Ok(addr) = host.parse::() { - let addr = 
SocketAddrV6::new(addr, port, 0, 0); - return Some(SocketAddrs { - iter: vec![SocketAddr::V6(addr)].into_iter(), - }); - } - None - } - - #[inline] - fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs { - SocketAddrs::new(self.iter.filter(predicate).collect()) - } - - pub(super) fn split_by_preference( - self, - local_addr_ipv4: Option, - local_addr_ipv6: Option, - ) -> (SocketAddrs, SocketAddrs) { - match (local_addr_ipv4, local_addr_ipv6) { - (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])), - (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])), - _ => { - let preferring_v6 = self - .iter - .as_slice() - .first() - .map(SocketAddr::is_ipv6) - .unwrap_or(false); - - let (preferred, fallback) = self - .iter - .partition::, _>(|addr| addr.is_ipv6() == preferring_v6); - - (SocketAddrs::new(preferred), SocketAddrs::new(fallback)) - } - } - } - - pub(super) fn is_empty(&self) -> bool { - self.iter.as_slice().is_empty() - } - - pub(super) fn len(&self) -> usize { - self.iter.as_slice().len() - } -} - -impl Iterator for SocketAddrs { - type Item = SocketAddr; - #[inline] - fn next(&mut self) -> Option { - self.iter.next() - } -} - -mod sealed { - use std::future::Future; - use std::task::{self, Poll}; - - use super::{Name, SocketAddr}; - use tower_service::Service; - - // "Trait alias" for `Service` - pub trait Resolve { - type Addrs: Iterator; - type Error: Into>; - type Future: Future>; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll>; - fn resolve(&mut self, name: Name) -> Self::Future; - } - - impl Resolve for S - where - S: Service, - S::Response: Iterator, - S::Error: Into>, - { - type Addrs = S::Response; - type Error = S::Error; - type Future = S::Future; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - Service::poll_ready(self, cx) - } - - fn resolve(&mut self, name: Name) -> Self::Future { - Service::call(self, name) - } - } -} - -pub(super) 
async fn resolve(resolver: &mut R, name: Name) -> Result -where - R: Resolve, -{ - std::future::poll_fn(|cx| resolver.poll_ready(cx)).await?; - resolver.resolve(name).await -} - -#[cfg(test)] -mod tests { - use super::*; - use std::net::{Ipv4Addr, Ipv6Addr}; - - #[test] - fn test_ip_addrs_split_by_preference() { - let ip_v4 = Ipv4Addr::new(127, 0, 0, 1); - let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); - let v4_addr = (ip_v4, 80).into(); - let v6_addr = (ip_v6, 80).into(); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(None, None); - assert!(preferred.next().unwrap().is_ipv4()); - assert!(fallback.next().unwrap().is_ipv6()); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v6_addr, v4_addr].into_iter(), - } - .split_by_preference(None, None); - assert!(preferred.next().unwrap().is_ipv6()); - assert!(fallback.next().unwrap().is_ipv4()); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(Some(ip_v4), Some(ip_v6)); - assert!(preferred.next().unwrap().is_ipv4()); - assert!(fallback.next().unwrap().is_ipv6()); - - let (mut preferred, mut fallback) = SocketAddrs { - iter: vec![v6_addr, v4_addr].into_iter(), - } - .split_by_preference(Some(ip_v4), Some(ip_v6)); - assert!(preferred.next().unwrap().is_ipv6()); - assert!(fallback.next().unwrap().is_ipv4()); - - let (mut preferred, fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(Some(ip_v4), None); - assert!(preferred.next().unwrap().is_ipv4()); - assert!(fallback.is_empty()); - - let (mut preferred, fallback) = SocketAddrs { - iter: vec![v4_addr, v6_addr].into_iter(), - } - .split_by_preference(None, Some(ip_v6)); - assert!(preferred.next().unwrap().is_ipv6()); - assert!(fallback.is_empty()); - } - - #[test] - fn test_name_from_str() { - const DOMAIN: &str = "test.example.com"; - let name = 
Name::from_str(DOMAIN).expect("Should be a valid domain"); - assert_eq!(name.as_str(), DOMAIN); - assert_eq!(name.to_string(), DOMAIN); - } -} +pub use crate::client::connect::dns::tokio::TokioGaiResolver; diff --git a/src/client/legacy/connect/http.rs b/src/client/legacy/connect/http.rs index 00d65664..5c105abb 100644 --- a/src/client/legacy/connect/http.rs +++ b/src/client/legacy/connect/http.rs @@ -1,1449 +1,2 @@ -use std::error::Error as StdError; -use std::fmt; -use std::future::Future; -use std::io; -use std::marker::PhantomData; -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use std::pin::Pin; -use std::sync::Arc; -use std::task::{self, ready, Poll}; -use std::time::Duration; - -use futures_util::future::Either; -use http::uri::{Scheme, Uri}; -use pin_project_lite::pin_project; -use socket2::TcpKeepalive; -use tokio::net::{TcpSocket, TcpStream}; -use tokio::time::Sleep; -use tracing::{debug, trace, warn}; - -use super::dns::{self, resolve, GaiResolver, Resolve}; -use super::{Connected, Connection}; -use crate::rt::TokioIo; - -/// A connector for the `http` scheme. -/// -/// Performs DNS resolution in a thread pool, and then connects over TCP. -/// -/// # Note -/// -/// Sets the [`HttpInfo`](HttpInfo) value on responses, which includes -/// transport information such as the remote socket address used. -#[derive(Clone)] -pub struct HttpConnector { - config: Arc, - resolver: R, -} - -/// Extra information about the transport when an HttpConnector is used. -/// -/// # Example -/// -/// ``` -/// # fn doc(res: http::Response<()>) { -/// use hyper_util::client::legacy::connect::HttpInfo; -/// -/// // res = http::Response -/// res -/// .extensions() -/// .get::() -/// .map(|info| { -/// println!("remote addr = {}", info.remote_addr()); -/// }); -/// # } -/// ``` -/// -/// # Note -/// -/// If a different connector is used besides [`HttpConnector`](HttpConnector), -/// this value will not exist in the extensions. 
Consult that specific -/// connector to see what "extra" information it might provide to responses. -#[derive(Clone, Debug)] -pub struct HttpInfo { - remote_addr: SocketAddr, - local_addr: SocketAddr, -} - -#[derive(Clone)] -struct Config { - connect_timeout: Option, - enforce_http: bool, - happy_eyeballs_timeout: Option, - tcp_keepalive_config: TcpKeepaliveConfig, - local_address_ipv4: Option, - local_address_ipv6: Option, - nodelay: bool, - reuse_address: bool, - send_buffer_size: Option, - recv_buffer_size: Option, - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - interface: Option, - #[cfg(any( - target_os = "illumos", - target_os = "ios", - target_os = "macos", - target_os = "solaris", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] - interface: Option, - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - tcp_user_timeout: Option, -} - -#[derive(Default, Debug, Clone, Copy)] -struct TcpKeepaliveConfig { - time: Option, - interval: Option, - retries: Option, -} - -impl TcpKeepaliveConfig { - /// Converts into a `socket2::TcpKeealive` if there is any keep alive configuration. 
- fn into_tcpkeepalive(self) -> Option { - let mut dirty = false; - let mut ka = TcpKeepalive::new(); - if let Some(time) = self.time { - ka = ka.with_time(time); - dirty = true - } - if let Some(interval) = self.interval { - ka = Self::ka_with_interval(ka, interval, &mut dirty) - }; - if let Some(retries) = self.retries { - ka = Self::ka_with_retries(ka, retries, &mut dirty) - }; - if dirty { - Some(ka) - } else { - None - } - } - - #[cfg( - // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525 - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "visionos", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", - target_os = "windows", - ) - )] - fn ka_with_interval(ka: TcpKeepalive, interval: Duration, dirty: &mut bool) -> TcpKeepalive { - *dirty = true; - ka.with_interval(interval) - } - - #[cfg(not( - // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#511-525 - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "visionos", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", - target_os = "windows", - ) - ))] - fn ka_with_interval(ka: TcpKeepalive, _: Duration, _: &mut bool) -> TcpKeepalive { - ka // no-op as keepalive interval is not supported on this platform - } - - #[cfg( - // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570 - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "visionos", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", - ) - )] - fn ka_with_retries(ka: TcpKeepalive, 
retries: u32, dirty: &mut bool) -> TcpKeepalive { - *dirty = true; - ka.with_retries(retries) - } - - #[cfg(not( - // See https://docs.rs/socket2/0.5.8/src/socket2/lib.rs.html#557-570 - any( - target_os = "android", - target_os = "dragonfly", - target_os = "freebsd", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "visionos", - target_os = "linux", - target_os = "macos", - target_os = "netbsd", - target_os = "tvos", - target_os = "watchos", - ) - ))] - fn ka_with_retries(ka: TcpKeepalive, _: u32, _: &mut bool) -> TcpKeepalive { - ka // no-op as keepalive retries is not supported on this platform - } -} - -// ===== impl HttpConnector ===== - -impl HttpConnector { - /// Construct a new HttpConnector. - pub fn new() -> HttpConnector { - HttpConnector::new_with_resolver(GaiResolver::new()) - } -} - -impl HttpConnector { - /// Construct a new HttpConnector. - /// - /// Takes a [`Resolver`](crate::client::legacy::connect::dns#resolvers-are-services) to handle DNS lookups. - pub fn new_with_resolver(resolver: R) -> HttpConnector { - HttpConnector { - config: Arc::new(Config { - connect_timeout: None, - enforce_http: true, - happy_eyeballs_timeout: Some(Duration::from_millis(300)), - tcp_keepalive_config: TcpKeepaliveConfig::default(), - local_address_ipv4: None, - local_address_ipv6: None, - nodelay: false, - reuse_address: false, - send_buffer_size: None, - recv_buffer_size: None, - #[cfg(any( - target_os = "android", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "solaris", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] - interface: None, - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - tcp_user_timeout: None, - }), - resolver, - } - } - - /// Option to enforce all `Uri`s have the `http` scheme. - /// - /// Enabled by default. 
- #[inline] - pub fn enforce_http(&mut self, is_enforced: bool) { - self.config_mut().enforce_http = is_enforced; - } - - /// Set that all sockets have `SO_KEEPALIVE` set with the supplied duration - /// to remain idle before sending TCP keepalive probes. - /// - /// If `None`, keepalive is disabled. - /// - /// Default is `None`. - #[inline] - pub fn set_keepalive(&mut self, time: Option) { - self.config_mut().tcp_keepalive_config.time = time; - } - - /// Set the duration between two successive TCP keepalive retransmissions, - /// if acknowledgement to the previous keepalive transmission is not received. - #[inline] - pub fn set_keepalive_interval(&mut self, interval: Option) { - self.config_mut().tcp_keepalive_config.interval = interval; - } - - /// Set the number of retransmissions to be carried out before declaring that remote end is not available. - #[inline] - pub fn set_keepalive_retries(&mut self, retries: Option) { - self.config_mut().tcp_keepalive_config.retries = retries; - } - - /// Set that all sockets have `SO_NODELAY` set to the supplied value `nodelay`. - /// - /// Default is `false`. - #[inline] - pub fn set_nodelay(&mut self, nodelay: bool) { - self.config_mut().nodelay = nodelay; - } - - /// Sets the value of the SO_SNDBUF option on the socket. - #[inline] - pub fn set_send_buffer_size(&mut self, size: Option) { - self.config_mut().send_buffer_size = size; - } - - /// Sets the value of the SO_RCVBUF option on the socket. - #[inline] - pub fn set_recv_buffer_size(&mut self, size: Option) { - self.config_mut().recv_buffer_size = size; - } - - /// Set that all sockets are bound to the configured address before connection. - /// - /// If `None`, the sockets will not be bound. - /// - /// Default is `None`. 
- #[inline] - pub fn set_local_address(&mut self, addr: Option) { - let (v4, v6) = match addr { - Some(IpAddr::V4(a)) => (Some(a), None), - Some(IpAddr::V6(a)) => (None, Some(a)), - _ => (None, None), - }; - - let cfg = self.config_mut(); - - cfg.local_address_ipv4 = v4; - cfg.local_address_ipv6 = v6; - } - - /// Set that all sockets are bound to the configured IPv4 or IPv6 address (depending on host's - /// preferences) before connection. - #[inline] - pub fn set_local_addresses(&mut self, addr_ipv4: Ipv4Addr, addr_ipv6: Ipv6Addr) { - let cfg = self.config_mut(); - - cfg.local_address_ipv4 = Some(addr_ipv4); - cfg.local_address_ipv6 = Some(addr_ipv6); - } - - /// Set the connect timeout. - /// - /// If a domain resolves to multiple IP addresses, the timeout will be - /// evenly divided across them. - /// - /// Default is `None`. - #[inline] - pub fn set_connect_timeout(&mut self, dur: Option) { - self.config_mut().connect_timeout = dur; - } - - /// Set timeout for [RFC 6555 (Happy Eyeballs)][RFC 6555] algorithm. - /// - /// If hostname resolves to both IPv4 and IPv6 addresses and connection - /// cannot be established using preferred address family before timeout - /// elapses, then connector will in parallel attempt connection using other - /// address family. - /// - /// If `None`, parallel connection attempts are disabled. - /// - /// Default is 300 milliseconds. - /// - /// [RFC 6555]: https://tools.ietf.org/html/rfc6555 - #[inline] - pub fn set_happy_eyeballs_timeout(&mut self, dur: Option) { - self.config_mut().happy_eyeballs_timeout = dur; - } - - /// Set that all socket have `SO_REUSEADDR` set to the supplied value `reuse_address`. - /// - /// Default is `false`. - #[inline] - pub fn set_reuse_address(&mut self, reuse_address: bool) -> &mut Self { - self.config_mut().reuse_address = reuse_address; - self - } - - /// Sets the name of the interface to bind sockets produced by this - /// connector. 
- /// - /// On Linux, this sets the `SO_BINDTODEVICE` option on this socket (see - /// [`man 7 socket`] for details). On macOS (and macOS-derived systems like - /// iOS), illumos, and Solaris, this will instead use the `IP_BOUND_IF` - /// socket option (see [`man 7p ip`]). - /// - /// If a socket is bound to an interface, only packets received from that particular - /// interface are processed by the socket. Note that this only works for some socket - /// types, particularly `AF_INET`` sockets. - /// - /// On Linux it can be used to specify a [VRF], but the binary needs - /// to either have `CAP_NET_RAW` or to be run as root. - /// - /// This function is only available on the following operating systems: - /// - Linux, including Android - /// - Fuchsia - /// - illumos and Solaris - /// - macOS, iOS, visionOS, watchOS, and tvOS - /// - /// [VRF]: https://www.kernel.org/doc/Documentation/networking/vrf.txt - /// [`man 7 socket`]: https://man7.org/linux/man-pages/man7/socket.7.html - /// [`man 7p ip`]: https://docs.oracle.com/cd/E86824_01/html/E54777/ip-7p.html - #[cfg(any( - target_os = "android", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "solaris", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] - #[inline] - pub fn set_interface>(&mut self, interface: S) -> &mut Self { - let interface = interface.into(); - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - { - self.config_mut().interface = Some(interface); - } - #[cfg(not(any(target_os = "android", target_os = "fuchsia", target_os = "linux")))] - { - let interface = std::ffi::CString::new(interface) - .expect("interface name should not have nulls in it"); - self.config_mut().interface = Some(interface); - } - self - } - - /// Sets the value of the TCP_USER_TIMEOUT option on the socket. 
- #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - #[inline] - pub fn set_tcp_user_timeout(&mut self, time: Option) { - self.config_mut().tcp_user_timeout = time; - } - - // private - - fn config_mut(&mut self) -> &mut Config { - // If the are HttpConnector clones, this will clone the inner - // config. So mutating the config won't ever affect previous - // clones. - Arc::make_mut(&mut self.config) - } -} - -static INVALID_NOT_HTTP: &str = "invalid URL, scheme is not http"; -static INVALID_MISSING_SCHEME: &str = "invalid URL, scheme is missing"; -static INVALID_MISSING_HOST: &str = "invalid URL, host is missing"; - -// R: Debug required for now to allow adding it to debug output later... -impl fmt::Debug for HttpConnector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("HttpConnector").finish() - } -} - -impl tower_service::Service for HttpConnector -where - R: Resolve + Clone + Send + Sync + 'static, - R::Future: Send, -{ - type Response = TokioIo; - type Error = ConnectError; - type Future = HttpConnecting; - - fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll> { - ready!(self.resolver.poll_ready(cx)).map_err(ConnectError::dns)?; - Poll::Ready(Ok(())) - } - - fn call(&mut self, dst: Uri) -> Self::Future { - let mut self_ = self.clone(); - HttpConnecting { - fut: Box::pin(async move { self_.call_async(dst).await }), - _marker: PhantomData, - } - } -} - -fn get_host_port<'u>(config: &Config, dst: &'u Uri) -> Result<(&'u str, u16), ConnectError> { - trace!( - "Http::connect; scheme={:?}, host={:?}, port={:?}", - dst.scheme(), - dst.host(), - dst.port(), - ); - - if config.enforce_http { - if dst.scheme() != Some(&Scheme::HTTP) { - return Err(ConnectError { - msg: INVALID_NOT_HTTP, - addr: None, - cause: None, - }); - } - } else if dst.scheme().is_none() { - return Err(ConnectError { - msg: INVALID_MISSING_SCHEME, - addr: None, - cause: None, - }); - } - - let host = match dst.host() { - 
Some(s) => s, - None => { - return Err(ConnectError { - msg: INVALID_MISSING_HOST, - addr: None, - cause: None, - }); - } - }; - let port = match dst.port() { - Some(port) => port.as_u16(), - None => { - if dst.scheme() == Some(&Scheme::HTTPS) { - 443 - } else { - 80 - } - } - }; - - Ok((host, port)) -} - -impl HttpConnector -where - R: Resolve, -{ - async fn call_async(&mut self, dst: Uri) -> Result, ConnectError> { - let config = &self.config; - - let (host, port) = get_host_port(config, &dst)?; - let host = host.trim_start_matches('[').trim_end_matches(']'); - - // If the host is already an IP addr (v4 or v6), - // skip resolving the dns and start connecting right away. - let addrs = if let Some(addrs) = dns::SocketAddrs::try_parse(host, port) { - addrs - } else { - let addrs = resolve(&mut self.resolver, dns::Name::new(host.into())) - .await - .map_err(ConnectError::dns)?; - let addrs = addrs - .map(|mut addr| { - set_port(&mut addr, port, dst.port().is_some()); - - addr - }) - .collect(); - dns::SocketAddrs::new(addrs) - }; - - let c = ConnectingTcp::new(addrs, config); - - let sock = c.connect().await?; - - if let Err(e) = sock.set_nodelay(config.nodelay) { - warn!("tcp set_nodelay error: {}", e); - } - - Ok(TokioIo::new(sock)) - } -} - -impl Connection for TcpStream { - fn connected(&self) -> Connected { - let connected = Connected::new(); - if let (Ok(remote_addr), Ok(local_addr)) = (self.peer_addr(), self.local_addr()) { - connected.extra(HttpInfo { - remote_addr, - local_addr, - }) - } else { - connected - } - } -} - -#[cfg(unix)] -impl Connection for tokio::net::UnixStream { - fn connected(&self) -> Connected { - Connected::new() - } -} - -#[cfg(windows)] -impl Connection for tokio::net::windows::named_pipe::NamedPipeClient { - fn connected(&self) -> Connected { - Connected::new() - } -} - -// Implement `Connection` for generic `TokioIo` so that external crates can -// implement their own `HttpConnector` with `TokioIo`. 
-impl Connection for TokioIo -where - T: Connection, -{ - fn connected(&self) -> Connected { - self.inner().connected() - } -} - -impl HttpInfo { - /// Get the remote address of the transport used. - pub fn remote_addr(&self) -> SocketAddr { - self.remote_addr - } - - /// Get the local address of the transport used. - pub fn local_addr(&self) -> SocketAddr { - self.local_addr - } -} - -pin_project! { - // Not publicly exported (so missing_docs doesn't trigger). - // - // We return this `Future` instead of the `Pin>` directly - // so that users don't rely on it fitting in a `Pin>` slot - // (and thus we can change the type in the future). - #[must_use = "futures do nothing unless polled"] - #[allow(missing_debug_implementations)] - pub struct HttpConnecting { - #[pin] - fut: BoxConnecting, - _marker: PhantomData, - } -} - -type ConnectResult = Result, ConnectError>; -type BoxConnecting = Pin + Send>>; - -impl Future for HttpConnecting { - type Output = ConnectResult; - - fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - self.project().fut.poll(cx) - } -} - -// Not publicly exported (so missing_docs doesn't trigger). 
-pub struct ConnectError { - msg: &'static str, - addr: Option, - cause: Option>, -} - -impl ConnectError { - fn new(msg: &'static str, cause: E) -> ConnectError - where - E: Into>, - { - ConnectError { - msg, - addr: None, - cause: Some(cause.into()), - } - } - - fn dns(cause: E) -> ConnectError - where - E: Into>, - { - ConnectError::new("dns error", cause) - } - - fn m(msg: &'static str) -> impl FnOnce(E) -> ConnectError - where - E: Into>, - { - move |cause| ConnectError::new(msg, cause) - } -} - -impl fmt::Debug for ConnectError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut b = f.debug_tuple("ConnectError"); - b.field(&self.msg); - if let Some(ref addr) = self.addr { - b.field(addr); - } - if let Some(ref cause) = self.cause { - b.field(cause); - } - b.finish() - } -} - -impl fmt::Display for ConnectError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(self.msg) - } -} - -impl StdError for ConnectError { - fn source(&self) -> Option<&(dyn StdError + 'static)> { - self.cause.as_ref().map(|e| &**e as _) - } -} - -struct ConnectingTcp<'a> { - preferred: ConnectingTcpRemote, - fallback: Option, - config: &'a Config, -} - -impl<'a> ConnectingTcp<'a> { - fn new(remote_addrs: dns::SocketAddrs, config: &'a Config) -> Self { - if let Some(fallback_timeout) = config.happy_eyeballs_timeout { - let (preferred_addrs, fallback_addrs) = remote_addrs - .split_by_preference(config.local_address_ipv4, config.local_address_ipv6); - if fallback_addrs.is_empty() { - return ConnectingTcp { - preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), - fallback: None, - config, - }; - } - - ConnectingTcp { - preferred: ConnectingTcpRemote::new(preferred_addrs, config.connect_timeout), - fallback: Some(ConnectingTcpFallback { - delay: tokio::time::sleep(fallback_timeout), - remote: ConnectingTcpRemote::new(fallback_addrs, config.connect_timeout), - }), - config, - } - } else { - ConnectingTcp { - preferred: 
ConnectingTcpRemote::new(remote_addrs, config.connect_timeout), - fallback: None, - config, - } - } - } -} - -struct ConnectingTcpFallback { - delay: Sleep, - remote: ConnectingTcpRemote, -} - -struct ConnectingTcpRemote { - addrs: dns::SocketAddrs, - connect_timeout: Option, -} - -impl ConnectingTcpRemote { - fn new(addrs: dns::SocketAddrs, connect_timeout: Option) -> Self { - let connect_timeout = connect_timeout.and_then(|t| t.checked_div(addrs.len() as u32)); - - Self { - addrs, - connect_timeout, - } - } -} - -impl ConnectingTcpRemote { - async fn connect(&mut self, config: &Config) -> Result { - let mut err = None; - for addr in &mut self.addrs { - debug!("connecting to {}", addr); - match connect(&addr, config, self.connect_timeout)?.await { - Ok(tcp) => { - debug!("connected to {}", addr); - return Ok(tcp); - } - Err(mut e) => { - trace!("connect error for {}: {:?}", addr, e); - e.addr = Some(addr); - // only return the first error, we assume it's the most relevant - if err.is_none() { - err = Some(e); - } - } - } - } - - match err { - Some(e) => Err(e), - None => Err(ConnectError::new( - "tcp connect error", - std::io::Error::new(std::io::ErrorKind::NotConnected, "Network unreachable"), - )), - } - } -} - -fn bind_local_address( - socket: &socket2::Socket, - dst_addr: &SocketAddr, - local_addr_ipv4: &Option, - local_addr_ipv6: &Option, -) -> io::Result<()> { - match (*dst_addr, local_addr_ipv4, local_addr_ipv6) { - (SocketAddr::V4(_), Some(addr), _) => { - socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; - } - (SocketAddr::V6(_), _, Some(addr)) => { - socket.bind(&SocketAddr::new((*addr).into(), 0).into())?; - } - _ => { - if cfg!(windows) { - // Windows requires a socket be bound before calling connect - let any: SocketAddr = match *dst_addr { - SocketAddr::V4(_) => ([0, 0, 0, 0], 0).into(), - SocketAddr::V6(_) => ([0, 0, 0, 0, 0, 0, 0, 0], 0).into(), - }; - socket.bind(&any.into())?; - } - } - } - - Ok(()) -} - -fn connect( - addr: &SocketAddr, 
- config: &Config, - connect_timeout: Option, -) -> Result>, ConnectError> { - // TODO(eliza): if Tokio's `TcpSocket` gains support for setting the - // keepalive timeout, it would be nice to use that instead of socket2, - // and avoid the unsafe `into_raw_fd`/`from_raw_fd` dance... - use socket2::{Domain, Protocol, Socket, Type}; - - let domain = Domain::for_address(*addr); - let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)) - .map_err(ConnectError::m("tcp open error"))?; - - // When constructing a Tokio `TcpSocket` from a raw fd/socket, the user is - // responsible for ensuring O_NONBLOCK is set. - socket - .set_nonblocking(true) - .map_err(ConnectError::m("tcp set_nonblocking error"))?; - - if let Some(tcp_keepalive) = &config.tcp_keepalive_config.into_tcpkeepalive() { - if let Err(e) = socket.set_tcp_keepalive(tcp_keepalive) { - warn!("tcp set_keepalive error: {}", e); - } - } - - // That this only works for some socket types, particularly AF_INET sockets. - #[cfg(any( - target_os = "android", - target_os = "fuchsia", - target_os = "illumos", - target_os = "ios", - target_os = "linux", - target_os = "macos", - target_os = "solaris", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] - if let Some(interface) = &config.interface { - // On Linux-like systems, set the interface to bind using - // `SO_BINDTODEVICE`. - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - socket - .bind_device(Some(interface.as_bytes())) - .map_err(ConnectError::m("tcp bind interface error"))?; - - // On macOS-like and Solaris-like systems, we instead use `IP_BOUND_IF`. - // This socket option desires an integer index for the interface, so we - // must first determine the index of the requested interface name using - // `if_nametoindex`. 
- #[cfg(any( - target_os = "illumos", - target_os = "ios", - target_os = "macos", - target_os = "solaris", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] - { - let idx = unsafe { libc::if_nametoindex(interface.as_ptr()) }; - let idx = std::num::NonZeroU32::new(idx).ok_or_else(|| { - // If the index is 0, check errno and return an I/O error. - ConnectError::new( - "error converting interface name to index", - io::Error::last_os_error(), - ) - })?; - // Different setsockopt calls are necessary depending on whether the - // address is IPv4 or IPv6. - match addr { - SocketAddr::V4(_) => socket.bind_device_by_index_v4(Some(idx)), - SocketAddr::V6(_) => socket.bind_device_by_index_v6(Some(idx)), - } - .map_err(ConnectError::m("tcp bind interface error"))?; - } - } - - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - if let Some(tcp_user_timeout) = &config.tcp_user_timeout { - if let Err(e) = socket.set_tcp_user_timeout(Some(*tcp_user_timeout)) { - warn!("tcp set_tcp_user_timeout error: {}", e); - } - } - - bind_local_address( - &socket, - addr, - &config.local_address_ipv4, - &config.local_address_ipv6, - ) - .map_err(ConnectError::m("tcp bind local error"))?; - - // Convert the `Socket` to a Tokio `TcpSocket`. 
- let socket = TcpSocket::from_std_stream(socket.into()); - - if config.reuse_address { - if let Err(e) = socket.set_reuseaddr(true) { - warn!("tcp set_reuse_address error: {}", e); - } - } - - if let Some(size) = config.send_buffer_size { - if let Err(e) = socket.set_send_buffer_size(size.try_into().unwrap_or(u32::MAX)) { - warn!("tcp set_buffer_size error: {}", e); - } - } - - if let Some(size) = config.recv_buffer_size { - if let Err(e) = socket.set_recv_buffer_size(size.try_into().unwrap_or(u32::MAX)) { - warn!("tcp set_recv_buffer_size error: {}", e); - } - } - - let connect = socket.connect(*addr); - Ok(async move { - match connect_timeout { - Some(dur) => match tokio::time::timeout(dur, connect).await { - Ok(Ok(s)) => Ok(s), - Ok(Err(e)) => Err(e), - Err(e) => Err(io::Error::new(io::ErrorKind::TimedOut, e)), - }, - None => connect.await, - } - .map_err(ConnectError::m("tcp connect error")) - }) -} - -impl ConnectingTcp<'_> { - async fn connect(mut self) -> Result { - match self.fallback { - None => self.preferred.connect(self.config).await, - Some(mut fallback) => { - let preferred_fut = self.preferred.connect(self.config); - futures_util::pin_mut!(preferred_fut); - - let fallback_fut = fallback.remote.connect(self.config); - futures_util::pin_mut!(fallback_fut); - - let fallback_delay = fallback.delay; - futures_util::pin_mut!(fallback_delay); - - let (result, future) = - match futures_util::future::select(preferred_fut, fallback_delay).await { - Either::Left((result, _fallback_delay)) => { - (result, Either::Right(fallback_fut)) - } - Either::Right(((), preferred_fut)) => { - // Delay is done, start polling both the preferred and the fallback - futures_util::future::select(preferred_fut, fallback_fut) - .await - .factor_first() - } - }; - - if result.is_err() { - // Fallback to the remaining future (could be preferred or fallback) - // if we get an error - future.await - } else { - result - } - } - } - } -} - -/// Respect explicit ports in the URI, if 
none, either -/// keep non `0` ports resolved from a custom dns resolver, -/// or use the default port for the scheme. -fn set_port(addr: &mut SocketAddr, host_port: u16, explicit: bool) { - if explicit || addr.port() == 0 { - addr.set_port(host_port) - }; -} - -#[cfg(test)] -mod tests { - use std::io; - use std::net::SocketAddr; - - use ::http::Uri; - - use crate::client::legacy::connect::http::TcpKeepaliveConfig; - - use super::super::sealed::{Connect, ConnectSvc}; - use super::{Config, ConnectError, HttpConnector}; - - use super::set_port; - - async fn connect( - connector: C, - dst: Uri, - ) -> Result<::Connection, ::Error> - where - C: Connect, - { - connector.connect(super::super::sealed::Internal, dst).await - } - - #[tokio::test] - async fn test_errors_enforce_http() { - let dst = "https://example.domain/foo/bar?baz".parse().unwrap(); - let connector = HttpConnector::new(); - - let err = connect(connector, dst).await.unwrap_err(); - assert_eq!(&*err.msg, super::INVALID_NOT_HTTP); - } - - #[cfg(any(target_os = "linux", target_os = "macos"))] - fn get_local_ips() -> (Option, Option) { - use std::net::{IpAddr, TcpListener}; - - let mut ip_v4 = None; - let mut ip_v6 = None; - - let ips = pnet_datalink::interfaces() - .into_iter() - .flat_map(|i| i.ips.into_iter().map(|n| n.ip())); - - for ip in ips { - match ip { - IpAddr::V4(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v4 = Some(ip), - IpAddr::V6(ip) if TcpListener::bind((ip, 0)).is_ok() => ip_v6 = Some(ip), - _ => (), - } - - if ip_v4.is_some() && ip_v6.is_some() { - break; - } - } - - (ip_v4, ip_v6) - } - - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - fn default_interface() -> Option { - pnet_datalink::interfaces() - .iter() - .find(|e| e.is_up() && !e.is_loopback() && !e.ips.is_empty()) - .map(|e| e.name.clone()) - } - - #[tokio::test] - async fn test_errors_missing_scheme() { - let dst = "example.domain".parse().unwrap(); - let mut connector = HttpConnector::new(); - 
connector.enforce_http(false); - - let err = connect(connector, dst).await.unwrap_err(); - assert_eq!(&*err.msg, super::INVALID_MISSING_SCHEME); - } - - // NOTE: pnet crate that we use in this test doesn't compile on Windows - #[cfg(any(target_os = "linux", target_os = "macos"))] - #[cfg_attr(miri, ignore)] - #[tokio::test] - async fn local_address() { - use std::net::{IpAddr, TcpListener}; - - let (bind_ip_v4, bind_ip_v6) = get_local_ips(); - let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); - let port = server4.local_addr().unwrap().port(); - let server6 = TcpListener::bind(format!("[::1]:{port}")).unwrap(); - - let assert_client_ip = |dst: String, server: TcpListener, expected_ip: IpAddr| async move { - let mut connector = HttpConnector::new(); - - match (bind_ip_v4, bind_ip_v6) { - (Some(v4), Some(v6)) => connector.set_local_addresses(v4, v6), - (Some(v4), None) => connector.set_local_address(Some(v4.into())), - (None, Some(v6)) => connector.set_local_address(Some(v6.into())), - _ => unreachable!(), - } - - connect(connector, dst.parse().unwrap()).await.unwrap(); - - let (_, client_addr) = server.accept().unwrap(); - - assert_eq!(client_addr.ip(), expected_ip); - }; - - if let Some(ip) = bind_ip_v4 { - assert_client_ip(format!("http://127.0.0.1:{port}"), server4, ip.into()).await; - } - - if let Some(ip) = bind_ip_v6 { - assert_client_ip(format!("http://[::1]:{port}"), server6, ip.into()).await; - } - } - - // NOTE: pnet crate that we use in this test doesn't compile on Windows - #[cfg(any(target_os = "android", target_os = "fuchsia", target_os = "linux"))] - #[tokio::test] - #[ignore = "setting `SO_BINDTODEVICE` requires the `CAP_NET_RAW` capability (works when running as root)"] - async fn interface() { - use socket2::{Domain, Protocol, Socket, Type}; - use std::net::TcpListener; - - let interface: Option = default_interface(); - - let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); - let port = server4.local_addr().unwrap().port(); - - let 
server6 = TcpListener::bind(format!("[::1]:{port}")).unwrap(); - - let assert_interface_name = - |dst: String, - server: TcpListener, - bind_iface: Option, - expected_interface: Option| async move { - let mut connector = HttpConnector::new(); - if let Some(iface) = bind_iface { - connector.set_interface(iface); - } - - connect(connector, dst.parse().unwrap()).await.unwrap(); - let domain = Domain::for_address(server.local_addr().unwrap()); - let socket = Socket::new(domain, Type::STREAM, Some(Protocol::TCP)).unwrap(); - - assert_eq!( - socket.device().unwrap().as_deref(), - expected_interface.as_deref().map(|val| val.as_bytes()) - ); - }; - - assert_interface_name( - format!("http://127.0.0.1:{port}"), - server4, - interface.clone(), - interface.clone(), - ) - .await; - assert_interface_name( - format!("http://[::1]:{port}"), - server6, - interface.clone(), - interface.clone(), - ) - .await; - } - - #[test] - #[ignore] // TODO - #[cfg_attr(not(feature = "__internal_happy_eyeballs_tests"), ignore)] - fn client_happy_eyeballs() { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, TcpListener}; - use std::time::{Duration, Instant}; - - use super::dns; - use super::ConnectingTcp; - - let server4 = TcpListener::bind("127.0.0.1:0").unwrap(); - let addr = server4.local_addr().unwrap(); - let _server6 = TcpListener::bind(format!("[::1]:{}", addr.port())).unwrap(); - let rt = tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap(); - - let local_timeout = Duration::default(); - let unreachable_v4_timeout = measure_connect(unreachable_ipv4_addr()).1; - let unreachable_v6_timeout = measure_connect(unreachable_ipv6_addr()).1; - let fallback_timeout = std::cmp::max(unreachable_v4_timeout, unreachable_v6_timeout) - + Duration::from_millis(250); - - let scenarios = &[ - // Fast primary, without fallback. - (&[local_ipv4_addr()][..], 4, local_timeout, false), - (&[local_ipv6_addr()][..], 6, local_timeout, false), - // Fast primary, with (unused) fallback. 
- ( - &[local_ipv4_addr(), local_ipv6_addr()][..], - 4, - local_timeout, - false, - ), - ( - &[local_ipv6_addr(), local_ipv4_addr()][..], - 6, - local_timeout, - false, - ), - // Unreachable + fast primary, without fallback. - ( - &[unreachable_ipv4_addr(), local_ipv4_addr()][..], - 4, - unreachable_v4_timeout, - false, - ), - ( - &[unreachable_ipv6_addr(), local_ipv6_addr()][..], - 6, - unreachable_v6_timeout, - false, - ), - // Unreachable + fast primary, with (unused) fallback. - ( - &[ - unreachable_ipv4_addr(), - local_ipv4_addr(), - local_ipv6_addr(), - ][..], - 4, - unreachable_v4_timeout, - false, - ), - ( - &[ - unreachable_ipv6_addr(), - local_ipv6_addr(), - local_ipv4_addr(), - ][..], - 6, - unreachable_v6_timeout, - true, - ), - // Slow primary, with (used) fallback. - ( - &[slow_ipv4_addr(), local_ipv4_addr(), local_ipv6_addr()][..], - 6, - fallback_timeout, - false, - ), - ( - &[slow_ipv6_addr(), local_ipv6_addr(), local_ipv4_addr()][..], - 4, - fallback_timeout, - true, - ), - // Slow primary, with (used) unreachable + fast fallback. - ( - &[slow_ipv4_addr(), unreachable_ipv6_addr(), local_ipv6_addr()][..], - 6, - fallback_timeout + unreachable_v6_timeout, - false, - ), - ( - &[slow_ipv6_addr(), unreachable_ipv4_addr(), local_ipv4_addr()][..], - 4, - fallback_timeout + unreachable_v4_timeout, - true, - ), - ]; - - // Scenarios for IPv6 -> IPv4 fallback require that host can access IPv6 network. - // Otherwise, connection to "slow" IPv6 address will error-out immediately. 
- let ipv6_accessible = measure_connect(slow_ipv6_addr()).0; - - for &(hosts, family, timeout, needs_ipv6_access) in scenarios { - if needs_ipv6_access && !ipv6_accessible { - continue; - } - - let (start, stream) = rt - .block_on(async move { - let addrs = hosts - .iter() - .map(|host| (*host, addr.port()).into()) - .collect(); - let cfg = Config { - local_address_ipv4: None, - local_address_ipv6: None, - connect_timeout: None, - tcp_keepalive_config: TcpKeepaliveConfig::default(), - happy_eyeballs_timeout: Some(fallback_timeout), - nodelay: false, - reuse_address: false, - enforce_http: false, - send_buffer_size: None, - recv_buffer_size: None, - #[cfg(any( - target_os = "android", - target_os = "fuchsia", - target_os = "linux" - ))] - interface: None, - #[cfg(any( - target_os = "illumos", - target_os = "ios", - target_os = "macos", - target_os = "solaris", - target_os = "tvos", - target_os = "visionos", - target_os = "watchos", - ))] - interface: None, - #[cfg(any( - target_os = "android", - target_os = "fuchsia", - target_os = "linux" - ))] - tcp_user_timeout: None, - }; - let connecting_tcp = ConnectingTcp::new(dns::SocketAddrs::new(addrs), &cfg); - let start = Instant::now(); - Ok::<_, ConnectError>((start, ConnectingTcp::connect(connecting_tcp).await?)) - }) - .unwrap(); - let res = if stream.peer_addr().unwrap().is_ipv4() { - 4 - } else { - 6 - }; - let duration = start.elapsed(); - - // Allow actual duration to be +/- 150ms off. 
- let min_duration = if timeout >= Duration::from_millis(150) { - timeout - Duration::from_millis(150) - } else { - Duration::default() - }; - let max_duration = timeout + Duration::from_millis(150); - - assert_eq!(res, family); - assert!(duration >= min_duration); - assert!(duration <= max_duration); - } - - fn local_ipv4_addr() -> IpAddr { - Ipv4Addr::new(127, 0, 0, 1).into() - } - - fn local_ipv6_addr() -> IpAddr { - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1).into() - } - - fn unreachable_ipv4_addr() -> IpAddr { - Ipv4Addr::new(127, 0, 0, 2).into() - } - - fn unreachable_ipv6_addr() -> IpAddr { - Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 2).into() - } - - fn slow_ipv4_addr() -> IpAddr { - // RFC 6890 reserved IPv4 address. - Ipv4Addr::new(198, 18, 0, 25).into() - } - - fn slow_ipv6_addr() -> IpAddr { - // RFC 6890 reserved IPv6 address. - Ipv6Addr::new(2001, 2, 0, 0, 0, 0, 0, 254).into() - } - - fn measure_connect(addr: IpAddr) -> (bool, Duration) { - let start = Instant::now(); - let result = - std::net::TcpStream::connect_timeout(&(addr, 80).into(), Duration::from_secs(1)); - - let reachable = result.is_ok() || result.unwrap_err().kind() == io::ErrorKind::TimedOut; - let duration = start.elapsed(); - (reachable, duration) - } - } - - use std::time::Duration; - - #[test] - fn no_tcp_keepalive_config() { - assert!(TcpKeepaliveConfig::default().into_tcpkeepalive().is_none()); - } - - #[test] - fn tcp_keepalive_time_config() { - let kac = TcpKeepaliveConfig { - time: Some(Duration::from_secs(60)), - ..Default::default() - }; - if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { - assert!(format!("{tcp_keepalive:?}").contains("time: Some(60s)")); - } else { - panic!("test failed"); - } - } - - #[cfg(not(any(target_os = "openbsd", target_os = "redox", target_os = "solaris")))] - #[test] - fn tcp_keepalive_interval_config() { - let kac = TcpKeepaliveConfig { - interval: Some(Duration::from_secs(1)), - ..Default::default() - }; - if let Some(tcp_keepalive) = 
kac.into_tcpkeepalive() { - assert!(format!("{tcp_keepalive:?}").contains("interval: Some(1s)")); - } else { - panic!("test failed"); - } - } - - #[cfg(not(any( - target_os = "openbsd", - target_os = "redox", - target_os = "solaris", - target_os = "windows" - )))] - #[test] - fn tcp_keepalive_retries_config() { - let kac = TcpKeepaliveConfig { - retries: Some(3), - ..Default::default() - }; - if let Some(tcp_keepalive) = kac.into_tcpkeepalive() { - assert!(format!("{tcp_keepalive:?}").contains("retries: Some(3)")); - } else { - panic!("test failed"); - } - } - - #[test] - fn test_set_port() { - // Respect explicit ports no matter what the resolved port is. - let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881)); - set_port(&mut addr, 42, true); - assert_eq!(addr.port(), 42); - - // Ignore default host port, and use the socket port instead. - let mut addr = SocketAddr::from(([0, 0, 0, 0], 6881)); - set_port(&mut addr, 443, false); - assert_eq!(addr.port(), 6881); - - // Use the default port if the resolved port is `0`. - let mut addr = SocketAddr::from(([0, 0, 0, 0], 0)); - set_port(&mut addr, 443, false); - assert_eq!(addr.port(), 443); - } -} +pub use crate::client::connect::http::tokio::TokioHttpConnector as HttpConnector; +pub use crate::client::connect::http::HttpInfo; diff --git a/src/client/legacy/connect/mod.rs b/src/client/legacy/connect/mod.rs index 90a97679..0f6d00bc 100644 --- a/src/client/legacy/connect/mod.rs +++ b/src/client/legacy/connect/mod.rs @@ -26,6 +26,10 @@ //! Or, fully written out: //! //! ``` +//! # #[cfg(not(feature = "tokio-net"))] +//! # fn main() {} +//! # #[cfg(feature = "tokio-net")] +//! # fn main() { //! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}}; //! use http::Uri; //! use tokio::net::TcpStream; @@ -51,6 +55,7 @@ //! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337)))) //! } //! } +//! # } //! ``` //! //! 
It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a @@ -62,383 +67,18 @@ //! [`Read`]: hyper::rt::Read //! [`Write`]: hyper::rt::Write //! [`Connection`]: Connection -use std::{ - fmt::{self, Formatter}, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, - }, -}; -use ::http::Extensions; +pub use crate::client::connect::{Connect, Connected, Connection}; -#[cfg(feature = "tokio")] -pub use self::http::{HttpConnector, HttpInfo}; +#[cfg(feature = "tokio-net")] +pub use http::{HttpConnector, HttpInfo}; -#[cfg(feature = "tokio")] +#[cfg(feature = "tokio-net")] pub mod dns; -#[cfg(feature = "tokio")] +#[cfg(feature = "tokio-net")] mod http; pub mod proxy; pub(crate) mod capture; pub use capture::{capture_connection, CaptureConnection}; - -pub use self::sealed::Connect; - -/// Describes a type returned by a connector. -pub trait Connection { - /// Return metadata describing the connection. - fn connected(&self) -> Connected; -} - -/// Extra information about the connected transport. -/// -/// This can be used to inform recipients about things like if ALPN -/// was used, or if connected to an HTTP proxy. 
-#[derive(Debug)] -pub struct Connected { - pub(super) alpn: Alpn, - pub(super) is_proxied: bool, - pub(super) extra: Option, - pub(super) poisoned: PoisonPill, -} - -#[derive(Clone)] -pub(crate) struct PoisonPill { - poisoned: Arc, -} - -impl fmt::Debug for PoisonPill { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - // print the address of the pill—this makes debugging issues much easier - write!( - f, - "PoisonPill@{:p} {{ poisoned: {} }}", - self.poisoned, - self.poisoned.load(Ordering::Relaxed) - ) - } -} - -impl PoisonPill { - pub(crate) fn healthy() -> Self { - Self { - poisoned: Arc::new(AtomicBool::new(false)), - } - } - pub(crate) fn poison(&self) { - self.poisoned.store(true, Ordering::Relaxed) - } - - pub(crate) fn poisoned(&self) -> bool { - self.poisoned.load(Ordering::Relaxed) - } -} - -pub(super) struct Extra(Box); - -#[derive(Clone, Copy, Debug, PartialEq)] -pub(super) enum Alpn { - H2, - None, -} - -impl Connected { - /// Create new `Connected` type with empty metadata. - pub fn new() -> Connected { - Connected { - alpn: Alpn::None, - is_proxied: false, - extra: None, - poisoned: PoisonPill::healthy(), - } - } - - /// Set whether the connected transport is to an HTTP proxy. - /// - /// This setting will affect if HTTP/1 requests written on the transport - /// will have the request-target in absolute-form or origin-form: - /// - /// - When `proxy(false)`: - /// - /// ```http - /// GET /guide HTTP/1.1 - /// ``` - /// - /// - When `proxy(true)`: - /// - /// ```http - /// GET http://hyper.rs/guide HTTP/1.1 - /// ``` - /// - /// Default is `false`. - pub fn proxy(mut self, is_proxied: bool) -> Connected { - self.is_proxied = is_proxied; - self - } - - /// Determines if the connected transport is to an HTTP proxy. - pub fn is_proxied(&self) -> bool { - self.is_proxied - } - - /// Set extra connection information to be set in the extensions of every `Response`. 
- pub fn extra(mut self, extra: T) -> Connected { - if let Some(prev) = self.extra { - self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra)))); - } else { - self.extra = Some(Extra(Box::new(ExtraEnvelope(extra)))); - } - self - } - - /// Copies the extra connection information into an `Extensions` map. - pub fn get_extras(&self, extensions: &mut Extensions) { - if let Some(extra) = &self.extra { - extra.set(extensions); - } - } - - /// Set that the connected transport negotiated HTTP/2 as its next protocol. - pub fn negotiated_h2(mut self) -> Connected { - self.alpn = Alpn::H2; - self - } - - /// Determines if the connected transport negotiated HTTP/2 as its next protocol. - pub fn is_negotiated_h2(&self) -> bool { - self.alpn == Alpn::H2 - } - - /// Poison this connection - /// - /// A poisoned connection will not be reused for subsequent requests by the pool - pub fn poison(&self) { - self.poisoned.poison(); - tracing::debug!( - poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests" - ); - } - - // Don't public expose that `Connected` is `Clone`, unsure if we want to - // keep that contract... - pub(super) fn clone(&self) -> Connected { - Connected { - alpn: self.alpn, - is_proxied: self.is_proxied, - extra: self.extra.clone(), - poisoned: self.poisoned.clone(), - } - } -} - -// ===== impl Extra ===== - -impl Extra { - pub(super) fn set(&self, res: &mut Extensions) { - self.0.set(res); - } -} - -impl Clone for Extra { - fn clone(&self) -> Extra { - Extra(self.0.clone_box()) - } -} - -impl fmt::Debug for Extra { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Extra").finish() - } -} - -trait ExtraInner: Send + Sync { - fn clone_box(&self) -> Box; - fn set(&self, res: &mut Extensions); -} - -// This indirection allows the `Connected` to have a type-erased "extra" value, -// while that type still knows its inner extra type. 
This allows the correct -// TypeId to be used when inserting into `res.extensions_mut()`. -#[derive(Clone)] -struct ExtraEnvelope(T); - -impl ExtraInner for ExtraEnvelope -where - T: Clone + Send + Sync + 'static, -{ - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } - - fn set(&self, res: &mut Extensions) { - res.insert(self.0.clone()); - } -} - -struct ExtraChain(Box, T); - -impl Clone for ExtraChain { - fn clone(&self) -> Self { - ExtraChain(self.0.clone_box(), self.1.clone()) - } -} - -impl ExtraInner for ExtraChain -where - T: Clone + Send + Sync + 'static, -{ - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } - - fn set(&self, res: &mut Extensions) { - self.0.set(res); - res.insert(self.1.clone()); - } -} - -pub(super) mod sealed { - use std::error::Error as StdError; - use std::future::Future; - - use ::http::Uri; - use hyper::rt::{Read, Write}; - - use super::Connection; - - /// Connect to a destination, returning an IO transport. - /// - /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the - /// ready connection. - /// - /// # Trait Alias - /// - /// This is really just an *alias* for the `tower::Service` trait, with - /// additional bounds set for convenience *inside* hyper. You don't actually - /// implement this trait, but `tower::Service` instead. - // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot - // fit the `Connect` bounds because of the blanket impl for `Service`. 
- pub trait Connect: Sealed + Sized { - #[doc(hidden)] - type _Svc: ConnectSvc; - #[doc(hidden)] - fn connect(self, internal_only: Internal, dst: Uri) -> ::Future; - } - - pub trait ConnectSvc { - type Connection: Read + Write + Connection + Unpin + Send + 'static; - type Error: Into>; - type Future: Future> + Unpin + Send + 'static; - - fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future; - } - - impl Connect for S - where - S: tower_service::Service + Send + 'static, - S::Error: Into>, - S::Future: Unpin + Send, - T: Read + Write + Connection + Unpin + Send + 'static, - { - type _Svc = S; - - fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot { - crate::service::Oneshot::new(self, dst) - } - } - - impl ConnectSvc for S - where - S: tower_service::Service + Send + 'static, - S::Error: Into>, - S::Future: Unpin + Send, - T: Read + Write + Connection + Unpin + Send + 'static, - { - type Connection = T; - type Error = S::Error; - type Future = crate::service::Oneshot; - - fn connect(self, _: Internal, dst: Uri) -> Self::Future { - crate::service::Oneshot::new(self, dst) - } - } - - impl Sealed for S - where - S: tower_service::Service + Send, - S::Error: Into>, - S::Future: Unpin + Send, - T: Read + Write + Connection + Unpin + Send + 'static, - { - } - - pub trait Sealed {} - #[allow(missing_debug_implementations)] - pub struct Internal; -} - -#[cfg(test)] -mod tests { - use super::Connected; - - #[derive(Clone, Debug, PartialEq)] - struct Ex1(usize); - - #[derive(Clone, Debug, PartialEq)] - struct Ex2(&'static str); - - #[derive(Clone, Debug, PartialEq)] - struct Ex3(&'static str); - - #[test] - fn test_connected_extra() { - let c1 = Connected::new().extra(Ex1(41)); - - let mut ex = ::http::Extensions::new(); - - assert_eq!(ex.get::(), None); - - c1.extra.as_ref().expect("c1 extra").set(&mut ex); - - assert_eq!(ex.get::(), Some(&Ex1(41))); - } - - #[test] - fn test_connected_extra_chain() { - // If a user composes connectors and at 
each stage, there's "extra" - // info to attach, it shouldn't override the previous extras. - - let c1 = Connected::new() - .extra(Ex1(45)) - .extra(Ex2("zoom")) - .extra(Ex3("pew pew")); - - let mut ex1 = ::http::Extensions::new(); - - assert_eq!(ex1.get::(), None); - assert_eq!(ex1.get::(), None); - assert_eq!(ex1.get::(), None); - - c1.extra.as_ref().expect("c1 extra").set(&mut ex1); - - assert_eq!(ex1.get::(), Some(&Ex1(45))); - assert_eq!(ex1.get::(), Some(&Ex2("zoom"))); - assert_eq!(ex1.get::(), Some(&Ex3("pew pew"))); - - // Just like extensions, inserting the same type overrides previous type. - let c2 = Connected::new() - .extra(Ex1(33)) - .extra(Ex2("hiccup")) - .extra(Ex1(99)); - - let mut ex2 = ::http::Extensions::new(); - - c2.extra.as_ref().expect("c2 extra").set(&mut ex2); - - assert_eq!(ex2.get::(), Some(&Ex1(99))); - assert_eq!(ex2.get::(), Some(&Ex2("hiccup"))); - } -} diff --git a/src/client/legacy/pool.rs b/src/client/legacy/pool.rs index 156f0572..b0f07483 100644 --- a/src/client/legacy/pool.rs +++ b/src/client/legacy/pool.rs @@ -1,1115 +1,3 @@ -#![allow(dead_code)] - -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::Infallible; -use std::error::Error as StdError; -use std::fmt::{self, Debug}; -use std::future::Future; -use std::hash::Hash; -use std::ops::{Deref, DerefMut}; -use std::pin::Pin; -use std::sync::{Arc, Mutex, Weak}; -use std::task::{self, ready, Poll}; - -use std::time::{Duration, Instant}; - -use futures_channel::oneshot; -use tracing::{debug, trace}; - -use hyper::rt::Timer as _; - -use crate::common::{exec, exec::Exec, timer::Timer}; - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub struct Pool { - // If the pool is disabled, this is None. - inner: Option>>>, -} - -// Before using a pooled connection, make sure the sender is not dead. -// -// This is a trait to allow the `client::pool::tests` to work for `i32`. 
-// -// See https://github.com/hyperium/hyper/issues/1429 -pub trait Poolable: Unpin + Send + Sized + 'static { - fn is_open(&self) -> bool; - /// Reserve this connection. - /// - /// Allows for HTTP/2 to return a shared reservation. - fn reserve(self) -> Reservation; - fn can_share(&self) -> bool; -} - -pub trait Key: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} - -impl Key for T where T: Eq + Hash + Clone + Debug + Unpin + Send + 'static {} - -/// A marker to identify what version a pooled connection is. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -#[allow(dead_code)] -pub enum Ver { - Auto, - Http2, -} - -/// When checking out a pooled connection, it might be that the connection -/// only supports a single reservation, or it might be usable for many. -/// -/// Specifically, HTTP/1 requires a unique reservation, but HTTP/2 can be -/// used for multiple requests. -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub enum Reservation { - /// This connection could be used multiple times, the first one will be - /// reinserted into the `idle` pool, and the second will be given to - /// the `Checkout`. - #[cfg(feature = "http2")] - Shared(T, T), - /// This connection requires unique access. It will be returned after - /// use is complete. - Unique(T), -} - -/// Simple type alias in case the key type needs to be adjusted. -// pub type Key = (http::uri::Scheme, http::uri::Authority); //Arc; - -struct PoolInner { - // A flag that a connection is being established, and the connection - // should be shared. This prevents making multiple HTTP/2 connections - // to the same host. - connecting: HashSet, - // These are internal Conns sitting in the event loop in the KeepAlive - // state, waiting to receive a new Request to send on the socket. - idle: HashMap>>, - max_idle_per_host: usize, - // These are outstanding Checkouts that are waiting for a socket to be - // able to send a Request one. 
This is used when "racing" for a new - // connection. - // - // The Client starts 2 tasks, 1 to connect a new socket, and 1 to wait - // for the Pool to receive an idle Conn. When a Conn becomes idle, - // this list is checked for any parked Checkouts, and tries to notify - // them that the Conn could be used instead of waiting for a brand new - // connection. - waiters: HashMap>>, - // A oneshot channel is used to allow the interval to be notified when - // the Pool completely drops. That way, the interval can cancel immediately. - idle_interval_ref: Option>, - exec: Exec, - timer: Option, - timeout: Option, -} - -// This is because `Weak::new()` *allocates* space for `T`, even if it -// doesn't need it! -struct WeakOpt(Option>); - -#[derive(Clone, Copy, Debug)] -pub struct Config { - pub idle_timeout: Option, - pub max_idle_per_host: usize, -} - -impl Config { - pub fn is_enabled(&self) -> bool { - self.max_idle_per_host > 0 - } -} - -impl Pool { - pub fn new(config: Config, executor: E, timer: Option) -> Pool - where - E: hyper::rt::Executor + Send + Sync + Clone + 'static, - M: hyper::rt::Timer + Send + Sync + Clone + 'static, - { - let exec = Exec::new(executor); - let timer = timer.map(|t| Timer::new(t)); - let inner = if config.is_enabled() { - Some(Arc::new(Mutex::new(PoolInner { - connecting: HashSet::new(), - idle: HashMap::new(), - idle_interval_ref: None, - max_idle_per_host: config.max_idle_per_host, - waiters: HashMap::new(), - exec, - timer, - timeout: config.idle_timeout, - }))) - } else { - None - }; - - Pool { inner } - } - - pub(crate) fn is_enabled(&self) -> bool { - self.inner.is_some() - } - - #[cfg(test)] - pub(super) fn no_timer(&self) { - // Prevent an actual interval from being created for this pool... 
- { - let mut inner = self.inner.as_ref().unwrap().lock().unwrap(); - assert!(inner.idle_interval_ref.is_none(), "timer already spawned"); - let (tx, _) = oneshot::channel(); - inner.idle_interval_ref = Some(tx); - } - } -} - -impl Pool { - /// Returns a `Checkout` which is a future that resolves if an idle - /// connection becomes available. - pub fn checkout(&self, key: K) -> Checkout { - Checkout { - key, - pool: self.clone(), - waiter: None, - } - } - - /// Ensure that there is only ever 1 connecting task for HTTP/2 - /// connections. This does nothing for HTTP/1. - pub fn connecting(&self, key: &K, ver: Ver) -> Option> { - if ver == Ver::Http2 { - if let Some(ref enabled) = self.inner { - let mut inner = enabled.lock().unwrap(); - return if inner.connecting.insert(key.clone()) { - let connecting = Connecting { - key: key.clone(), - pool: WeakOpt::downgrade(enabled), - }; - Some(connecting) - } else { - trace!("HTTP/2 connecting already in progress for {:?}", key); - None - }; - } - } - - // else - Some(Connecting { - key: key.clone(), - // in HTTP/1's case, there is never a lock, so we don't - // need to do anything in Drop. - pool: WeakOpt::none(), - }) - } - - #[cfg(test)] - fn locked(&self) -> std::sync::MutexGuard<'_, PoolInner> { - self.inner.as_ref().expect("enabled").lock().expect("lock") - } - - /* Used in client/tests.rs... 
- #[cfg(test)] - pub(super) fn h1_key(&self, s: &str) -> Key { - Arc::new(s.to_string()) - } - - #[cfg(test)] - pub(super) fn idle_count(&self, key: &Key) -> usize { - self - .locked() - .idle - .get(key) - .map(|list| list.len()) - .unwrap_or(0) - } - */ - - pub fn pooled( - &self, - #[cfg_attr(not(feature = "http2"), allow(unused_mut))] mut connecting: Connecting, - value: T, - ) -> Pooled { - let (value, pool_ref) = if let Some(ref enabled) = self.inner { - match value.reserve() { - #[cfg(feature = "http2")] - Reservation::Shared(to_insert, to_return) => { - let mut inner = enabled.lock().unwrap(); - inner.put(connecting.key.clone(), to_insert, enabled); - // Do this here instead of Drop for Connecting because we - // already have a lock, no need to lock the mutex twice. - inner.connected(&connecting.key); - // prevent the Drop of Connecting from repeating inner.connected() - connecting.pool = WeakOpt::none(); - - // Shared reservations don't need a reference to the pool, - // since the pool always keeps a copy. - (to_return, WeakOpt::none()) - } - Reservation::Unique(value) => { - // Unique reservations must take a reference to the pool - // since they hope to reinsert once the reservation is - // completed - (value, WeakOpt::downgrade(enabled)) - } - } - } else { - // If pool is not enabled, skip all the things... - - // The Connecting should have had no pool ref - debug_assert!(connecting.pool.upgrade().is_none()); - - (value, WeakOpt::none()) - }; - Pooled { - key: connecting.key.clone(), - is_reused: false, - pool: pool_ref, - value: Some(value), - } - } - - fn reuse(&self, key: &K, value: T) -> Pooled { - debug!("reuse idle connection for {:?}", key); - // TODO: unhack this - // In Pool::pooled(), which is used for inserting brand new connections, - // there's some code that adjusts the pool reference taken depending - // on if the Reservation can be shared or is unique. 
By the time - // reuse() is called, the reservation has already been made, and - // we just have the final value, without knowledge of if this is - // unique or shared. So, the hack is to just assume Ver::Http2 means - // shared... :( - let mut pool_ref = WeakOpt::none(); - if !value.can_share() { - if let Some(ref enabled) = self.inner { - pool_ref = WeakOpt::downgrade(enabled); - } - } - - Pooled { - is_reused: true, - key: key.clone(), - pool: pool_ref, - value: Some(value), - } - } -} - -/// Pop off this list, looking for a usable connection that hasn't expired. -struct IdlePopper<'a, T, K> { - key: &'a K, - list: &'a mut Vec>, -} - -impl<'a, T: Poolable + 'a, K: Debug> IdlePopper<'a, T, K> { - fn pop(self, expiration: &Expiration, now: Instant) -> Option> { - while let Some(entry) = self.list.pop() { - // If the connection has been closed, or is older than our idle - // timeout, simply drop it and keep looking... - if !entry.value.is_open() { - trace!("removing closed connection for {:?}", self.key); - continue; - } - // TODO: Actually, since the `idle` list is pushed to the end always, - // that would imply that if *this* entry is expired, then anything - // "earlier" in the list would *have* to be expired also... Right? - // - // In that case, we could just break out of the loop and drop the - // whole list... 
- if expiration.expires(entry.idle_at, now) { - trace!("removing expired connection for {:?}", self.key); - continue; - } - - let value = match entry.value.reserve() { - #[cfg(feature = "http2")] - Reservation::Shared(to_reinsert, to_checkout) => { - self.list.push(Idle { - idle_at: now, - value: to_reinsert, - }); - to_checkout - } - Reservation::Unique(unique) => unique, - }; - - return Some(Idle { - idle_at: entry.idle_at, - value, - }); - } - - None - } -} - -impl PoolInner { - fn now(&self) -> Instant { - self.timer - .as_ref() - .map_or_else(|| Instant::now(), |t| t.now()) - } - - fn put(&mut self, key: K, value: T, __pool_ref: &Arc>>) { - if value.can_share() && self.idle.contains_key(&key) { - trace!("put; existing idle HTTP/2 connection for {:?}", key); - return; - } - trace!("put; add idle connection for {:?}", key); - let mut remove_waiters = false; - let mut value = Some(value); - if let Some(waiters) = self.waiters.get_mut(&key) { - while let Some(tx) = waiters.pop_front() { - if !tx.is_canceled() { - let reserved = value.take().expect("value already sent"); - let reserved = match reserved.reserve() { - #[cfg(feature = "http2")] - Reservation::Shared(to_keep, to_send) => { - value = Some(to_keep); - to_send - } - Reservation::Unique(uniq) => uniq, - }; - match tx.send(reserved) { - Ok(()) => { - if value.is_none() { - break; - } else { - continue; - } - } - Err(e) => { - value = Some(e); - } - } - } - - trace!("put; removing canceled waiter for {:?}", key); - } - remove_waiters = waiters.is_empty(); - } - if remove_waiters { - self.waiters.remove(&key); - } - - match value { - Some(value) => { - // borrow-check scope... 
- { - let now = self.now(); - let idle_list = self.idle.entry(key.clone()).or_default(); - if self.max_idle_per_host <= idle_list.len() { - trace!("max idle per host for {:?}, dropping connection", key); - return; - } - - debug!("pooling idle connection for {:?}", key); - idle_list.push(Idle { - value, - idle_at: now, - }); - } - - self.spawn_idle_interval(__pool_ref); - } - None => trace!("put; found waiter for {:?}", key), - } - } - - /// A `Connecting` task is complete. Not necessarily successfully, - /// but the lock is going away, so clean up. - fn connected(&mut self, key: &K) { - let existed = self.connecting.remove(key); - debug_assert!(existed, "Connecting dropped, key not in pool.connecting"); - // cancel any waiters. if there are any, it's because - // this Connecting task didn't complete successfully. - // those waiters would never receive a connection. - self.waiters.remove(key); - } - - fn spawn_idle_interval(&mut self, pool_ref: &Arc>>) { - if self.idle_interval_ref.is_some() { - return; - } - let dur = if let Some(dur) = self.timeout { - dur - } else { - return; - }; - if dur == Duration::ZERO { - return; - } - let timer = if let Some(timer) = self.timer.clone() { - timer - } else { - return; - }; - - // While someone might want a shorter duration, and it will be respected - // at checkout time, there's no need to wake up and proactively evict - // faster than this. - const MIN_CHECK: Duration = Duration::from_millis(90); - - let dur = dur.max(MIN_CHECK); - - let (tx, rx) = oneshot::channel(); - self.idle_interval_ref = Some(tx); - - let interval = IdleTask { - timer: timer.clone(), - duration: dur, - pool: WeakOpt::downgrade(pool_ref), - pool_drop_notifier: rx, - }; - - self.exec.execute(interval.run()); - } -} - -impl PoolInner { - /// Any `FutureResponse`s that were created will have made a `Checkout`, - /// and possibly inserted into the pool that it is waiting for an idle - /// connection. 
If a user ever dropped that future, we need to clean out - /// those parked senders. - fn clean_waiters(&mut self, key: &K) { - let mut remove_waiters = false; - if let Some(waiters) = self.waiters.get_mut(key) { - waiters.retain(|tx| !tx.is_canceled()); - remove_waiters = waiters.is_empty(); - } - if remove_waiters { - self.waiters.remove(key); - } - } -} - -impl PoolInner { - /// This should *only* be called by the IdleTask - fn clear_expired(&mut self) { - let dur = self.timeout.expect("interval assumes timeout"); - - let now = self.now(); - //self.last_idle_check_at = now; - - self.idle.retain(|key, values| { - values.retain(|entry| { - if !entry.value.is_open() { - trace!("idle interval evicting closed for {:?}", key); - return false; - } - - // Avoid `Instant::sub` to avoid issues like rust-lang/rust#86470. - if now.saturating_duration_since(entry.idle_at) > dur { - trace!("idle interval evicting expired for {:?}", key); - return false; - } - - // Otherwise, keep this value... - true - }); - - // returning false evicts this key/val - !values.is_empty() - }); - } -} - -impl Clone for Pool { - fn clone(&self) -> Pool { - Pool { - inner: self.inner.clone(), - } - } -} - -/// A wrapped poolable value that tries to reinsert to the Pool on Drop. -// Note: The bounds `T: Poolable` is needed for the Drop impl. 
-pub struct Pooled { - value: Option, - is_reused: bool, - key: K, - pool: WeakOpt>>, -} - -impl Pooled { - pub fn is_reused(&self) -> bool { - self.is_reused - } - - pub fn is_pool_enabled(&self) -> bool { - self.pool.0.is_some() - } - - fn as_ref(&self) -> &T { - self.value.as_ref().expect("not dropped") - } - - fn as_mut(&mut self) -> &mut T { - self.value.as_mut().expect("not dropped") - } -} - -impl Deref for Pooled { - type Target = T; - fn deref(&self) -> &T { - self.as_ref() - } -} - -impl DerefMut for Pooled { - fn deref_mut(&mut self) -> &mut T { - self.as_mut() - } -} - -impl Drop for Pooled { - fn drop(&mut self) { - if let Some(value) = self.value.take() { - if !value.is_open() { - // If we *already* know the connection is done here, - // it shouldn't be re-inserted back into the pool. - return; - } - - if let Some(pool) = self.pool.upgrade() { - if let Ok(mut inner) = pool.lock() { - inner.put(self.key.clone(), value, &pool); - } - } else if !value.can_share() { - trace!("pool dropped, dropping pooled ({:?})", self.key); - } - // Ver::Http2 is already in the Pool (or dead), so we wouldn't - // have an actual reference to the Pool. 
- } - } -} - -impl fmt::Debug for Pooled { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Pooled").field("key", &self.key).finish() - } -} - -struct Idle { - idle_at: Instant, - value: T, -} - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub struct Checkout { - key: K, - pool: Pool, - waiter: Option>, -} - -#[derive(Debug)] -#[non_exhaustive] -pub enum Error { - PoolDisabled, - CheckoutNoLongerWanted, - CheckedOutClosedValue, -} - -impl Error { - pub(super) fn is_canceled(&self) -> bool { - matches!(self, Error::CheckedOutClosedValue) - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str(match self { - Error::PoolDisabled => "pool is disabled", - Error::CheckedOutClosedValue => "checked out connection was closed", - Error::CheckoutNoLongerWanted => "request was canceled", - }) - } -} - -impl StdError for Error {} - -impl Checkout { - fn poll_waiter( - &mut self, - cx: &mut task::Context<'_>, - ) -> Poll, Error>>> { - if let Some(mut rx) = self.waiter.take() { - match Pin::new(&mut rx).poll(cx) { - Poll::Ready(Ok(value)) => { - if value.is_open() { - Poll::Ready(Some(Ok(self.pool.reuse(&self.key, value)))) - } else { - Poll::Ready(Some(Err(Error::CheckedOutClosedValue))) - } - } - Poll::Pending => { - self.waiter = Some(rx); - Poll::Pending - } - Poll::Ready(Err(_canceled)) => { - Poll::Ready(Some(Err(Error::CheckoutNoLongerWanted))) - } - } - } else { - Poll::Ready(None) - } - } - - fn checkout(&mut self, cx: &mut task::Context<'_>) -> Option> { - let entry = { - let mut inner = self.pool.inner.as_ref()?.lock().unwrap(); - let expiration = Expiration::new(inner.timeout); - let now = inner.now(); - let maybe_entry = inner.idle.get_mut(&self.key).and_then(|list| { - trace!("take? 
{:?}: expiration = {:?}", self.key, expiration.0); - // A block to end the mutable borrow on list, - // so the map below can check is_empty() - { - let popper = IdlePopper { - key: &self.key, - list, - }; - popper.pop(&expiration, now) - } - .map(|e| (e, list.is_empty())) - }); - - let (entry, empty) = if let Some((e, empty)) = maybe_entry { - (Some(e), empty) - } else { - // No entry found means nuke the list for sure. - (None, true) - }; - if empty { - //TODO: This could be done with the HashMap::entry API instead. - inner.idle.remove(&self.key); - } - - if entry.is_none() && self.waiter.is_none() { - let (tx, mut rx) = oneshot::channel(); - trace!("checkout waiting for idle connection: {:?}", self.key); - inner - .waiters - .entry(self.key.clone()) - .or_insert_with(VecDeque::new) - .push_back(tx); - - // register the waker with this oneshot - assert!(Pin::new(&mut rx).poll(cx).is_pending()); - self.waiter = Some(rx); - } - - entry - }; - - entry.map(|e| self.pool.reuse(&self.key, e.value)) - } -} - -impl Future for Checkout { - type Output = Result, Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - if let Some(pooled) = ready!(self.poll_waiter(cx)?) 
{ - return Poll::Ready(Ok(pooled)); - } - - if let Some(pooled) = self.checkout(cx) { - Poll::Ready(Ok(pooled)) - } else if !self.pool.is_enabled() { - Poll::Ready(Err(Error::PoolDisabled)) - } else { - // There's a new waiter, already registered in self.checkout() - debug_assert!(self.waiter.is_some()); - Poll::Pending - } - } -} - -impl Drop for Checkout { - fn drop(&mut self) { - if self.waiter.take().is_some() { - trace!("checkout dropped for {:?}", self.key); - if let Some(Ok(mut inner)) = self.pool.inner.as_ref().map(|i| i.lock()) { - inner.clean_waiters(&self.key); - } - } - } -} - -// FIXME: allow() required due to `impl Trait` leaking types to this lint -#[allow(missing_debug_implementations)] -pub struct Connecting { - key: K, - pool: WeakOpt>>, -} - -impl Connecting { - pub fn alpn_h2(self, pool: &Pool) -> Option { - debug_assert!( - self.pool.0.is_none(), - "Connecting::alpn_h2 but already Http2" - ); - - pool.connecting(&self.key, Ver::Http2) - } -} - -impl Drop for Connecting { - fn drop(&mut self) { - if let Some(pool) = self.pool.upgrade() { - // No need to panic on drop, that could abort! - if let Ok(mut inner) = pool.lock() { - inner.connected(&self.key); - } - } - } -} - -struct Expiration(Option); - -impl Expiration { - fn new(dur: Option) -> Expiration { - Expiration(dur) - } - - fn expires(&self, instant: Instant, now: Instant) -> bool { - match self.0 { - // Avoid `Instant::elapsed` to avoid issues like rust-lang/rust#86470. - Some(timeout) => now.saturating_duration_since(instant) > timeout, - None => false, - } - } -} - -struct IdleTask { - timer: Timer, - duration: Duration, - pool: WeakOpt>>, - // This allows the IdleTask to be notified as soon as the entire - // Pool is fully dropped, and shutdown. This channel is never sent on, - // but Err(Canceled) will be received when the Pool is dropped. 
- pool_drop_notifier: oneshot::Receiver, -} - -impl IdleTask { - async fn run(self) { - use futures_util::future; - - let mut sleep = self.timer.sleep_until(self.timer.now() + self.duration); - let mut on_pool_drop = self.pool_drop_notifier; - loop { - match future::select(&mut on_pool_drop, &mut sleep).await { - future::Either::Left(_) => { - // pool dropped, bah-bye - break; - } - future::Either::Right(((), _)) => { - if let Some(inner) = self.pool.upgrade() { - if let Ok(mut inner) = inner.lock() { - trace!("idle interval checking for expired"); - inner.clear_expired(); - } - } - - let deadline = self.timer.now() + self.duration; - self.timer.reset(&mut sleep, deadline); - } - } - } - - trace!("pool closed, canceling idle interval"); - return; - } -} - -impl WeakOpt { - fn none() -> Self { - WeakOpt(None) - } - - fn downgrade(arc: &Arc) -> Self { - WeakOpt(Some(Arc::downgrade(arc))) - } - - fn upgrade(&self) -> Option> { - self.0.as_ref().and_then(Weak::upgrade) - } -} - -#[cfg(test)] -mod tests { - use std::fmt::Debug; - use std::future::Future; - use std::hash::Hash; - use std::pin::Pin; - use std::task::{self, Poll}; - use std::time::Duration; - - use super::{Connecting, Key, Pool, Poolable, Reservation, WeakOpt}; - use crate::rt::{TokioExecutor, TokioTimer}; - - use crate::common::timer; - - #[derive(Clone, Debug, PartialEq, Eq, Hash)] - struct KeyImpl(http::uri::Scheme, http::uri::Authority); - - type KeyTuple = (http::uri::Scheme, http::uri::Authority); - - /// Test unique reservations. 
- #[derive(Debug, PartialEq, Eq)] - struct Uniq(T); - - impl Poolable for Uniq { - fn is_open(&self) -> bool { - true - } - - fn reserve(self) -> Reservation { - Reservation::Unique(self) - } - - fn can_share(&self) -> bool { - false - } - } - - fn c(key: K) -> Connecting { - Connecting { - key, - pool: WeakOpt::none(), - } - } - - fn host_key(s: &str) -> KeyImpl { - KeyImpl(http::uri::Scheme::HTTP, s.parse().expect("host key")) - } - - fn pool_no_timer() -> Pool { - pool_max_idle_no_timer(usize::MAX) - } - - fn pool_max_idle_no_timer(max_idle: usize) -> Pool { - let pool = Pool::new( - super::Config { - idle_timeout: Some(Duration::from_millis(100)), - max_idle_per_host: max_idle, - }, - TokioExecutor::new(), - Option::::None, - ); - pool.no_timer(); - pool - } - - #[tokio::test] - async fn test_pool_checkout_smoke() { - let pool = pool_no_timer(); - let key = host_key("foo"); - let pooled = pool.pooled(c(key.clone()), Uniq(41)); - - drop(pooled); - - match pool.checkout(key).await { - Ok(pooled) => assert_eq!(*pooled, Uniq(41)), - Err(_) => panic!("not ready"), - }; - } - - /// Helper to check if the future is ready after polling once. 
- struct PollOnce<'a, F>(&'a mut F); - - impl Future for PollOnce<'_, F> - where - F: Future> + Unpin, - { - type Output = Option<()>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll { - match Pin::new(&mut self.0).poll(cx) { - Poll::Ready(Ok(_)) => Poll::Ready(Some(())), - Poll::Ready(Err(_)) => Poll::Ready(Some(())), - Poll::Pending => Poll::Ready(None), - } - } - } - - #[tokio::test] - async fn test_pool_checkout_returns_none_if_expired() { - let pool = pool_no_timer(); - let key = host_key("foo"); - let pooled = pool.pooled(c(key.clone()), Uniq(41)); - - drop(pooled); - tokio::time::sleep(pool.locked().timeout.unwrap()).await; - let mut checkout = pool.checkout(key); - let poll_once = PollOnce(&mut checkout); - let is_not_ready = poll_once.await.is_none(); - assert!(is_not_ready); - } - - #[tokio::test] - async fn test_pool_checkout_removes_expired() { - let pool = pool_no_timer(); - let key = host_key("foo"); - - pool.pooled(c(key.clone()), Uniq(41)); - pool.pooled(c(key.clone()), Uniq(5)); - pool.pooled(c(key.clone()), Uniq(99)); - - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(3) - ); - tokio::time::sleep(pool.locked().timeout.unwrap()).await; - - let mut checkout = pool.checkout(key.clone()); - let poll_once = PollOnce(&mut checkout); - // checkout.await should clean out the expired - poll_once.await; - assert!(!pool.locked().idle.contains_key(&key)); - } - - #[test] - fn test_pool_max_idle_per_host() { - let pool = pool_max_idle_no_timer(2); - let key = host_key("foo"); - - pool.pooled(c(key.clone()), Uniq(41)); - pool.pooled(c(key.clone()), Uniq(5)); - pool.pooled(c(key.clone()), Uniq(99)); - - // pooled and dropped 3, max_idle should only allow 2 - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(2) - ); - } - - #[tokio::test] - async fn test_pool_timer_removes_expired_realtime() { - test_pool_timer_removes_expired_inner().await - } - - #[tokio::test(start_paused 
= true)] - async fn test_pool_timer_removes_expired_faketime() { - test_pool_timer_removes_expired_inner().await - } - - async fn test_pool_timer_removes_expired_inner() { - let pool = Pool::new( - super::Config { - idle_timeout: Some(Duration::from_millis(10)), - max_idle_per_host: usize::MAX, - }, - TokioExecutor::new(), - Some(TokioTimer::new()), - ); - - let key = host_key("foo"); - - pool.pooled(c(key.clone()), Uniq(41)); - pool.pooled(c(key.clone()), Uniq(5)); - pool.pooled(c(key.clone()), Uniq(99)); - - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(3) - ); - - // Let the timer tick passed the expiration... - tokio::time::sleep(Duration::from_millis(30)).await; - - // But minimum interval is higher, so nothing should have been reaped - assert_eq!( - pool.locked().idle.get(&key).map(|entries| entries.len()), - Some(3) - ); - - // Now wait passed the minimum interval more - tokio::time::sleep(Duration::from_millis(70)).await; - // Yield in case other task hasn't been able to run :shrug: - tokio::task::yield_now().await; - - assert!(!pool.locked().idle.contains_key(&key)); - } - - #[tokio::test] - async fn test_pool_checkout_task_unparked() { - use futures_util::future::join; - use futures_util::FutureExt; - - let pool = pool_no_timer(); - let key = host_key("foo"); - let pooled = pool.pooled(c(key.clone()), Uniq(41)); - - let checkout = join(pool.checkout(key), async { - // the checkout future will park first, - // and then this lazy future will be polled, which will insert - // the pooled back into the pool - // - // this test makes sure that doing so will unpark the checkout - drop(pooled); - }) - .map(|(entry, _)| entry); - - assert_eq!(*checkout.await.unwrap(), Uniq(41)); - } - - #[tokio::test] - async fn test_pool_checkout_drop_cleans_up_waiters() { - let pool = pool_no_timer::, KeyImpl>(); - let key = host_key("foo"); - - let mut checkout1 = pool.checkout(key.clone()); - let mut checkout2 = pool.checkout(key.clone()); - - 
let poll_once1 = PollOnce(&mut checkout1); - let poll_once2 = PollOnce(&mut checkout2); - - // first poll needed to get into Pool's parked - poll_once1.await; - assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); - poll_once2.await; - assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 2); - - // on drop, clean up Pool - drop(checkout1); - assert_eq!(pool.locked().waiters.get(&key).unwrap().len(), 1); - - drop(checkout2); - assert!(!pool.locked().waiters.contains_key(&key)); - } - - #[derive(Debug)] - struct CanClose { - #[allow(unused)] - val: i32, - closed: bool, - } - - impl Poolable for CanClose { - fn is_open(&self) -> bool { - !self.closed - } - - fn reserve(self) -> Reservation { - Reservation::Unique(self) - } - - fn can_share(&self) -> bool { - false - } - } - - #[test] - fn pooled_drop_if_closed_doesnt_reinsert() { - let pool = pool_no_timer(); - let key = host_key("foo"); - pool.pooled( - c(key.clone()), - CanClose { - val: 57, - closed: true, - }, - ); - - assert!(!pool.locked().idle.contains_key(&key)); - } -} +pub use crate::client::connect::pool::{ + Checkout, Config, Connecting, Error, Key, Pool, Poolable, Pooled, Reservation, Ver, +}; diff --git a/src/client/mod.rs b/src/client/mod.rs index 268cadf0..e7d224f5 100644 --- a/src/client/mod.rs +++ b/src/client/mod.rs @@ -1,5 +1,14 @@ //! HTTP client utilities +/// Connectors used by the `Client`. 
+pub mod connect; + +#[cfg(any(feature = "http1", feature = "http2"))] +mod client; + +#[cfg(any(feature = "http1", feature = "http2"))] +pub use client::{Builder, Client, Error, ResponseFuture}; + /// Legacy implementations of `connect` module and `Client` #[cfg(feature = "client-legacy")] pub mod legacy; diff --git a/src/client/pool/cache.rs b/src/client/pool/cache.rs index 45f5336e..c827edac 100644 --- a/src/client/pool/cache.rs +++ b/src/client/pool/cache.rs @@ -263,25 +263,22 @@ mod internal { future::Either::Left((Ok(pool_got), connecting)) => { events.on_race_lost(BackgroundConnect { future: connecting, - shared: Arc::downgrade(&shared), + shared: Arc::downgrade(shared), }); return Poll::Ready(Ok(Cached::new( pool_got, - Arc::downgrade(&shared), + Arc::downgrade(shared), ))); } future::Either::Right((connected, _waiter)) => { let inner = connected?; - return Poll::Ready(Ok(Cached::new( - inner, - Arc::downgrade(&shared), - ))); + return Poll::Ready(Ok(Cached::new(inner, Arc::downgrade(shared)))); } } } CacheFuture::Connecting { shared, future } => { let inner = ready!(Pin::new(future).poll(cx))?; - return Poll::Ready(Ok(Cached::new(inner, Arc::downgrade(&shared)))); + return Poll::Ready(Ok(Cached::new(inner, Arc::downgrade(shared)))); } CacheFuture::Cached { svc } => { return Poll::Ready(Ok(svc.take().unwrap())); diff --git a/src/common/mod.rs b/src/common/mod.rs index 6586aabf..9340b71d 100644 --- a/src/common/mod.rs +++ b/src/common/mod.rs @@ -1,19 +1,19 @@ #![allow(missing_docs)] pub(crate) mod exec; -#[cfg(feature = "client-legacy")] +#[cfg(any(feature = "client", feature = "client-legacy"))] mod lazy; #[cfg(feature = "server")] // #[cfg(feature = "server-auto")] pub(crate) mod rewind; -#[cfg(feature = "client-legacy")] +#[cfg(any(feature = "client", feature = "client-legacy"))] mod sync; pub(crate) mod timer; -#[cfg(feature = "client-legacy")] +#[cfg(any(feature = "client", feature = "client-legacy"))] pub(crate) use exec::Exec; -#[cfg(feature = 
"client-legacy")] +#[cfg(any(feature = "client", feature = "client-legacy"))] pub(crate) use lazy::{lazy, Started as Lazy}; -#[cfg(feature = "client-legacy")] +#[cfg(any(feature = "client", feature = "client-legacy"))] pub(crate) use sync::SyncWrapper; diff --git a/src/lib.rs b/src/lib.rs index 65bbe465..6fb8d727 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,7 +12,7 @@ mod common; pub mod rt; #[cfg(feature = "server")] pub mod server; -#[cfg(any(feature = "service", feature = "client-legacy"))] +#[cfg(any(feature = "service", feature = "client", feature = "client-legacy"))] pub mod service; mod error; diff --git a/src/rt/mod.rs b/src/rt/mod.rs index 71363ccd..278d1f97 100644 --- a/src/rt/mod.rs +++ b/src/rt/mod.rs @@ -9,4 +9,7 @@ pub(crate) use self::io::{read, write_all}; pub mod tokio; #[cfg(feature = "tokio")] -pub use self::tokio::{TokioExecutor, TokioIo, TokioTimer}; +pub use self::tokio::{TokioIo, TokioTimer}; + +#[cfg(feature = "tokio-rt")] +pub use self::tokio::TokioExecutor; diff --git a/src/rt/tokio.rs b/src/rt/tokio.rs index bd5fd50f..1c6c14b7 100644 --- a/src/rt/tokio.rs +++ b/src/rt/tokio.rs @@ -51,6 +51,8 @@ //! [`Write`]: hyper::rt::Write //! [tokio-async-docs]: https://docs.rs/tokio/latest/tokio/#asynchronous-io +use hyper::rt::{Sleep, Timer}; +use pin_project_lite::pin_project; use std::{ future::Future, pin::Pin, @@ -58,8 +60,8 @@ use std::{ time::{Duration, Instant}, }; -use hyper::rt::{Executor, Sleep, Timer}; -use pin_project_lite::pin_project; +#[cfg(feature = "tokio-rt")] +use hyper::rt::Executor; #[cfg(feature = "tracing")] use tracing::instrument::Instrument; @@ -70,6 +72,7 @@ mod with_hyper_io; mod with_tokio_io; /// Future executor that utilises `tokio` threads. +#[cfg(feature = "tokio-rt")] #[non_exhaustive] #[derive(Default, Debug, Clone)] pub struct TokioExecutor {} @@ -102,6 +105,7 @@ pin_project! 
{ // ===== impl TokioExecutor ===== +#[cfg(feature = "tokio-rt")] impl Executor for TokioExecutor where Fut: Future + Send + 'static, @@ -116,6 +120,7 @@ where } } +#[cfg(feature = "tokio-rt")] impl TokioExecutor { /// Create new executor that relies on [`tokio::spawn`] to execute futures. pub fn new() -> Self { @@ -324,7 +329,7 @@ impl TokioSleep { } } -#[cfg(test)] +#[cfg(all(test, feature = "tokio-rt"))] mod tests { use crate::rt::TokioExecutor; use hyper::rt::Executor; diff --git a/src/server/conn/auto/mod.rs b/src/server/conn/auto/mod.rs index 5e334163..00aebbb0 100644 --- a/src/server/conn/auto/mod.rs +++ b/src/server/conn/auto/mod.rs @@ -397,7 +397,7 @@ impl std::ops::Deref for Cow<'_, T> { type Target = T; fn deref(&self) -> &T { match self { - Cow::Borrowed(t) => &*t, + Cow::Borrowed(t) => t, Cow::Owned(ref t) => t, } } diff --git a/src/service/mod.rs b/src/service/mod.rs index 34796431..a7be7b73 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -23,10 +23,10 @@ #[cfg(feature = "service")] mod glue; -#[cfg(any(feature = "client-legacy", feature = "service"))] +#[cfg(any(feature = "client", feature = "client-legacy", feature = "service"))] mod oneshot; #[cfg(feature = "service")] pub use self::glue::{TowerToHyperService, TowerToHyperServiceFuture}; -#[cfg(any(feature = "client-legacy", feature = "service"))] +#[cfg(any(feature = "client", feature = "client-legacy", feature = "service"))] pub(crate) use self::oneshot::Oneshot; diff --git a/tests/legacy_client.rs b/tests/legacy_client.rs index 8f887c9f..cf76fdea 100644 --- a/tests/legacy_client.rs +++ b/tests/legacy_client.rs @@ -1346,8 +1346,8 @@ impl tower_service::Service for MockConnector { // Test for connection error propagation with PR #184. // Simulates a connection failure by setting failed=true and returning a custom io::Error. // Verifies the error propagates through hyper’s client as a hyper::Error(Io, ...). 
-#[cfg(feature = "http1")] #[tokio::test] +#[cfg(feature = "http1")] async fn test_connection_error_propagation_pr184() { // Define the error message for the simulated connection failure. // Reused for creating the error and verifying the result. @@ -1404,8 +1404,8 @@ async fn test_connection_error_propagation_pr184() { // Simulates a connection that returns EOF immediately, causing hyper’s HTTP/1.1 parser // to fail with IncompleteMessage due to no response data. // Uses MockConnector with conn_error=None to keep failed=false, ensuring EOF behavior. -#[cfg(feature = "http1")] #[tokio::test] +#[cfg(feature = "http1")] async fn test_incomplete_message_error_pr184() { // Create an empty IoBuilder to simulate a connection with no data. // No write or read expectations, so poll_read returns EOF (Poll::Ready(Ok(0))). @@ -1464,8 +1464,8 @@ async fn test_incomplete_message_error_pr184() { // Test for a successful HTTP/1.1 connection using a mock connector. // Simulates a server that accepts a request and responds with a 200 OK. // Verifies the client correctly sends the request and receives the response. -#[cfg(feature = "http1")] #[tokio::test] +#[cfg(feature = "http1")] async fn test_successful_connection() { // Define the expected server response: a valid HTTP/1.1 200 OK with no body. let response = b"HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n";