From 6836bb0a997e7e82625ca6967214fce5687c750f Mon Sep 17 00:00:00 2001 From: "J. Nick Koston" Date: Wed, 28 May 2025 20:58:57 -0500 Subject: [PATCH 01/13] Increment version to 3.12.5.dev0 (#11080) --- aiohttp/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aiohttp/__init__.py b/aiohttp/__init__.py index 56201805d30..74da5e01c07 100644 --- a/aiohttp/__init__.py +++ b/aiohttp/__init__.py @@ -1,4 +1,4 @@ -__version__ = "3.12.4" +__version__ = "3.12.5.dev0" from typing import TYPE_CHECKING, Tuple From 6bf919ac774507d6b313e2639e2a752426fb2187 Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 06:39:06 +0000 Subject: [PATCH 02/13] [PR #11081/5da0231f backport][3.13] Revert cache key change for linter (#11084) Co-authored-by: J. Nick Koston --- .github/workflows/ci-cd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 1d44ddda982..1cae0bd57fe 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -49,7 +49,7 @@ jobs: - name: Cache PyPI uses: actions/cache@v4.2.3 with: - key: pip-lint-${{ hashFiles('requirements/*.txt') }}-v4 + key: pip-lint-${{ hashFiles('requirements/*.txt') }} path: ~/.cache/pip restore-keys: | pip-lint- From 0cbdc67384306f8300fbaf5014b0decf14b8fe19 Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 06:42:55 +0000 Subject: [PATCH 03/13] [PR #11081/5da0231f backport][3.12] Revert cache key change for linter (#11083) Co-authored-by: J. 
Nick Koston --- .github/workflows/ci-cd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 1d44ddda982..1cae0bd57fe 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -49,7 +49,7 @@ jobs: - name: Cache PyPI uses: actions/cache@v4.2.3 with: - key: pip-lint-${{ hashFiles('requirements/*.txt') }}-v4 + key: pip-lint-${{ hashFiles('requirements/*.txt') }} path: ~/.cache/pip restore-keys: | pip-lint- From 73cca7c1da9d36c6555fe0a6865a6ccd0002ce3d Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 22:47:14 +0000 Subject: [PATCH 04/13] [PR #11085/51698fb1 backport][3.12] Replace expensive isinstance checks with faster alternatives (#11086) Co-authored-by: J. Nick Koston --- CHANGES/11085.misc.rst | 1 + aiohttp/client_reqrep.py | 4 ++-- aiohttp/cookiejar.py | 4 ++-- aiohttp/multipart.py | 5 +++-- aiohttp/payload.py | 3 ++- 5 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 CHANGES/11085.misc.rst diff --git a/CHANGES/11085.misc.rst b/CHANGES/11085.misc.rst new file mode 100644 index 00000000000..67b1915cfcb --- /dev/null +++ b/CHANGES/11085.misc.rst @@ -0,0 +1 @@ +Improved performance of isinstance checks by using collections.abc types instead of typing module equivalents -- by :user:`bdraco`. 
diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py index a04c86b1c53..75df6d0e115 100644 --- a/aiohttp/client_reqrep.py +++ b/aiohttp/client_reqrep.py @@ -7,6 +7,7 @@ import sys import traceback import warnings +from collections.abc import Mapping as ABCMapping from hashlib import md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType @@ -18,7 +19,6 @@ Iterable, List, Literal, - Mapping, NamedTuple, Optional, Tuple, @@ -1085,7 +1085,7 @@ def update_cookies(self, cookies: Optional[LooseCookies]) -> None: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] - if isinstance(cookies, Mapping): + if isinstance(cookies, ABCMapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py index 696ffddc315..ca32e4123b1 100644 --- a/aiohttp/cookiejar.py +++ b/aiohttp/cookiejar.py @@ -11,6 +11,7 @@ import time import warnings from collections import defaultdict +from collections.abc import Mapping as ABCMapping from http.cookies import BaseCookie, Morsel, SimpleCookie from typing import ( DefaultDict, @@ -18,7 +19,6 @@ Iterable, Iterator, List, - Mapping, Optional, Set, Tuple, @@ -236,7 +236,7 @@ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> No # Don't accept cookies from IPs return - if isinstance(cookies, Mapping): + if isinstance(cookies, ABCMapping): cookies = cookies.items() for name, cookie in cookies: diff --git a/aiohttp/multipart.py b/aiohttp/multipart.py index 231c67c7bb7..90a2a5c28e4 100644 --- a/aiohttp/multipart.py +++ b/aiohttp/multipart.py @@ -6,6 +6,7 @@ import uuid import warnings from collections import deque +from collections.abc import Mapping as ABCMapping, Sequence as ABCSequence from types import TracebackType from typing import ( TYPE_CHECKING, @@ -953,12 +954,12 @@ def append_form( headers: Optional[Mapping[str, str]] = None, ) 
-> Payload: """Helper to append form urlencoded part.""" - assert isinstance(obj, (Sequence, Mapping)) + assert isinstance(obj, (ABCSequence, ABCMapping)) if headers is None: headers = CIMultiDict() - if isinstance(obj, Mapping): + if isinstance(obj, ABCMapping): obj = list(obj.items()) data = urlencode(obj, doseq=True) diff --git a/aiohttp/payload.py b/aiohttp/payload.py index 4a2c7922337..2149b7a4c7a 100644 --- a/aiohttp/payload.py +++ b/aiohttp/payload.py @@ -7,6 +7,7 @@ import sys import warnings from abc import ABC, abstractmethod +from collections.abc import Iterable as ABCIterable from itertools import chain from typing import ( IO, @@ -137,7 +138,7 @@ def register( self._first.append((factory, type)) elif order is Order.normal: self._normal.append((factory, type)) - if isinstance(type, Iterable): + if isinstance(type, ABCIterable): for t in type: self._normal_lookup[t] = factory else: From e004160ab4a2e738361a5509e40717c8830d1502 Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 22:51:12 +0000 Subject: [PATCH 05/13] [PR #11085/51698fb1 backport][3.13] Replace expensive isinstance checks with faster alternatives (#11087) Co-authored-by: J. Nick Koston --- CHANGES/11085.misc.rst | 1 + aiohttp/client_reqrep.py | 4 ++-- aiohttp/cookiejar.py | 4 ++-- aiohttp/multipart.py | 5 +++-- aiohttp/payload.py | 3 ++- 5 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 CHANGES/11085.misc.rst diff --git a/CHANGES/11085.misc.rst b/CHANGES/11085.misc.rst new file mode 100644 index 00000000000..67b1915cfcb --- /dev/null +++ b/CHANGES/11085.misc.rst @@ -0,0 +1 @@ +Improved performance of isinstance checks by using collections.abc types instead of typing module equivalents -- by :user:`bdraco`. 
diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py index a04c86b1c53..75df6d0e115 100644 --- a/aiohttp/client_reqrep.py +++ b/aiohttp/client_reqrep.py @@ -7,6 +7,7 @@ import sys import traceback import warnings +from collections.abc import Mapping as ABCMapping from hashlib import md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType @@ -18,7 +19,6 @@ Iterable, List, Literal, - Mapping, NamedTuple, Optional, Tuple, @@ -1085,7 +1085,7 @@ def update_cookies(self, cookies: Optional[LooseCookies]) -> None: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] - if isinstance(cookies, Mapping): + if isinstance(cookies, ABCMapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py index 696ffddc315..ca32e4123b1 100644 --- a/aiohttp/cookiejar.py +++ b/aiohttp/cookiejar.py @@ -11,6 +11,7 @@ import time import warnings from collections import defaultdict +from collections.abc import Mapping as ABCMapping from http.cookies import BaseCookie, Morsel, SimpleCookie from typing import ( DefaultDict, @@ -18,7 +19,6 @@ Iterable, Iterator, List, - Mapping, Optional, Set, Tuple, @@ -236,7 +236,7 @@ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> No # Don't accept cookies from IPs return - if isinstance(cookies, Mapping): + if isinstance(cookies, ABCMapping): cookies = cookies.items() for name, cookie in cookies: diff --git a/aiohttp/multipart.py b/aiohttp/multipart.py index 231c67c7bb7..90a2a5c28e4 100644 --- a/aiohttp/multipart.py +++ b/aiohttp/multipart.py @@ -6,6 +6,7 @@ import uuid import warnings from collections import deque +from collections.abc import Mapping as ABCMapping, Sequence as ABCSequence from types import TracebackType from typing import ( TYPE_CHECKING, @@ -953,12 +954,12 @@ def append_form( headers: Optional[Mapping[str, str]] = None, ) 
-> Payload: """Helper to append form urlencoded part.""" - assert isinstance(obj, (Sequence, Mapping)) + assert isinstance(obj, (ABCSequence, ABCMapping)) if headers is None: headers = CIMultiDict() - if isinstance(obj, Mapping): + if isinstance(obj, ABCMapping): obj = list(obj.items()) data = urlencode(obj, doseq=True) diff --git a/aiohttp/payload.py b/aiohttp/payload.py index 4a2c7922337..2149b7a4c7a 100644 --- a/aiohttp/payload.py +++ b/aiohttp/payload.py @@ -7,6 +7,7 @@ import sys import warnings from abc import ABC, abstractmethod +from collections.abc import Iterable as ABCIterable from itertools import chain from typing import ( IO, @@ -137,7 +138,7 @@ def register( self._first.append((factory, type)) elif order is Order.normal: self._normal.append((factory, type)) - if isinstance(type, Iterable): + if isinstance(type, ABCIterable): for t in type: self._normal_lookup[t] = factory else: From c7e03efdced9224f1d60bf3992c0767a60a732cf Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 23:25:09 +0000 Subject: [PATCH 06/13] [PR #11088/b1da65e1 backport][3.12] Remove ABC names for isinstance checks (#11089) --- CHANGES/11088.misc.rst | 1 + aiohttp/client_reqrep.py | 4 ++-- aiohttp/cookiejar.py | 4 ++-- aiohttp/multipart.py | 8 +++----- aiohttp/payload.py | 5 ++--- 5 files changed, 10 insertions(+), 12 deletions(-) create mode 120000 CHANGES/11088.misc.rst diff --git a/CHANGES/11088.misc.rst b/CHANGES/11088.misc.rst new file mode 120000 index 00000000000..c9ebf3c31e1 --- /dev/null +++ b/CHANGES/11088.misc.rst @@ -0,0 +1 @@ +11085.misc.rst \ No newline at end of file diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py index 75df6d0e115..e437ef67aff 100644 --- a/aiohttp/client_reqrep.py +++ b/aiohttp/client_reqrep.py @@ -7,7 +7,7 @@ import sys import traceback import warnings -from collections.abc import Mapping as ABCMapping +from collections.abc import Mapping from hashlib import 
md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType @@ -1085,7 +1085,7 @@ def update_cookies(self, cookies: Optional[LooseCookies]) -> None: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] - if isinstance(cookies, ABCMapping): + if isinstance(cookies, Mapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py index ca32e4123b1..a755a893409 100644 --- a/aiohttp/cookiejar.py +++ b/aiohttp/cookiejar.py @@ -11,7 +11,7 @@ import time import warnings from collections import defaultdict -from collections.abc import Mapping as ABCMapping +from collections.abc import Mapping from http.cookies import BaseCookie, Morsel, SimpleCookie from typing import ( DefaultDict, @@ -236,7 +236,7 @@ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> No # Don't accept cookies from IPs return - if isinstance(cookies, ABCMapping): + if isinstance(cookies, Mapping): cookies = cookies.items() for name, cookie in cookies: diff --git a/aiohttp/multipart.py b/aiohttp/multipart.py index 90a2a5c28e4..79f8481ee30 100644 --- a/aiohttp/multipart.py +++ b/aiohttp/multipart.py @@ -6,7 +6,7 @@ import uuid import warnings from collections import deque -from collections.abc import Mapping as ABCMapping, Sequence as ABCSequence +from collections.abc import Mapping, Sequence from types import TracebackType from typing import ( TYPE_CHECKING, @@ -15,9 +15,7 @@ Dict, Iterator, List, - Mapping, Optional, - Sequence, Tuple, Type, Union, @@ -954,12 +952,12 @@ def append_form( headers: Optional[Mapping[str, str]] = None, ) -> Payload: """Helper to append form urlencoded part.""" - assert isinstance(obj, (ABCSequence, ABCMapping)) + assert isinstance(obj, (Sequence, Mapping)) if headers is None: headers = CIMultiDict() - if isinstance(obj, ABCMapping): + if isinstance(obj, Mapping): obj = 
list(obj.items()) data = urlencode(obj, doseq=True) diff --git a/aiohttp/payload.py b/aiohttp/payload.py index 2149b7a4c7a..7180fd2b430 100644 --- a/aiohttp/payload.py +++ b/aiohttp/payload.py @@ -7,7 +7,7 @@ import sys import warnings from abc import ABC, abstractmethod -from collections.abc import Iterable as ABCIterable +from collections.abc import Iterable from itertools import chain from typing import ( IO, @@ -15,7 +15,6 @@ Any, Dict, Final, - Iterable, List, Optional, Set, @@ -138,7 +137,7 @@ def register( self._first.append((factory, type)) elif order is Order.normal: self._normal.append((factory, type)) - if isinstance(type, ABCIterable): + if isinstance(type, Iterable): for t in type: self._normal_lookup[t] = factory else: From fb264818390737f77c20d4504fd2ccd8624bac9b Mon Sep 17 00:00:00 2001 From: "patchback[bot]" <45432694+patchback[bot]@users.noreply.github.com> Date: Thu, 29 May 2025 23:46:03 +0000 Subject: [PATCH 07/13] [PR #11088/b1da65e1 backport][3.13] Remove ABC names for isinstance checks (#11090) --- CHANGES/11088.misc.rst | 1 + aiohttp/client_reqrep.py | 4 ++-- aiohttp/cookiejar.py | 4 ++-- aiohttp/multipart.py | 8 +++----- aiohttp/payload.py | 5 ++--- 5 files changed, 10 insertions(+), 12 deletions(-) create mode 120000 CHANGES/11088.misc.rst diff --git a/CHANGES/11088.misc.rst b/CHANGES/11088.misc.rst new file mode 120000 index 00000000000..c9ebf3c31e1 --- /dev/null +++ b/CHANGES/11088.misc.rst @@ -0,0 +1 @@ +11085.misc.rst \ No newline at end of file diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py index 75df6d0e115..e437ef67aff 100644 --- a/aiohttp/client_reqrep.py +++ b/aiohttp/client_reqrep.py @@ -7,7 +7,7 @@ import sys import traceback import warnings -from collections.abc import Mapping as ABCMapping +from collections.abc import Mapping from hashlib import md5, sha1, sha256 from http.cookies import CookieError, Morsel, SimpleCookie from types import MappingProxyType, TracebackType @@ -1085,7 +1085,7 @@ def 
update_cookies(self, cookies: Optional[LooseCookies]) -> None: c.load(self.headers.get(hdrs.COOKIE, "")) del self.headers[hdrs.COOKIE] - if isinstance(cookies, ABCMapping): + if isinstance(cookies, Mapping): iter_cookies = cookies.items() else: iter_cookies = cookies # type: ignore[assignment] diff --git a/aiohttp/cookiejar.py b/aiohttp/cookiejar.py index ca32e4123b1..a755a893409 100644 --- a/aiohttp/cookiejar.py +++ b/aiohttp/cookiejar.py @@ -11,7 +11,7 @@ import time import warnings from collections import defaultdict -from collections.abc import Mapping as ABCMapping +from collections.abc import Mapping from http.cookies import BaseCookie, Morsel, SimpleCookie from typing import ( DefaultDict, @@ -236,7 +236,7 @@ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> No # Don't accept cookies from IPs return - if isinstance(cookies, ABCMapping): + if isinstance(cookies, Mapping): cookies = cookies.items() for name, cookie in cookies: diff --git a/aiohttp/multipart.py b/aiohttp/multipart.py index 90a2a5c28e4..79f8481ee30 100644 --- a/aiohttp/multipart.py +++ b/aiohttp/multipart.py @@ -6,7 +6,7 @@ import uuid import warnings from collections import deque -from collections.abc import Mapping as ABCMapping, Sequence as ABCSequence +from collections.abc import Mapping, Sequence from types import TracebackType from typing import ( TYPE_CHECKING, @@ -15,9 +15,7 @@ Dict, Iterator, List, - Mapping, Optional, - Sequence, Tuple, Type, Union, @@ -954,12 +952,12 @@ def append_form( headers: Optional[Mapping[str, str]] = None, ) -> Payload: """Helper to append form urlencoded part.""" - assert isinstance(obj, (ABCSequence, ABCMapping)) + assert isinstance(obj, (Sequence, Mapping)) if headers is None: headers = CIMultiDict() - if isinstance(obj, ABCMapping): + if isinstance(obj, Mapping): obj = list(obj.items()) data = urlencode(obj, doseq=True) diff --git a/aiohttp/payload.py b/aiohttp/payload.py index 2149b7a4c7a..7180fd2b430 100644 --- 
a/aiohttp/payload.py +++ b/aiohttp/payload.py @@ -7,7 +7,7 @@ import sys import warnings from abc import ABC, abstractmethod -from collections.abc import Iterable as ABCIterable +from collections.abc import Iterable from itertools import chain from typing import ( IO, @@ -15,7 +15,6 @@ Any, Dict, Final, - Iterable, List, Optional, Set, @@ -138,7 +137,7 @@ def register( self._first.append((factory, type)) elif order is Order.normal: self._normal.append((factory, type)) - if isinstance(type, ABCIterable): + if isinstance(type, Iterable): for t in type: self._normal_lookup[t] = factory else: From fc6cd87433b1f8b0b216d3524897568351d61356 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 May 2025 16:13:21 +0000 Subject: [PATCH 08/13] Bump mypy from 1.15.0 to 1.16.0 (#11092) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [mypy](https://github.com/python/mypy) from 1.15.0 to 1.16.0.
Changelog

Sourced from mypy's changelog.

Mypy Release Notes

Next Release

Mypy 1.16

We’ve just uploaded mypy 1.16 to the Python Package Index (PyPI). Mypy is a static type checker for Python. This release includes new features and bug fixes. You can install it as follows:

python3 -m pip install -U mypy

You can read the full documentation for this release on Read the Docs.

Different Property Getter and Setter Types

Mypy now supports using different types for a property getter and setter:

class A:
    _value: int

    @property
    def foo(self) -> int:
        return self._value

    @foo.setter
    def foo(self, x: str | int) -> None:
        try:
            self._value = int(x)
        except ValueError:
            raise Exception(f"'{x}' is not a valid value for 'foo'")

This was contributed by Ivan Levkivskyi (PR 18510).

Flexible Variable Redefinitions (Experimental)

Mypy now allows unannotated variables to be freely redefined with different types when using the experimental --allow-redefinition-new flag. You will also need to enable --local-partial-types. Mypy will now infer a union type when different types are assigned to a variable:

# mypy: allow-redefinition-new, local-partial-types

def f(n: int, b: bool) -> int | str:
    if b:
        x = n
    else:

... (truncated)

Commits
  • 9e72e96 Update version to 1.16.0
  • 8fe719f Add changelog for 1.16 (#19138)
  • 2a036e7 Revert "Infer correct types with overloads of Type[Guard | Is] (#19161)
  • b6da4fc Allow enum members to have type objects as values (#19160)
  • 334469f [mypyc] Improve documentation of native and non-native classes (#19154)
  • a499d9f Document --allow-redefinition-new (#19153)
  • 96525a2 Merge commit '9e45dadcf6d8dbab36f83d9df94a706c0b4f9207' into release-1.16
  • 9e45dad Clear more data in TypeChecker.reset() instead of asserting (#19087)
  • 772cd0c Add --strict-bytes to --strict (#19049)
  • 0b65f21 Admit that Final variables are never redefined (#19083)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=mypy&package-manager=pip&previous-version=1.15.0&new-version=1.16.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Sam Bull --- aiohttp/abc.py | 2 +- aiohttp/formdata.py | 2 +- aiohttp/web_fileresponse.py | 8 ++++---- aiohttp/web_urldispatcher.py | 4 +++- requirements/constraints.txt | 4 +++- requirements/dev.txt | 4 +++- requirements/lint.txt | 4 +++- requirements/test.txt | 4 +++- 8 files changed, 21 insertions(+), 11 deletions(-) diff --git a/aiohttp/abc.py b/aiohttp/abc.py index c1bf5032d0d..a5e00a952d0 100644 --- a/aiohttp/abc.py +++ b/aiohttp/abc.py @@ -120,7 +120,7 @@ def request(self) -> Request: return self._request @abstractmethod - def __await__(self) -> Generator[Any, None, StreamResponse]: + def __await__(self) -> Generator[None, None, StreamResponse]: """Execute the view handler.""" diff --git a/aiohttp/formdata.py b/aiohttp/formdata.py index bdf591fae7a..a5a4f603e19 100644 --- a/aiohttp/formdata.py +++ b/aiohttp/formdata.py @@ -110,7 +110,7 @@ def add_fields(self, *fields: Any) -> None: elif isinstance(rec, (list, tuple)) and len(rec) == 2: k, fp = rec - self.add_field(k, fp) # type: ignore[arg-type] + self.add_field(k, fp) else: raise TypeError( diff --git a/aiohttp/web_fileresponse.py b/aiohttp/web_fileresponse.py index 344611cc495..26484b9483a 100644 --- a/aiohttp/web_fileresponse.py +++ b/aiohttp/web_fileresponse.py @@ -164,8 +164,8 @@ async def _not_modified( ) -> Optional[AbstractStreamWriter]: self.set_status(HTTPNotModified.status_code) self._length_check = False - self.etag = etag_value # type: ignore[assignment] - self.last_modified = last_modified # type: ignore[assignment] + self.etag = etag_value + self.last_modified = last_modified # Delete any Content-Length headers provided by user. HTTP 304 # should always have empty response body return await super().prepare(request) @@ -395,8 +395,8 @@ async def _prepare_open_file( # compress. 
self._compression = False - self.etag = f"{st.st_mtime_ns:x}-{st.st_size:x}" # type: ignore[assignment] - self.last_modified = file_mtime # type: ignore[assignment] + self.etag = f"{st.st_mtime_ns:x}-{st.st_size:x}" + self.last_modified = file_mtime self.content_length = count self._headers[hdrs.ACCEPT_RANGES] = "bytes" diff --git a/aiohttp/web_urldispatcher.py b/aiohttp/web_urldispatcher.py index 28ae2518fec..61766f0c5c6 100644 --- a/aiohttp/web_urldispatcher.py +++ b/aiohttp/web_urldispatcher.py @@ -194,6 +194,8 @@ def __init__( ): pass elif inspect.isgeneratorfunction(handler): + if TYPE_CHECKING: + assert False warnings.warn( "Bare generators are deprecated, use @coroutine wrapper", DeprecationWarning, @@ -978,7 +980,7 @@ async def _iter(self) -> StreamResponse: assert isinstance(ret, StreamResponse) return ret - def __await__(self) -> Generator[Any, None, StreamResponse]: + def __await__(self) -> Generator[None, None, StreamResponse]: return self._iter().__await__() def _raise_allowed_methods(self) -> NoReturn: diff --git a/requirements/constraints.txt b/requirements/constraints.txt index 4381f614a0c..314f88f8462 100644 --- a/requirements/constraints.txt +++ b/requirements/constraints.txt @@ -120,7 +120,7 @@ multidict==6.4.4 # -r requirements/multidict.in # -r requirements/runtime-deps.in # yarl -mypy==1.15.0 ; implementation_name == "cpython" +mypy==1.16.0 ; implementation_name == "cpython" # via # -r requirements/lint.in # -r requirements/test.in @@ -134,6 +134,8 @@ packaging==25.0 # gunicorn # pytest # sphinx +pathspec==0.12.1 + # via mypy pip-tools==7.4.1 # via -r requirements/dev.in pkgconfig==1.5.5 diff --git a/requirements/dev.txt b/requirements/dev.txt index 621605dd83f..55f1b831543 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -117,7 +117,7 @@ multidict==6.4.4 # via # -r requirements/runtime-deps.in # yarl -mypy==1.15.0 ; implementation_name == "cpython" +mypy==1.16.0 ; implementation_name == "cpython" # via # -r requirements/lint.in 
# -r requirements/test.in @@ -131,6 +131,8 @@ packaging==25.0 # gunicorn # pytest # sphinx +pathspec==0.12.1 + # via mypy pip-tools==7.4.1 # via -r requirements/dev.in pkgconfig==1.5.5 diff --git a/requirements/lint.txt b/requirements/lint.txt index be3f45bdc97..03f43c3352b 100644 --- a/requirements/lint.txt +++ b/requirements/lint.txt @@ -45,7 +45,7 @@ markdown-it-py==3.0.0 # via rich mdurl==0.1.2 # via markdown-it-py -mypy==1.15.0 ; implementation_name == "cpython" +mypy==1.16.0 ; implementation_name == "cpython" # via -r requirements/lint.in mypy-extensions==1.1.0 # via mypy @@ -53,6 +53,8 @@ nodeenv==1.9.1 # via pre-commit packaging==25.0 # via pytest +pathspec==0.12.1 + # via mypy platformdirs==4.3.8 # via virtualenv pluggy==1.6.0 diff --git a/requirements/test.txt b/requirements/test.txt index fc5c4cb7044..9376171d7c0 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -63,7 +63,7 @@ multidict==6.4.4 # via # -r requirements/runtime-deps.in # yarl -mypy==1.15.0 ; implementation_name == "cpython" +mypy==1.16.0 ; implementation_name == "cpython" # via -r requirements/test.in mypy-extensions==1.1.0 # via mypy @@ -71,6 +71,8 @@ packaging==25.0 # via # gunicorn # pytest +pathspec==0.12.1 + # via mypy pkgconfig==1.5.5 # via -r requirements/test.in pluggy==1.6.0 From 50bb06be197b249ab91a4cef33ef7e10ffd4d95a Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 30 May 2025 21:50:23 -0500 Subject: [PATCH 09/13] Fix SSL shutdown timeout for streaming connections (#11094) --- CHANGES/11091.feature.rst | 1 + CHANGES/11094.feature.rst | 1 + aiohttp/client.py | 3 +- aiohttp/connector.py | 43 ++++++++++++--- docs/client_reference.rst | 25 ++++++++- tests/test_client_functional.py | 65 ++++++++++++++++++++++ tests/test_client_session.py | 28 ++++++++++ tests/test_connector.py | 98 +++++++++++++++++++++++++++++++++ tests/test_proxy.py | 25 ++++++--- 9 files changed, 272 insertions(+), 17 deletions(-) create mode 100644 CHANGES/11091.feature.rst create mode 120000 CHANGES/11094.feature.rst diff --git a/CHANGES/11091.feature.rst b/CHANGES/11091.feature.rst new file mode 100644 index 00000000000..a4db2ddced5 --- /dev/null +++ b/CHANGES/11091.feature.rst @@ -0,0 +1 @@ +Added ``ssl_shutdown_timeout`` parameter to :py:class:`~aiohttp.ClientSession` and :py:class:`~aiohttp.TCPConnector` to control the grace period for SSL shutdown handshake on TLS connections. This helps prevent "connection reset" errors on the server side while avoiding excessive delays during connector cleanup. Note: This parameter only takes effect on Python 3.11+ -- by :user:`bdraco`. diff --git a/CHANGES/11094.feature.rst b/CHANGES/11094.feature.rst new file mode 120000 index 00000000000..a21761406a1 --- /dev/null +++ b/CHANGES/11094.feature.rst @@ -0,0 +1 @@ +11091.feature.rst \ No newline at end of file diff --git a/aiohttp/client.py b/aiohttp/client.py index 20e7ce6cebb..b9dde9df6d0 100644 --- a/aiohttp/client.py +++ b/aiohttp/client.py @@ -297,6 +297,7 @@ def __init__( max_field_size: int = 8190, fallback_charset_resolver: _CharsetResolver = lambda r, b: "utf-8", middlewares: Sequence[ClientMiddlewareType] = (), + ssl_shutdown_timeout: Optional[float] = 0.1, ) -> None: # We initialise _connector to None immediately, as it's referenced in __del__() # and could cause issues if an exception occurs during initialisation. 
@@ -323,7 +324,7 @@ def __init__( self._timeout = timeout if connector is None: - connector = TCPConnector() + connector = TCPConnector(ssl_shutdown_timeout=ssl_shutdown_timeout) # Initialize these three attrs before raising any exception, # they are used in __del__ self._connector = connector diff --git a/aiohttp/connector.py b/aiohttp/connector.py index c525ed92191..4f3f22369a5 100644 --- a/aiohttp/connector.py +++ b/aiohttp/connector.py @@ -836,6 +836,12 @@ class TCPConnector(BaseConnector): socket_factory - A SocketFactoryType function that, if supplied, will be used to create sockets given an AddrInfoType. + ssl_shutdown_timeout - Grace period for SSL shutdown handshake on TLS + connections. Default is 0.1 seconds. This usually + allows for a clean SSL shutdown by notifying the + remote peer of connection closure, while avoiding + excessive delays during connector cleanup. + Note: Only takes effect on Python 3.11+. """ allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"tcp"}) @@ -858,6 +864,7 @@ def __init__( happy_eyeballs_delay: Optional[float] = 0.25, interleave: Optional[int] = None, socket_factory: Optional[SocketFactoryType] = None, + ssl_shutdown_timeout: Optional[float] = 0.1, ): super().__init__( keepalive_timeout=keepalive_timeout, @@ -889,6 +896,7 @@ def __init__( self._interleave = interleave self._resolve_host_tasks: Set["asyncio.Task[List[ResolveResult]]"] = set() self._socket_factory = socket_factory + self._ssl_shutdown_timeout = ssl_shutdown_timeout def _close_immediately(self) -> List[Awaitable[object]]: for fut in chain.from_iterable(self._throttle_dns_futures.values()): @@ -1131,6 +1139,13 @@ async def _wrap_create_connection( loop=self._loop, socket_factory=self._socket_factory, ) + # Add ssl_shutdown_timeout for Python 3.11+ when SSL is used + if ( + kwargs.get("ssl") + and self._ssl_shutdown_timeout is not None + and sys.version_info >= (3, 11) + ): + kwargs["ssl_shutdown_timeout"] = self._ssl_shutdown_timeout return await 
self._loop.create_connection(*args, **kwargs, sock=sock) except cert_errors as exc: raise ClientConnectorCertificateError(req.connection_key, exc) from exc @@ -1204,13 +1219,27 @@ async def _start_tls_connection( timeout.sock_connect, ceil_threshold=timeout.ceil_threshold ): try: - tls_transport = await self._loop.start_tls( - underlying_transport, - tls_proto, - sslcontext, - server_hostname=req.server_hostname or req.host, - ssl_handshake_timeout=timeout.total, - ) + # ssl_shutdown_timeout is only available in Python 3.11+ + if ( + sys.version_info >= (3, 11) + and self._ssl_shutdown_timeout is not None + ): + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ssl_shutdown_timeout=self._ssl_shutdown_timeout, + ) + else: + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ) except BaseException: # We need to close the underlying transport since # `start_tls()` probably failed before it had a diff --git a/docs/client_reference.rst b/docs/client_reference.rst index 287eba0e89d..f9806fdf985 100644 --- a/docs/client_reference.rst +++ b/docs/client_reference.rst @@ -57,7 +57,8 @@ The client session supports the context manager protocol for self closing. read_bufsize=2**16, \ max_line_size=8190, \ max_field_size=8190, \ - fallback_charset_resolver=lambda r, b: "utf-8") + fallback_charset_resolver=lambda r, b: "utf-8", \ + ssl_shutdown_timeout=0.1) The class for creating client sessions and making requests. @@ -240,6 +241,16 @@ The client session supports the context manager protocol for self closing. .. versionadded:: 3.8.6 + :param float ssl_shutdown_timeout: Grace period for SSL shutdown handshake on TLS + connections (``0.1`` seconds by default). 
This usually provides sufficient time + to notify the remote peer of connection closure, helping prevent broken + connections on the server side, while minimizing delays during connector + cleanup. This timeout is passed to the underlying :class:`TCPConnector` + when one is created automatically. Note: This parameter only takes effect + on Python 3.11+. + + .. versionadded:: 3.12.5 + .. attribute:: closed ``True`` if the session has been closed, ``False`` otherwise. @@ -1169,7 +1180,7 @@ is controlled by *force_close* constructor's parameter). force_close=False, limit=100, limit_per_host=0, \ enable_cleanup_closed=False, timeout_ceil_threshold=5, \ happy_eyeballs_delay=0.25, interleave=None, loop=None, \ - socket_factory=None) + socket_factory=None, ssl_shutdown_timeout=0.1) Connector for working with *HTTP* and *HTTPS* via *TCP* sockets. @@ -1296,6 +1307,16 @@ is controlled by *force_close* constructor's parameter). .. versionadded:: 3.12 + :param float ssl_shutdown_timeout: Grace period for SSL shutdown on TLS + connections (``0.1`` seconds by default). This parameter balances two + important considerations: usually providing sufficient time to notify + the remote server (which helps prevent "connection reset" errors), + while avoiding unnecessary delays during connector cleanup. + The default value provides a reasonable compromise for most use cases. + Note: This parameter only takes effect on Python 3.11+. + + .. versionadded:: 3.12.5 + .. attribute:: family *TCP* socket family e.g. 
:data:`socket.AF_INET` or diff --git a/tests/test_client_functional.py b/tests/test_client_functional.py index 9433ad2f2bb..ea288657912 100644 --- a/tests/test_client_functional.py +++ b/tests/test_client_functional.py @@ -12,6 +12,7 @@ import tarfile import time import zipfile +from contextlib import suppress from typing import ( Any, AsyncIterator, @@ -704,6 +705,70 @@ async def handler(request: web.Request) -> web.Response: assert txt == "Test message" +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="ssl_shutdown_timeout requires Python 3.11+" +) +async def test_ssl_client_shutdown_timeout( + aiohttp_server: AiohttpServer, + ssl_ctx: ssl.SSLContext, + aiohttp_client: AiohttpClient, + client_ssl_ctx: ssl.SSLContext, +) -> None: + # Test that ssl_shutdown_timeout is properly used during connection closure + + connector = aiohttp.TCPConnector(ssl=client_ssl_ctx, ssl_shutdown_timeout=0.1) + + async def streaming_handler(request: web.Request) -> NoReturn: + # Create a streaming response that continuously sends data + response = web.StreamResponse() + await response.prepare(request) + + # Keep sending data until connection is closed + while True: + await response.write(b"data chunk\n") + await asyncio.sleep(0.01) # Small delay between chunks + + assert False, "not reached" + + app = web.Application() + app.router.add_route("GET", "/stream", streaming_handler) + server = await aiohttp_server(app, ssl=ssl_ctx) + client = await aiohttp_client(server, connector=connector) + + # Verify the connector has the correct timeout + assert connector._ssl_shutdown_timeout == 0.1 + + # Start a streaming request to establish SSL connection with active data transfer + resp = await client.get("/stream") + assert resp.status == 200 + + # Create a background task that continuously reads data + async def read_loop() -> None: + while True: + # Read "data chunk\n" + await resp.content.read(11) + + read_task = asyncio.create_task(read_loop()) + await asyncio.sleep(0) # Yield 
control to ensure read_task starts + + # Record the time before closing + start_time = time.monotonic() + + # Now close the connector while the stream is still active + # This will test the ssl_shutdown_timeout during an active connection + await connector.close() + + # Verify the connection was closed within a reasonable time + # Should be close to ssl_shutdown_timeout (0.1s) but allow some margin + elapsed = time.monotonic() - start_time + assert elapsed < 0.3, f"Connection closure took too long: {elapsed}s" + + read_task.cancel() + with suppress(asyncio.CancelledError): + await read_task + assert read_task.done(), "Read task should be cancelled after connection closure" + + async def test_ssl_client_alpn( aiohttp_server: AiohttpServer, aiohttp_client: AiohttpClient, diff --git a/tests/test_client_session.py b/tests/test_client_session.py index 4f22b6a3851..0f16a0c7735 100644 --- a/tests/test_client_session.py +++ b/tests/test_client_session.py @@ -347,6 +347,34 @@ async def test_create_connector( assert m.called +async def test_ssl_shutdown_timeout_passed_to_connector() -> None: + # Test default value + async with ClientSession() as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout == 0.1 + + # Test custom value + async with ClientSession(ssl_shutdown_timeout=1.0) as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout == 1.0 + + # Test None value + async with ClientSession(ssl_shutdown_timeout=None) as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout is None + + # Test that it doesn't affect when custom connector is provided + custom_conn = TCPConnector(ssl_shutdown_timeout=2.0) + async with ClientSession( + connector=custom_conn, ssl_shutdown_timeout=1.0 + ) as session: + assert session.connector is not None + assert isinstance(session.connector, TCPConnector) + assert ( + 
session.connector._ssl_shutdown_timeout == 2.0 + ) # Should use connector's value + + def test_connector_loop(loop: asyncio.AbstractEventLoop) -> None: with contextlib.ExitStack() as stack: another_loop = asyncio.new_event_loop() diff --git a/tests/test_connector.py b/tests/test_connector.py index 5a342ef9641..ab854545882 100644 --- a/tests/test_connector.py +++ b/tests/test_connector.py @@ -2061,6 +2061,104 @@ async def test_tcp_connector_ctor(loop: asyncio.AbstractEventLoop) -> None: await conn.close() +async def test_tcp_connector_ssl_shutdown_timeout( + loop: asyncio.AbstractEventLoop, +) -> None: + # Test default value + conn = aiohttp.TCPConnector() + assert conn._ssl_shutdown_timeout == 0.1 + await conn.close() + + # Test custom value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=1.0) + assert conn._ssl_shutdown_timeout == 1.0 + await conn.close() + + # Test None value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=None) + assert conn._ssl_shutdown_timeout is None + await conn.close() + + +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="ssl_shutdown_timeout requires Python 3.11+" +) +async def test_tcp_connector_ssl_shutdown_timeout_passed_to_create_connection( + loop: asyncio.AbstractEventLoop, start_connection: mock.AsyncMock +) -> None: + # Test that ssl_shutdown_timeout is passed to create_connection for SSL connections + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + assert create_connection.call_args.kwargs["ssl_shutdown_timeout"] == 2.5 + + await conn.close() + + # Test with None value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=None) + + with mock.patch.object( + conn._loop, "create_connection", 
autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + # When ssl_shutdown_timeout is None, it should not be in kwargs + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + # Test that ssl_shutdown_timeout is NOT passed for non-SSL connections + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("http://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + # For non-SSL connections, ssl_shutdown_timeout should not be passed + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + +@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Test for Python < 3.11") +async def test_tcp_connector_ssl_shutdown_timeout_not_passed_pre_311( + loop: asyncio.AbstractEventLoop, start_connection: mock.AsyncMock +) -> None: + # Test that ssl_shutdown_timeout is NOT passed to create_connection on Python < 3.11 + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + # Test with HTTPS + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + with closing(await conn.connect(req, [], ClientTimeout())): + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + # Test with HTTP + req = ClientRequest("GET", URL("http://example.com"), loop=loop) + with closing(await conn.connect(req, [], ClientTimeout())): + assert "ssl_shutdown_timeout" not in 
create_connection.call_args.kwargs + + await conn.close() + + async def test_tcp_connector_allowed_protocols(loop: asyncio.AbstractEventLoop) -> None: conn = aiohttp.TCPConnector() assert conn.allowed_protocol_schema_set == {"", "tcp", "http", "https", "ws", "wss"} diff --git a/tests/test_proxy.py b/tests/test_proxy.py index 906e9128995..6094cdcb894 100644 --- a/tests/test_proxy.py +++ b/tests/test_proxy.py @@ -2,6 +2,7 @@ import gc import socket import ssl +import sys import unittest from unittest import mock @@ -1044,13 +1045,23 @@ async def make_conn() -> aiohttp.TCPConnector: ) ) - tls_m.assert_called_with( - mock.ANY, - mock.ANY, - _SSL_CONTEXT_VERIFIED, - server_hostname="www.python.org", - ssl_handshake_timeout=mock.ANY, - ) + if sys.version_info >= (3, 11): + tls_m.assert_called_with( + mock.ANY, + mock.ANY, + _SSL_CONTEXT_VERIFIED, + server_hostname="www.python.org", + ssl_handshake_timeout=mock.ANY, + ssl_shutdown_timeout=0.1, + ) + else: + tls_m.assert_called_with( + mock.ANY, + mock.ANY, + _SSL_CONTEXT_VERIFIED, + server_hostname="www.python.org", + ssl_handshake_timeout=mock.ANY, + ) self.assertEqual(req.url.path, "/") self.assertEqual(proxy_req.method, "CONNECT") From 8efe84e9c08f238cb13fcc818fbfb85fbaacc5e8 Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 30 May 2025 22:05:59 -0500 Subject: [PATCH 10/13] [PR #11094/50bb06b backport][3.12] Fix SSL shutdown timeout for streaming connections (#11095) --- CHANGES/11091.feature.rst | 1 + CHANGES/11094.feature.rst | 1 + aiohttp/client.py | 3 +- aiohttp/connector.py | 43 ++++++++++++--- docs/client_reference.rst | 25 ++++++++- tests/test_client_functional.py | 65 ++++++++++++++++++++++ tests/test_client_session.py | 30 +++++++++- tests/test_connector.py | 98 +++++++++++++++++++++++++++++++++ tests/test_proxy.py | 24 +++++--- 9 files changed, 272 insertions(+), 18 deletions(-) create mode 100644 CHANGES/11091.feature.rst create mode 120000 CHANGES/11094.feature.rst diff --git a/CHANGES/11091.feature.rst b/CHANGES/11091.feature.rst new file mode 100644 index 00000000000..a4db2ddced5 --- /dev/null +++ b/CHANGES/11091.feature.rst @@ -0,0 +1 @@ +Added ``ssl_shutdown_timeout`` parameter to :py:class:`~aiohttp.ClientSession` and :py:class:`~aiohttp.TCPConnector` to control the grace period for SSL shutdown handshake on TLS connections. This helps prevent "connection reset" errors on the server side while avoiding excessive delays during connector cleanup. Note: This parameter only takes effect on Python 3.11+ -- by :user:`bdraco`. 
diff --git a/CHANGES/11094.feature.rst b/CHANGES/11094.feature.rst new file mode 120000 index 00000000000..a21761406a1 --- /dev/null +++ b/CHANGES/11094.feature.rst @@ -0,0 +1 @@ +11091.feature.rst \ No newline at end of file diff --git a/aiohttp/client.py b/aiohttp/client.py index 3b2cd2796cc..6457248d5ea 100644 --- a/aiohttp/client.py +++ b/aiohttp/client.py @@ -303,6 +303,7 @@ def __init__( max_field_size: int = 8190, fallback_charset_resolver: _CharsetResolver = lambda r, b: "utf-8", middlewares: Sequence[ClientMiddlewareType] = (), + ssl_shutdown_timeout: Optional[float] = 0.1, ) -> None: # We initialise _connector to None immediately, as it's referenced in __del__() # and could cause issues if an exception occurs during initialisation. @@ -361,7 +362,7 @@ def __init__( ) if connector is None: - connector = TCPConnector(loop=loop) + connector = TCPConnector(ssl_shutdown_timeout=ssl_shutdown_timeout) if connector._loop is not loop: raise RuntimeError("Session and connector has to use same event loop") diff --git a/aiohttp/connector.py b/aiohttp/connector.py index 926a62684f6..6fa75d31a98 100644 --- a/aiohttp/connector.py +++ b/aiohttp/connector.py @@ -879,6 +879,12 @@ class TCPConnector(BaseConnector): socket_factory - A SocketFactoryType function that, if supplied, will be used to create sockets given an AddrInfoType. + ssl_shutdown_timeout - Grace period for SSL shutdown handshake on TLS + connections. Default is 0.1 seconds. This usually + allows for a clean SSL shutdown by notifying the + remote peer of connection closure, while avoiding + excessive delays during connector cleanup. + Note: Only takes effect on Python 3.11+. 
""" allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"tcp"}) @@ -905,6 +911,7 @@ def __init__( happy_eyeballs_delay: Optional[float] = 0.25, interleave: Optional[int] = None, socket_factory: Optional[SocketFactoryType] = None, + ssl_shutdown_timeout: Optional[float] = 0.1, ): super().__init__( keepalive_timeout=keepalive_timeout, @@ -932,6 +939,7 @@ def __init__( self._interleave = interleave self._resolve_host_tasks: Set["asyncio.Task[List[ResolveResult]]"] = set() self._socket_factory = socket_factory + self._ssl_shutdown_timeout = ssl_shutdown_timeout def _close(self) -> List[Awaitable[object]]: """Close all ongoing DNS calls.""" @@ -1176,6 +1184,13 @@ async def _wrap_create_connection( loop=self._loop, socket_factory=self._socket_factory, ) + # Add ssl_shutdown_timeout for Python 3.11+ when SSL is used + if ( + kwargs.get("ssl") + and self._ssl_shutdown_timeout is not None + and sys.version_info >= (3, 11) + ): + kwargs["ssl_shutdown_timeout"] = self._ssl_shutdown_timeout return await self._loop.create_connection(*args, **kwargs, sock=sock) except cert_errors as exc: raise ClientConnectorCertificateError(req.connection_key, exc) from exc @@ -1314,13 +1329,27 @@ async def _start_tls_connection( timeout.sock_connect, ceil_threshold=timeout.ceil_threshold ): try: - tls_transport = await self._loop.start_tls( - underlying_transport, - tls_proto, - sslcontext, - server_hostname=req.server_hostname or req.host, - ssl_handshake_timeout=timeout.total, - ) + # ssl_shutdown_timeout is only available in Python 3.11+ + if ( + sys.version_info >= (3, 11) + and self._ssl_shutdown_timeout is not None + ): + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ssl_shutdown_timeout=self._ssl_shutdown_timeout, + ) + else: + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + 
server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ) except BaseException: # We need to close the underlying transport since # `start_tls()` probably failed before it had a diff --git a/docs/client_reference.rst b/docs/client_reference.rst index 40fd7cdb276..07839686039 100644 --- a/docs/client_reference.rst +++ b/docs/client_reference.rst @@ -57,7 +57,8 @@ The client session supports the context manager protocol for self closing. read_bufsize=2**16, \ max_line_size=8190, \ max_field_size=8190, \ - fallback_charset_resolver=lambda r, b: "utf-8") + fallback_charset_resolver=lambda r, b: "utf-8", \ + ssl_shutdown_timeout=0.1) The class for creating client sessions and making requests. @@ -256,6 +257,16 @@ The client session supports the context manager protocol for self closing. .. versionadded:: 3.8.6 + :param float ssl_shutdown_timeout: Grace period for SSL shutdown handshake on TLS + connections (``0.1`` seconds by default). This usually provides sufficient time + to notify the remote peer of connection closure, helping prevent broken + connections on the server side, while minimizing delays during connector + cleanup. This timeout is passed to the underlying :class:`TCPConnector` + when one is created automatically. Note: This parameter only takes effect + on Python 3.11+. + + .. versionadded:: 3.12.5 + .. attribute:: closed ``True`` if the session has been closed, ``False`` otherwise. @@ -1185,7 +1196,7 @@ is controlled by *force_close* constructor's parameter). force_close=False, limit=100, limit_per_host=0, \ enable_cleanup_closed=False, timeout_ceil_threshold=5, \ happy_eyeballs_delay=0.25, interleave=None, loop=None, \ - socket_factory=None) + socket_factory=None, ssl_shutdown_timeout=0.1) Connector for working with *HTTP* and *HTTPS* via *TCP* sockets. @@ -1312,6 +1323,16 @@ is controlled by *force_close* constructor's parameter). .. 
versionadded:: 3.12 + :param float ssl_shutdown_timeout: Grace period for SSL shutdown on TLS + connections (``0.1`` seconds by default). This parameter balances two + important considerations: usually providing sufficient time to notify + the remote server (which helps prevent "connection reset" errors), + while avoiding unnecessary delays during connector cleanup. + The default value provides a reasonable compromise for most use cases. + Note: This parameter only takes effect on Python 3.11+. + + .. versionadded:: 3.12.5 + .. attribute:: family *TCP* socket family e.g. :data:`socket.AF_INET` or diff --git a/tests/test_client_functional.py b/tests/test_client_functional.py index cb4edd3d1e1..1d91956c4a3 100644 --- a/tests/test_client_functional.py +++ b/tests/test_client_functional.py @@ -12,6 +12,7 @@ import tarfile import time import zipfile +from contextlib import suppress from typing import ( Any, AsyncIterator, @@ -685,6 +686,70 @@ async def handler(request): assert txt == "Test message" +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="ssl_shutdown_timeout requires Python 3.11+" +) +async def test_ssl_client_shutdown_timeout( + aiohttp_server: AiohttpServer, + ssl_ctx: ssl.SSLContext, + aiohttp_client: AiohttpClient, + client_ssl_ctx: ssl.SSLContext, +) -> None: + # Test that ssl_shutdown_timeout is properly used during connection closure + + connector = aiohttp.TCPConnector(ssl=client_ssl_ctx, ssl_shutdown_timeout=0.1) + + async def streaming_handler(request: web.Request) -> NoReturn: + # Create a streaming response that continuously sends data + response = web.StreamResponse() + await response.prepare(request) + + # Keep sending data until connection is closed + while True: + await response.write(b"data chunk\n") + await asyncio.sleep(0.01) # Small delay between chunks + + assert False, "not reached" + + app = web.Application() + app.router.add_route("GET", "/stream", streaming_handler) + server = await aiohttp_server(app, ssl=ssl_ctx) + client = 
await aiohttp_client(server, connector=connector) + + # Verify the connector has the correct timeout + assert connector._ssl_shutdown_timeout == 0.1 + + # Start a streaming request to establish SSL connection with active data transfer + resp = await client.get("/stream") + assert resp.status == 200 + + # Create a background task that continuously reads data + async def read_loop() -> None: + while True: + # Read "data chunk\n" + await resp.content.read(11) + + read_task = asyncio.create_task(read_loop()) + await asyncio.sleep(0) # Yield control to ensure read_task starts + + # Record the time before closing + start_time = time.monotonic() + + # Now close the connector while the stream is still active + # This will test the ssl_shutdown_timeout during an active connection + await connector.close() + + # Verify the connection was closed within a reasonable time + # Should be close to ssl_shutdown_timeout (0.1s) but allow some margin + elapsed = time.monotonic() - start_time + assert elapsed < 0.3, f"Connection closure took too long: {elapsed}s" + + read_task.cancel() + with suppress(asyncio.CancelledError): + await read_task + assert read_task.done(), "Read task should be cancelled after connection closure" + + async def test_ssl_client_alpn( aiohttp_server: AiohttpServer, aiohttp_client: AiohttpClient, diff --git a/tests/test_client_session.py b/tests/test_client_session.py index 56c7a5c0c13..0fdfaee6761 100644 --- a/tests/test_client_session.py +++ b/tests/test_client_session.py @@ -310,7 +310,35 @@ async def test_create_connector(create_session, loop, mocker) -> None: assert connector.close.called -def test_connector_loop(loop) -> None: +async def test_ssl_shutdown_timeout_passed_to_connector() -> None: + # Test default value + async with ClientSession() as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout == 0.1 + + # Test custom value + async with ClientSession(ssl_shutdown_timeout=1.0) as session: + 
assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout == 1.0 + + # Test None value + async with ClientSession(ssl_shutdown_timeout=None) as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout is None + + # Test that it doesn't affect when custom connector is provided + custom_conn = TCPConnector(ssl_shutdown_timeout=2.0) + async with ClientSession( + connector=custom_conn, ssl_shutdown_timeout=1.0 + ) as session: + assert session.connector is not None + assert isinstance(session.connector, TCPConnector) + assert ( + session.connector._ssl_shutdown_timeout == 2.0 + ) # Should use connector's value + + +def test_connector_loop(loop: asyncio.AbstractEventLoop) -> None: with contextlib.ExitStack() as stack: another_loop = asyncio.new_event_loop() stack.enter_context(contextlib.closing(another_loop)) diff --git a/tests/test_connector.py b/tests/test_connector.py index f17ded6d960..3b2d28ea46c 100644 --- a/tests/test_connector.py +++ b/tests/test_connector.py @@ -2002,6 +2002,104 @@ async def test_tcp_connector_ctor() -> None: await conn.close() +async def test_tcp_connector_ssl_shutdown_timeout( + loop: asyncio.AbstractEventLoop, +) -> None: + # Test default value + conn = aiohttp.TCPConnector() + assert conn._ssl_shutdown_timeout == 0.1 + await conn.close() + + # Test custom value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=1.0) + assert conn._ssl_shutdown_timeout == 1.0 + await conn.close() + + # Test None value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=None) + assert conn._ssl_shutdown_timeout is None + await conn.close() + + +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="ssl_shutdown_timeout requires Python 3.11+" +) +async def test_tcp_connector_ssl_shutdown_timeout_passed_to_create_connection( + loop: asyncio.AbstractEventLoop, start_connection: mock.AsyncMock +) -> None: + # Test that ssl_shutdown_timeout is passed to 
create_connection for SSL connections + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + assert create_connection.call_args.kwargs["ssl_shutdown_timeout"] == 2.5 + + await conn.close() + + # Test with None value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=None) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + # When ssl_shutdown_timeout is None, it should not be in kwargs + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + # Test that ssl_shutdown_timeout is NOT passed for non-SSL connections + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("http://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + # For non-SSL connections, ssl_shutdown_timeout should not be passed + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + +@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Test for Python < 3.11") +async def test_tcp_connector_ssl_shutdown_timeout_not_passed_pre_311( + loop: asyncio.AbstractEventLoop, start_connection: mock.AsyncMock +) -> None: + # Test that ssl_shutdown_timeout is NOT passed to create_connection on Python < 3.11 + 
conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + # Test with HTTPS + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + with closing(await conn.connect(req, [], ClientTimeout())): + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + # Test with HTTP + req = ClientRequest("GET", URL("http://example.com"), loop=loop) + with closing(await conn.connect(req, [], ClientTimeout())): + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + async def test_tcp_connector_allowed_protocols(loop: asyncio.AbstractEventLoop) -> None: conn = aiohttp.TCPConnector() assert conn.allowed_protocol_schema_set == {"", "tcp", "http", "https", "ws", "wss"} diff --git a/tests/test_proxy.py b/tests/test_proxy.py index 0e73210f58b..f5ebf6adc4f 100644 --- a/tests/test_proxy.py +++ b/tests/test_proxy.py @@ -936,13 +936,23 @@ async def make_conn(): connector._create_connection(req, None, aiohttp.ClientTimeout()) ) - self.loop.start_tls.assert_called_with( - mock.ANY, - mock.ANY, - _SSL_CONTEXT_VERIFIED, - server_hostname="www.python.org", - ssl_handshake_timeout=mock.ANY, - ) + if sys.version_info >= (3, 11): + self.loop.start_tls.assert_called_with( + mock.ANY, + mock.ANY, + _SSL_CONTEXT_VERIFIED, + server_hostname="www.python.org", + ssl_handshake_timeout=mock.ANY, + ssl_shutdown_timeout=0.1, + ) + else: + self.loop.start_tls.assert_called_with( + mock.ANY, + mock.ANY, + _SSL_CONTEXT_VERIFIED, + server_hostname="www.python.org", + ssl_handshake_timeout=mock.ANY, + ) self.assertEqual(req.url.path, "/") self.assertEqual(proxy_req.method, "CONNECT") From 2237912f2782797876cc1ce931493a89d1171b9f Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 30 May 2025 22:11:11 -0500 Subject: [PATCH 11/13] [PR #11094/50bb06b backport][3.13] Fix SSL shutdown timeout for streaming connections (#11096) --- CHANGES/11091.feature.rst | 1 + CHANGES/11094.feature.rst | 1 + aiohttp/client.py | 3 +- aiohttp/connector.py | 43 ++++++++++++--- docs/client_reference.rst | 25 ++++++++- tests/test_client_functional.py | 65 ++++++++++++++++++++++ tests/test_client_session.py | 30 +++++++++- tests/test_connector.py | 98 +++++++++++++++++++++++++++++++++ tests/test_proxy.py | 24 +++++--- 9 files changed, 272 insertions(+), 18 deletions(-) create mode 100644 CHANGES/11091.feature.rst create mode 120000 CHANGES/11094.feature.rst diff --git a/CHANGES/11091.feature.rst b/CHANGES/11091.feature.rst new file mode 100644 index 00000000000..a4db2ddced5 --- /dev/null +++ b/CHANGES/11091.feature.rst @@ -0,0 +1 @@ +Added ``ssl_shutdown_timeout`` parameter to :py:class:`~aiohttp.ClientSession` and :py:class:`~aiohttp.TCPConnector` to control the grace period for SSL shutdown handshake on TLS connections. This helps prevent "connection reset" errors on the server side while avoiding excessive delays during connector cleanup. Note: This parameter only takes effect on Python 3.11+ -- by :user:`bdraco`. 
diff --git a/CHANGES/11094.feature.rst b/CHANGES/11094.feature.rst new file mode 120000 index 00000000000..a21761406a1 --- /dev/null +++ b/CHANGES/11094.feature.rst @@ -0,0 +1 @@ +11091.feature.rst \ No newline at end of file diff --git a/aiohttp/client.py b/aiohttp/client.py index 3b2cd2796cc..6457248d5ea 100644 --- a/aiohttp/client.py +++ b/aiohttp/client.py @@ -303,6 +303,7 @@ def __init__( max_field_size: int = 8190, fallback_charset_resolver: _CharsetResolver = lambda r, b: "utf-8", middlewares: Sequence[ClientMiddlewareType] = (), + ssl_shutdown_timeout: Optional[float] = 0.1, ) -> None: # We initialise _connector to None immediately, as it's referenced in __del__() # and could cause issues if an exception occurs during initialisation. @@ -361,7 +362,7 @@ def __init__( ) if connector is None: - connector = TCPConnector(loop=loop) + connector = TCPConnector(ssl_shutdown_timeout=ssl_shutdown_timeout) if connector._loop is not loop: raise RuntimeError("Session and connector has to use same event loop") diff --git a/aiohttp/connector.py b/aiohttp/connector.py index 926a62684f6..6fa75d31a98 100644 --- a/aiohttp/connector.py +++ b/aiohttp/connector.py @@ -879,6 +879,12 @@ class TCPConnector(BaseConnector): socket_factory - A SocketFactoryType function that, if supplied, will be used to create sockets given an AddrInfoType. + ssl_shutdown_timeout - Grace period for SSL shutdown handshake on TLS + connections. Default is 0.1 seconds. This usually + allows for a clean SSL shutdown by notifying the + remote peer of connection closure, while avoiding + excessive delays during connector cleanup. + Note: Only takes effect on Python 3.11+. 
""" allowed_protocol_schema_set = HIGH_LEVEL_SCHEMA_SET | frozenset({"tcp"}) @@ -905,6 +911,7 @@ def __init__( happy_eyeballs_delay: Optional[float] = 0.25, interleave: Optional[int] = None, socket_factory: Optional[SocketFactoryType] = None, + ssl_shutdown_timeout: Optional[float] = 0.1, ): super().__init__( keepalive_timeout=keepalive_timeout, @@ -932,6 +939,7 @@ def __init__( self._interleave = interleave self._resolve_host_tasks: Set["asyncio.Task[List[ResolveResult]]"] = set() self._socket_factory = socket_factory + self._ssl_shutdown_timeout = ssl_shutdown_timeout def _close(self) -> List[Awaitable[object]]: """Close all ongoing DNS calls.""" @@ -1176,6 +1184,13 @@ async def _wrap_create_connection( loop=self._loop, socket_factory=self._socket_factory, ) + # Add ssl_shutdown_timeout for Python 3.11+ when SSL is used + if ( + kwargs.get("ssl") + and self._ssl_shutdown_timeout is not None + and sys.version_info >= (3, 11) + ): + kwargs["ssl_shutdown_timeout"] = self._ssl_shutdown_timeout return await self._loop.create_connection(*args, **kwargs, sock=sock) except cert_errors as exc: raise ClientConnectorCertificateError(req.connection_key, exc) from exc @@ -1314,13 +1329,27 @@ async def _start_tls_connection( timeout.sock_connect, ceil_threshold=timeout.ceil_threshold ): try: - tls_transport = await self._loop.start_tls( - underlying_transport, - tls_proto, - sslcontext, - server_hostname=req.server_hostname or req.host, - ssl_handshake_timeout=timeout.total, - ) + # ssl_shutdown_timeout is only available in Python 3.11+ + if ( + sys.version_info >= (3, 11) + and self._ssl_shutdown_timeout is not None + ): + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ssl_shutdown_timeout=self._ssl_shutdown_timeout, + ) + else: + tls_transport = await self._loop.start_tls( + underlying_transport, + tls_proto, + sslcontext, + 
server_hostname=req.server_hostname or req.host, + ssl_handshake_timeout=timeout.total, + ) except BaseException: # We need to close the underlying transport since # `start_tls()` probably failed before it had a diff --git a/docs/client_reference.rst b/docs/client_reference.rst index 40fd7cdb276..07839686039 100644 --- a/docs/client_reference.rst +++ b/docs/client_reference.rst @@ -57,7 +57,8 @@ The client session supports the context manager protocol for self closing. read_bufsize=2**16, \ max_line_size=8190, \ max_field_size=8190, \ - fallback_charset_resolver=lambda r, b: "utf-8") + fallback_charset_resolver=lambda r, b: "utf-8", \ + ssl_shutdown_timeout=0.1) The class for creating client sessions and making requests. @@ -256,6 +257,16 @@ The client session supports the context manager protocol for self closing. .. versionadded:: 3.8.6 + :param float ssl_shutdown_timeout: Grace period for SSL shutdown handshake on TLS + connections (``0.1`` seconds by default). This usually provides sufficient time + to notify the remote peer of connection closure, helping prevent broken + connections on the server side, while minimizing delays during connector + cleanup. This timeout is passed to the underlying :class:`TCPConnector` + when one is created automatically. Note: This parameter only takes effect + on Python 3.11+. + + .. versionadded:: 3.12.5 + .. attribute:: closed ``True`` if the session has been closed, ``False`` otherwise. @@ -1185,7 +1196,7 @@ is controlled by *force_close* constructor's parameter). force_close=False, limit=100, limit_per_host=0, \ enable_cleanup_closed=False, timeout_ceil_threshold=5, \ happy_eyeballs_delay=0.25, interleave=None, loop=None, \ - socket_factory=None) + socket_factory=None, ssl_shutdown_timeout=0.1) Connector for working with *HTTP* and *HTTPS* via *TCP* sockets. @@ -1312,6 +1323,16 @@ is controlled by *force_close* constructor's parameter). .. 
versionadded:: 3.12 + :param float ssl_shutdown_timeout: Grace period for SSL shutdown on TLS + connections (``0.1`` seconds by default). This parameter balances two + important considerations: usually providing sufficient time to notify + the remote server (which helps prevent "connection reset" errors), + while avoiding unnecessary delays during connector cleanup. + The default value provides a reasonable compromise for most use cases. + Note: This parameter only takes effect on Python 3.11+. + + .. versionadded:: 3.12.5 + .. attribute:: family *TCP* socket family e.g. :data:`socket.AF_INET` or diff --git a/tests/test_client_functional.py b/tests/test_client_functional.py index cb4edd3d1e1..1d91956c4a3 100644 --- a/tests/test_client_functional.py +++ b/tests/test_client_functional.py @@ -12,6 +12,7 @@ import tarfile import time import zipfile +from contextlib import suppress from typing import ( Any, AsyncIterator, @@ -685,6 +686,70 @@ async def handler(request): assert txt == "Test message" +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="ssl_shutdown_timeout requires Python 3.11+" +) +async def test_ssl_client_shutdown_timeout( + aiohttp_server: AiohttpServer, + ssl_ctx: ssl.SSLContext, + aiohttp_client: AiohttpClient, + client_ssl_ctx: ssl.SSLContext, +) -> None: + # Test that ssl_shutdown_timeout is properly used during connection closure + + connector = aiohttp.TCPConnector(ssl=client_ssl_ctx, ssl_shutdown_timeout=0.1) + + async def streaming_handler(request: web.Request) -> NoReturn: + # Create a streaming response that continuously sends data + response = web.StreamResponse() + await response.prepare(request) + + # Keep sending data until connection is closed + while True: + await response.write(b"data chunk\n") + await asyncio.sleep(0.01) # Small delay between chunks + + assert False, "not reached" + + app = web.Application() + app.router.add_route("GET", "/stream", streaming_handler) + server = await aiohttp_server(app, ssl=ssl_ctx) + client = 
await aiohttp_client(server, connector=connector) + + # Verify the connector has the correct timeout + assert connector._ssl_shutdown_timeout == 0.1 + + # Start a streaming request to establish SSL connection with active data transfer + resp = await client.get("/stream") + assert resp.status == 200 + + # Create a background task that continuously reads data + async def read_loop() -> None: + while True: + # Read "data chunk\n" + await resp.content.read(11) + + read_task = asyncio.create_task(read_loop()) + await asyncio.sleep(0) # Yield control to ensure read_task starts + + # Record the time before closing + start_time = time.monotonic() + + # Now close the connector while the stream is still active + # This will test the ssl_shutdown_timeout during an active connection + await connector.close() + + # Verify the connection was closed within a reasonable time + # Should be close to ssl_shutdown_timeout (0.1s) but allow some margin + elapsed = time.monotonic() - start_time + assert elapsed < 0.3, f"Connection closure took too long: {elapsed}s" + + read_task.cancel() + with suppress(asyncio.CancelledError): + await read_task + assert read_task.done(), "Read task should be cancelled after connection closure" + + async def test_ssl_client_alpn( aiohttp_server: AiohttpServer, aiohttp_client: AiohttpClient, diff --git a/tests/test_client_session.py b/tests/test_client_session.py index 56c7a5c0c13..0fdfaee6761 100644 --- a/tests/test_client_session.py +++ b/tests/test_client_session.py @@ -310,7 +310,35 @@ async def test_create_connector(create_session, loop, mocker) -> None: assert connector.close.called -def test_connector_loop(loop) -> None: +async def test_ssl_shutdown_timeout_passed_to_connector() -> None: + # Test default value + async with ClientSession() as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout == 0.1 + + # Test custom value + async with ClientSession(ssl_shutdown_timeout=1.0) as session: + 
assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout == 1.0 + + # Test None value + async with ClientSession(ssl_shutdown_timeout=None) as session: + assert isinstance(session.connector, TCPConnector) + assert session.connector._ssl_shutdown_timeout is None + + # Test that it doesn't affect when custom connector is provided + custom_conn = TCPConnector(ssl_shutdown_timeout=2.0) + async with ClientSession( + connector=custom_conn, ssl_shutdown_timeout=1.0 + ) as session: + assert session.connector is not None + assert isinstance(session.connector, TCPConnector) + assert ( + session.connector._ssl_shutdown_timeout == 2.0 + ) # Should use connector's value + + +def test_connector_loop(loop: asyncio.AbstractEventLoop) -> None: with contextlib.ExitStack() as stack: another_loop = asyncio.new_event_loop() stack.enter_context(contextlib.closing(another_loop)) diff --git a/tests/test_connector.py b/tests/test_connector.py index f17ded6d960..3b2d28ea46c 100644 --- a/tests/test_connector.py +++ b/tests/test_connector.py @@ -2002,6 +2002,104 @@ async def test_tcp_connector_ctor() -> None: await conn.close() +async def test_tcp_connector_ssl_shutdown_timeout( + loop: asyncio.AbstractEventLoop, +) -> None: + # Test default value + conn = aiohttp.TCPConnector() + assert conn._ssl_shutdown_timeout == 0.1 + await conn.close() + + # Test custom value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=1.0) + assert conn._ssl_shutdown_timeout == 1.0 + await conn.close() + + # Test None value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=None) + assert conn._ssl_shutdown_timeout is None + await conn.close() + + +@pytest.mark.skipif( + sys.version_info < (3, 11), reason="ssl_shutdown_timeout requires Python 3.11+" +) +async def test_tcp_connector_ssl_shutdown_timeout_passed_to_create_connection( + loop: asyncio.AbstractEventLoop, start_connection: mock.AsyncMock +) -> None: + # Test that ssl_shutdown_timeout is passed to 
create_connection for SSL connections + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + assert create_connection.call_args.kwargs["ssl_shutdown_timeout"] == 2.5 + + await conn.close() + + # Test with None value + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=None) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + # When ssl_shutdown_timeout is None, it should not be in kwargs + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + # Test that ssl_shutdown_timeout is NOT passed for non-SSL connections + conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + req = ClientRequest("GET", URL("http://example.com"), loop=loop) + + with closing(await conn.connect(req, [], ClientTimeout())): + # For non-SSL connections, ssl_shutdown_timeout should not be passed + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + +@pytest.mark.skipif(sys.version_info >= (3, 11), reason="Test for Python < 3.11") +async def test_tcp_connector_ssl_shutdown_timeout_not_passed_pre_311( + loop: asyncio.AbstractEventLoop, start_connection: mock.AsyncMock +) -> None: + # Test that ssl_shutdown_timeout is NOT passed to create_connection on Python < 3.11 + 
conn = aiohttp.TCPConnector(ssl_shutdown_timeout=2.5) + + with mock.patch.object( + conn._loop, "create_connection", autospec=True, spec_set=True + ) as create_connection: + create_connection.return_value = mock.Mock(), mock.Mock() + + # Test with HTTPS + req = ClientRequest("GET", URL("https://example.com"), loop=loop) + with closing(await conn.connect(req, [], ClientTimeout())): + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + # Test with HTTP + req = ClientRequest("GET", URL("http://example.com"), loop=loop) + with closing(await conn.connect(req, [], ClientTimeout())): + assert "ssl_shutdown_timeout" not in create_connection.call_args.kwargs + + await conn.close() + + async def test_tcp_connector_allowed_protocols(loop: asyncio.AbstractEventLoop) -> None: conn = aiohttp.TCPConnector() assert conn.allowed_protocol_schema_set == {"", "tcp", "http", "https", "ws", "wss"} diff --git a/tests/test_proxy.py b/tests/test_proxy.py index 0e73210f58b..f5ebf6adc4f 100644 --- a/tests/test_proxy.py +++ b/tests/test_proxy.py @@ -936,13 +936,23 @@ async def make_conn(): connector._create_connection(req, None, aiohttp.ClientTimeout()) ) - self.loop.start_tls.assert_called_with( - mock.ANY, - mock.ANY, - _SSL_CONTEXT_VERIFIED, - server_hostname="www.python.org", - ssl_handshake_timeout=mock.ANY, - ) + if sys.version_info >= (3, 11): + self.loop.start_tls.assert_called_with( + mock.ANY, + mock.ANY, + _SSL_CONTEXT_VERIFIED, + server_hostname="www.python.org", + ssl_handshake_timeout=mock.ANY, + ssl_shutdown_timeout=0.1, + ) + else: + self.loop.start_tls.assert_called_with( + mock.ANY, + mock.ANY, + _SSL_CONTEXT_VERIFIED, + server_hostname="www.python.org", + ssl_handshake_timeout=mock.ANY, + ) self.assertEqual(req.url.path, "/") self.assertEqual(proxy_req.method, "CONNECT") From d4e62efaccf9af60eb02f660454b2ee201cfb88d Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Fri, 30 May 2025 22:27:29 -0500 Subject: [PATCH 12/13] Release 3.12.5 (#11097) --- CHANGES.rst | 30 ++++++++++++++++++++++++++++++ CHANGES/11085.misc.rst | 1 - CHANGES/11088.misc.rst | 1 - CHANGES/11091.feature.rst | 1 - CHANGES/11094.feature.rst | 1 - aiohttp/__init__.py | 2 +- 6 files changed, 31 insertions(+), 5 deletions(-) delete mode 100644 CHANGES/11085.misc.rst delete mode 120000 CHANGES/11088.misc.rst delete mode 100644 CHANGES/11091.feature.rst delete mode 120000 CHANGES/11094.feature.rst diff --git a/CHANGES.rst b/CHANGES.rst index 8d3bcbac867..360750dd88f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -10,6 +10,36 @@ .. towncrier release notes start +3.12.5 (2025-05-30) +=================== + +Features +-------- + +- Added ``ssl_shutdown_timeout`` parameter to :py:class:`~aiohttp.ClientSession` and :py:class:`~aiohttp.TCPConnector` to control the grace period for SSL shutdown handshake on TLS connections. This helps prevent "connection reset" errors on the server side while avoiding excessive delays during connector cleanup. Note: This parameter only takes effect on Python 3.11+ -- by :user:`bdraco`. + + + *Related issues and pull requests on GitHub:* + :issue:`11091`, :issue:`11094`. + + + + +Miscellaneous internal changes +------------------------------ + +- Improved performance of isinstance checks by using collections.abc types instead of typing module equivalents -- by :user:`bdraco`. + + + *Related issues and pull requests on GitHub:* + :issue:`11085`, :issue:`11088`. + + + + +---- + + 3.12.4 (2025-05-28) =================== diff --git a/CHANGES/11085.misc.rst b/CHANGES/11085.misc.rst deleted file mode 100644 index 67b1915cfcb..00000000000 --- a/CHANGES/11085.misc.rst +++ /dev/null @@ -1 +0,0 @@ -Improved performance of isinstance checks by using collections.abc types instead of typing module equivalents -- by :user:`bdraco`. 
diff --git a/CHANGES/11088.misc.rst b/CHANGES/11088.misc.rst deleted file mode 120000 index c9ebf3c31e1..00000000000 --- a/CHANGES/11088.misc.rst +++ /dev/null @@ -1 +0,0 @@ -11085.misc.rst \ No newline at end of file diff --git a/CHANGES/11091.feature.rst b/CHANGES/11091.feature.rst deleted file mode 100644 index a4db2ddced5..00000000000 --- a/CHANGES/11091.feature.rst +++ /dev/null @@ -1 +0,0 @@ -Added ``ssl_shutdown_timeout`` parameter to :py:class:`~aiohttp.ClientSession` and :py:class:`~aiohttp.TCPConnector` to control the grace period for SSL shutdown handshake on TLS connections. This helps prevent "connection reset" errors on the server side while avoiding excessive delays during connector cleanup. Note: This parameter only takes effect on Python 3.11+ -- by :user:`bdraco`. diff --git a/CHANGES/11094.feature.rst b/CHANGES/11094.feature.rst deleted file mode 120000 index a21761406a1..00000000000 --- a/CHANGES/11094.feature.rst +++ /dev/null @@ -1 +0,0 @@ -11091.feature.rst \ No newline at end of file diff --git a/aiohttp/__init__.py b/aiohttp/__init__.py index 74da5e01c07..fc946e05e9f 100644 --- a/aiohttp/__init__.py +++ b/aiohttp/__init__.py @@ -1,4 +1,4 @@ -__version__ = "3.12.5.dev0" +__version__ = "3.12.5" from typing import TYPE_CHECKING, Tuple From 947247fd4aa2954a746c3a8c183bb7acceb8c3cd Mon Sep 17 00:00:00 2001 From: "J. 
Nick Koston" Date: Sat, 31 May 2025 00:01:02 -0500 Subject: [PATCH 13/13] Fix spurious "Future exception was never retrieved" warnings for connection lost errors (#11100) --- CHANGES/11100.bugfix.rst | 3 +++ aiohttp/client_proto.py | 6 ++++++ tests/test_client_proto.py | 19 +++++++++++++++++++ 3 files changed, 28 insertions(+) create mode 100644 CHANGES/11100.bugfix.rst diff --git a/CHANGES/11100.bugfix.rst b/CHANGES/11100.bugfix.rst new file mode 100644 index 00000000000..a7c54059a14 --- /dev/null +++ b/CHANGES/11100.bugfix.rst @@ -0,0 +1,3 @@ +Fixed spurious "Future exception was never retrieved" warnings for connection lost errors when the connector is not closed -- by :user:`bdraco`. + +When connections are lost, the exception is now marked as retrieved since it is always propagated through other means, preventing unnecessary warnings in logs. diff --git a/aiohttp/client_proto.py b/aiohttp/client_proto.py index 4d559af0a78..be1e7672efe 100644 --- a/aiohttp/client_proto.py +++ b/aiohttp/client_proto.py @@ -98,6 +98,12 @@ def connection_lost(self, exc: Optional[BaseException]) -> None: ), original_connection_error, ) + # Mark the exception as retrieved to prevent + # "Future exception was never retrieved" warnings + # The exception is always passed on through + # other means, so this is safe + with suppress(Exception): + self.closed.exception() if self._payload_parser is not None: with suppress(Exception): # FIXME: log this somehow? 
diff --git a/tests/test_client_proto.py b/tests/test_client_proto.py index e5d62d1e467..fa39b38d45c 100644 --- a/tests/test_client_proto.py +++ b/tests/test_client_proto.py @@ -252,3 +252,22 @@ async def test_connection_lost_sets_transport_to_none( proto.connection_lost(OSError()) assert proto.transport is None + + +async def test_connection_lost_exception_is_marked_retrieved( + loop: asyncio.AbstractEventLoop, +) -> None: + """Test that connection_lost properly handles exceptions without warnings.""" + proto = ResponseHandler(loop=loop) + proto.connection_made(mock.Mock()) + + # Simulate an SSL shutdown timeout error + ssl_error = TimeoutError("SSL shutdown timed out") + proto.connection_lost(ssl_error) + + # Verify the exception was set on the closed future + assert proto.closed.done() + exc = proto.closed.exception() + assert exc is not None + assert "Connection lost: SSL shutdown timed out" in str(exc) + assert exc.__cause__ is ssl_error