Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 8 additions & 6 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
name: CI
on:
push:
branches-ignore:
- 'generated'
- 'codegen/**'
- 'integrated/**'
- 'stl-preview-head/**'
- 'stl-preview-base/**'
branches:
- '**'
- '!integrated/**'
- '!stl-preview-head/**'
- '!stl-preview-base/**'
- '!generated'
- '!codegen/**'
- 'codegen/stl/**'
pull_request:
branches-ignore:
- 'stl-preview-head/**'
Expand Down
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "2.28.0"
".": "2.29.0"
}
6 changes: 3 additions & 3 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 152
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-cb3e4451108eed58d59cff25bf77ec0dc960ec9c6f3dba68f90e7a9847c09d21.yml
openapi_spec_hash: dec6d9be64a5ba8f474a1f2a7a4fafef
config_hash: e922f01e25accd07d8fd3641c37fbd62
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-55ef7034334e938c30656a404ce5e21466103be87542a796425346299f450404.yml
openapi_spec_hash: 4a5bfd2ee4ad47f5b7cf6f1ad08d5d7f
config_hash: 96fbf82cf74a44ccd513f5acf0956ffd
22 changes: 22 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,27 @@
# Changelog

## 2.29.0 (2026-03-17)

Full Changelog: [v2.28.0...v2.29.0](https://github.com/openai/openai-python/compare/v2.28.0...v2.29.0)

### Features

* **api:** 5.4 nano and mini model slugs ([3b45666](https://github.com/openai/openai-python/commit/3b456661f77ca3196aceb5ab3350664a63481114))
* **api:** add /v1/videos endpoint to batches create method ([c0e7a16](https://github.com/openai/openai-python/commit/c0e7a161a996854021e9eb69ea2a60ca0d08047f))
* **api:** add defer_loading field to ToolFunction ([3167595](https://github.com/openai/openai-python/commit/3167595432bdda2f90721901d30ad316db49323e))
* **api:** add in and nin operators to ComparisonFilter type ([664f02b](https://github.com/openai/openai-python/commit/664f02b051af84e1ca3fa313981ec72fdea269b3))


### Bug Fixes

* **deps:** bump minimum typing-extensions version ([a2fb2ca](https://github.com/openai/openai-python/commit/a2fb2ca55142c6658a18be7bd1392a01f5a83f35))
* **pydantic:** do not pass `by_alias` unless set ([8ebe8fb](https://github.com/openai/openai-python/commit/8ebe8fbcb011c6a005a715cae50c6400a8596ee0))


### Chores

* **internal:** tweak CI branches ([96ccc3c](https://github.com/openai/openai-python/commit/96ccc3cca35645fd3140f99b0fc8e55545065212))

## 2.28.0 (2026-03-13)

Full Changelog: [v2.27.0...v2.28.0](https://github.com/openai/openai-python/compare/v2.27.0...v2.28.0)
Expand Down
4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "2.28.0"
version = "2.29.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand All @@ -11,7 +11,7 @@ authors = [
dependencies = [
"httpx>=0.23.0, <1",
"pydantic>=1.9.0, <3",
"typing-extensions>=4.11, <5",
"typing-extensions>=4.14, <5",
"anyio>=3.5.0, <5",
"distro>=1.7.0, <2",
"sniffio",
Expand Down
11 changes: 9 additions & 2 deletions src/openai/_compat.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

from typing import TYPE_CHECKING, Any, Union, Generic, TypeVar, Callable, cast, overload
from datetime import date, datetime
from typing_extensions import Self, Literal
from typing_extensions import Self, Literal, TypedDict

import pydantic
from pydantic.fields import FieldInfo
Expand Down Expand Up @@ -131,6 +131,10 @@ def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:
return model.model_dump_json(indent=indent)


class _ModelDumpKwargs(TypedDict, total=False):
    # Optional keyword arguments conditionally forwarded to pydantic's
    # ``model_dump()``. ``total=False`` makes every key optional, so
    # ``model_dump`` (below) can include ``by_alias`` only when the caller
    # set it explicitly and otherwise let pydantic apply its own default.
    by_alias: bool

def model_dump(
model: pydantic.BaseModel,
*,
Expand All @@ -142,14 +146,17 @@ def model_dump(
by_alias: bool | None = None,
) -> dict[str, Any]:
if (not PYDANTIC_V1) or hasattr(model, "model_dump"):
kwargs: _ModelDumpKwargs = {}
if by_alias is not None:
kwargs["by_alias"] = by_alias
return model.model_dump(
mode=mode,
exclude=exclude,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
# warnings are not supported in Pydantic v1
warnings=True if PYDANTIC_V1 else warnings,
by_alias=by_alias,
**kwargs,
)
return cast(
"dict[str, Any]",
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "2.28.0" # x-release-please-version
__version__ = "2.29.0" # x-release-please-version
16 changes: 10 additions & 6 deletions src/openai/resources/batches.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ def create(
"/v1/moderations",
"/v1/images/generations",
"/v1/images/edits",
"/v1/videos",
],
input_file_id: str,
metadata: Optional[Metadata] | Omit = omit,
Expand All @@ -76,9 +77,10 @@ def create(

endpoint: The endpoint to be used for all requests in the batch. Currently
`/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
`/v1/moderations`, `/v1/images/generations`, and `/v1/images/edits` are
supported. Note that `/v1/embeddings` batches are also restricted to a maximum
of 50,000 embedding inputs across all requests in the batch.
`/v1/moderations`, `/v1/images/generations`, `/v1/images/edits`, and
`/v1/videos` are supported. Note that `/v1/embeddings` batches are also
restricted to a maximum of 50,000 embedding inputs across all requests in the
batch.

input_file_id: The ID of an uploaded file that contains requests for the new batch.

Expand Down Expand Up @@ -282,6 +284,7 @@ async def create(
"/v1/moderations",
"/v1/images/generations",
"/v1/images/edits",
"/v1/videos",
],
input_file_id: str,
metadata: Optional[Metadata] | Omit = omit,
Expand All @@ -302,9 +305,10 @@ async def create(

endpoint: The endpoint to be used for all requests in the batch. Currently
`/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
`/v1/moderations`, `/v1/images/generations`, and `/v1/images/edits` are
supported. Note that `/v1/embeddings` batches are also restricted to a maximum
of 50,000 embedding inputs across all requests in the batch.
`/v1/moderations`, `/v1/images/generations`, `/v1/images/edits`, and
`/v1/videos` are supported. Note that `/v1/embeddings` batches are also
restricted to a maximum of 50,000 embedding inputs across all requests in the
batch.

input_file_id: The ID of an uploaded file that contains requests for the new batch.

Expand Down
8 changes: 8 additions & 0 deletions src/openai/resources/responses/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -1568,6 +1568,10 @@ def compact(
model: Union[
Literal[
"gpt-5.4",
"gpt-5.4-mini",
"gpt-5.4-nano",
"gpt-5.4-mini-2026-03-17",
"gpt-5.4-nano-2026-03-17",
"gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
Expand Down Expand Up @@ -3235,6 +3239,10 @@ async def compact(
model: Union[
Literal[
"gpt-5.4",
"gpt-5.4-mini",
"gpt-5.4-nano",
"gpt-5.4-mini-2026-03-17",
"gpt-5.4-nano-2026-03-17",
"gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
Expand Down
9 changes: 5 additions & 4 deletions src/openai/types/batch_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,16 @@ class BatchCreateParams(TypedDict, total=False):
"/v1/moderations",
"/v1/images/generations",
"/v1/images/edits",
"/v1/videos",
]
]
"""The endpoint to be used for all requests in the batch.

Currently `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
`/v1/completions`, `/v1/moderations`, `/v1/images/generations`, and
`/v1/images/edits` are supported. Note that `/v1/embeddings` batches are also
restricted to a maximum of 50,000 embedding inputs across all requests in the
batch.
`/v1/completions`, `/v1/moderations`, `/v1/images/generations`,
`/v1/images/edits`, and `/v1/videos` are supported. Note that `/v1/embeddings`
batches are also restricted to a maximum of 50,000 embedding inputs across all
requests in the batch.
"""

input_file_id: Required[str]
Expand Down
3 changes: 3 additions & 0 deletions src/openai/types/responses/namespace_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ class ToolFunction(BaseModel):

type: Literal["function"]

defer_loading: Optional[bool] = None
"""Whether this function should be deferred and discovered via tool search."""

description: Optional[str] = None

parameters: Optional[object] = None
Expand Down
3 changes: 3 additions & 0 deletions src/openai/types/responses/namespace_tool_param.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,9 @@ class ToolFunction(TypedDict, total=False):

type: Required[Literal["function"]]

defer_loading: bool
"""Whether this function should be deferred and discovered via tool search."""

description: Optional[str]

parameters: Optional[object]
Expand Down
4 changes: 4 additions & 0 deletions src/openai/types/responses/response_compact_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,10 @@ class ResponseCompactParams(TypedDict, total=False):
Union[
Literal[
"gpt-5.4",
"gpt-5.4-mini",
"gpt-5.4-nano",
"gpt-5.4-mini-2026-03-17",
"gpt-5.4-nano-2026-03-17",
"gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
Expand Down
4 changes: 4 additions & 0 deletions src/openai/types/shared/chat_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@

ChatModel: TypeAlias = Literal[
"gpt-5.4",
"gpt-5.4-mini",
"gpt-5.4-nano",
"gpt-5.4-mini-2026-03-17",
"gpt-5.4-nano-2026-03-17",
"gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
Expand Down
2 changes: 1 addition & 1 deletion src/openai/types/shared/comparison_filter.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ class ComparisonFilter(BaseModel):
key: str
"""The key to compare against the value."""

type: Literal["eq", "ne", "gt", "gte", "lt", "lte"]
type: Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "nin"]
"""
Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
`nin`.
Expand Down
4 changes: 4 additions & 0 deletions src/openai/types/shared_params/chat_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@

ChatModel: TypeAlias = Literal[
"gpt-5.4",
"gpt-5.4-mini",
"gpt-5.4-nano",
"gpt-5.4-mini-2026-03-17",
"gpt-5.4-nano-2026-03-17",
"gpt-5.3-chat-latest",
"gpt-5.2",
"gpt-5.2-2025-12-11",
Expand Down
2 changes: 1 addition & 1 deletion src/openai/types/shared_params/comparison_filter.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ class ComparisonFilter(TypedDict, total=False):
key: Required[str]
"""The key to compare against the value."""

type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte"]]
type: Required[Literal["eq", "ne", "gt", "gte", "lt", "lte", "in", "nin"]]
"""
Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`, `in`,
`nin`.
Expand Down
Loading