Skip to content

Commit cc2c1fc

Browse files
release: 1.87.0 (#2410)
* chore(internal): codegen related update
* chore(tests): add tests for httpx client instantiation & proxies
* feat(api): add reusable prompt IDs
* fix(client): update service_tier on `client.beta.chat.completions`
* chore(internal): update conftest.py
* release: 1.87.0

---------

Co-authored-by: stainless-app[bot] <142633134+stainless-app[bot]@users.noreply.github.com>
Co-authored-by: David Meadows <[email protected]>
1 parent eed877f commit cc2c1fc

28 files changed

+627
-65
lines changed

.release-please-manifest.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
".": "1.86.0"
2+
".": "1.87.0"
33
}

.stats.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 111
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-3ae9c18dd7ccfc3ac5206f24394665f563a19015cfa8847b2801a2694d012abc.yml
3-
openapi_spec_hash: 48175b03b58805cd5c80793c66fd54e5
4-
config_hash: 4caff63b74a41f71006987db702f2918
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-9e41d2d5471d2c28bff0d616f4476f5b0e6c541ef4cb51bdaaef5fdf5e13c8b2.yml
3+
openapi_spec_hash: 86f765e18d00e32cf2ce9db7ab84d946
4+
config_hash: fd2af1d5eff0995bb7dc02ac9a34851d

CHANGELOG.md

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,25 @@
11
# Changelog
22

3+
## 1.87.0 (2025-06-16)
4+
5+
Full Changelog: [v1.86.0...v1.87.0](https://github.com/openai/openai-python/compare/v1.86.0...v1.87.0)
6+
7+
### Features
8+
9+
* **api:** add reusable prompt IDs ([36bfe6e](https://github.com/openai/openai-python/commit/36bfe6e8ae12a31624ba1a360d9260f0aeec448a))
10+
11+
12+
### Bug Fixes
13+
14+
* **client:** update service_tier on `client.beta.chat.completions` ([aa488d5](https://github.com/openai/openai-python/commit/aa488d5cf210d8640f87216538d4ff79d7181f2a))
15+
16+
17+
### Chores
18+
19+
* **internal:** codegen related update ([b1a31e5](https://github.com/openai/openai-python/commit/b1a31e5ef4387d9f82cf33f9461371651788d381))
20+
* **internal:** update conftest.py ([bba0213](https://github.com/openai/openai-python/commit/bba0213842a4c161f2235e526d50901a336eecef))
21+
* **tests:** add tests for httpx client instantiation & proxies ([bc93712](https://github.com/openai/openai-python/commit/bc9371204f457aee9ed9b6ec1b61c2084f32faf1))
22+
323
## 1.86.0 (2025-06-10)
424

525
Full Changelog: [v1.85.0...v1.86.0](https://github.com/openai/openai-python/compare/v1.85.0...v1.86.0)

api.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -750,6 +750,7 @@ from openai.types.responses import (
750750
ResponseOutputRefusal,
751751
ResponseOutputText,
752752
ResponseOutputTextAnnotationAddedEvent,
753+
ResponsePrompt,
753754
ResponseQueuedEvent,
754755
ResponseReasoningDeltaEvent,
755756
ResponseReasoningDoneEvent,

pyproject.toml

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "openai"
3-
version = "1.86.0"
3+
version = "1.87.0"
44
description = "The official Python library for the openai API"
55
dynamic = ["readme"]
66
license = "Apache-2.0"
@@ -68,6 +68,7 @@ dev-dependencies = [
6868
"types-pyaudio > 0",
6969
"trio >=0.22.2",
7070
"nest_asyncio==1.6.0",
71+
"pytest-xdist>=3.6.1",
7172
]
7273

7374
[tool.rye.scripts]
@@ -139,7 +140,7 @@ replacement = '[\1](https://github.com/openai/openai-python/tree/main/\g<2>)'
139140

140141
[tool.pytest.ini_options]
141142
testpaths = ["tests"]
142-
addopts = "--tb=short"
143+
addopts = "--tb=short -n auto"
143144
xfail_strict = true
144145
asyncio_mode = "auto"
145146
asyncio_default_fixture_loop_scope = "session"

requirements-dev.lock

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,8 @@ exceptiongroup==1.2.2
5454
# via anyio
5555
# via pytest
5656
# via trio
57+
execnet==2.1.1
58+
# via pytest-xdist
5759
executing==2.1.0
5860
# via inline-snapshot
5961
filelock==3.12.4
@@ -129,7 +131,9 @@ pyjwt==2.8.0
129131
pyright==1.1.399
130132
pytest==8.3.3
131133
# via pytest-asyncio
134+
# via pytest-xdist
132135
pytest-asyncio==0.24.0
136+
pytest-xdist==3.7.0
133137
python-dateutil==2.8.2
134138
# via pandas
135139
# via time-machine

src/openai/_base_client.py

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1088,7 +1088,14 @@ def _process_response(
10881088

10891089
origin = get_origin(cast_to) or cast_to
10901090

1091-
if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
1091+
if (
1092+
inspect.isclass(origin)
1093+
and issubclass(origin, BaseAPIResponse)
1094+
# we only want to actually return the custom BaseAPIResponse class if we're
1095+
# returning the raw response, or if we're not streaming SSE, as if we're streaming
1096+
# SSE then `cast_to` doesn't actively reflect the type we need to parse into
1097+
and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
1098+
):
10921099
if not issubclass(origin, APIResponse):
10931100
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
10941101

@@ -1606,7 +1613,14 @@ async def _process_response(
16061613

16071614
origin = get_origin(cast_to) or cast_to
16081615

1609-
if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
1616+
if (
1617+
inspect.isclass(origin)
1618+
and issubclass(origin, BaseAPIResponse)
1619+
# we only want to actually return the custom BaseAPIResponse class if we're
1620+
# returning the raw response, or if we're not streaming SSE, as if we're streaming
1621+
# SSE then `cast_to` doesn't actively reflect the type we need to parse into
1622+
and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
1623+
):
16101624
if not issubclass(origin, AsyncAPIResponse):
16111625
raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")
16121626

src/openai/_version.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
22

33
__title__ = "openai"
4-
__version__ = "1.86.0" # x-release-please-version
4+
__version__ = "1.87.0" # x-release-please-version

src/openai/resources/beta/chat/completions.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -81,7 +81,7 @@ def parse(
8181
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
8282
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
8383
seed: Optional[int] | NotGiven = NOT_GIVEN,
84-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
84+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
8585
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
8686
store: Optional[bool] | NotGiven = NOT_GIVEN,
8787
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -228,7 +228,7 @@ def stream(
228228
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
229229
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
230230
seed: Optional[int] | NotGiven = NOT_GIVEN,
231-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
231+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
232232
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
233233
store: Optional[bool] | NotGiven = NOT_GIVEN,
234234
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -360,7 +360,7 @@ async def parse(
360360
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
361361
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
362362
seed: Optional[int] | NotGiven = NOT_GIVEN,
363-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
363+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
364364
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
365365
store: Optional[bool] | NotGiven = NOT_GIVEN,
366366
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -507,7 +507,7 @@ def stream(
507507
presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
508508
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
509509
seed: Optional[int] | NotGiven = NOT_GIVEN,
510-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
510+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
511511
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
512512
store: Optional[bool] | NotGiven = NOT_GIVEN,
513513
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,

src/openai/resources/chat/completions/completions.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,7 @@ def create(
9595
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
9696
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
9797
seed: Optional[int] | NotGiven = NOT_GIVEN,
98-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
98+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
9999
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
100100
store: Optional[bool] | NotGiven = NOT_GIVEN,
101101
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -365,7 +365,7 @@ def create(
365365
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
366366
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
367367
seed: Optional[int] | NotGiven = NOT_GIVEN,
368-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
368+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
369369
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
370370
store: Optional[bool] | NotGiven = NOT_GIVEN,
371371
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -634,7 +634,7 @@ def create(
634634
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
635635
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
636636
seed: Optional[int] | NotGiven = NOT_GIVEN,
637-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
637+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
638638
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
639639
store: Optional[bool] | NotGiven = NOT_GIVEN,
640640
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -902,7 +902,7 @@ def create(
902902
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
903903
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
904904
seed: Optional[int] | NotGiven = NOT_GIVEN,
905-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
905+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
906906
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
907907
store: Optional[bool] | NotGiven = NOT_GIVEN,
908908
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -1198,7 +1198,7 @@ async def create(
11981198
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
11991199
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
12001200
seed: Optional[int] | NotGiven = NOT_GIVEN,
1201-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
1201+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
12021202
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
12031203
store: Optional[bool] | NotGiven = NOT_GIVEN,
12041204
stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -1468,7 +1468,7 @@ async def create(
14681468
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
14691469
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
14701470
seed: Optional[int] | NotGiven = NOT_GIVEN,
1471-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
1471+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
14721472
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
14731473
store: Optional[bool] | NotGiven = NOT_GIVEN,
14741474
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -1737,7 +1737,7 @@ async def create(
17371737
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
17381738
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
17391739
seed: Optional[int] | NotGiven = NOT_GIVEN,
1740-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
1740+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
17411741
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
17421742
store: Optional[bool] | NotGiven = NOT_GIVEN,
17431743
stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -2005,7 +2005,7 @@ async def create(
20052005
reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
20062006
response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
20072007
seed: Optional[int] | NotGiven = NOT_GIVEN,
2008-
service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
2008+
service_tier: Optional[Literal["auto", "default", "flex", "scale"]] | NotGiven = NOT_GIVEN,
20092009
stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
20102010
store: Optional[bool] | NotGiven = NOT_GIVEN,
20112011
stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,

src/openai/resources/fine_tuning/jobs/jobs.py

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def create(
8484
Response includes details of the enqueued job including job status and the name
8585
of the fine-tuned models once complete.
8686
87-
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
87+
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
8888
8989
Args:
9090
model: The name of the model to fine-tune. You can select one of the
@@ -105,7 +105,8 @@ def create(
105105
[preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
106106
format.
107107
108-
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
108+
See the
109+
[fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
109110
for more details.
110111
111112
hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -142,7 +143,8 @@ def create(
142143
Your dataset must be formatted as a JSONL file. You must upload your file with
143144
the purpose `fine-tune`.
144145
145-
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
146+
See the
147+
[fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
146148
for more details.
147149
148150
extra_headers: Send extra headers
@@ -189,7 +191,7 @@ def retrieve(
189191
"""
190192
Get info about a fine-tuning job.
191193
192-
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
194+
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
193195
194196
Args:
195197
extra_headers: Send extra headers
@@ -462,7 +464,7 @@ async def create(
462464
Response includes details of the enqueued job including job status and the name
463465
of the fine-tuned models once complete.
464466
465-
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
467+
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
466468
467469
Args:
468470
model: The name of the model to fine-tune. You can select one of the
@@ -483,7 +485,8 @@ async def create(
483485
[preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
484486
format.
485487
486-
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
488+
See the
489+
[fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
487490
for more details.
488491
489492
hyperparameters: The hyperparameters used for the fine-tuning job. This value is now deprecated
@@ -520,7 +523,8 @@ async def create(
520523
Your dataset must be formatted as a JSONL file. You must upload your file with
521524
the purpose `fine-tune`.
522525
523-
See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
526+
See the
527+
[fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
524528
for more details.
525529
526530
extra_headers: Send extra headers
@@ -567,7 +571,7 @@ async def retrieve(
567571
"""
568572
Get info about a fine-tuning job.
569573
570-
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)
574+
[Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
571575
572576
Args:
573577
extra_headers: Send extra headers

src/openai/resources/images.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,8 @@ def edit(
123123
mask: FileTypes | NotGiven = NOT_GIVEN,
124124
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
125125
n: Optional[int] | NotGiven = NOT_GIVEN,
126+
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
127+
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
126128
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
127129
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
128130
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
@@ -171,6 +173,14 @@ def edit(
171173
172174
n: The number of images to generate. Must be between 1 and 10.
173175
176+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
177+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
178+
defaults to 100.
179+
180+
output_format: The format in which the generated images are returned. This parameter is only
181+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
182+
default value is `png`.
183+
174184
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
175185
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
176186
Defaults to `auto`.
@@ -204,6 +214,8 @@ def edit(
204214
"mask": mask,
205215
"model": model,
206216
"n": n,
217+
"output_compression": output_compression,
218+
"output_format": output_format,
207219
"quality": quality,
208220
"response_format": response_format,
209221
"size": size,
@@ -447,6 +459,8 @@ async def edit(
447459
mask: FileTypes | NotGiven = NOT_GIVEN,
448460
model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
449461
n: Optional[int] | NotGiven = NOT_GIVEN,
462+
output_compression: Optional[int] | NotGiven = NOT_GIVEN,
463+
output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
450464
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
451465
response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
452466
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]]
@@ -495,6 +509,14 @@ async def edit(
495509
496510
n: The number of images to generate. Must be between 1 and 10.
497511
512+
output_compression: The compression level (0-100%) for the generated images. This parameter is only
513+
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
514+
defaults to 100.
515+
516+
output_format: The format in which the generated images are returned. This parameter is only
517+
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
518+
default value is `png`.
519+
498520
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
499521
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
500522
Defaults to `auto`.
@@ -528,6 +550,8 @@ async def edit(
528550
"mask": mask,
529551
"model": model,
530552
"n": n,
553+
"output_compression": output_compression,
554+
"output_format": output_format,
531555
"quality": quality,
532556
"response_format": response_format,
533557
"size": size,

0 commit comments

Comments (0)