2 changes: 1 addition & 1 deletion src/openai/types/responses/response.py

@@ -315,7 +315,7 @@ def output_text(self) -> str:
         for output in self.output:
             if output.type == "message":
                 for content in output.content:
-                    if content.type == "output_text":
+                    if content.type == "output_text" and content.text is not None:
                         texts.append(content.text)
 
         return "".join(texts)
2 changes: 1 addition & 1 deletion src/openai/types/responses/response_output_text.py

@@ -122,7 +122,7 @@ class ResponseOutputText(BaseModel):
     annotations: List[Annotation]
     """The annotations of the text output."""
 
-    text: str
+    text: Optional[str]
     """The text output from the model."""
 
     type: Literal["output_text"]
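
A quick sketch of why the annotation was loosened: under Pydantic v2 validation, a plain str field rejects an explicit null, while Optional[str] accepts it. Standalone illustration assuming Pydantic v2; the SDK's own BaseModel layers more machinery on top:

from typing import Optional
from pydantic import BaseModel, ValidationError

class Strict(BaseModel):
    text: str

class Nullable(BaseModel):
    text: Optional[str]  # still required, but an explicit null is accepted

try:
    Strict.model_validate({"text": None})
except ValidationError as exc:
    print(exc.errors()[0]["type"])  # string_type

print(Nullable.model_validate({"text": None}))  # text=None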
18 changes: 18 additions & 0 deletions tests/lib/responses/test_responses.py

@@ -41,6 +41,24 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None:
     )
 
 
+@pytest.mark.respx(base_url=base_url)
+def test_output_text_ignores_null_text_items(client: OpenAI, respx_mock: MockRouter) -> None:
+    response = make_snapshot_request(
+        lambda c: c.responses.create(
+            model="gpt-4o-mini",
+            input="hi",
+        ),
+        content_snapshot=snapshot(
+            '{"id": "resp_null_output_text", "object": "response", "created_at": 1754925861, "status": "completed", "background": false, "error": null, "incomplete_details": null, "instructions": null, "max_output_tokens": null, "max_tool_calls": null, "model": "gpt-4o-mini-2024-07-18", "output": [{"id": "msg_null_output_text", "type": "message", "status": "completed", "content": [{"type": "output_text", "annotations": [], "logprobs": [], "text": null}, {"type": "output_text", "annotations": [], "logprobs": [], "text": "hello"}], "role": "assistant"}], "parallel_tool_calls": true, "previous_response_id": null, "prompt_cache_key": null, "reasoning": {"effort": null, "summary": null}, "safety_identifier": null, "service_tier": "default", "store": true, "temperature": 1.0, "text": {"format": {"type": "text"}, "verbosity": "medium"}, "tool_choice": "auto", "tools": [], "top_logprobs": 0, "top_p": 1.0, "truncation": "disabled", "usage": {"input_tokens": 1, "input_tokens_details": {"cached_tokens": 0}, "output_tokens": 1, "output_tokens_details": {"reasoning_tokens": 0}, "total_tokens": 2}, "user": null, "metadata": {}}'
+        ),
+        path="/responses",
+        mock_client=client,
+        respx_mock=respx_mock,
+    )
+
+    assert response.output_text == "hello"
+
+
 @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
 def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None:
     checking_client: OpenAI | AsyncOpenAI = client if sync else async_client