|
28 | 28 | ResponseTextDeltaEvent,
|
29 | 29 | ResponseUsage,
|
30 | 30 | )
|
31 |
| -from openai.types.responses.response_reasoning_item import Summary |
| 31 | +from openai.types.responses.response_reasoning_item import Content, Summary |
32 | 32 | from openai.types.responses.response_reasoning_summary_part_added_event import (
|
33 | 33 | Part as AddedEventPart,
|
34 | 34 | )
|
35 | 35 | from openai.types.responses.response_reasoning_summary_part_done_event import Part as DoneEventPart
|
| 36 | +from openai.types.responses.response_reasoning_text_delta_event import ( |
| 37 | + ResponseReasoningTextDeltaEvent, |
| 38 | +) |
| 39 | +from openai.types.responses.response_reasoning_text_done_event import ( |
| 40 | + ResponseReasoningTextDoneEvent, |
| 41 | +) |
36 | 42 | from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
|
37 | 43 |
|
38 | 44 | from ..items import TResponseStreamEvent
|
@@ -95,7 +101,7 @@ async def handle_stream(
|
95 | 101 |
|
96 | 102 | delta = chunk.choices[0].delta
|
97 | 103 |
|
98 |
| - # Handle reasoning content |
| 104 | + # Handle reasoning content for reasoning summaries |
99 | 105 | if hasattr(delta, "reasoning_content"):
|
100 | 106 | reasoning_content = delta.reasoning_content
|
101 | 107 | if reasoning_content and not state.reasoning_content_index_and_output:
|
@@ -138,10 +144,55 @@ async def handle_stream(
|
138 | 144 | )
|
139 | 145 |
|
140 | 146 | # Create a new summary with updated text
|
141 |
| - current_summary = state.reasoning_content_index_and_output[1].summary[0] |
142 |
| - updated_text = current_summary.text + reasoning_content |
143 |
| - new_summary = Summary(text=updated_text, type="summary_text") |
144 |
| - state.reasoning_content_index_and_output[1].summary[0] = new_summary |
| 147 | + current_content = state.reasoning_content_index_and_output[1].summary[0] |
| 148 | + updated_text = current_content.text + reasoning_content |
| 149 | + new_content = Summary(text=updated_text, type="summary_text") |
| 150 | + state.reasoning_content_index_and_output[1].summary[0] = new_content |
| 151 | + |
| 152 | + # Handle reasoning content from 3rd party platforms |
| 153 | + if hasattr(delta, "reasoning"): |
| 154 | + reasoning_text = delta.reasoning |
| 155 | + if reasoning_text and not state.reasoning_content_index_and_output: |
| 156 | + state.reasoning_content_index_and_output = ( |
| 157 | + 0, |
| 158 | + ResponseReasoningItem( |
| 159 | + id=FAKE_RESPONSES_ID, |
| 160 | + summary=[], |
| 161 | + content=[Content(text="", type="reasoning_text")], |
| 162 | + type="reasoning", |
| 163 | + ), |
| 164 | + ) |
| 165 | + yield ResponseOutputItemAddedEvent( |
| 166 | + item=ResponseReasoningItem( |
| 167 | + id=FAKE_RESPONSES_ID, |
| 168 | + summary=[], |
| 169 | + content=[Content(text="", type="reasoning_text")], |
| 170 | + type="reasoning", |
| 171 | + ), |
| 172 | + output_index=0, |
| 173 | + type="response.output_item.added", |
| 174 | + sequence_number=sequence_number.get_and_increment(), |
| 175 | + ) |
| 176 | + |
| 177 | + if reasoning_text and state.reasoning_content_index_and_output: |
| 178 | + yield ResponseReasoningTextDeltaEvent( |
| 179 | + delta=reasoning_text, |
| 180 | + item_id=FAKE_RESPONSES_ID, |
| 181 | + output_index=0, |
| 182 | + content_index=0, |
| 183 | + type="response.reasoning_text.delta", |
| 184 | + sequence_number=sequence_number.get_and_increment(), |
| 185 | + ) |
| 186 | + |
| 187 | + # Append the delta to the existing reasoning text content |
| 188 | + if state.reasoning_content_index_and_output[1].content is None: |
| 189 | + state.reasoning_content_index_and_output[1].content = [ |
| 190 | + Content(text="", type="reasoning_text") |
| 191 | + ] |
| 192 | + current_text = state.reasoning_content_index_and_output[1].content[0] |
| 193 | + updated_text = current_text.text + reasoning_text |
| 194 | + new_text_content = Content(text=updated_text, type="reasoning_text") |
| 195 | + state.reasoning_content_index_and_output[1].content[0] = new_text_content |
145 | 196 |
|
146 | 197 | # Handle regular content
|
147 | 198 | if delta.content is not None:
|
@@ -344,17 +395,30 @@ async def handle_stream(
|
344 | 395 | )
|
345 | 396 |
|
346 | 397 | if state.reasoning_content_index_and_output:
|
347 |
| - yield ResponseReasoningSummaryPartDoneEvent( |
348 |
| - item_id=FAKE_RESPONSES_ID, |
349 |
| - output_index=0, |
350 |
| - summary_index=0, |
351 |
| - part=DoneEventPart( |
352 |
| - text=state.reasoning_content_index_and_output[1].summary[0].text, |
353 |
| - type="summary_text", |
354 |
| - ), |
355 |
| - type="response.reasoning_summary_part.done", |
356 |
| - sequence_number=sequence_number.get_and_increment(), |
357 |
| - ) |
| 398 | + if ( |
| 399 | + state.reasoning_content_index_and_output[1].summary |
| 400 | + and len(state.reasoning_content_index_and_output[1].summary) > 0 |
| 401 | + ): |
| 402 | + yield ResponseReasoningSummaryPartDoneEvent( |
| 403 | + item_id=FAKE_RESPONSES_ID, |
| 404 | + output_index=0, |
| 405 | + summary_index=0, |
| 406 | + part=DoneEventPart( |
| 407 | + text=state.reasoning_content_index_and_output[1].summary[0].text, |
| 408 | + type="summary_text", |
| 409 | + ), |
| 410 | + type="response.reasoning_summary_part.done", |
| 411 | + sequence_number=sequence_number.get_and_increment(), |
| 412 | + ) |
| 413 | + elif state.reasoning_content_index_and_output[1].content is not None: |
| 414 | + yield ResponseReasoningTextDoneEvent( |
| 415 | + item_id=FAKE_RESPONSES_ID, |
| 416 | + output_index=0, |
| 417 | + content_index=0, |
| 418 | + text=state.reasoning_content_index_and_output[1].content[0].text, |
| 419 | + type="response.reasoning_text.done", |
| 420 | + sequence_number=sequence_number.get_and_increment(), |
| 421 | + ) |
358 | 422 | yield ResponseOutputItemDoneEvent(
|
359 | 423 | item=state.reasoning_content_index_and_output[1],
|
360 | 424 | output_index=0,
|
|
0 commit comments