@@ -117,6 +117,7 @@ async def run(
         max_turns: int = DEFAULT_MAX_TURNS,
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
         output is generated. The loop runs like so:
@@ -141,6 +142,8 @@ async def run(
                 AI invocation (including any tool calls that might occur).
             hooks: An object that receives callbacks on various lifecycle events.
             run_config: Global settings for the entire agent run.
+            previous_response_id: The ID of the previous response, if using OpenAI models via the
+                Responses API, this allows you to skip passing in input from the previous turn.

         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
@@ -230,6 +233,7 @@ async def run(
                                 run_config=run_config,
                                 should_run_agent_start_hooks=should_run_agent_start_hooks,
                                 tool_use_tracker=tool_use_tracker,
+                                previous_response_id=previous_response_id,
                             ),
                         )
                     else:
@@ -243,6 +247,7 @@ async def run(
                             run_config=run_config,
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
                             tool_use_tracker=tool_use_tracker,
+                            previous_response_id=previous_response_id,
                         )
                     should_run_agent_start_hooks = False

@@ -291,6 +296,7 @@ def run_sync(
         max_turns: int = DEFAULT_MAX_TURNS,
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
         `run` method, so it will not work if there's already an event loop (e.g. inside an async
@@ -319,6 +325,8 @@ def run_sync(
                 AI invocation (including any tool calls that might occur).
             hooks: An object that receives callbacks on various lifecycle events.
             run_config: Global settings for the entire agent run.
+            previous_response_id: The ID of the previous response, if using OpenAI models via the
+                Responses API, this allows you to skip passing in input from the previous turn.

         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
@@ -332,6 +340,7 @@ def run_sync(
                 max_turns=max_turns,
                 hooks=hooks,
                 run_config=run_config,
+                previous_response_id=previous_response_id,
             )
         )

@@ -344,6 +353,7 @@ def run_streamed(
         max_turns: int = DEFAULT_MAX_TURNS,
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
+        previous_response_id: str | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
         contains a method you can use to stream semantic events as they are generated.
@@ -370,7 +380,8 @@ def run_streamed(
                 AI invocation (including any tool calls that might occur).
             hooks: An object that receives callbacks on various lifecycle events.
             run_config: Global settings for the entire agent run.
-
+            previous_response_id: The ID of the previous response, if using OpenAI models via the
+                Responses API, this allows you to skip passing in input from the previous turn.
         Returns:
             A result object that contains data about the run, as well as a method to stream events.
         """
@@ -428,6 +439,7 @@ def run_streamed(
                 hooks=hooks,
                 context_wrapper=context_wrapper,
                 run_config=run_config,
+                previous_response_id=previous_response_id,
             )
         )
         return streamed_result
@@ -485,6 +497,7 @@ async def _run_streamed_impl(
         hooks: RunHooks[TContext],
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
+        previous_response_id: str | None,
     ):
         current_span: Span[AgentSpanData] | None = None
         current_agent = starting_agent
@@ -554,6 +567,7 @@ async def _run_streamed_impl(
                         should_run_agent_start_hooks,
                         tool_use_tracker,
                         all_tools,
+                        previous_response_id,
                     )
                     should_run_agent_start_hooks = False

@@ -623,6 +637,7 @@ async def _run_single_turn_streamed(
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
+        previous_response_id: str | None,
     ) -> SingleStepResult:
         if should_run_agent_start_hooks:
             await asyncio.gather(
@@ -662,6 +677,7 @@ async def _run_single_turn_streamed(
             get_model_tracing_impl(
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
+            previous_response_id=previous_response_id,
         ):
             if isinstance(event, ResponseCompletedEvent):
                 usage = (
@@ -717,6 +733,7 @@ async def _run_single_turn(
         run_config: RunConfig,
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
+        previous_response_id: str | None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -746,6 +763,7 @@ async def _run_single_turn(
             context_wrapper,
             run_config,
             tool_use_tracker,
+            previous_response_id,
         )

         return await cls._get_single_step_result_from_response(
@@ -888,6 +906,7 @@ async def _get_new_response(
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
+        previous_response_id: str | None,
     ) -> ModelResponse:
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
@@ -903,6 +922,7 @@ async def _get_new_response(
             tracing=get_model_tracing_impl(
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
+            previous_response_id=previous_response_id,
         )

         context_wrapper.usage.add(new_response.usage)
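For context, a minimal sketch of how a caller might use the new previous_response_id parameter, assuming the Agent and Runner entry points this SDK exposes; the "resp_..." value is a placeholder for whatever ID a prior OpenAI Responses API call returned, not something produced by this diff.

import asyncio

from agents import Agent, Runner  # assumed public imports for this SDK


async def main() -> None:
    agent = Agent(name="Assistant", instructions="Reply very concisely.")

    # First turn: run normally; when an OpenAI model is used via the Responses
    # API, the conversation state is stored server-side under a response ID.
    first = await Runner.run(agent, "What is the capital of France?")
    print(first.final_output)

    # Follow-up turn: instead of re-sending the first turn's items as input,
    # pass the previous response's ID so the Responses API resumes from it.
    # "resp_..." is a placeholder; use the ID returned by the previous call.
    second = await Runner.run(
        agent,
        "And what is its population?",
        previous_response_id="resp_...",
    )
    print(second.final_output)


if __name__ == "__main__":
    asyncio.run(main())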