# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Any, List, Type, Union, Iterable, Optional, cast
from functools import partial
from typing_extensions import Literal, overload

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
from ..._utils import is_given, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .input_items import (
    InputItems,
    AsyncInputItems,
    InputItemsWithRawResponse,
    AsyncInputItemsWithRawResponse,
    InputItemsWithStreamingResponse,
    AsyncInputItemsWithStreamingResponse,
)
from ..._streaming import Stream, AsyncStream
from ...lib._tools import PydanticFunctionTool, ResponsesPydanticFunctionTool
from ..._base_client import make_request_options
from ...types.responses import response_create_params, response_retrieve_params
from ...lib._parsing._responses import (
    TextFormatT,
    parse_response,
    type_to_text_format_param as _type_to_text_format_param,
)
from ...types.shared.chat_model import ChatModel
from ...types.responses.response import Response
from ...types.responses.tool_param import ToolParam, ParseableToolParam
from ...types.shared_params.metadata import Metadata
from ...types.shared_params.reasoning import Reasoning
from ...types.responses.parsed_response import ParsedResponse
from ...lib.streaming.responses._responses import ResponseStreamManager, AsyncResponseStreamManager
from ...types.responses.response_includable import ResponseIncludable
from ...types.shared_params.responses_model import ResponsesModel
from ...types.responses.response_input_param import ResponseInputParam
from ...types.responses.response_prompt_param import ResponsePromptParam
from ...types.responses.response_stream_event import ResponseStreamEvent
from ...types.responses.response_text_config_param import ResponseTextConfigParam

__all__ = ["Responses", "AsyncResponses"]


class Responses(SyncAPIResource):
    @cached_property
    def input_items(self) -> InputItems:
        return InputItems(self._client)

    @cached_property
    def with_raw_response(self) -> ResponsesWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return ResponsesWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> ResponsesWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return ResponsesWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        background: Optional[bool] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        model: ResponsesModel | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response:
        """Creates a model response.

        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.

        Args:
          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          input: Text, image, or file inputs to the model, used to generate a response.

              Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)

          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.

          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          reasoning: **o-series models only**

              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                'priority', then the request will be processed with the corresponding service
                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
                Priority processing.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          store: Whether to store the generated model response for later retrieval via API.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

              The two categories of tools you can provide the model are:

              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code. Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          truncation: The truncation strategy to use for the model response.

              - `auto`: If the context of this response and previous ones exceeds the model's
                context window size, the model will truncate the response to fit the context
                window by dropping input items in the middle of the conversation.
              - `disabled` (default): If a model response will exceed the context window size
                for a model, the request will fail with a 400 error.

          user: A stable identifier for your end-users. Used to boost cache hit rates by better
              bucketing similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
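
          Example (an illustrative sketch; the model name and prompt below are
          placeholders, not values required by this method):

              from openai import OpenAI

              client = OpenAI()
              response = client.responses.create(
                  model="gpt-4o",
                  input="Write a one-sentence bedtime story about a unicorn.",
              )
              print(response.output_text)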
        """

    @overload
    def create(
        self,
        *,
        stream: Literal[True],
        background: Optional[bool] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        model: ResponsesModel | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[ResponseStreamEvent]:
        """Creates a model response.

        Provide
        [text](https://platform.openai.com/docs/guides/text) or
        [image](https://platform.openai.com/docs/guides/images) inputs to generate
        [text](https://platform.openai.com/docs/guides/text) or
        [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
        the model call your own
        [custom code](https://platform.openai.com/docs/guides/function-calling) or use
        built-in [tools](https://platform.openai.com/docs/guides/tools) like
        [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
        your own data as input for the model's response.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          background: Whether to run the model response in the background.
              [Learn more](https://platform.openai.com/docs/guides/background).

          include: Specify additional output data to include in the model response. Currently
              supported values are:

              - `code_interpreter_call.outputs`: Includes the outputs of python code execution
                in code interpreter tool call items.
              - `computer_call_output.output.image_url`: Include image urls from the computer
                call output.
              - `file_search_call.results`: Include the search results of the file search tool
                call.
              - `message.input_image.image_url`: Include image urls from the input message.
              - `message.output_text.logprobs`: Include logprobs with assistant messages.
              - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
                tokens in reasoning item outputs. This enables reasoning items to be used in
                multi-turn conversations when using the Responses API statelessly (like when
                the `store` parameter is set to `false`, or when an organization is enrolled
                in the zero data retention program).

          input: Text, image, or file inputs to the model, used to generate a response.

              Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Image inputs](https://platform.openai.com/docs/guides/images)
              - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
              - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
              - [Function calling](https://platform.openai.com/docs/guides/function-calling)

          instructions: A system (or developer) message inserted into the model's context.

              When using along with `previous_response_id`, the instructions from a previous
              response will not be carried over to the next response. This makes it simple to
              swap out system (or developer) messages in new responses.

          max_output_tokens: An upper bound for the number of tokens that can be generated for a response,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tool_calls: The maximum number of total calls to built-in tools that can be processed in a
              response. This maximum number applies across all built-in tool calls, not per
              individual tool. Any further attempts to call a tool by the model will be
              ignored.

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          parallel_tool_calls: Whether to allow the model to run tool calls in parallel.

          previous_response_id: The unique ID of the previous response to the model. Use this to create
              multi-turn conversations. Learn more about
              [conversation state](https://platform.openai.com/docs/guides/conversation-state).

          prompt: Reference to a prompt template and its variables.
              [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).

          reasoning: **o-series models only**

              Configuration options for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning).

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                'priority', then the request will be processed with the corresponding service
                tier. [Contact sales](https://openai.com/contact-sales) to learn more about
                Priority processing.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          store: Whether to store the generated model response for later retrieval via API.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          text: Configuration options for a text response from the model. Can be plain text or
              structured JSON data. Learn more:

              - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
              - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)

          tool_choice: How the model should select which tool (or tools) to use when generating a
              response. See the `tools` parameter to see how to specify which tools the model
              can call.

          tools: An array of tools the model may call while generating a response. You can
              specify which tool to use by setting the `tool_choice` parameter.

              The two categories of tools you can provide the model are:

              - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
                capabilities, like
                [web search](https://platform.openai.com/docs/guides/tools-web-search) or
                [file search](https://platform.openai.com/docs/guides/tools-file-search).
                Learn more about
                [built-in tools](https://platform.openai.com/docs/guides/tools).
              - **Function calls (custom tools)**: Functions that are defined by you, enabling
                the model to call your own code. Learn more about
                [function calling](https://platform.openai.com/docs/guides/function-calling).

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          truncation: The truncation strategy to use for the model response.

              - `auto`: If the context of this response and previous ones exceeds the model's
                context window size, the model will truncate the response to fit the context
                window by dropping input items in the middle of the conversation.
              - `disabled` (default): If a model response will exceed the context window size
                for a model, the request will fail with a 400 error.

          user: A stable identifier for your end-users. Used to boost cache hit rates by better
              bucketing similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
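
          Example (an illustrative sketch; the model name and prompt below are
          placeholders, not values required by this method):

              from openai import OpenAI

              client = OpenAI()
              stream = client.responses.create(
                  model="gpt-4o",
                  input="Count to five.",
                  stream=True,
              )
              for event in stream:
                  print(event.type)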
        """

    @overload
    def create(
        self,
        *,
        stream: bool,
        background: Optional[bool] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        model: ResponsesModel | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response | Stream[ResponseStreamEvent]: ...

    def create(
        self,
        *,
        background: Optional[bool] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tool_calls: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        model: ResponsesModel | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        tools: Iterable[ToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response | Stream[ResponseStreamEvent]:
        return self._post(
            "/responses",
            body=maybe_transform(
                {
                    "background": background,
                    "include": include,
                    "input": input,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "max_tool_calls": max_tool_calls,
                    "metadata": metadata,
                    "model": model,
                    "parallel_tool_calls": parallel_tool_calls,
                    "previous_response_id": previous_response_id,
                    "prompt": prompt,
                    "reasoning": reasoning,
                    "service_tier": service_tier,
                    "store": store,
                    "stream": stream,
                    "temperature": temperature,
                    "text": text,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "truncation": truncation,
                    "user": user,
                },
                response_create_params.ResponseCreateParamsStreaming
                if stream
                else response_create_params.ResponseCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Response,
            stream=stream or False,
            stream_cls=Stream[ResponseStreamEvent],
        )

    @overload
    def stream(
        self,
        *,
        response_id: str,
        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
        starting_after: int | NotGiven = NOT_GIVEN,
        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ResponseStreamManager[TextFormatT]: ...

    @overload
    def stream(
        self,
        *,
        input: Union[str, ResponseInputParam],
        model: Union[str, ChatModel],
        background: Optional[bool] | NotGiven = NOT_GIVEN,
        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ResponseStreamManager[TextFormatT]: ...

    def stream(
        self,
        *,
        response_id: str | NotGiven = NOT_GIVEN,
        input: Union[str, ResponseInputParam] | NotGiven = NOT_GIVEN,
        model: Union[str, ChatModel] | NotGiven = NOT_GIVEN,
        background: Optional[bool] | NotGiven = NOT_GIVEN,
        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        starting_after: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ResponseStreamManager[TextFormatT]:
        new_response_args = {
            "input": input,
            "model": model,
            "include": include,
            "instructions": instructions,
            "max_output_tokens": max_output_tokens,
            "metadata": metadata,
            "parallel_tool_calls": parallel_tool_calls,
            "previous_response_id": previous_response_id,
            "reasoning": reasoning,
            "store": store,
            "temperature": temperature,
            "text": text,
            "tool_choice": tool_choice,
            "top_p": top_p,
            "truncation": truncation,
            "user": user,
            "background": background,
        }
        new_response_args_names = [k for k, v in new_response_args.items() if is_given(v)]

        if (is_given(response_id) or is_given(starting_after)) and len(new_response_args_names) > 0:
            raise ValueError(
                "Cannot provide both response_id/starting_after can't be provided together with "
                + ", ".join(new_response_args_names)
            )

        tools = _make_tools(tools)
        if len(new_response_args_names) > 0:
            if not is_given(input):
                raise ValueError("input must be provided when creating a new response")
            if not is_given(model):
                raise ValueError("model must be provided when creating a new response")

            if is_given(text_format):
                if not text:
                    text = {}
                if "format" in text:
                    raise TypeError("Cannot mix and match text.format with text_format")
                text["format"] = _type_to_text_format_param(text_format)

            api_request: partial[Stream[ResponseStreamEvent]] = partial(
                self.create,
                input=input,
                model=model,
                stream=True,
                tools=tools,
                include=include,
                instructions=instructions,
                max_output_tokens=max_output_tokens,
                metadata=metadata,
                parallel_tool_calls=parallel_tool_calls,
                previous_response_id=previous_response_id,
                store=store,
                temperature=temperature,
                text=text,
                tool_choice=tool_choice,
                reasoning=reasoning,
                top_p=top_p,
                truncation=truncation,
                user=user,
                background=background,
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
            )

            return ResponseStreamManager(
                api_request,
                text_format=text_format,
                input_tools=tools,
                starting_after=None,
            )
        else:
            if not is_given(response_id):
                raise ValueError("id must be provided when streaming an existing response")

            return ResponseStreamManager(
                lambda: self.retrieve(
                    response_id=response_id,
                    stream=True,
                    include=include or [],
                    extra_headers=extra_headers,
                    extra_query=extra_query,
                    extra_body=extra_body,
                    timeout=timeout,
                ),
                text_format=text_format,
                input_tools=tools,
                starting_after=starting_after if is_given(starting_after) else None,
            )

    def parse(
        self,
        *,
        input: Union[str, ResponseInputParam],
        model: Union[str, ChatModel],
        text_format: type[TextFormatT] | NotGiven = NOT_GIVEN,
        tools: Iterable[ParseableToolParam] | NotGiven = NOT_GIVEN,
        include: Optional[List[ResponseIncludable]] | NotGiven = NOT_GIVEN,
        instructions: Optional[str] | NotGiven = NOT_GIVEN,
        max_output_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
        previous_response_id: Optional[str] | NotGiven = NOT_GIVEN,
        reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        text: ResponseTextConfigParam | NotGiven = NOT_GIVEN,
        tool_choice: response_create_params.ToolChoice | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        truncation: Optional[Literal["auto", "disabled"]] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedResponse[TextFormatT]:
        if is_given(text_format):
            if not text:
                text = {}

            if "format" in text:
                raise TypeError("Cannot mix and match text.format with text_format")

            text["format"] = _type_to_text_format_param(text_format)

        tools = _make_tools(tools)

        def parser(raw_response: Response) -> ParsedResponse[TextFormatT]:
            return parse_response(
                input_tools=tools,
                text_format=text_format,
                response=raw_response,
            )

        return self._post(
            "/responses",
            body=maybe_transform(
                {
                    "input": input,
                    "model": model,
                    "include": include,
                    "instructions": instructions,
                    "max_output_tokens": max_output_tokens,
                    "metadata": metadata,
                    "parallel_tool_calls": parallel_tool_calls,
                    "previous_response_id": previous_response_id,
                    "reasoning": reasoning,
                    "store": store,
                    "stream": stream,
                    "temperature": temperature,
                    "text": text,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_p": top_p,
                    "truncation": truncation,
                    "user": user,
                },
                response_create_params.ResponseCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # the `parser` above turns the plain `Response` into a `ParsedResponse`
            cast_to=cast(Type[ParsedResponse[TextFormatT]], Response),
        )

    @overload
    def retrieve(
        self,
        response_id: str,
        *,
        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
        starting_after: int | NotGiven = NOT_GIVEN,
        stream: Literal[False] | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response:
        """
        Retrieves a model response with the given ID.

        Args:
          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          starting_after: The sequence number of the event after which to start streaming.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
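
          Example (an illustrative sketch; the response ID is a placeholder):

              from openai import OpenAI

              client = OpenAI()
              response = client.responses.retrieve("resp_123")
              print(response.status)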
        """

    @overload
    def retrieve(
        self,
        response_id: str,
        *,
        stream: Literal[True],
        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
        starting_after: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[ResponseStreamEvent]:
        """
        Retrieves a model response with the given ID.

        Args:
          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/responses-streaming)
              for more information.

          include: Additional fields to include in the response. See the `include` parameter for
              Response creation above for more information.

          starting_after: The sequence number of the event after which to start streaming.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
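
          Example (an illustrative sketch; the response ID and sequence number
          are placeholders):

              from openai import OpenAI

              client = OpenAI()
              events = client.responses.retrieve(
                  "resp_123",
                  stream=True,
                  starting_after=10,
              )
              for event in events:
                  print(event.type)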
        """

    @overload
    def retrieve(
        self,
        response_id: str,
        *,
        stream: bool,
        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
        starting_after: int | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response | Stream[ResponseStreamEvent]: ...

    def retrieve(
        self,
        response_id: str,
        *,
        include: List[ResponseIncludable] | NotGiven = NOT_GIVEN,
        starting_after: int | NotGiven = NOT_GIVEN,
        stream: Literal[False] | Literal[True] | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response | Stream[ResponseStreamEvent]:
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return self._get(
            f"/responses/{response_id}",
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "include": include,
                        "starting_after": starting_after,
                        "stream": stream,
                    },
                    response_retrieve_params.ResponseRetrieveParams,
                ),
            ),
            cast_to=Response,
            stream=stream or False,
            stream_cls=Stream[ResponseStreamEvent],
        )

    def delete(
        self,
        response_id: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> None:
        """
        Deletes a model response with the given ID.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        extra_headers = {"Accept": "*/*", **(extra_headers or {})}
        return self._delete(
            f"/responses/{response_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=NoneType,
        )

    def cancel(
        self,
        response_id: str,
        *,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Response:
        """Cancels a model response with the given ID.

        Only responses created with the
        `background` parameter set to `true` can be cancelled.
        [Learn more](https://platform.openai.com/docs/guides/background).

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
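
          Example (an illustrative sketch; assumes a response previously created
          with `background=True`; the response ID is a placeholder):

              from openai import OpenAI

              client = OpenAI()
              response = client.responses.cancel("resp_123")
              print(response.status)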
        """
        if not response_id:
            raise ValueError(f"Expected a non-empty value for `response_id` but received {response_id!r}")
        return self._post(
            f"/responses/{response_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Response,
        )
zResponses.cancel)r@   r   )r@   rJ   )r@   rO   8rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rb   ry   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r/   )8rb   r   rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r   )8rb   r   rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r   )8rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rb   r   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r   )r   r   r   r   r   r   rf   r   rk   r   rl   r   rm   r   rn   r   r@   r   )0rV   r   r[   r   rT   ro   r   r   rf   r   rU   rp   rW   rr   rX   rs   rZ   rt   r\   ro   r]   rr   r_   rw   ra   ro   rc   rz   rd   r{   re   r|   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r   )4r   r   rV   rq   r[   r   rT   ro   r   r   rf   r   rU   rp   rW   rr   rX   rs   rZ   rt   r\   ro   r]   rr   r_   rw   ra   ro   rc   rz   rd   r{   re   r|   rh   rz   ri   r~   rj   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r   0rV   r   r[   r   r   r   rf   r   rU   rp   rW   rr   rX   rs   rZ   rt   r\   ro   r]   rr   r_   rw   ra   ro   rb   r   rc   rz   rd   r{   re   r|   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r   r   r   rU   r   r   r   rb   r   rk   r   rl   r   rm   r   rn   r   r@   r/   )r   r   rb   r   rU   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r   )r   r   rb   r   rU   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r   )r   r   rU   r   r   r   rb   r   rk   r   rl   r   rm   r   rn   r   r@   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r/   __name__
__module____qualname__r   rH   rM   rQ   r   r   r   rb   r   r   r   r   rF   rF   rF   rG   r=   3   s   	 Q Q TI!T((,+(c                   @  r?   )r>   r@   r   c                 C  rA   rB   )r   rC   rD   rF   rF   rG   rH     rI   zAsyncResponses.input_itemsAsyncResponsesWithRawResponsec                 C  rK   rL   )r  rD   rF   rF   rG   rM     rN   z AsyncResponses.with_raw_response#AsyncResponsesWithStreamingResponsec                 C  rK   rP   )r  rD   rF   rF   rG   rQ   )  rR   z&AsyncResponses.with_streaming_responseNrS   rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rZ   rt   r[   ru   r\   r]   r^   rv   r_   rw   r`   rx   ra   rb   ry   rc   rz   rd   r{   re   r|   rf   r}   rg   rh   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r/   c                     dS r   rF   r   rF   rF   rG   r   2      PzAsyncResponses.creater   r    AsyncStream[ResponseStreamEvent]c                  r	  r   rF   r   rF   rF   rG   r     r
  r   +Response | AsyncStream[ResponseStreamEvent]c                  r	  r   rF   r   rF   rF   rG   r     r
  r   c                  s   | j dti d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|||||||d|rGtjntjI d H t||||dt|pXdtt dI d H S r   )	r   r   r)   r   r   r(   r/   r%   r;   r   rF   rF   rG   r     sr   !	
r   r   r   r   r   r   r   r   'AsyncResponseStreamManager[TextFormatT]c          	      C  r   rB   rF   r   rF   rF   rG   rb     r   zAsyncResponses.streamr   r   r   c                C  r   rB   rF   r   rF   rF   rG   rb     r   r   r   c             	   C  s  i d|d|d|d|d|	d|
d|d|d	|d
|d|d|d|d|d|d|d|}dd |  D }t|sFt|rUt|dkrUtdd| t|}t|dkrt|trhtdt|sptdt|r|sxi }d|v rtdt	||d< | j
d%i d|d|ddd|d|d|d|	d|
d|d|d
|d|d|d|d	|d|d|d|d|d|d |d!|}t|||d d"S t|trtd#| j|d|pg ||||d$}t|||t|r|d"S d d"S )&NrV   r[   rU   rW   rX   rZ   r\   r]   r_   ra   rc   rd   re   rh   ri   rj   rT   c                 S  r   rF   r   r   rF   rF   rG   r   N  r   z)AsyncResponses.stream.<locals>.<listcomp>r   r   r   r   r   r   r   rb   Trf   rk   rl   rm   rn   r   z@response_id must be provided when streaming an existing response)rb   rU   rk   rl   rm   rn   rF   )r   r   r   r   r   r   
isinstancer   r   r   r   r6   r   r   rF   rF   rG   rb     s   	


	

	
r   r   c             
     s   t  r|s	i }d|v rtdt |d< td fdd}| jd	ti d
|d|d|d|d|d|d|	d|
d|d|d|d|d|d|dd|d|d|itjt|||||dt	t
tt  tdI d H S )Nr   r   r   r/   r@   r   c                   r   r   r   r   r   rF   rG   r     r   z$AsyncResponses.parse.<locals>.parserr   rV   r[   rU   rW   rX   rZ   r\   r]   r_   ra   rb   rc   rd   re   rf   rh   ri   rj   r   r   r   r   r   rF   r   rG   r     sx   	
	zAsyncResponses.parser   r   r   c          	           d S rB   rF   r   rF   rF   rG   r        zAsyncResponses.retriever   c          	        r  rB   rF   r   rF   rF   rG   r     r  c          	        r  rB   rF   r   rF   rF   rG   r   	  r  Fr   c          	        r	  r   rF   r   rF   rF   rG   r   	     'c          	        r	  r   rF   r   rF   rF   rG   r   G	  r  c          	        r	  r   rF   r   rF   rF   rG   r   p	  r  r   c          	        s`   |s
t d|| jd| t||||t|||dtjI d H dt|p'dtt dI d H S r   )	r   r   r(   r   r*   r   r/   r%   r;   r   rF   rF   rG   r   	  s,   r   r   c                  sL   |s
t d|ddi|pi }| jd| t||||dtdI dH S )r   r   r   r   r   r   r   Nr   r   rF   rF   rG   r   	  s   zAsyncResponses.deletec                  s>   |s
t d|| jd| dt||||dtdI dH S )r   r   r   r   r   r   Nr   r   rF   rF   rG   r   	  s   
zAsyncResponses.cancel)r@   r   )r@   r  )r@   r  r   )8rb   r   rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r  )8rb   r   rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r  )8rT   ro   rU   rp   rV   rq   rW   rr   rX   rs   rY   rs   rZ   rt   r[   ru   r\   ro   r]   rr   r^   rv   r_   rw   r`   rx   ra   ro   rb   r   rc   rz   rd   r{   re   r|   rf   r}   rg   rs   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r  )r   r   r   r   r   r   rf   r   rk   r   rl   r   rm   r   rn   r   r@   r  )0rV   r   r[   r   rT   ro   r   r   rf   r   rU   rp   rW   rr   rX   rs   rZ   rt   r\   ro   r]   rr   r_   rw   ra   ro   rc   rz   rd   r{   re   r|   rh   rz   ri   r~   rj   r   rk   r   rl   r   rm   r   rn   r   r@   r  )4r   r   rV   rq   r[   r   rT   ro   r   r   rf   r   rU   rp   rW   rr   rX   rs   rZ   rt   r\   ro   r]   rr   r_   rw   ra   ro   rc   rz   rd   r{   re   r|   rh   rz   ri   r~   rj   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r  r   r   )r   r   rb   r   rU   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r  )r   r   rb   r   rU   r   r   r   rk   r   rl   r   rm   r   rn   r   r@   r  )r   r   rU   r   r   r   rb   r   rk   r   rl   r   rm   r   rn   r   r@   r  r  r  r  rF   rF   rF   rG   r>     s    	 Q Q TI! T((,+(c                   @  $   e Zd ZdddZeddd	Zd
S )rJ   	responsesr=   r@   r   c                 C  P   || _ t|j| _t|j| _t|j| _t|j| _t|j| _d S rB   )
_responsesr   to_raw_response_wrapperr   r   r   r   r   rE   r  rF   rF   rG   __init__
      
z!ResponsesWithRawResponse.__init__r    c                 C     t | jjS rB   )r    r  rH   rD   rF   rF   rG   rH   
     z$ResponsesWithRawResponse.input_itemsNr  r=   r@   r   )r@   r    r  r  r  r  r   rH   rF   rF   rF   rG   rJ   
      
rJ   c                   @  r  )r  r  r>   r@   r   c                 C  r  rB   )r  r   async_to_raw_response_wrapperr   r   r   r   r   r  rF   rF   rG   r  
  r  z&AsyncResponsesWithRawResponse.__init__r!   c                 C  r  rB   )r!   r  rH   rD   rF   rF   rG   rH   2
  r  z)AsyncResponsesWithRawResponse.input_itemsNr  r>   r@   r   )r@   r!   r  rF   rF   rF   rG   r  
  r  r  c                   @  r  )rO   r  r=   r@   r   c                 C  :   || _ t|j| _t|j| _t|j| _t|j| _d S rB   )r  r   r   r   r   r   r  rF   rF   rG   r  8
     
z'ResponsesWithStreamingResponse.__init__r"   c                 C  r  rB   )r"   r  rH   rD   rF   rF   rG   rH   H
  r  z*ResponsesWithStreamingResponse.input_itemsNr  )r@   r"   r  rF   rF   rF   rG   rO   7
      
rO   c                   @  r  )r  r  r>   r@   r   c                 C  r!  rB   )r  r   r   r   r   r   r  rF   rF   rG   r  N
  r"  z,AsyncResponsesWithStreamingResponse.__init__r#   c                 C  r  rB   )r#   r  rH   rD   rF   rF   rG   rH   ^
  r  z/AsyncResponsesWithStreamingResponse.input_itemsNr   )r@   r#   r  rF   rF   rF   rG   r  M
  r#  r  rf   r   r@   List[ToolParam] | NotGivenc              	   C  s   t | stS g }| D ]M}|d dkr|| q
d|vr"|| q
tt|d }t|ts2tdd|v s8J td|d |	d|d |	dpJdd	|j
}||  q
|S )
NtypefunctionzcExpected Chat Completions function tool shape to be created using `openai.pydantic_function_tool()`
parametersnamedescriptionstrictF)r%  r(  r)  r'  r*  )r   r   appendr	   r   r  r&   	Exceptionr'   getr[   )rf   converted_toolstoolr&  new_toolrF   rF   rG   r   c
  s6   


r   )rf   r   r@   r$  )`
__future__r   typingr   r   r   r   r   r   r	   	functoolsr
   typing_extensionsr   r   httpx r   _typesr   r   r   r   r   r   _utilsr   r   r   _compatr   	_resourcer   r   	_responser   r   rH   r   r   r    r!   r"   r#   
_streamingr$   r%   
lib._toolsr&   r'   _base_clientr(   types.responsesr)   r*   lib._parsing._responsesr+   r,   r-   r   types.shared.chat_modelr.   types.responses.responser/   types.responses.tool_paramr0   r1   types.shared_params.metadatar2   types.shared_params.reasoningr3   types.responses.parsed_responser4   "lib.streaming.responses._responsesr5   r6   #types.responses.response_includabler7   #types.shared_params.responses_modelr8   $types.responses.response_input_paramr9   %types.responses.response_prompt_paramr:   %types.responses.response_stream_eventr;   *types.responses.response_text_config_paramr<   __all__r=   r>   rJ   r  rO   r  r   rF   rF   rF   rG   <module>   sp   $           p         t