from __future__ import annotations

from typing import Dict, List, Type, Union, Iterable, Optional, cast
from functools import partial

import httpx
from typing_extensions import Literal

from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import maybe_transform, async_maybe_transform
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ...._streaming import Stream
from ....types.chat import completion_create_params
from ...._base_client import make_request_options
from ....lib._parsing import (
    ResponseFormatT,
    validate_input_tools as _validate_input_tools,
    parse_chat_completion as _parse_chat_completion,
    type_to_response_format_param as _type_to_response_format,
)
from ....types.chat_model import ChatModel
from ....lib.streaming.chat import ChatCompletionStreamManager, AsyncChatCompletionStreamManager
from ....types.shared_params import Metadata, ReasoningEffort
from ....types.chat.chat_completion import ChatCompletion
from ....types.chat.chat_completion_chunk import ChatCompletionChunk
from ....types.chat.parsed_chat_completion import ParsedChatCompletion
from ....types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ....types.chat.chat_completion_audio_param import ChatCompletionAudioParam
from ....types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ....types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
from ....types.chat.chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
from ....types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
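
        Example (illustrative sketch; assumes `client` is an `OpenAI` instance):

        ```py
        # illustrative usage only; arguments mirror `.parse()`
        response = client.beta.chat.completions.with_raw_response.parse(
            model="gpt-4o-2024-08-06",
            messages=[{"role": "user", "content": "solve 8x + 31 = 2"}],
        )
        print(response.headers)  # raw HTTP response headers
        completion = response.parse()  # get the parsed object back
        ```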
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
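
        Example (illustrative sketch; assumes `client` is an `OpenAI` instance):

        ```py
        # illustrative usage only; the body is not read until accessed
        with client.beta.chat.completions.with_streaming_response.parse(
            model="gpt-4o-2024-08-06",
            messages=[{"role": "user", "content": "solve 8x + 31 = 2"}],
        ) as response:
            print(response.headers)
            for line in response.iter_lines():
                print(line)
        ```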
        """
        return CompletionsWithStreamingResponse(self)

    def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import OpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = OpenAI()
        completion = client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "reasoning_effort": reasoning_effort,
                    "response_format": _type_to_response_format(response_format),
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
            # in the `parser` function above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, a `ChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an iterator. The full list of events that are yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
        the context manager.
        """
        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request: partial[Stream[ChatCompletionChunk]] = partial(
            self._client.chat.completions.create,
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            reasoning_effort=reasoning_effort,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return ChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    async def parse(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ParsedChatCompletion[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
        & returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.

        You can pass a pydantic model to this method and it will automatically convert the model
        into a JSON schema, send it to the API and parse the response content back into the given model.

        This method will also automatically parse `function` tool calls if:
        - You use the `openai.pydantic_function_tool()` helper method
        - You mark your tool schema with `"strict": True`

        Example usage:
        ```py
        from pydantic import BaseModel
        from openai import AsyncOpenAI


        class Step(BaseModel):
            explanation: str
            output: str


        class MathResponse(BaseModel):
            steps: List[Step]
            final_answer: str


        client = AsyncOpenAI()
        completion = await client.beta.chat.completions.parse(
            model="gpt-4o-2024-08-06",
            messages=[
                {"role": "system", "content": "You are a helpful math tutor."},
                {"role": "user", "content": "solve 8x + 31 = 2"},
            ],
            response_format=MathResponse,
        )

        message = completion.choices[0].message
        if message.parsed:
            print(message.parsed.steps)
            print("answer: ", message.parsed.final_answer)
        ```
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.parse",
            **(extra_headers or {}),
        }

        def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
            return _parse_chat_completion(
                response_format=response_format,
                chat_completion=raw_completion,
                input_tools=tools,
            )

        return await self._post(
            "/chat/completions",
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "reasoning_effort": reasoning_effort,
                    "response_format": _type_to_response_format(response_format),
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": False,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "web_search_options": web_search_options,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                post_parser=parser,
            ),
            # we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
            # in the `parser` function above
            cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
            stream=False,
        )

    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_completion_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
        modalities: Optional[List[Literal["text", "audio"]]] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        parallel_tool_calls: bool | NotGiven = NOT_GIVEN,
        prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        store: Optional[bool] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        web_search_options: completion_create_params.WebSearchOptions | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        async with client.beta.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            async for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)`, is an async iterator. The full list of events that are yielded by the iterator is outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed; however, the `stream` instance is still available outside
        the context manager.
        """
        _validate_input_tools(tools)

        extra_headers = {
            "X-Stainless-Helper-Method": "beta.chat.completions.stream",
            **(extra_headers or {}),
        }

        api_request = self._client.chat.completions.create(
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            reasoning_effort=reasoning_effort,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        return AsyncChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = _legacy_response.to_raw_response_wrapper(
            completions.parse,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = _legacy_response.async_to_raw_response_wrapper(
            completions.parse,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.parse = to_streamed_response_wrapper(
            completions.parse,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.parse = async_to_streamed_response_wrapper(
            completions.parse,
        )