from __future__ import annotations

from typing import List
from typing_extensions import Literal, overload

import httpx

from .. import _legacy_response
from ..types import completion_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import is_given, required_args, maybe_transform, strip_not_given, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._constants import DEFAULT_TIMEOUT
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.completion import Completion
from ..types.model_param import ModelParam
from ..types.metadata_param import MetadataParam
from ..types.anthropic_beta_param import AnthropicBetaParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: Literal[True],
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[Completion]:
        """[Legacy] Create a Text Completion.

        The Text Completions API is a legacy API.

        We recommend using the
        [Messages API](https://docs.claude.com/en/api/messages) going forward.

        Future models and features will not be compatible with Text Completions. See our
        [migration guide](https://docs.claude.com/en/api/migrating-from-text-completions-to-messages)
        for guidance in migrating from Text Completions to Messages.

        Args:
          max_tokens_to_sample: The maximum number of tokens to generate before stopping.

              Note that our models may stop _before_ reaching this maximum. This parameter
              only specifies the absolute maximum number of tokens to generate.

          model: The model that will complete your prompt.\n\nSee
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

          prompt: The prompt that you want Claude to complete.

              For proper response generation you will need to format your prompt using
              alternating `\n\nHuman:` and `\n\nAssistant:` conversational turns. For example:

              ```
              "\n\nHuman: {userQuestion}\n\nAssistant:"
              ```

              See [prompt validation](https://docs.claude.com/en/api/prompt-validation) and
              our guide to [prompt design](https://docs.claude.com/en/docs/intro-to-prompting)
              for more details.

          stream: Whether to incrementally stream the response using server-sent events.

              See [streaming](https://docs.claude.com/en/api/streaming) for details.

          metadata: An object describing metadata about the request.

          stop_sequences: Sequences that will cause the model to stop generating.

              Our models stop on `"\n\nHuman:"`, and may include additional built-in stop
              sequences in the future. By providing the stop_sequences parameter, you may
              include additional strings that will cause the model to stop generating.

          temperature: Amount of randomness injected into the response.

              Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
              for analytical / multiple choice, and closer to `1.0` for creative and
              generative tasks.

              Note that even with `temperature` of `0.0`, the results will not be fully
              deterministic.

          top_k: Only sample from the top K options for each subsequent token.

              Used to remove "long tail" low probability responses.
              [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          top_p: Use nucleus sampling.

              In nucleus sampling, we compute the cumulative distribution over all the options
              for each subsequent token in decreasing probability order and cut it off once it
              reaches a particular probability specified by `top_p`. You should either alter
              `temperature` or `top_p`, but not both.

              Recommended for advanced use cases only. You usually only need to use
              `temperature`.

          betas: Optional header to specify the beta version(s) you want to use.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        stream: bool,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        ...

    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Literal[True] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            timeout = 600
        extra_headers = {
            **strip_not_given(
                {"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}
            ),
            **(extra_headers or {}),
        }
        return self._post(
            "/v1/complete",
            body=maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/anthropics/anthropic-sdk-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    # `create` accepts the same arguments and `@overload` variants as
    # `Completions.create` above; the async implementation below differs only in
    # awaiting the request and streaming through `AsyncStream[Completion]`.
    @required_args(["max_tokens_to_sample", "model", "prompt"], ["max_tokens_to_sample", "model", "prompt", "stream"])
    async def create(
        self,
        *,
        max_tokens_to_sample: int,
        model: ModelParam,
        prompt: str,
        metadata: MetadataParam | Omit = omit,
        stop_sequences: SequenceNotStr[str] | Omit = omit,
        stream: Literal[False] | Literal[True] | Omit = omit,
        temperature: float | Omit = omit,
        top_k: int | Omit = omit,
        top_p: float | Omit = omit,
        betas: List[AnthropicBetaParam] | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | AsyncStream[Completion]:
        if not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
            timeout = 600
        extra_headers = {
            **strip_not_given(
                {"anthropic-beta": ",".join(str(e) for e in betas) if is_given(betas) else not_given}
            ),
            **(extra_headers or {}),
        }
        return await self._post(
            "/v1/complete",
            body=await async_maybe_transform(
                {
                    "max_tokens_to_sample": max_tokens_to_sample,
                    "model": model,
                    "prompt": prompt,
                    "metadata": metadata,
                    "stop_sequences": stop_sequences,
                    "stream": stream,
                    "temperature": temperature,
                    "top_k": top_k,
                    "top_p": top_p,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )

class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )

class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
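# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the SDK): a minimal example of the
# synchronous `Completions.create` call documented above. It assumes the
# `anthropic` package is installed and ANTHROPIC_API_KEY is set in the
# environment; "claude-2.1" is only an example of a model that supports the
# legacy Text Completions API.
if __name__ == "__main__":
    from anthropic import Anthropic

    client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

    # Non-streaming call: returns a single `Completion` object.
    completion = client.completions.create(
        model="claude-2.1",
        max_tokens_to_sample=256,
        prompt="\n\nHuman: Name three prime numbers.\n\nAssistant:",
    )
    print(completion.completion)

    # Streaming call: `stream=True` returns a `Stream[Completion]` that yields
    # incremental `Completion` events as they arrive over server-sent events.
    stream = client.completions.create(
        model="claude-2.1",
        max_tokens_to_sample=256,
        prompt="\n\nHuman: Write a haiku about the sea.\n\nAssistant:",
        stream=True,
    )
    for event in stream:
        print(event.completion, end="", flush=True)
    print()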
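# A matching sketch for the asynchronous resource (again illustrative, with the
# same ANTHROPIC_API_KEY assumption): `AsyncCompletions.create` calls the same
# /v1/complete endpoint but is awaited and streams via `AsyncStream[Completion]`.
async def _async_completions_demo() -> None:
    from anthropic import AsyncAnthropic

    client = AsyncAnthropic()

    completion = await client.completions.create(
        model="claude-2.1",  # example model name for the legacy API
        max_tokens_to_sample=128,
        prompt="\n\nHuman: Say hello in French.\n\nAssistant:",
    )
    print(completion.completion)

    # Async streaming: await the call, then iterate with `async for`.
    stream = await client.completions.create(
        model="claude-2.1",
        max_tokens_to_sample=128,
        prompt="\n\nHuman: Count to five.\n\nAssistant:",
        stream=True,
    )
    async for event in stream:
        print(event.completion, end="", flush=True)
    print()


if __name__ == "__main__":
    import asyncio

    asyncio.run(_async_completions_demo())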