"""Anthropic chat models."""

from __future__ import annotations

import json
import re
import warnings
from collections.abc import AsyncIterator, Callable, Iterator, Mapping, Sequence
from functools import cached_property
from operator import itemgetter
from typing import Any, Final, Literal, cast

import anthropic
from langchain_core.callbacks import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import (
    LanguageModelInput,
    ModelProfile,
    ModelProfileRegistry,
)
from langchain_core.language_models.chat_models import BaseChatModel, LangSmithParams
from langchain_core.messages import (
    AIMessage,
    AIMessageChunk,
    BaseMessage,
    HumanMessage,
    SystemMessage,
    ToolCall,
    ToolMessage,
    is_data_content_block,
)
from langchain_core.messages import content as types
from langchain_core.messages.ai import InputTokenDetails, UsageMetadata
from langchain_core.messages.tool import tool_call_chunk as create_tool_call_chunk
from langchain_core.output_parsers import (
    JsonOutputKeyToolsParser,
    JsonOutputParser,
    PydanticOutputParser,
    PydanticToolsParser,
)
from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain_core.runnables import Runnable, RunnableMap, RunnablePassthrough
from langchain_core.tools import BaseTool
from langchain_core.utils import from_env, get_pydantic_field_names, secret_from_env
from langchain_core.utils.function_calling import (
    convert_to_json_schema,
    convert_to_openai_tool,
)
from langchain_core.utils.pydantic import is_basemodel_subclass
from langchain_core.utils.utils import _build_model_kwargs
from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator
from typing_extensions import NotRequired, Self, TypedDict

from langchain_anthropic._client_utils import (
    _get_default_async_httpx_client,
    _get_default_httpx_client,
)
from langchain_anthropic._compat import _convert_from_v1_to_anthropic
from langchain_anthropic.data import _PROFILES
from langchain_anthropic.output_parsers import extract_tool_calls

_message_type_lookups = {"human": "user", "ai": "assistant"}

_MODEL_PROFILES = cast(ModelProfileRegistry, _PROFILES)


def _get_default_model_profile(model_name: str) -> ModelProfile:
    default = _MODEL_PROFILES.get(model_name) or {}
    return default.copy()


# Default max output tokens by model family (a dated suffix on a model id is
# stripped before lookup). Values follow the published per-model limits.
_MODEL_DEFAULT_MAX_OUTPUT_TOKENS: Final[dict[str, int]] = {
    "claude-3-haiku": 4096,
    "claude-3-5-haiku": 8192,
    "claude-3-7-sonnet": 64_000,
    "claude-sonnet-4": 64_000,
    "claude-opus-4": 32_000,
    "claude-opus-4-1": 32_000,
    "claude-sonnet-4-5": 64_000,
    "claude-haiku-4-5": 64_000,
}

_FALLBACK_MAX_OUTPUT_TOKENS: Final[int] = 4096


def _default_max_tokens_for(model: str | None) -> int:
    """Return the default max output tokens for an Anthropic model (with fallback).

    See the Claude docs for [Max Tokens limits](https://docs.claude.com/en/docs/about-claude/models/overview#model-comparison-table).
    """
    if not model:
        return _FALLBACK_MAX_OUTPUT_TOKENS
    parts = model.split("-")
    family = "-".join(parts[:-1]) if len(parts) > 1 else model
    return _MODEL_DEFAULT_MAX_OUTPUT_TOKENS.get(family, _FALLBACK_MAX_OUTPUT_TOKENS)
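
# Illustrative usage (assumes the reconstructed defaults table above): a dated
# model id resolves to its family entry, and unknown models use the fallback.
#
#   >>> _default_max_tokens_for("claude-sonnet-4-5-20250929")
#   64000
#   >>> _default_max_tokens_for("some-unknown-model")
#   4096
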
ed< dS )AnthropicToolzAnthropic tool definition.rI   namedict[str, Any]input_schemazNotRequired[str]descriptionzNotRequired[bool]strictzNotRequired[dict[str, str]]cache_controlN)__name__
__module____qualname____doc____annotations__rP   rP   rP   rQ   ra   n   s   
 ra   toolr
   boolc                   sH   t | tsdS | d  rt  tsdS g d}t fdd|D S )zCheck if a tool is a built-in Anthropic tool.

    [Claude docs for built-in tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview)
    Ftype)text_editor_	computer_bash_web_search_
web_fetch_code_execution_memory_c                 3  s    | ]}  |V  qd S rK   )
startswith).0prefix	tool_typerP   rQ   	<genexpr>       z#_is_builtin_tool.<locals>.<genexpr>)
isinstancedictrM   rI   any)rm   _builtin_tool_prefixesrP   rz   rQ   _is_builtin_tool|   s   

	r   urlr   c                 C  sV   d}t || }|rd|d|ddS d}t || }|r%d| dS d	}t|)
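
# Example (illustrative): a built-in tool is identified purely by its "type"
# prefix, so a server-side web search tool passes while an OpenAI-style
# function tool does not.
#
#   >>> _is_builtin_tool({"type": "web_search_20250305", "name": "web_search"})
#   True
#   >>> _is_builtin_tool({"type": "function", "function": {"name": "f"}})
#   False
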
a  Convert part["image_url"]["url"] strings (OpenAI format) to Anthropic format.

    {
        "type": "base64",
        "media_type": "image/jpeg",
        "data": "/9j/4AAQSkZJRg...",
    }

    Or

    {
        "type": "url",
        "url": "https://example.com/image.jpg",
    }
    """
    base64_regex = r"^data:(?P<media_type>image/.+);base64,(?P<data>.+)$"
    base64_match = re.match(base64_regex, url)
    if base64_match:
        return {
            "type": "base64",
            "media_type": base64_match.group("media_type"),
            "data": base64_match.group("data"),
        }
    url_regex = r"^https?://.*$"
    url_match = re.match(url_regex, url)
    if url_match:
        return {"type": "url", "url": url}
    msg = (
        "Malformed url parameter. Must be either an image URL "
        "(https://example.com/image.jpg) or base64 encoded string "
        "(data:image/png;base64,'/9j/4AAQSk'...)"
    )
    raise ValueError(msg)
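
# Example (illustrative): the two accepted url shapes and their conversions.
#
#   >>> _format_image("data:image/png;base64,iVBORw0KGgo=")
#   {'type': 'base64', 'media_type': 'image/png', 'data': 'iVBORw0KGgo='}
#   >>> _format_image("https://example.com/image.jpg")
#   {'type': 'url', 'url': 'https://example.com/image.jpg'}
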
def _merge_messages(
    messages: Sequence[BaseMessage],
) -> list[SystemMessage | AIMessage | HumanMessage]:
    """Merge runs of human/tool messages into single human messages with content blocks."""
    merged: list = []
    for curr in messages:
        if isinstance(curr, ToolMessage):
            if (
                isinstance(curr.content, list)
                and curr.content
                and all(
                    isinstance(block, dict) and block.get("type") == "tool_result"
                    for block in curr.content
                )
            ):
                curr = HumanMessage(curr.content)
            else:
                curr = HumanMessage(
                    [
                        {
                            "type": "tool_result",
                            "content": curr.content,
                            "tool_use_id": curr.tool_call_id,
                            "is_error": curr.status == "error",
                        }
                    ]
                )
        last = merged[-1] if merged else None
        if any(
            all(isinstance(m, c) for m in (curr, last))
            for c in (SystemMessage, HumanMessage)
        ):
            if isinstance(cast("BaseMessage", last).content, str):
                new_content: list = [
                    {"type": "text", "text": cast("BaseMessage", last).content}
                ]
            else:
                new_content = list(cast("BaseMessage", last).content)
            if isinstance(curr.content, str):
                new_content.append({"type": "text", "text": curr.content})
            else:
                new_content.extend(curr.content)
            merged[-1] = curr.model_copy(update={"content": new_content})
        else:
            merged.append(curr)
    return merged
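
# Example (illustrative): a ToolMessage becomes a human turn carrying a
# tool_result block, and adjacent human turns collapse into one message.
#
#   >>> from langchain_core.messages import ToolMessage
#   >>> _merge_messages([ToolMessage(content="42", tool_call_id="toolu_1")])
#   [HumanMessage(content=[{'type': 'tool_result', 'content': '42',
#                           'tool_use_id': 'toolu_1', 'is_error': False}])]
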
def _format_data_content_block(block: dict) -> dict:
    """Format standard data content block to format expected by Anthropic."""
    formatted_block: dict[str, Any]
    if block["type"] == "image":
        if "url" in block:
            if block["url"].startswith("data:"):
                formatted_block = {
                    "type": "image",
                    "source": _format_image(block["url"]),
                }
            else:
                formatted_block = {
                    "type": "image",
                    "source": {"type": "url", "url": block["url"]},
                }
        elif "base64" in block or block.get("source_type") == "base64":
            formatted_block = {
                "type": "image",
                "source": {
                    "type": "base64",
                    "media_type": block.get("mime_type") or block.get("media_type", ""),
                    "data": block.get("base64") or block.get("data", ""),
                },
            }
        elif "file_id" in block or "id" in block:
            formatted_block = {
                "type": "image",
                "source": {
                    "type": "file",
                    "file_id": block.get("file_id") or block["id"],
                },
            }
        else:
            msg = (
                "Anthropic only supports 'url', 'base64', or 'id' keys for image "
                "content blocks."
            )
            raise ValueError(msg)
    elif block["type"] == "file":
        if "url" in block:
            formatted_block = {
                "type": "document",
                "source": {"type": "url", "url": block["url"]},
            }
        elif "base64" in block or block.get("source_type") == "base64":
            formatted_block = {
                "type": "document",
                "source": {
                    "type": "base64",
                    "media_type": block.get("mime_type") or "application/pdf",
                    "data": block.get("base64") or block.get("data", ""),
                },
            }
        elif "text" in block or block.get("source_type") == "text":
            formatted_block = {
                "type": "document",
                "source": {
                    "type": "text",
                    "media_type": "text/plain",
                    "data": block.get("text") or block.get("data", ""),
                },
            }
        elif "file_id" in block or "id" in block:
            formatted_block = {
                "type": "document",
                "source": {
                    "type": "file",
                    "file_id": block.get("file_id") or block["id"],
                },
            }
        else:
            msg = (
                "Anthropic only supports 'url', 'base64', or 'id' keys for file "
                "content blocks."
            )
            raise ValueError(msg)
    elif block["type"] == "text-plain":
        formatted_block = {
            "type": "document",
            "source": {
                "type": "text",
                "media_type": block.get("mime_type") or "text/plain",
                "data": block.get("text") or block.get("data", ""),
            },
        }
    else:
        msg = f"Block of type {block['type']} is not supported."
        raise ValueError(msg)

    if formatted_block:
        for key in ("cache_control", "citations", "title", "context"):
            if key in block:
                formatted_block[key] = block[key]
            elif (extras := block.get("extras")) and key in extras:
                formatted_block[key] = extras[key]
            elif (metadata := block.get("metadata")) and key in metadata:
                formatted_block[key] = metadata[key]
    return formatted_block


def _format_messages(
    messages: Sequence[BaseMessage],
) -> tuple[str | list[dict] | None, list[dict]]:
    """Format messages for Anthropic's API."""
    system: str | list[dict] | None = None
    formatted_messages: list[dict] = []
    merged_messages = _merge_messages(messages)
    for i, message in enumerate(merged_messages):
        if message.type == "system":
            if system is not None:
                msg = "Received multiple non-consecutive system messages."
                raise ValueError(msg)
            if isinstance(message.content, list):
                system = [
                    block if isinstance(block, dict) else {"type": "text", "text": block}
                    for block in message.content
                ]
            else:
                system = message.content
            continue

        role = _message_type_lookups[message.type]
        content: str | list

        if not isinstance(message.content, str):
            if not isinstance(message.content, list):
                msg = "Anthropic message content must be str or list of dicts"
                raise ValueError(msg)
            content = []
            for block in message.content:
                if isinstance(block, str):
                    content.append({"type": "text", "text": block})
                elif isinstance(block, dict):
                    if "type" not in block:
                        msg = "Dict content block must have a type key"
                        raise ValueError(msg)
                    if block["type"] == "image_url":
                        source = _format_image(block["image_url"]["url"])
                        content.append({"type": "image", "source": source})
                    elif is_data_content_block(block):
                        content.append(_format_data_content_block(block))
                    elif block["type"] == "tool_use":
                        # If a parsed tool_call with the same id exists on the
                        # message, it is preferred over the raw content block.
                        if isinstance(message, AIMessage) and block["id"] in [
                            tc["id"] for tc in message.tool_calls
                        ]:
                            overlapping = [
                                tc
                                for tc in message.tool_calls
                                if tc["id"] == block["id"]
                            ]
                            content.extend(
                                _lc_tool_calls_to_anthropic_tool_use_blocks(overlapping)
                            )
                        else:
                            if tool_input := block.get("input"):
                                input_ = tool_input
                            elif "partial_json" in block:
                                try:
                                    input_ = json.loads(block["partial_json"] or "{}")
                                except json.JSONDecodeError:
                                    input_ = {}
                            else:
                                input_ = {}
                            content.append(
                                {
                                    "type": "tool_use",
                                    "name": block["name"],
                                    "input": input_,
                                    "id": block["id"],
                                }
                            )
                    elif block["type"] in ("server_tool_use", "mcp_tool_use"):
                        formatted_block = {
                            k: v
                            for k, v in block.items()
                            if k
                            in ("type", "id", "input", "name", "server_name", "cache_control")
                        }
                        # Recover streamed tool input left as partial JSON
                        if formatted_block.get("input") == {} and "partial_json" in block:
                            try:
                                input_ = json.loads(block["partial_json"])
                                if input_:
                                    formatted_block["input"] = input_
                            except json.JSONDecodeError:
                                pass
                        content.append(formatted_block)
                    elif block["type"] == "text":
                        text = block.get("text", "")
                        # Only add non-blank text to the payload
                        if text.strip():
                            formatted_block = {
                                k: v
                                for k, v in block.items()
                                if k in ("type", "text", "cache_control", "citations")
                            }
                            if formatted_block.get("citations"):
                                cleaned_citations = []
                                for citation in formatted_block["citations"]:
                                    cleaned_citation = {
                                        k: v for k, v in citation.items() if v is not None
                                    }
                                    cleaned_citations.append(cleaned_citation)
                                formatted_block["citations"] = cleaned_citations
                            content.append(formatted_block)
                    elif block["type"] == "thinking":
                        content.append(
                            {
                                k: v
                                for k, v in block.items()
                                if k in ("type", "thinking", "cache_control", "signature")
                            }
                        )
                    elif block["type"] == "redacted_thinking":
                        content.append(
                            {
                                k: v
                                for k, v in block.items()
                                if k in ("type", "cache_control", "data")
                            }
                        )
                    elif block["type"] == "tool_result":
                        # Recursively format nested tool_result content
                        formatted_content = _format_messages(
                            [HumanMessage(block["content"])]
                        )[1][0]["content"]
                        content.append({**block, "content": formatted_content})
                    elif block["type"] in (
                        "code_execution_tool_result",
                        "bash_code_execution_tool_result",
                        "text_editor_code_execution_tool_result",
                        "mcp_tool_result",
                        "web_search_tool_result",
                        "web_fetch_tool_result",
                    ):
                        content.append(
                            {
                                k: v
                                for k, v in block.items()
                                if k
                                in (
                                    "type",
                                    "content",
                                    "tool_use_id",
                                    "is_error",
                                    "cache_control",
                                    "retrieved_at",
                                )
                            }
                        )
                    else:
                        content.append(block)
                else:
                    msg = f"Content blocks must be str or dict, instead was: {type(block)}"
                    raise ValueError(msg)
        else:
            content = message.content

        # Ensure every tool_call has a corresponding tool_use content block
        if isinstance(message, AIMessage) and message.tool_calls:
            content = content or []
            if isinstance(content, str):
                content = (
                    [{"type": "text", "text": content}] if content.strip() else []
                )
            tool_use_ids = [
                cast("dict", block)["id"]
                for block in content
                if cast("dict", block).get("type") == "tool_use"
            ]
            missing_tool_calls = [
                tc for tc in message.tool_calls if tc["id"] not in tool_use_ids
            ]
            content.extend(
                _lc_tool_calls_to_anthropic_tool_use_blocks(missing_tool_calls)
            )

        if not content and role == "user" and i < len(merged_messages) - 1:
            # Drop empty interior user turns rather than sending empty content
            continue
        formatted_messages.append({"role": role, "content": content})
    return system, formatted_messages


def _handle_anthropic_bad_request(e: anthropic.BadRequestError) -> None:
    """Handle Anthropic BadRequestError."""
    if "messages: at least one message is required" in e.message:
        msg = "Received only system message(s)."
        warnings.warn(msg, stacklevel=2)
    raise e


class ChatAnthropic(BaseChatModel):
    """Anthropic chat models.

    See [Anthropic's docs](https://docs.claude.com/en/docs/about-claude/models/overview)
    for a list of the latest models.

    Setup:
        Install `langchain-anthropic` and set environment variable `ANTHROPIC_API_KEY`.

        ```bash
        pip install -U langchain-anthropic
        export ANTHROPIC_API_KEY="your-api-key"
        ```

    Key init args — completion params:
        model:
            Name of Anthropic model to use. e.g. `'claude-sonnet-4-5-20250929'`.
        temperature:
            Sampling temperature. Ranges from `0.0` to `1.0`.
        max_tokens:
            Max number of tokens to generate.

    Key init args — client params:
        timeout:
            Timeout for requests.
        anthropic_proxy:
            Proxy to use for the Anthropic clients; used for every API call.
            If not passed in, will be read from the env var `ANTHROPIC_PROXY`.
        max_retries:
            Max number of retries if a request fails.
        api_key:
            Anthropic API key. If not passed in, will be read from the env var
            `ANTHROPIC_API_KEY`.
        base_url:
            Base URL for API requests. Only specify if using a proxy or service
            emulator.
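
    For example, requests can be routed through a proxy like this (a sketch; the
    proxy URL is a placeholder):

    ```python
    model = ChatAnthropic(
        model="claude-sonnet-4-5-20250929",
        anthropic_proxy="http://proxy.example.com:8080",  # placeholder proxy URL
    )
    ```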

    See full list of supported init args and their descriptions in the params section.

    Instantiate:
        ```python
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(
            model="claude-sonnet-4-5-20250929",
            temperature=0,
            max_tokens=1024,
            timeout=None,
            max_retries=2,
            # api_key="...",
            # base_url="...",
            # other params...
        )
        ```

    !!! note
        Any param which is not explicitly supported will be passed directly to the
        `anthropic.Anthropic.messages.create(...)` API every time the model is
        invoked. For example:

        ```python
        from langchain_anthropic import ChatAnthropic
        import anthropic

        ChatAnthropic(..., extra_headers={}).invoke(...)

        # results in underlying API call of:

        anthropic.Anthropic(..).messages.create(..., extra_headers={})

        # which is also equivalent to:

        ChatAnthropic(...).invoke(..., extra_headers={})
        ```

    Invoke:
        ```python
        messages = [
            (
                "system",
                "You are a helpful translator. Translate the user sentence to French.",
            ),
            ("human", "I love programming."),
        ]
        model.invoke(messages)
        ```

        ```python
        AIMessage(
            content="J'aime la programmation.",
            response_metadata={
                "id": "msg_01Trik66aiQ9Z1higrD5XFx3",
                "model": "claude-sonnet-4-5-20250929",
                "stop_reason": "end_turn",
                "stop_sequence": None,
                "usage": {"input_tokens": 25, "output_tokens": 11},
            },
            id="run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0",
            usage_metadata={
                "input_tokens": 25,
                "output_tokens": 11,
                "total_tokens": 36,
            },
        )
        ```

    Stream:
        ```python
        for chunk in model.stream(messages):
            print(chunk.text, end="")
        ```

        ```python
        AIMessageChunk(content="J", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content="'", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content="a", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content="ime", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content=" la", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content=" programm", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content="ation", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        AIMessageChunk(content=".", id="run-272ff5f9-8485-402c-b90d-eac8babc5b25")
        ```

        ```python
        stream = model.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
        full
        ```

        ```python
        AIMessageChunk(content="J'aime la programmation.", id="run-b34faef0-882f-4869-a19c-ed2b856e6361")
        ```

    Async:
        ```python
        await model.ainvoke(messages)

        # stream:
        # async for chunk in (await model.astream(messages))

        # batch:
        # await model.abatch([messages])
        ```

        ```python
        AIMessage(
            content="J'aime la programmation.",
            response_metadata={
                "id": "msg_01Trik66aiQ9Z1higrD5XFx3",
                "model": "claude-sonnet-4-5-20250929",
                "stop_reason": "end_turn",
                "stop_sequence": None,
                "usage": {"input_tokens": 25, "output_tokens": 11},
            },
            id="run-5886ac5f-3c2e-49f5-8a44-b1e92808c929-0",
            usage_metadata={
                "input_tokens": 25,
                "output_tokens": 11,
                "total_tokens": 36,
            },
        )
        ```

    Tool calling:
        ```python
        from pydantic import BaseModel, Field


        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


        class GetPopulation(BaseModel):
            '''Get the current population in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


        model_with_tools = model.bind_tools([GetWeather, GetPopulation])
        ai_msg = model_with_tools.invoke("Which city is hotter today and which is bigger: LA or NY?")
        ai_msg.tool_calls
        ```

        ```python
        [
            {
                "name": "GetWeather",
                "args": {"location": "Los Angeles, CA"},
                "id": "toolu_01KzpPEAgzura7hpBqwHbWdo",
            },
            {
                "name": "GetWeather",
                "args": {"location": "New York, NY"},
                "id": "toolu_01JtgbVGVJbiSwtZk3Uycezx",
            },
            {
                "name": "GetPopulation",
                "args": {"location": "Los Angeles, CA"},
                "id": "toolu_01429aygngesudV9nTbCKGuw",
            },
            {
                "name": "GetPopulation",
                "args": {"location": "New York, NY"},
                "id": "toolu_01JPktyd44tVMeBcPPnFSEJG",
            },
        ]
        ```

        See `ChatAnthropic.bind_tools()` method for more.

    Structured output:
        ```python
        from typing import Optional

        from pydantic import BaseModel, Field


        class Joke(BaseModel):
            '''Joke to tell user.'''

            setup: str = Field(description="The setup of the joke")
            punchline: str = Field(description="The punchline to the joke")
            rating: int | None = Field(description="How funny the joke is, from 1 to 10")


        structured_model = model.with_structured_output(Joke)
        structured_model.invoke("Tell me a joke about cats")
        ```

        ```python
        Joke(
            setup="Why was the cat sitting on the computer?",
            punchline="To keep an eye on the mouse!",
            rating=None,
        )
        ```

        See `ChatAnthropic.with_structured_output()` for more.

    Image input:
        See [multimodal guides](https://docs.langchain.com/oss/python/langchain/models#multimodal)
        for more detail.

        ```python
        import base64

        import httpx
        from langchain_anthropic import ChatAnthropic
        from langchain_core.messages import HumanMessage

        image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg"
        image_data = base64.b64encode(httpx.get(image_url).content).decode("utf-8")

        model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
        message = HumanMessage(
            content=[
                {
                    "type": "text",
                    "text": "Can you highlight the differences between these two images?",
                },
                {
                    "type": "image",
                    "base64": image_data,
                    "mime_type": "image/jpeg",
                },
                {
                    "type": "image",
                    "url": image_url,
                },
            ],
        )
        ai_msg = model.invoke([message])
        ai_msg.content
        ```

        ```python
        "After examining both images carefully, I can see that they are actually identical."
        ```

        ??? note "Files API"

            You can also pass in files that are managed through Anthropic's
            [Files API](https://docs.claude.com/en/docs/build-with-claude/files):

            ```python
            from langchain_anthropic import ChatAnthropic

            model = ChatAnthropic(
                model="claude-sonnet-4-5-20250929",
                betas=["files-api-2025-04-14"],
            )
            input_message = {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Describe this document.",
                    },
                    {
                        "type": "image",
                        "id": "file_abc123...",
                    },
                ],
            }
            model.invoke([input_message])
            ```

    PDF input:
        See [multimodal guides](https://docs.langchain.com/oss/python/langchain/models#multimodal)
        for more detail.

        ```python
        from base64 import b64encode
        from langchain_anthropic import ChatAnthropic
        from langchain_core.messages import HumanMessage
        import requests

        url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
        data = b64encode(requests.get(url).content).decode()

        model = ChatAnthropic(model="claude-sonnet-4-5-20250929")
        ai_msg = model.invoke(
            [
                HumanMessage(
                    [
                        "Summarize this document.",
                        {
                            "type": "file",
                            "mime_type": "application/pdf",
                            "base64": data,
                        },
                    ]
                )
            ]
        )
        ai_msg.content
        ```

        ```python
        "This appears to be a simple document..."
        ```

        ??? note "Files API"

            You can also pass in files that are managed through Anthropic's
            [Files API](https://docs.claude.com/en/docs/build-with-claude/files):

            ```python
            from langchain_anthropic import ChatAnthropic

            model = ChatAnthropic(
                model="claude-sonnet-4-5-20250929",
                betas=["files-api-2025-04-14"],
            )
            input_message = {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Describe this document.",
                    },
                    {
                        "type": "file",
                        "id": "file_abc123...",
                    },
                ],
            }
            model.invoke([input_message])
            ```

    Extended thinking:
        Certain [Claude models](https://docs.claude.com/en/docs/build-with-claude/extended-thinking#supported-models)
        support an [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
        feature, which will output the step-by-step reasoning process that led to its
        final answer.

        To use it, specify the `thinking` parameter when initializing `ChatAnthropic`.

        It can also be passed in as a kwarg during invocation.

        You will need to specify a token budget to use this feature. See usage example:

        ```python
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(
            model="claude-sonnet-4-5-20250929",
            max_tokens=5000,
            thinking={"type": "enabled", "budget_tokens": 2000},
        )

        response = model.invoke("What is the cube root of 50.653?")
        response.content
        ```

        ```python
        [
            {
                "signature": "...",
                "thinking": "To find the cube root of 50.653...",
                "type": "thinking",
            },
            {"text": "The cube root of 50.653 is ...", "type": "text"},
        ]
        ```

        !!! warning "Differences in thinking across model versions"
            The Claude Messages API handles thinking differently across Claude Sonnet
            3.7 and Claude 4 models. Refer to [their docs](https://docs.claude.com/en/docs/build-with-claude/extended-thinking#differences-in-thinking-across-model-versions)
            for more info.

    Citations:
        Anthropic supports a [citations](https://docs.claude.com/en/docs/build-with-claude/citations)
        feature that lets Claude attach context to its answers based on source
        documents supplied by the user. When [document content blocks](https://docs.claude.com/en/docs/build-with-claude/citations#document-types)
        with `"citations": {"enabled": True}` are included in a query, Claude may
        generate citations in its response.

        ```python
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(model="claude-3-5-haiku-20241022")

        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "document",
                        "source": {
                            "type": "text",
                            "media_type": "text/plain",
                            "data": "The grass is green. The sky is blue.",
                        },
                        "title": "My Document",
                        "context": "This is a trustworthy document.",
                        "citations": {"enabled": True},
                    },
                    {"type": "text", "text": "What color is the grass and sky?"},
                ],
            }
        ]
        response = model.invoke(messages)
        response.content
        ```

        ```python
        [
            {"text": "Based on the document, ", "type": "text"},
            {
                "text": "the grass is green",
                "type": "text",
                "citations": [
                    {
                        "type": "char_location",
                        "cited_text": "The grass is green. ",
                        "document_index": 0,
                        "document_title": "My Document",
                        "start_char_index": 0,
                        "end_char_index": 20,
                    }
                ],
            },
            {"text": ", and ", "type": "text"},
            {
                "text": "the sky is blue",
                "type": "text",
                "citations": [
                    {
                        "type": "char_location",
                        "cited_text": "The sky is blue.",
                        "document_index": 0,
                        "document_title": "My Document",
                        "start_char_index": 20,
                        "end_char_index": 36,
                    }
                ],
            },
            {"text": ".", "type": "text"},
        ]
        ```

    Token usage:
        ```python
        ai_msg = model.invoke(messages)
        ai_msg.usage_metadata
        ```

        ```python
        {"input_tokens": 25, "output_tokens": 11, "total_tokens": 36}
        ```

        Message chunks containing token usage will be included during streaming by
        default:

        ```python
        stream = model.stream(messages)
        full = next(stream)
        for chunk in stream:
            full += chunk
        full.usage_metadata
        ```

        ```python
        {"input_tokens": 25, "output_tokens": 11, "total_tokens": 36}
        ```

        These can be disabled by setting `stream_usage=False` in the stream method,
        or by setting `stream_usage=False` when initializing `ChatAnthropic`.
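
        For example (a sketch):

        ```python
        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", stream_usage=False)

        # Or disable usage metadata for a single call:
        stream = model.stream(messages, stream_usage=False)
        ```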

    Prompt caching:
        Prompt caching reduces processing time and costs for repetitive tasks or prompts
        with consistent elements.

        !!! note
            Only certain models support prompt caching.
            See the [Claude documentation](https://docs.claude.com/en/docs/build-with-claude/prompt-caching#supported-models)
            for a full list.

        ```python
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(model="claude-sonnet-4-5-20250929")

        messages = [
            {
                "role": "system",
                "content": [
                    {
                        "type": "text",
                        "text": "Below is some long context:",
                    },
                    {
                        "type": "text",
                        "text": f"{long_text}",
                        "cache_control": {"type": "ephemeral"},
                    },
                ],
            },
            {
                "role": "user",
                "content": "What's that about?",
            },
        ]

        response = model.invoke(messages)
        response.usage_metadata["input_token_details"]
        ```

        ```python
        {"cache_read": 0, "cache_creation": 1458}
        ```

        Alternatively, you may enable prompt caching at invocation time. This is
        useful for conditionally caching based on runtime conditions, such as the
        length of the context, or for app-level decisions about what to cache.

        ```python
        response = model.invoke(
            messages,
            cache_control={"type": "ephemeral"},
        )
        ```

        ??? note "Extended caching"

            The cache lifetime is 5 minutes by default. If this is too short, you can
            apply one hour caching by setting `ttl` to `'1h'`.

            ```python
            model = ChatAnthropic(
                model="claude-sonnet-4-5-20250929",
            )

            messages = [
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": f"{long_text}",
                            "cache_control": {"type": "ephemeral", "ttl": "1h"},
                        },
                    ],
                }
            ]

            response = model.invoke(messages)
            ```

            Details of cached token counts will be included on the `InputTokenDetails`
            of response's `usage_metadata`:

            ```python
            response = model.invoke(messages)
            response.usage_metadata
            ```

            ```python
            {
                "input_tokens": 1500,
                "output_tokens": 200,
                "total_tokens": 1700,
                "input_token_details": {
                    "cache_read": 0,
                    "cache_creation": 1000,
                    "ephemeral_1h_input_tokens": 750,
                    "ephemeral_5m_input_tokens": 250,
                },
            }
            ```

            See [Claude documentation](https://docs.claude.com/en/docs/build-with-claude/prompt-caching#1-hour-cache-duration-beta)
            for detail.

    !!! note "Extended context windows (beta)"

        Claude Sonnet 4 supports a 1-million token context window, available in beta for
        organizations in usage tier 4 and organizations with custom rate limits.

        ```python
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(
            model="claude-sonnet-4-5-20250929",
            betas=["context-1m-2025-08-07"],  # Enable 1M context beta
        )

        long_document = '''
        This is a very long document that would benefit from the extended 1M
        context window...
        [imagine this continues for hundreds of thousands of tokens]
        '''

        messages = [
            HumanMessage(f'''
        Please analyze this document and provide a summary:

        {long_document}

        What are the key themes and main conclusions?
        ''')
        ]

        response = model.invoke(messages)
        ```

        See [Claude documentation](https://docs.claude.com/en/docs/build-with-claude/context-windows#1m-token-context-window)
        for detail.


    !!! note "Token-efficient tool use (beta)"

        See LangChain [docs](https://docs.langchain.com/oss/python/integrations/chat/anthropic)
        for more detail.

        ```python
        from langchain_anthropic import ChatAnthropic
        from langchain_core.tools import tool

        model = ChatAnthropic(
            model="claude-sonnet-4-5-20250929",
            temperature=0,
            model_kwargs={
                "extra_headers": {
                    "anthropic-beta": "token-efficient-tools-2025-02-19"
                }
            }
        )

        @tool
        def get_weather(location: str) -> str:
            '''Get the weather at a location.'''
            return "It's sunny."

        model_with_tools = model.bind_tools([get_weather])
        response = model_with_tools.invoke(
            "What's the weather in San Francisco?"
        )
        print(response.tool_calls)
        print(f'Total tokens: {response.usage_metadata["total_tokens"]}')
        ```

        ```txt
        [{'name': 'get_weather', 'args': {'location': 'San Francisco'}, 'id': 'toolu_01HLjQMSb1nWmgevQUtEyz17', 'type': 'tool_call'}]
        Total tokens: 408
        ```

    !!! note "Context management"

        Anthropic supports a context editing feature that will automatically manage the
        model's context window (e.g., by clearing tool results).

        See [Anthropic documentation](https://docs.claude.com/en/docs/build-with-claude/context-editing)
        for details and configuration options.

        ```python
        from langchain_anthropic import ChatAnthropic

        model = ChatAnthropic(
            model="claude-sonnet-4-5-20250929",
            betas=["context-management-2025-06-27"],
            context_management={"edits": [{"type": "clear_tool_uses_20250919"}]},
        )
        model_with_tools = model.bind_tools([{"type": "web_search_20250305", "name": "web_search"}])
        response = model_with_tools.invoke("Search for recent developments in AI")
        ```

    !!! note "Built-in tools"

        See LangChain [docs](https://docs.langchain.com/oss/python/integrations/chat/anthropic#built-in-tools)
        for more detail.

        ??? note "Web search"

            ```python
            from langchain_anthropic import ChatAnthropic

            model = ChatAnthropic(model="claude-3-5-haiku-20241022")

            tool = {
                "type": "web_search_20250305",
                "name": "web_search",
                "max_uses": 3,
            }
            model_with_tools = model.bind_tools([tool])

            response = model_with_tools.invoke("How do I update a web app to TypeScript 5.5?")
            ```

        ??? note "Web fetch (beta)"

            ```python
            from langchain_anthropic import ChatAnthropic

            model = ChatAnthropic(
                model="claude-3-5-haiku-20241022",
                betas=["web-fetch-2025-09-10"],  # Enable web fetch beta
            )

            tool = {
                "type": "web_fetch_20250910",
                "name": "web_fetch",
                "max_uses": 3,
            }
            model_with_tools = model.bind_tools([tool])

            response = model_with_tools.invoke("Please analyze the content at https://example.com/article")
            ```

        ??? note "Code execution"

            ```python
            model = ChatAnthropic(
                model="claude-sonnet-4-5-20250929",
                betas=["code-execution-2025-05-22"],
            )

            tool = {"type": "code_execution_20250522", "name": "code_execution"}
            model_with_tools = model.bind_tools([tool])

            response = model_with_tools.invoke(
                "Calculate the mean and standard deviation of [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]"
            )
            ```

        ??? note "Remote MCP"

            ```python
            from langchain_anthropic import ChatAnthropic

            mcp_servers = [
                {
                    "type": "url",
                    "url": "https://mcp.deepwiki.com/mcp",
                    "name": "deepwiki",
                    "tool_configuration": {  # optional configuration
                        "enabled": True,
                        "allowed_tools": ["ask_question"],
                    },
                    "authorization_token": "PLACEHOLDER",  # optional authorization
                }
            ]

            model = ChatAnthropic(
                model="claude-sonnet-4-5-20250929",
                betas=["mcp-client-2025-04-04"],
                mcp_servers=mcp_servers,
            )

            response = model.invoke(
                "What transport protocols does the 2025-03-26 version of the MCP "
                "spec (modelcontextprotocol/modelcontextprotocol) support?"
            )
            ```

        ??? note "Text editor"

            ```python
            from langchain_anthropic import ChatAnthropic

            model = ChatAnthropic(model="claude-sonnet-4-5-20250929")

            tool = {"type": "text_editor_20250124", "name": "str_replace_editor"}
            model_with_tools = model.bind_tools([tool])

            response = model_with_tools.invoke(
                "There's a syntax error in my primes.py file. Can you help me fix it?"
            )
            print(response.text)
            response.tool_calls
            ```

            ```txt
            I'd be happy to help you fix the syntax error in your primes.py file. First, let's look at the current content of the file to identify the error.

            [{'name': 'str_replace_editor',
            'args': {'command': 'view', 'path': '/repo/primes.py'},
            'id': 'toolu_01VdNgt1YV7kGfj9LFLm6HyQ',
            'type': 'tool_call'}]
            ```

        ??? note "Memory tool"

            ```python
            from langchain_anthropic import ChatAnthropic

            model = ChatAnthropic(
                model="claude-sonnet-4-5-20250929",
                betas=["context-management-2025-06-27"],
            )
            model_with_tools = model.bind_tools([{"type": "memory_20250818", "name": "memory"}])
            response = model_with_tools.invoke("What are my interests?")
            ```

    !!! note "Response metadata"

        ```python
        ai_msg = model.invoke(messages)
        ai_msg.response_metadata
        ```

        ```python
        {
            "id": "msg_013xU6FHEGEq76aP4RgFerVT",
            "model": "claude-sonnet-4-5-20250929",
            "stop_reason": "end_turn",
            "stop_sequence": None,
            "usage": {"input_tokens": 25, "output_tokens": 11},
        }
        ```
    T)populate_by_namerH   )aliasrI   rU   Nmax_tokens_to_sample)rO   r  z
int | None
max_tokenszfloat | Nonetemperaturetop_ktop_ptimeoutdefault_request_timeoutr   rW   max_retriesstoplist[str] | Nonestop_sequencesbase_urlANTHROPIC_API_URLANTHROPIC_BASE_URLzhttps://api.anthropic.com)rO   )r  default_factoryrV   anthropic_api_urlapi_keyANTHROPIC_API_KEYr   r9   anthropic_api_keyANTHROPIC_PROXY)r  anthropic_proxyzMapping[str, str] | Nonedefault_headersbetasrc   model_kwargsFrn   	streamingstream_usagezdict[str, Any] | Noner   zlist[dict[str, Any]] | Nonemcp_serverscontext_managementrJ   c                 C     dS )zReturn type of chat model.zanthropic-chatrP   selfrP   rP   rQ   	_llm_type     zChatAnthropic._llm_typedict[str, str]c                 C  s
   dddS )z9Return a mapping of secret keys to environment variables.r  ANTHROPIC_MCP_SERVERS)r  r!  rP   r$  rP   rP   rQ   
lc_secrets  s   zChatAnthropic.lc_secretsc                 C  r#  )z/Whether the class is serializable in langchain.TrP   clsrP   rP   rQ   is_lc_serializable  r'  z ChatAnthropic.is_lc_serializable	list[str]c                 C  s   g dS )z}Get the namespace of the LangChain object.

        Returns:
            `["langchain", "chat_models", "anthropic"]`
        )	langchainchat_models	anthropicrP   r+  rP   rP   rQ   get_lc_namespace  s   zChatAnthropic.get_lc_namespacec                 C  s.   | j | j| j| j| j| j| j| j| j| j	d
S )zGet the identifying parameters.
rU   r  r	  r
  r  r  r  r  r  r   r3  r$  rP   rP   rQ   _identifying_params&  s   z!ChatAnthropic._identifying_paramskwargsr
   r   c                 K  sr   | j dd|i|}td|d| jd|d| jd}|d| j }r)||d< |p0|dd	 }r7||d
< |S )z Get standard params for tracing.r  r1  rU   chatr	  )ls_providerls_model_namels_model_typels_temperaturer  ls_max_tokensNls_stoprP   )_get_invocation_paramsr   rM   rU   r	  r  )r%  r  r5  params	ls_paramsr;  r<  rP   rP   rQ   _get_ls_params6  s   zChatAnthropic._get_ls_paramsbefore)modevaluesc                 C  s2   | ddu r| dp| d}t||d< |S )zSet default max_tokens.r  NrU   rH   )rM   r`   )r,  rC  rU   rP   rP   rQ   set_default_max_tokensI  s   z$ChatAnthropic.set_default_max_tokensr   c                 C  s   t | }t||S )zBuild model kwargs.)r0   r5   )r,  rC  all_required_field_namesrP   rP   rQ   build_extraR  s   
zChatAnthropic.build_extraafterr<   c                 C  s   | j du rt| j| _ | S )z$Set model profile if not overridden.N)profilerR   rU   r$  rP   rP   rQ   _set_model_profileY  s   
z ChatAnthropic._set_model_profilec                 C  s@   | j  | j| j| jpd d}| jd u s| jdkr| j|d< |S )N)r  r  r  r  r   r  )r  get_secret_valuer  r  r  r  )r%  client_paramsrP   rP   rQ   _client_params`  s   	
zChatAnthropic._client_paramsanthropic.Clientc                 C  d   | j }d|d i}d|v r|d |d< | jr| j|d< tdi |}i |d|i}tjdi |S Nr  r  r  http_clientrP   )rL  r  r?   r1  Clientr%  rK  http_client_paramsrP  r>  rP   rP   rQ   _clientp     
zChatAnthropic._clientanthropic.AsyncClientc                 C  rN  rO  )rL  r  r>   r1  AsyncClientrR  rP   rP   rQ   _async_client  rU  zChatAnthropic._async_client)r  r   r   c                K  s  |  | }t|D ]2\}}t|tr=|jddkr=dd |jD }|jdt	t
ttj |j||jdid||< qt|\}}	d|v r~|	r~t|	d	 d tra|d|	d	 d d	 d< nt|	d	 d tr}d
|	d	 d |ddg|	d	 d< n	 |dd}
| j| j|	| j| j| j|p| j| j| j| j|d| j|}| jdur| j|d< d|v r|d}t|tr|ddkrd|di v rt
t|d d }t||d< d|v r|d sdg|d< dd | D S )z.Get the request payload for the Anthropic API.output_versionv1c                 S  s(   g | ]}d |d |d | ddqS )	tool_callrb   r   r   )ro   rb   r   r   rM   rx   r[  rP   rP   rQ   r     s    z6ChatAnthropic._get_request_payload.<locals>.<listcomp>r   model_providerr   rg   rZ   r   )ro   r   rg   N)rU   r  r   r	  r
  r  r  r  r"  r!  r   r   response_formatro   json_schemaschemaoutput_formatr  zstructured-outputs-2025-11-13c                 S     i | ]\}}|d ur||qS rK   rP   r   rP   rP   rQ   r         z6ChatAnthropic._get_request_payload.<locals>.<dictcomp>) _convert_inputto_messagesr   r~   r   response_metadatarM   r   r   r@   r   r   typesContentBlockr   r   poprI   rU   r  r	  r
  r  r  r  r"  r!  r  r   r   #_convert_to_anthropic_output_formatr   )r%  r   r  r5  r   idxr   tcsr   r   _payloadr_  rP   rP   rQ   _get_request_payload  s|   	







z"ChatAnthropic._get_request_payloadro  c                 C  s2   d|v r| j jjjdi |S | j jjdi |S Nr  rP   )rT  betar   creater%  ro  rP   rP   rQ   _create  s   zChatAnthropic._createc                   s@   d|v r| j jjjdi |I d H S | j jjdi |I d H S rq  )rX  rr  r   rs  rt  rP   rP   rQ   _acreate  s   zChatAnthropic._acreate)r   r   list[BaseMessage]run_managerCallbackManagerForLLMRun | Nonebool | NoneIterator[ChatGenerationChunk]c             
   k  s    |d u r| j }d|d< | j|fd|i|}zD| |}t| o+t| o+t| }d }	|D ](}
t|
|||	d\}}	|d urXt|d}|rUt|j	t
rU|j|j	|d |V  q0W d S  tjys } zt| W Y d }~d S d }~ww NTstreamr  )r   coerce_content_to_stringblock_start_eventr   )chunk)r   rp  ru  _tools_in_params_documents_in_params_thinking_in_params(_make_message_chunk_from_anthropic_eventr)   r~   r   rI   on_llm_new_tokenr1  BadRequestErrorr  r%  r   r  rx  r   r5  ro  r}  r~  r  eventr   r  r   rP   rP   rQ   _stream  s>   	



zChatAnthropic._stream$AsyncCallbackManagerForLLMRun | None"AsyncIterator[ChatGenerationChunk]c             
   K s   |d u r| j }d|d< | j|fd|i|}zO| |I d H }t| o.t| o.t| }d }	|2 z/3 d H W }
t|
|||	d\}}	|d urbt|d}|r_t|j	t
r_|j|j	|dI d H  |V  q36 W d S  tjy~ } zt| W Y d }~d S d }~ww r|  )r   rp  rv  r  r  r  r  r)   r~   r   rI   r  r1  r  r  r  rP   rP   rQ   _astream  s>   	


zChatAnthropic._astreamr   r*   c           
      K  sD  |  }|d }|D ]1}t|tr d|v r |d du r |d t|tr;|ddkr;d|v r;|d du r;|d q
dd | D }d	d
i}d|v rWd|vrW|d |d< t|dkrw|d d dkrw|d dswt|d d |d}ntdd |D rt	|}	t||	|d}nt||d}t
|j|_tt|dg|dS )z/Format the output from the Anthropic API to LC.r   r   Nro   r   r   c                 S  s   i | ]\}}|d vr||qS ))r   r   ro   rP   r   rP   rP   rQ   r   U  s    z0ChatAnthropic._format_output.<locals>.<dictcomp>r^  r1  rU   rH   rY   r   r   rg  c                 s  s    | ]	}|d  dkV  qdS )ro   r   NrP   r   rP   rP   rQ   r|   c  s    z/ChatAnthropic._format_output.<locals>.<genexpr>)r   r   rg  r  )generations
llm_output)
model_dumpr~   r   rj  rM   r   r\   r   r   rB   _create_usage_metadatausageusage_metadatar*   r(   )
r%  r   r5  	data_dictr   r   r  rg  r   r   rP   rP   rQ   _format_output@  sR   


zChatAnthropic._format_outputc              
   K  sf   | j |fd|i|}z| |}W n tjy) } z
t| W Y d }~nd }~ww | j|fi |S Nr  )rp  ru  r1  r  r  r  r%  r   r  rx  r5  ro  r   r   rP   rP   rQ   	_generater  s   zChatAnthropic._generatec              
     sn   | j |fd|i|}z
| |I d H }W n tjy- } z
t| W Y d }~nd }~ww | j|fi |S r  )rp  rv  r1  r  r  r  r  rP   rP   rQ   
_agenerate  s   zChatAnthropic._ageneratera  dict | typeformatted_toolra   )Runnable[LanguageModelInput, BaseMessage]c                   sB   d t j dd | j|gddi|dd}d fdd}||B S )Na'  Anthropic structured output relies on forced tool calling, which is not supported when `thinking` is enabled. This method will raise langchain_core.exceptions.OutputParserException if tool calls are not generated. Consider disabling `thinking` or adjust your prompt to ensure the tool is called.r   r   methodfunction_callingr5  ra  )ls_structured_output_formatr   r   rJ   c                   s   | j st | S rK   )r   r   r  thinking_admonitionrP   rQ   _raise_if_no_tool_calls  s   zfChatAnthropic._get_llm_for_structured_output_when_thinking_is_enabled.<locals>._raise_if_no_tool_calls)r   r   rJ   r   )r  r  
bind_tools)r%  ra  r  llmr  rP   r  rQ   7_get_llm_for_structured_output_when_thinking_is_enabled  s   zEChatAnthropic._get_llm_for_structured_output_when_thinking_is_enabled)tool_choiceparallel_tool_callsrf   tools5Sequence[dict[str, Any] | type | Callable | BaseTool]r  dict[str, str] | str | Noner  rf   'Runnable[LanguageModelInput, AIMessage]c          	        s    fdd|D }|sn1t |tr||d< n't |tr&|dv r&d|i|d< nt |tr3d|d|d< n
d|d	}t||d
urV| }d|v rO||d d< nd|d|d< | jdd|i|S )u$  Bind tool-like objects to this chat model.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Supports Anthropic format tool schemas and any tool definition handled
                by `langchain_core.utils.function_calling.convert_to_openai_tool`.
            tool_choice: Which tool to require the model to call. Options are:

                - name of the tool as a string or as dict `{"type": "tool", "name": "<<tool_name>>"}`: calls corresponding tool;
                - `'auto'`, `{"type": "auto"}`, or `None`: automatically selects a tool (including no tool);
                - `'any'` or `{"type": "any"}`: forces at least one tool to be called.
            parallel_tool_calls: Set to `False` to disable parallel tool use.
                Defaults to `None` (no specification, which allows parallel tool use).

                !!! version-added "Added in `langchain-anthropic` 0.3.2"
            strict: If `True`, Claude's schema adherence is applied to tool calls.
                See: [Anthropic docs](https://docs.claude.com/en/docs/build-with-claude/structured-outputs#when-to-use-json-outputs-vs-strict-tool-use).
            kwargs: Any additional parameters are passed directly to `bind`.

        Example:
            ```python
            from langchain_anthropic import ChatAnthropic
            from pydantic import BaseModel, Field


            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


            class GetPrice(BaseModel):
                '''Get the price of a specific product.'''

                product: str = Field(..., description="The product to look up.")


            model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
            model_with_tools = model.bind_tools([GetWeather, GetPrice])
            model_with_tools.invoke(
                "What is the weather like in San Francisco",
            )
            # -> AIMessage(
            #     content=[
            #         {'text': '<thinking>\nBased on the user\'s question, the relevant function to call is GetWeather, which requires the "location" parameter.\n\nThe user has directly specified the location as "San Francisco". Since San Francisco is a well known city, I can reasonably infer they mean San Francisco, CA without needing the state specified.\n\nAll the required parameters are provided, so I can proceed with the API call.\n</thinking>', 'type': 'text'},
            #         {'text': None, 'type': 'tool_use', 'id': 'toolu_01SCgExKzQ7eqSkMHfygvYuu', 'name': 'GetWeather', 'input': {'location': 'San Francisco, CA'}}
            #     ],
            #     response_metadata={'id': 'msg_01GM3zQtoFv8jGQMW7abLnhi', 'model': 'claude-sonnet-4-5-20250929', 'stop_reason': 'tool_use', 'stop_sequence': None, 'usage': {'input_tokens': 487, 'output_tokens': 145}},
            #     id='run-87b1331e-9251-4a68-acef-f0a018b639cc-0'
            # )
            ```

        Example — force tool call with tool_choice `'any'`:

            ```python
            from langchain_anthropic import ChatAnthropic
            from pydantic import BaseModel, Field


            class GetWeather(BaseModel):
                '''Get the current weather in a given location'''

                location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


            class GetPrice(BaseModel):
                '''Get the price of a specific product.'''

                product: str = Field(..., description="The product to look up.")


            model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
            model_with_tools = model.bind_tools([GetWeather, GetPrice], tool_choice="any")
            model_with_tools.invoke(
                "what is the weather like in San Francisco",
            )
            ```

        Example — force specific tool call with `tool_choice` `'<name_of_tool>'`:

        ```python
        from langchain_anthropic import ChatAnthropic
        from pydantic import BaseModel, Field


        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


        class GetPrice(BaseModel):
            '''Get the price of a specific product.'''

            product: str = Field(..., description="The product to look up.")


        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
        model_with_tools = model.bind_tools([GetWeather, GetPrice], tool_choice="GetWeather")
        model_with_tools.invoke("What is the weather like in San Francisco")
        ```
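
        Example of enforcing schema adherence with `strict` (an illustrative
        sketch; `GetWeather` is the pydantic tool defined above):

        ```python
        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
        model_with_tools = model.bind_tools([GetWeather], strict=True)
        model_with_tools.invoke("What is the weather like in San Francisco")
        ```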

        Example — cache specific tools:

        ```python
        from langchain_anthropic import ChatAnthropic, convert_to_anthropic_tool
        from pydantic import BaseModel, Field


        class GetWeather(BaseModel):
            '''Get the current weather in a given location'''

            location: str = Field(..., description="The city and state, e.g. San Francisco, CA")


        class GetPrice(BaseModel):
            '''Get the price of a specific product.'''

            product: str = Field(..., description="The product to look up.")


        # We'll convert our pydantic class to the anthropic tool format
        # before passing to bind_tools so that we can set the 'cache_control'
        # field on our tool.
        cached_price_tool = convert_to_anthropic_tool(GetPrice)
        # Currently the only supported "cache_control" value is
        # {"type": "ephemeral"}.
        cached_price_tool["cache_control"] = {"type": "ephemeral"}

        # We need to pass in extra headers to enable use of the beta cache
        # control API.
        model = ChatAnthropic(
            model="claude-sonnet-4-5-20250929",
            temperature=0,
        )
        model_with_tools = model.bind_tools([GetWeather, cached_price_tool])
        model_with_tools.invoke("What is the weather like in San Francisco")
        ```

        This outputs:

        ```python
        AIMessage(
            content=[
                {
                    "text": "Certainly! I can help you find out the current weather in San Francisco. To get this information, I'll use the GetWeather function. Let me fetch that data for you right away.",
                    "type": "text",
                },
                {
                    "id": "toolu_01TS5h8LNo7p5imcG7yRiaUM",
                    "input": {"location": "San Francisco, CA"},
                    "name": "GetWeather",
                    "type": "tool_use",
                },
            ],
            response_metadata={
                "id": "msg_01Xg7Wr5inFWgBxE5jH9rpRo",
                "model": "claude-sonnet-4-5-20250929",
                "stop_reason": "tool_use",
                "stop_sequence": None,
                "usage": {
                    "input_tokens": 171,
                    "output_tokens": 96,
                    "cache_creation_input_tokens": 1470,
                    "cache_read_input_tokens": 0,
                },
            },
            id="run-b36a5b54-5d69-470e-a1b0-b932d00b089e-0",
            tool_calls=[
                {
                    "name": "GetWeather",
                    "args": {"location": "San Francisco, CA"},
                    "id": "toolu_01TS5h8LNo7p5imcG7yRiaUM",
                    "type": "tool_call",
                }
            ],
            usage_metadata={
                "input_tokens": 171,
                "output_tokens": 96,
                "total_tokens": 267,
            },
        )
        ```

        If we invoke the model again, the `usage` block in `AIMessage.response_metadata` shows a cache hit:

        ```python
        AIMessage(
            content=[
                {
                    "text": "To get the current weather in San Francisco, I can use the GetWeather function. Let me check that for you.",
                    "type": "text",
                },
                {
                    "id": "toolu_01HtVtY1qhMFdPprx42qU2eA",
                    "input": {"location": "San Francisco, CA"},
                    "name": "GetWeather",
                    "type": "tool_use",
                },
            ],
            response_metadata={
                "id": "msg_016RfWHrRvW6DAGCdwB6Ac64",
                "model": "claude-sonnet-4-5-20250929",
                "stop_reason": "tool_use",
                "stop_sequence": None,
                "usage": {
                    "input_tokens": 171,
                    "output_tokens": 82,
                    "cache_creation_input_tokens": 0,
                    "cache_read_input_tokens": 1470,
                },
            },
            id="run-88b1f825-dcb7-4277-ac27-53df55d22001-0",
            tool_calls=[
                {
                    "name": "GetWeather",
                    "args": {"location": "San Francisco, CA"},
                    "id": "toolu_01HtVtY1qhMFdPprx42qU2eA",
                    "type": "tool_call",
                }
            ],
            usage_metadata={
                "input_tokens": 171,
                "output_tokens": 82,
                "total_tokens": 253,
            },
        )
        ```
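
        To branch on cache behavior programmatically, read the `usage` block out
        of `response_metadata` (key names as in the payloads above):

        ```python
        response = model_with_tools.invoke("What is the weather like in San Francisco")
        usage = response.response_metadata["usage"]
        if (usage.get("cache_read_input_tokens") or 0) > 0:
            print("Tool definitions were served from the prompt cache")
        ```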
        """
        formatted_tools = [
            tool
            if _is_builtin_tool(tool)
            else convert_to_anthropic_tool(tool, strict=strict)
            for tool in tools
        ]
        if isinstance(tool_choice, dict):
            kwargs["tool_choice"] = tool_choice
        elif isinstance(tool_choice, str):
            # "any" and "auto" select a mode; any other string forces that named tool.
            if tool_choice in ("any", "auto"):
                kwargs["tool_choice"] = {"type": tool_choice}
            else:
                kwargs["tool_choice"] = {"type": "tool", "name": tool_choice}
        elif tool_choice is not None:
            msg = (
                f"Unrecognized 'tool_choice' type {tool_choice=}."
                " Expected dict, str, or None."
            )
            raise ValueError(msg)
        if parallel_tool_calls is not None:
            # Anthropic spells this as 'disable_parallel_tool_use' on tool_choice.
            kwargs.setdefault("tool_choice", {"type": "auto"})
            kwargs["tool_choice"]["disable_parallel_tool_use"] = not parallel_tool_calls
        return self.bind(tools=formatted_tools, **kwargs)

zChatAnthropic.bind_toolsr  )include_rawr  r  r  *Literal['function_calling', 'json_schema'].Runnable[LanguageModelInput, dict | BaseModel]c                K  s`  |dkrd}t j|dd d}|dkrUt|}|d }| jdur/| jd	d
kr/| ||}n| j|g|ddi|dd}t|trNt	|rNt
|gdd}	n8t|dd}	n1|dkr|| jt|ddit|dd}t|trxt	|rxt|d}	nt }	n
d| d}
t|
|rtjtd|	B dd d}tjdd d}|j|gdd}t|d|B S ||	B S )a{  Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema. Can be passed in as:

                - An Anthropic tool schema,
                - An OpenAI function/tool schema,
                - A JSON Schema,
                - A `TypedDict` class,
                - Or a Pydantic class.

                If `schema` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will be
                validated by the Pydantic class. Otherwise the model output will be a
                dict and will not be validated.

                See `langchain_core.utils.function_calling.convert_to_openai_tool` for
                more on how to properly specify types and descriptions of schema fields
                when specifying a Pydantic or `TypedDict` class.
            include_raw:
                If `False` then only the parsed structured output is returned.

                If an error occurs during model output parsing it will be raised.

                If `True` then both the raw model response (a `BaseMessage`) and the
                parsed model response will be returned.

                If an error occurs during output parsing it will be caught and returned
                as well.

                The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
                `'parsing_error'`.
            method: The structured output method to use. Options are:

                - `'function_calling'` (default): Use forced tool calling to get
                  structured output.
                - `'json_schema'`: Use Claude's dedicated
                  [structured output](https://docs.claude.com/en/docs/build-with-claude/structured-outputs)
                  feature.

            kwargs: Additional keyword arguments are ignored.

        Returns:
            A `Runnable` that takes the same inputs as a
                `langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
                `False` and `schema` is a Pydantic class, `Runnable` outputs an instance
                of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
                `False` then `Runnable` outputs a `dict`.

                If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:

                - `'raw'`: `BaseMessage`
                - `'parsed'`: `None` if there was a parsing error, otherwise the type
                    depends on the `schema` as described above.
                - `'parsing_error'`: `BaseException | None`

        Example: Pydantic schema (`include_raw=False`):

        ```python
        from langchain_anthropic import ChatAnthropic
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
        structured_model = model.with_structured_output(AnswerWithJustification)

        structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")

        # -> AnswerWithJustification(
        #     answer='They weigh the same',
        #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
        # )
        ```

        Example: Pydantic schema (`include_raw=True`):

        ```python
        from langchain_anthropic import ChatAnthropic
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
        structured_model = model.with_structured_output(AnswerWithJustification, include_raw=True)

        structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
        # -> {
        #     'raw': AIMessage(content=[{'type': 'tool_use', 'id': 'toolu_01...', 'name': 'AnswerWithJustification', 'input': {'answer': 'They weigh the same.', 'justification': '...'}}], tool_calls=[...]),
        #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
        #     'parsing_error': None
        # }
        ```

        Example: `dict` schema (`include_raw=False`):

        ```python
        from langchain_anthropic import ChatAnthropic

        schema = {
            "name": "AnswerWithJustification",
            "description": "An answer to the user question along with justification for the answer.",
            "input_schema": {
                "type": "object",
                "properties": {
                    "answer": {"type": "string"},
                    "justification": {"type": "string"},
                },
                "required": ["answer", "justification"],
            },
        }
        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
        structured_model = model.with_structured_output(schema)

        structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
        # -> {
        #     'answer': 'They weigh the same',
        #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
        # }
        ```
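
        Example: `method="json_schema"` (a sketch; this uses Claude's structured
        outputs feature, so the model must support it):

        ```python
        from langchain_anthropic import ChatAnthropic
        from pydantic import BaseModel


        class AnswerWithJustification(BaseModel):
            '''An answer to the user question along with justification for the answer.'''

            answer: str
            justification: str


        model = ChatAnthropic(model="claude-sonnet-4-5-20250929", temperature=0)
        structured_model = model.with_structured_output(
            AnswerWithJustification, method="json_schema"
        )
        structured_model.invoke("What weighs more a pound of bricks or a pound of feathers")
        # -> AnswerWithJustification(answer='They weigh the same', justification='...')
        ```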
        """
        if method == "json_mode":
            warning_message = (
                "Unrecognized structured output method 'json_mode'. Defaulting to"
                " 'json_schema' method."
            )
            warnings.warn(warning_message, stacklevel=2)
            method = "json_schema"
        if method == "function_calling":
            formatted_tool = convert_to_anthropic_tool(schema)
            tool_name = formatted_tool["name"]
            if self.thinking is not None and self.thinking.get("type") == "enabled":
                # Forced tool use is incompatible with extended thinking; fall
                # back to the dedicated binding helper.
                llm = self._get_llm_for_structured_output_when_thinking_is_enabled(
                    schema, formatted_tool
                )
            else:
                llm = self.bind_tools([schema], tool_choice=tool_name)
            if isinstance(schema, type) and is_basemodel_subclass(schema):
                output_parser: OutputParserLike = PydanticToolsParser(
                    tools=[schema], first_tool_only=True
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_schema":
            llm = self.bind(output_format=_convert_schema_to_output_format(schema))
            if isinstance(schema, type) and is_basemodel_subclass(schema):
                output_parser = PydanticOutputParser(pydantic_object=schema)
            else:
                output_parser = JsonOutputParser()
        else:
            error_message = (
                f"Unrecognized structured output method '{method}'."
                " Expected 'function_calling' or 'json_schema'."
            )
            raise ValueError(error_message)

        if include_raw:
            parser_assign = RunnablePassthrough.assign(
                parsed=itemgetter("raw") | output_parser,
                parsing_error=lambda _: None,
            )
            parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
            parser_with_fallback = parser_assign.with_fallbacks(
                [parser_none], exception_key="parsing_error"
            )
            return RunnableMap(raw=llm) | parser_with_fallback
        return llm | output_parser

    def get_num_tokens_from_messages(
        self,
        messages: list[BaseMessage],
        tools: Sequence[dict[str, Any] | type | Callable | BaseTool] | None = None,
        **kwargs: Any,
    ) -> int:
        """Count tokens in a sequence of input messages.

        Args:
            messages: The message inputs to tokenize.
            tools: If provided, sequence of `dict`, `BaseModel`, function, or `BaseTool`
                objects to be converted to tool schemas.
            kwargs: Additional keyword arguments are passed to the Anthropic
                `messages.count_tokens` method.

        Basic usage:

        ```python
        from langchain_anthropic import ChatAnthropic
        from langchain_core.messages import HumanMessage, SystemMessage

        model = ChatAnthropic(model="claude-sonnet-4-5-20250929")

        messages = [
            SystemMessage(content="You are a scientist"),
            HumanMessage(content="Hello, Claude"),
        ]
        model.get_num_tokens_from_messages(messages)
        ```

        ```txt
        14
        ```

        Pass tool schemas:

        ```python
        from langchain_anthropic import ChatAnthropic
        from langchain_core.messages import HumanMessage
        from langchain_core.tools import tool

        model = ChatAnthropic(model="claude-sonnet-4-5-20250929")

        @tool(parse_docstring=True)
        def get_weather(location: str) -> str:
            """Get the current weather in a given location

            Args:
                location: The city and state, e.g. San Francisco, CA
            """
            return "Sunny"

        messages = [
            HumanMessage(content="What's the weather like in San Francisco?"),
        ]
        model.get_num_tokens_from_messages(messages, tools=[get_weather])
        ```

        ```txt
        403
        ```
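
        Tool schemas are counted along with the messages, so you can estimate the
        schema overhead for a given message list by differencing two counts (a
        sketch reusing the objects above):

        ```python
        overhead = model.get_num_tokens_from_messages(
            messages, tools=[get_weather]
        ) - model.get_num_tokens_from_messages(messages)
        print(f"Tool schema overhead: ~{overhead} tokens")
        ```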

        !!! warning "Behavior changed in `langchain-anthropic` 0.3.0"

            Uses Anthropic's [token counting API](https://docs.claude.com/en/docs/build-with-claude/token-counting) to count tokens in messages.

        r   c                 S  s   g | ]}t |qS rP   )r  r  rP   rP   rQ   r   	  s    z>ChatAnthropic.get_num_tokens_from_messages.<locals>.<listcomp>r  Nr"  )r  rU   r   )rU   r   rP   )r   r~   rI   r"  r  rT  rr  r   count_tokensrU   input_tokens)r%  r   r  r5  formatted_systemr   beta_responseresponserP   rP   rQ   get_num_tokens_from_messages	  s.   C
        """
        formatted_system, formatted_messages = _format_messages(messages)
        if isinstance(formatted_system, str):
            kwargs["system"] = formatted_system
        if tools:
            kwargs["tools"] = [convert_to_anthropic_tool(tool) for tool in tools]
        if self.mcp_servers is not None:
            # Attribute name reconstructed; the dump only shows one model-level
            # field being passed through into the request kwargs here.
            kwargs["mcp_servers"] = self.mcp_servers

        if self.betas is not None:
            beta_response = self._client.beta.messages.count_tokens(
                betas=self.betas,
                model=self.model,
                messages=formatted_messages,
                **kwargs,
            )
            return beta_response.input_tokens
        response = self._client.messages.count_tokens(
            model=self.model,
            messages=formatted_messages,
            **kwargs,
        )
        return response.input_tokens


def convert_to_anthropic_tool(
    tool: dict[str, Any] | type | Callable | BaseTool,
    *,
    strict: bool | None = None,
) -> AnthropicTool:
    """Convert a tool-like object to an Anthropic tool definition."""
    if isinstance(tool, dict) and all(
        k in tool for k in ("name", "description", "input_schema")
    ):
        # Already in Anthropic format; pass through unchanged.
        anthropic_formatted = AnthropicTool(**tool)  # type: ignore[typeddict-item]
    else:
        # Otherwise go through the OpenAI tool schema and remap its fields.
        oai_formatted = convert_to_openai_tool(tool, strict=strict)["function"]
        anthropic_formatted = AnthropicTool(
            name=oai_formatted["name"],
            input_schema=oai_formatted["parameters"],
        )
        if "description" in oai_formatted:
            anthropic_formatted["description"] = oai_formatted["description"]
    if isinstance(tool, dict) and "cache_control" in tool:
        anthropic_formatted["cache_control"] = tool["cache_control"]
    return anthropic_formatted


# The predicate names below are reconstructions; the original spellings are
# mangled in this dump.
def _tools_in_params(params: dict) -> bool:
    return (
        "tools" in params
        or ("extra_body" in params and params["extra_body"].get("tools"))
        or "mcp_servers" in params
    )


def _thinking_in_params(params: dict) -> bool:
    return params.get("thinking", {}).get("type") == "enabled"


def _documents_in_params(params: dict) -> bool:
    for message in params.get("messages", []):
        if isinstance(message.get("content"), list):
            for block in message["content"]:
                if (
                    isinstance(block, dict)
                    and block.get("type") == "document"
                    and block.get("citations", {}).get("enabled")
                ):
                    return True
    return False


class _AnthropicToolUse(TypedDict):
    type: Literal["tool_use"]
    name: str
    input: dict[str, Any]
    id: str


def _lc_tool_calls_to_anthropic_tool_use_blocks(
    tool_calls: list[ToolCall],
) -> list[_AnthropicToolUse]:
    return [
        _AnthropicToolUse(
            type="tool_use",
            name=tool_call["name"],
            input=tool_call["args"],
            id=cast("str", tool_call["id"]),
        )
        for tool_call in tool_calls
    ]


# The original name of this helper is mangled in the dump; the name below is a
# reconstruction, kept consistent with its call site in `with_structured_output`.
def _convert_schema_to_output_format(schema: dict | type) -> dict[str, Any]:
    """Convert JSON schema, Pydantic model, or TypedDict into Claude output_format.

    See: https://docs.claude.com/en/docs/build-with-claude/structured-outputs
    """
    # `transform_schema` is imported lazily here, as in the original; its source
    # module is an assumption (the import target is mangled in the dump).
    from anthropic import transform_schema

    is_pydantic_class = isinstance(schema, type) and is_basemodel_subclass(schema)
    if not is_pydantic_class and isinstance(schema, dict):
        json_schema = transform_schema(schema)
    else:
        json_schema = transform_schema(convert_to_json_schema(schema))
    return {"type": "json_schema", "schema": json_schema}


def _make_message_chunk_from_anthropic_event(
    event: anthropic.types.RawMessageStreamEvent,
    *,
    stream_usage: bool = True,
    coerce_content_to_string: bool,
    block_start_event: anthropic.types.RawMessageStreamEvent | None = None,
) -> tuple[AIMessageChunk | None, anthropic.types.RawMessageStreamEvent | None]:
    """Convert Anthropic streaming event to `AIMessageChunk`.

    Args:
        event: Raw streaming event from the Anthropic SDK.
        stream_usage: Whether to include usage metadata in the output chunks.
        coerce_content_to_string: Whether to convert structured content to plain
            text strings. When True, only text content is preserved; when False,
            structured content like tool calls and citations are maintained.
        block_start_event: Previous content block start event, used for tracking
            tool use blocks and maintaining context across related events.

    Returns:
        Tuple containing:

        - AIMessageChunk: Converted message chunk with appropriate content and
          metadata, or None if the event doesn't produce a chunk
        - RawMessageStreamEvent: Updated `block_start_event` for tracking content
          blocks across sequential events, or None if not applicable

    Note:
        Not all Anthropic events result in message chunks. Events like internal
        state changes return None for the message chunk while potentially
        updating the `block_start_event` for context tracking.
    """
    # The full body is not reliably recoverable from this dump; the surviving
    # constants give the event dispatch outlined below.
    message_chunk: AIMessageChunk | None = None
    if event.type == "message_start":
        # Empty chunk carrying `model_name` response metadata and, when
        # `stream_usage` is set, usage metadata for the prompt tokens.
        ...
    elif event.type == "content_block_start":
        # "tool_use" / "server_tool_use" / "mcp_tool_use" blocks: warn if
        # content is being coerced to a string, emit the block (tagged with the
        # stream `index`) plus a `tool_call_chunk` for plain "tool_use", and
        # remember the event as the new `block_start_event`.
        ...
    elif event.type == "content_block_delta":
        # "text_delta" / "citations_delta" -> text content (a plain string when
        # `coerce_content_to_string` is set); "thinking_delta" /
        # "signature_delta" -> a "thinking" block; "input_json_delta" -> a
        # `tool_call_chunk` carrying the partial JSON, tied back to the pending
        # tool-use block via `block_start_event`.
        ...
    elif event.type == "message_delta":
        # Final chunk: `stop_reason` / `stop_sequence` response metadata plus
        # usage metadata; `chunk_position` is set to "last".
        ...
    return message_chunk, block_start_event


def _create_usage_metadata(anthropic_usage: BaseModel) -> UsageMetadata:
    """Create LangChain `UsageMetadata` from Anthropic `Usage` data.

    Note: Anthropic's `input_tokens` excludes cached tokens, so we manually add
    `cache_read` and `cache_creation` tokens to get the true total.
    """
    input_token_details: dict = {
        "cache_read": getattr(anthropic_usage, "cache_read_input_tokens", None),
        "cache_creation": getattr(anthropic_usage, "cache_creation_input_tokens", None),
    }
    # Expand the 5-minute / 1-hour ephemeral cache-creation breakdown when present.
    cache_creation = getattr(anthropic_usage, "cache_creation", None)
    cache_creation_keys = ("ephemeral_5m_input_tokens", "ephemeral_1h_input_tokens")
    if cache_creation:
        if isinstance(cache_creation, BaseModel):
            cache_creation = cache_creation.model_dump()
        for k in cache_creation_keys:
            input_token_details[k] = cache_creation.get(k)

    # Anthropic's `input_tokens` excludes cached tokens; add them back in.
    input_tokens = (
        (getattr(anthropic_usage, "input_tokens", 0) or 0)
        + (input_token_details["cache_read"] or 0)
        + (input_token_details["cache_creation"] or 0)
    )
    output_tokens = getattr(anthropic_usage, "output_tokens", 0) or 0
    return UsageMetadata(
        input_tokens=input_tokens,
        output_tokens=output_tokens,
        total_tokens=input_tokens + output_tokens,
        input_token_details=InputTokenDetails(
            **{k: v for k, v in input_token_details.items() if v is not None}
        ),
    )