"""Custom **exceptions** for LangChain."""

from enum import Enum
from typing import Any


class LangChainException(Exception):
    """General LangChain exception."""


class TracerException(LangChainException):
    """Base class for exceptions in tracers module."""
dededB dedB def fdd	Z  ZS )OutputParserExceptiona^  Exception that output parsers should raise to signify a parsing error.

    This exists to differentiate parsing errors from other code or execution errors
    that also may arise inside the output parser.

    `OutputParserException` will be available to catch and handle in ways to fix the
    parsing error, while other errors will be raised.
    NFerrorobservation
llm_outputsend_to_llmc                    sZ   t |trt|tjd}t | |r"|du s|du r"d}t||| _|| _	|| _
dS )aV  Create an `OutputParserException`.

        Args:
            error: The error that's being re-raised or an error message.
            observation: String explanation of error which can be passed to a model to
                try and remediate the issue.
            llm_output: String model output which is error-ing.

            send_to_llm: Whether to send the observation and llm_output back to an Agent
                after an `OutputParserException` has been raised.

                This gives the underlying model driving the agent the context that the
                previous output was improperly structured, in the hopes that it will
                update the output to the correct format.

        Raises:
            ValueError: If `send_to_llm` is `True` but either observation or
                `llm_output` are not provided.
        message
error_codeNzLArguments 'observation' & 'llm_output' are required if 'send_to_llm' is True)
isinstancestrcreate_message	ErrorCodeOUTPUT_PARSING_FAILUREsuper__init__
ValueErrorr   r   r   )selfr   r   r   r   msg	__class__r   r   r      s   

zOutputParserException.__init__)NNF)	r   r   r	   r


class ErrorCode(Enum):
    """Error codes."""

    INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT"
    INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS"
    MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE"
    MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION"
    MODEL_NOT_FOUND = "MODEL_NOT_FOUND"
    MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT"
    OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE"


def create_message(*, message: str, error_code: ErrorCode) -> str:
    """Create a message with a link to the LangChain troubleshooting guide.

    Args:
        message: The message to display.
        error_code: The error code to display.

    Returns:
        The full message with the troubleshooting link.
    """
    return (
        f"{message}\n"
        "For troubleshooting, visit: "
        f"https://docs.langchain.com/oss/python/langchain/errors/{error_code.value}"
    )
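

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): composing an error
# message that links to the troubleshooting guide. The example message text
# is made up; run this file directly to see the result.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo = create_message(
        message="Prompt is missing the required input variable 'question'.",
        error_code=ErrorCode.INVALID_PROMPT_INPUT,
    )
    # Prints the message followed by a line such as:
    # For troubleshooting, visit:
    # https://docs.langchain.com/oss/python/langchain/errors/INVALID_PROMPT_INPUT
    print(demo)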