28 changes: 19 additions & 9 deletions libs/core/langchain_core/language_models/chat_models.py
@@ -14,6 +14,7 @@
 from pydantic import BaseModel, ConfigDict, Field
 from typing_extensions import override
 
+from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import (
     AsyncCallbackManager,
@@ -358,11 +359,11 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
     )
 
     @cached_property
-    def _serialized(self) -> dict[str, Any]:
+    def _serialized(self) -> builtins.dict[str, Any]:
         # self is always a Serializable object in this case, thus the result is
         # guaranteed to be a dict since dumps uses the default callback, which uses
         # obj.to_json which always returns TypedDict subclasses
-        return cast("dict[str, Any]", dumpd(self))
+        return cast("builtins.dict[str, Any]", dumpd(self))
 
     # --- Runnable methods ---
 
@@ -734,7 +735,9 @@ async def astream(
 
     # --- Custom methods ---
 
-    def _combine_llm_outputs(self, _llm_outputs: list[dict | None], /) -> dict:
+    def _combine_llm_outputs(
+        self, _llm_outputs: list[builtins.dict | None], /
+    ) -> builtins.dict:
         return {}
 
     def _convert_cached_generations(self, cache_val: list) -> list[ChatGeneration]:
@@ -780,8 +783,8 @@ def _get_invocation_params(
         self,
         stop: list[str] | None = None,
         **kwargs: Any,
-    ) -> dict:
-        params = self.dict()
+    ) -> builtins.dict:
+        params = self.asdict()
         params["stop"] = stop
         return {**params, **kwargs}
 
@@ -846,7 +849,7 @@ def generate(
         callbacks: Callbacks = None,
         *,
         tags: list[str] | None = None,
-        metadata: dict[str, Any] | None = None,
+        metadata: builtins.dict[str, Any] | None = None,
         run_name: str | None = None,
         run_id: uuid.UUID | None = None,
         **kwargs: Any,
@@ -969,7 +972,7 @@ async def agenerate(
         callbacks: Callbacks = None,
         *,
         tags: list[str] | None = None,
-        metadata: dict[str, Any] | None = None,
+        metadata: builtins.dict[str, Any] | None = None,
         run_name: str | None = None,
         run_id: uuid.UUID | None = None,
         **kwargs: Any,
@@ -1511,8 +1514,15 @@ async def _call_async(
     def _llm_type(self) -> str:
         """Return type of chat model."""
 
-    @override
-    def dict(self, **kwargs: Any) -> dict:
+    @deprecated("1.2.5", alternative="asdict", removal="2.0")
+    def dict(self, **_kwargs: Any) -> builtins.dict[str, Any]:
+        """DEPRECATED - use `asdict()` instead.
+
+        Return a dictionary of the LLM.
+        """
+        return self.asdict()
+
+    def asdict(self) -> builtins.dict[str, Any]:
         """Return a dictionary of the LLM."""
         starter_dict = dict(self._identifying_params)
         starter_dict["_type"] = self._llm_type
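The chat-model half of the change in one picture: `dict()` becomes a deprecated alias that forwards to the new `asdict()`, and both return the `_identifying_params`-plus-`_type` payload. Below is a minimal sketch of the resulting behavior, not part of the diff; it assumes the `FakeListChatModel` test stub shipped with langchain_core and that the `@deprecated` decorator warns at call time, as it does elsewhere in the library.

```python
# Sketch, not part of the diff: asdict() is the supported spelling and
# dict() survives as a warning shim. FakeListChatModel is the canned-response
# stub that ships with langchain_core; any BaseChatModel behaves the same.
import warnings

from langchain_core.language_models import FakeListChatModel

model = FakeListChatModel(responses=["hi"])

params = model.asdict()
assert params["_type"] == model._llm_type  # "_type" is filled from _llm_type

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = model.dict()  # deprecated alias; same payload as asdict()
assert legacy == params
assert caught  # a deprecation warning pointing at asdict() is expected here
```

Keeping the shim callable means downstream code that still calls `dict()` keeps working through the 1.x line and only breaks at the stated 2.0 removal.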
33 changes: 21 additions & 12 deletions libs/core/langchain_core/language_models/llms.py
@@ -32,6 +32,7 @@
 )
 from typing_extensions import override
 
+from langchain_core._api import deprecated
 from langchain_core.caches import BaseCache
 from langchain_core.callbacks import (
     AsyncCallbackManager,
@@ -57,6 +58,7 @@
 from langchain_core.runnables.config import run_in_executor
 
 if TYPE_CHECKING:
+    import builtins
     import uuid
 
 logger = logging.getLogger(__name__)
@@ -300,11 +302,11 @@ class BaseLLM(BaseLanguageModel[str], ABC):
     )
 
     @functools.cached_property
-    def _serialized(self) -> dict[str, Any]:
+    def _serialized(self) -> builtins.dict[str, Any]:
         # self is always a Serializable object in this case, thus the result is
         # guaranteed to be a dict since dumps uses the default callback, which uses
         # obj.to_json which always returns TypedDict subclasses
-        return cast("dict[str, Any]", dumpd(self))
+        return cast("builtins.dict[str, Any]", dumpd(self))
 
     # --- Runnable methods ---
 
@@ -519,7 +521,7 @@ def stream(
         else:
             prompt = self._convert_input(input).to_string()
         config = ensure_config(config)
-        params = self.dict()
+        params = self.asdict()
         params["stop"] = stop
         params = {**params, **kwargs}
         options = {"stop": stop}
@@ -589,7 +591,7 @@ async def astream(
 
         prompt = self._convert_input(input).to_string()
         config = ensure_config(config)
-        params = self.dict()
+        params = self.asdict()
         params["stop"] = stop
         params = {**params, **kwargs}
         options = {"stop": stop}
@@ -844,7 +846,7 @@ def generate(
         callbacks: Callbacks | list[Callbacks] | None = None,
         *,
         tags: list[str] | list[list[str]] | None = None,
-        metadata: dict[str, Any] | list[dict[str, Any]] | None = None,
+        metadata: builtins.dict[str, Any] | list[builtins.dict[str, Any]] | None = None,
         run_name: str | list[str] | None = None,
         run_id: uuid.UUID | list[uuid.UUID | None] | None = None,
         **kwargs: Any,
@@ -977,7 +979,7 @@ def generate(
         ] * len(prompts)
         run_name_list = [cast("str | None", run_name)] * len(prompts)
         run_ids_list = self._get_run_ids_list(run_id, prompts)
-        params = self.dict()
+        params = self.asdict()
         params["stop"] = stop
         options = {"stop": stop}
         (
@@ -1119,7 +1121,7 @@ async def agenerate(
         callbacks: Callbacks | list[Callbacks] | None = None,
         *,
         tags: list[str] | list[list[str]] | None = None,
-        metadata: dict[str, Any] | list[dict[str, Any]] | None = None,
+        metadata: builtins.dict[str, Any] | list[builtins.dict[str, Any]] | None = None,
         run_name: str | list[str] | None = None,
         run_id: uuid.UUID | list[uuid.UUID | None] | None = None,
         **kwargs: Any,
@@ -1241,7 +1243,7 @@ async def agenerate(
         ] * len(prompts)
         run_name_list = [cast("str | None", run_name)] * len(prompts)
         run_ids_list = self._get_run_ids_list(run_id, prompts)
-        params = self.dict()
+        params = self.asdict()
         params["stop"] = stop
         options = {"stop": stop}
         (
@@ -1333,7 +1335,7 @@ async def _call_async(
         callbacks: Callbacks = None,
         *,
         tags: list[str] | None = None,
-        metadata: dict[str, Any] | None = None,
+        metadata: builtins.dict[str, Any] | None = None,
         **kwargs: Any,
     ) -> str:
         """Check Cache and run the LLM on the given prompt and input."""
@@ -1357,8 +1359,15 @@ def __str__(self) -> str:
     def _llm_type(self) -> str:
         """Return type of llm."""
 
-    @override
-    def dict(self, **kwargs: Any) -> dict:
+    @deprecated("1.2.5", alternative="asdict", removal="2.0")
+    def dict(self, **_kwargs: Any) -> builtins.dict[str, Any]:
+        """DEPRECATED - use `asdict()` instead.
+
+        Return a dictionary of the LLM.
+        """
+        return self.asdict()
+
+    def asdict(self) -> builtins.dict[str, Any]:
         """Return a dictionary of the LLM."""
         starter_dict = dict(self._identifying_params)
         starter_dict["_type"] = self._llm_type
@@ -1385,7 +1394,7 @@ def save(self, file_path: Path | str) -> None:
         directory_path.mkdir(parents=True, exist_ok=True)
 
         # Fetch dictionary to save
-        prompt_dict = self.dict()
+        prompt_dict = self.asdict()
 
         if save_path.suffix == ".json":
             with save_path.open("w", encoding="utf-8") as f:
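For LLMs the rename also touches every internal call site (`stream`, `astream`, `generate`, `agenerate`, `_call_async`, `save`), so invocation params and saved files are now built from `asdict()`. A short sketch of the `save()` path, assuming the `FakeListLLM` stub bundled with langchain_core; the file name is illustrative.

```python
# Sketch of the save() path, which now serializes asdict() rather than the
# deprecated dict(). FakeListLLM is the stub LLM bundled with langchain_core.
import json
from pathlib import Path
from tempfile import TemporaryDirectory

from langchain_core.language_models import FakeListLLM

llm = FakeListLLM(responses=["ok"])

with TemporaryDirectory() as tmp:
    path = Path(tmp) / "llm.json"
    llm.save(path)  # internally: prompt_dict = self.asdict()
    # The saved JSON should round-trip to the same payload asdict() returns.
    assert json.loads(path.read_text()) == llm.asdict()
```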
10 changes: 9 additions & 1 deletion libs/core/langchain_core/output_parsers/base.py
@@ -14,13 +14,16 @@
 
 from typing_extensions import override
 
+from langchain_core._api import deprecated
 from langchain_core.language_models import LanguageModelOutput
 from langchain_core.messages import AnyMessage, BaseMessage
 from langchain_core.outputs import ChatGeneration, Generation
 from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable
 from langchain_core.runnables.config import run_in_executor
 
 if TYPE_CHECKING:
+    import builtins
+
     from langchain_core.prompt_values import PromptValue
 
 T = TypeVar("T")
@@ -329,7 +332,12 @@ def _type(self) -> str:
         )
         raise NotImplementedError(msg)
 
-    def dict(self, **kwargs: Any) -> dict:
+    @deprecated("1.2.5", alternative="asdict", removal="2.0")
+    @override
+    def dict(self, **kwargs: Any) -> builtins.dict[str, Any]:
+        return self.asdict()
+
+    def asdict(self, **kwargs: Any) -> builtins.dict[str, Any]:
         """Return dictionary representation of output parser."""
         output_parser_dict = super().model_dump(**kwargs)
         with contextlib.suppress(NotImplementedError):
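Parsers get the same treatment, with one wrinkle: `dict()` keeps its `@override` marker while forwarding, and `asdict()` carries the docstring plus the `model_dump()`-and-`_type` logic. A sketch with a hypothetical parser follows; `YellParser`, its `_type` value, and `parse()` are invented for illustration and are not part of the diff.

```python
# Sketch with a hypothetical parser. asdict() is model_dump() plus a
# best-effort "_type" key; dict() now only forwards to it and warns.
from langchain_core.output_parsers.base import BaseOutputParser


class YellParser(BaseOutputParser[str]):
    """Hypothetical parser that upper-cases the raw model text."""

    def parse(self, text: str) -> str:
        return text.upper()

    @property
    def _type(self) -> str:
        # Without this property, asdict() suppresses the NotImplementedError
        # and simply omits the "_type" key.
        return "yell"


parser = YellParser()
assert parser.parse("quiet") == "QUIET"
assert parser.asdict()["_type"] == "yell"
parser.dict()  # still callable, but emits a deprecation warning
```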
47 changes: 30 additions & 17 deletions libs/core/langchain_core/prompts/base.py
@@ -2,11 +2,11 @@
 
 from __future__ import annotations
 
-import builtins  # noqa: TC003
+import builtins
 import contextlib
 import json
 from abc import ABC, abstractmethod
-from collections.abc import Mapping  # noqa: TC003
+from collections.abc import Callable, Mapping
 from functools import cached_property
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast
@@ -15,21 +15,20 @@
 from pydantic import BaseModel, ConfigDict, Field, model_validator
 from typing_extensions import Self, override
 
+from langchain_core._api import deprecated
 from langchain_core.exceptions import ErrorCode, create_message
 from langchain_core.load import dumpd
-from langchain_core.output_parsers.base import BaseOutputParser  # noqa: TC001
+from langchain_core.output_parsers.base import BaseOutputParser
 from langchain_core.prompt_values import (
     ChatPromptValueConcrete,
     PromptValue,
     StringPromptValue,
 )
-from langchain_core.runnables import RunnableConfig, RunnableSerializable
-from langchain_core.runnables.config import ensure_config
+from langchain_core.runnables.base import RunnableSerializable
+from langchain_core.runnables.config import RunnableConfig, ensure_config
 from langchain_core.utils.pydantic import create_model_v2
 
 if TYPE_CHECKING:
-    from collections.abc import Callable
-
     from langchain_core.documents import Document
 
 
@@ -116,11 +115,11 @@ def is_lc_serializable(cls) -> bool:
     )
 
     @cached_property
-    def _serialized(self) -> dict[str, Any]:
+    def _serialized(self) -> builtins.dict[str, Any]:
         # self is always a Serializable object in this case, thus the result is
         # guaranteed to be a dict since dumpd uses the default callback, which uses
         # obj.to_json which always returns TypedDict subclasses
-        return cast("dict[str, Any]", dumpd(self))
+        return cast("builtins.dict[str, Any]", dumpd(self))
 
     @property
     @override
@@ -150,7 +149,7 @@ def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
         field_definitions={**required_input_variables, **optional_input_variables},
     )
 
-    def _validate_input(self, inner_input: Any) -> dict:
+    def _validate_input(self, inner_input: Any) -> builtins.dict:
         if not isinstance(inner_input, dict):
             if len(self.input_variables) == 1:
                 var_name = self.input_variables[0]
@@ -186,19 +185,23 @@ def _validate_input(self, inner_input: Any) -> dict:
             )
         return inner_input_
 
-    def _format_prompt_with_error_handling(self, inner_input: dict) -> PromptValue:
+    def _format_prompt_with_error_handling(
+        self,
+        inner_input: builtins.dict,
+    ) -> PromptValue:
         inner_input_ = self._validate_input(inner_input)
         return self.format_prompt(**inner_input_)
 
     async def _aformat_prompt_with_error_handling(
-        self, inner_input: dict
+        self,
+        inner_input: builtins.dict,
     ) -> PromptValue:
         inner_input_ = self._validate_input(inner_input)
         return await self.aformat_prompt(**inner_input_)
 
     @override
     def invoke(
-        self, input: dict, config: RunnableConfig | None = None, **kwargs: Any
+        self, input: builtins.dict, config: RunnableConfig | None = None, **kwargs: Any
     ) -> PromptValue:
         """Invoke the prompt.
 
@@ -224,7 +227,7 @@ def invoke(
 
     @override
     async def ainvoke(
-        self, input: dict, config: RunnableConfig | None = None, **kwargs: Any
+        self, input: builtins.dict, config: RunnableConfig | None = None, **kwargs: Any
     ) -> PromptValue:
         """Async invoke the prompt.
 
@@ -286,7 +289,9 @@ def partial(self, **kwargs: str | Callable[[], str]) -> BasePromptTemplate:
         prompt_dict["partial_variables"] = {**self.partial_variables, **kwargs}
         return type(self)(**prompt_dict)
 
-    def _merge_partial_and_user_variables(self, **kwargs: Any) -> dict[str, Any]:
+    def _merge_partial_and_user_variables(
+        self, **kwargs: Any
+    ) -> builtins.dict[str, Any]:
         # Get partial params:
         partial_kwargs = {
             k: v if not callable(v) else v() for k, v in self.partial_variables.items()
@@ -330,7 +335,15 @@ def _prompt_type(self) -> str:
         """Return the prompt type key."""
         raise NotImplementedError
 
-    def dict(self, **kwargs: Any) -> dict:
+    @deprecated("1.2.5", alternative="asdict", removal="2.0")
+    def dict(self, **kwargs: Any) -> builtins.dict[str, Any]:
+        """DEPRECATED - use `asdict()` instead.
+
+        Return a dictionary of the LLM.
+        """
+        return self.asdict(**kwargs)
+
+    def asdict(self, **kwargs: Any) -> builtins.dict[str, Any]:
         """Return dictionary representation of prompt.
 
         Args:
@@ -365,7 +378,7 @@ def save(self, file_path: Path | str) -> None:
             raise ValueError(msg)
 
         # Fetch dictionary to save
-        prompt_dict = self.dict()
+        prompt_dict = self.asdict()
         if "_type" not in prompt_dict:
             msg = f"Prompt {self} does not support saving."
             raise NotImplementedError(msg)
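Prompt templates close the loop: `save()` reads from `asdict()`, and the `_type` key comes from `_prompt_type`, so only templates that define a prompt type remain saveable. A sketch with the stock `PromptTemplate`; the file name is illustrative.

```python
# Sketch with the stock PromptTemplate. save() fetches its payload via
# asdict(), and the deprecated dict() shim forwards **kwargs to asdict()
# so existing callers keep working until the 2.0 removal.
from pathlib import Path
from tempfile import TemporaryDirectory

from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("Tell me a joke about {topic}")
payload = prompt.asdict()
assert payload["_type"] == "prompt"  # supplied by _prompt_type

with TemporaryDirectory() as tmp:
    path = Path(tmp) / "joke_prompt.json"
    prompt.save(path)  # writes the asdict() payload as JSON
```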