1"""Base classes and utilities for `Runnable` objects."""23from __future__ import annotations45import asyncio6import collections7import contextlib8import functools9import inspect10import threading11from abc import ABC, abstractmethod12from collections.abc import (13 AsyncGenerator,14 AsyncIterator,15 Awaitable,16 Callable,17 Coroutine,18 Iterator,19 Mapping,20 Sequence,21)22from concurrent.futures import FIRST_COMPLETED, wait23from functools import wraps24from itertools import tee25from operator import itemgetter26from types import GenericAlias27from typing import (28 TYPE_CHECKING,29 Any,30 Generic,31 Literal,32 Protocol,33 TypeVar,34 cast,35 get_args,36 get_type_hints,37 overload,38)3940from pydantic import BaseModel, ConfigDict, Field, RootModel41from typing_extensions import override4243from langchain_core._api import beta_decorator44from langchain_core._api.deprecation import warn_deprecated45from langchain_core.callbacks.manager import AsyncCallbackManager, CallbackManager46from langchain_core.load.serializable import (47 Serializable,48 SerializedConstructor,49 SerializedNotImplemented,50)51from langchain_core.runnables.config import (52 RunnableConfig,53 acall_func_with_variable_args,54 call_func_with_variable_args,55 ensure_config,56 get_async_callback_manager_for_config,57 get_callback_manager_for_config,58 get_config_list,59 get_executor_for_config,60 merge_configs,61 patch_config,62 run_in_executor,63 set_config_context,64)65from langchain_core.runnables.utils import (66 AddableDict,67 AnyConfigurableField,68 ConfigurableField,69 ConfigurableFieldSpec,70 Input,71 Output,72 accepts_config,73 accepts_run_manager,74 coro_with_context,75 gated_coro,76 gather_with_concurrency,77 get_function_first_arg_dict_keys,78 get_function_nonlocals,79 get_lambda_source,80 get_unique_config_specs,81 indent_lines_after_first,82 is_async_callable,83 is_async_generator,84)85from langchain_core.tracers._streaming import _StreamingCallbackHandler86from 
langchain_core.tracers.event_stream import (
    _astream_events_implementation_v1,
    _astream_events_implementation_v2,
)
from langchain_core.tracers.log_stream import (
    LogStreamCallbackHandler,
    _astream_log_implementation,
)
from langchain_core.tracers.root_listeners import (
    AsyncRootListenersTracer,
    RootListenersTracer,
)
from langchain_core.utils.aiter import aclosing, atee
from langchain_core.utils.iter import safetee
from langchain_core.utils.pydantic import create_model_v2

if TYPE_CHECKING:
    from langchain_core.callbacks.manager import (
        AsyncCallbackManagerForChainRun,
        CallbackManagerForChainRun,
    )
    from langchain_core.prompts.base import BasePromptTemplate
    from langchain_core.runnables.fallbacks import (
        RunnableWithFallbacks as RunnableWithFallbacksT,
    )
    from langchain_core.runnables.graph import Graph
    from langchain_core.runnables.retry import ExponentialJitterParams
    from langchain_core.runnables.schema import StreamEvent
    from langchain_core.tools import BaseTool
    from langchain_core.tracers.log_stream import RunLog, RunLogPatch
    from langchain_core.tracers.root_listeners import AsyncListener
    from langchain_core.tracers.schemas import Run


Other = TypeVar("Other")

# A ``Runnable[Input, Output]`` carries exactly two generic type arguments.
_RUNNABLE_GENERIC_NUM_ARGS = 2  # Input and Output


class Runnable(ABC, Generic[Input, Output]):
    """A unit of work that can be invoked, batched, streamed, transformed and composed.

    Key Methods
    ===========

    - `invoke`/`ainvoke`: Transforms a single input into an output.
    - `batch`/`abatch`: Efficiently transforms multiple inputs into outputs.
    - `stream`/`astream`: Streams output from a single input as it's produced.
    - `astream_log`: Streams output and selected intermediate results from an
      input.

    Built-in optimizations:

    - **Batch**: By default, batch runs invoke() in parallel using a thread pool
      executor. Override to optimize batching.

    - **Async**: Methods with `'a'` prefix are asynchronous. By default, they execute
      the sync counterpart using asyncio's thread pool.
      Override for native async.

    All methods accept an optional config argument, which can be used to configure
    execution, add tags and metadata for tracing and debugging etc.

    Runnables expose schematic information about their input, output and config via
    the `input_schema` property, the `output_schema` property and `config_schema`
    method.

    Composition
    ===========

    Runnable objects can be composed together to create chains in a declarative way.

    Any chain constructed this way will automatically have sync, async, batch, and
    streaming support.

    The main composition primitives are `RunnableSequence` and `RunnableParallel`.

    **`RunnableSequence`** invokes a series of runnables sequentially, with
    one Runnable's output serving as the next's input. Construct using
    the `|` operator or by passing a list of runnables to `RunnableSequence`.

    **`RunnableParallel`** invokes runnables concurrently, providing the same input
    to each. Construct it using a dict literal within a sequence or by passing a
    dict to `RunnableParallel`.


    For example,

    ```python
    from langchain_core.runnables import RunnableLambda

    # A RunnableSequence constructed using the `|` operator
    sequence = RunnableLambda(lambda x: x + 1) | RunnableLambda(lambda x: x * 2)
    sequence.invoke(1)  # 4
    sequence.batch([1, 2, 3])  # [4, 6, 8]


    # A sequence that contains a RunnableParallel constructed using a dict literal
    sequence = RunnableLambda(lambda x: x + 1) | {
        "mul_2": RunnableLambda(lambda x: x * 2),
        "mul_5": RunnableLambda(lambda x: x * 5),
    }
    sequence.invoke(1)  # {'mul_2': 4, 'mul_5': 10}
    ```

    Standard Methods
    ================

    All `Runnable`s expose additional methods that can be used to modify their
    behavior (e.g., add a retry policy, add lifecycle listeners, make them
    configurable, etc.).

    These methods will work on any `Runnable`, including `Runnable` chains
    constructed by composing other `Runnable`s.
    See the individual methods for details.

    For example,

    ```python
    from langchain_core.runnables import RunnableLambda

    import random

    def add_one(x: int) -> int:
        return x + 1


    def buggy_double(y: int) -> int:
        \"\"\"Buggy code that will fail 70% of the time\"\"\"
        if random.random() > 0.3:
            print('This code failed, and will probably be retried!')  # noqa: T201
            raise ValueError('Triggered buggy code')
        return y * 2

    sequence = (
        RunnableLambda(add_one) |
        RunnableLambda(buggy_double).with_retry( # Retry on failure
            stop_after_attempt=10,
            wait_exponential_jitter=False
        )
    )

    print(sequence.input_schema.model_json_schema()) # Show inferred input schema
    print(sequence.output_schema.model_json_schema()) # Show inferred output schema
    print(sequence.invoke(2)) # invoke the sequence (note the retry above!!)
    ```

    Debugging and tracing
    =====================

    As the chains get longer, it can be useful to be able to see intermediate results
    to debug and trace the chain.

    You can set the global debug flag to True to enable debug output for all chains:

    ```python
    from langchain_core.globals import set_debug

    set_debug(True)
    ```

    Alternatively, you can pass existing or custom callbacks to any given chain:

    ```python
    from langchain_core.tracers import ConsoleCallbackHandler

    chain.invoke(..., config={"callbacks": [ConsoleCallbackHandler()]})
    ```

    For a UI (and much more) check out [LangSmith](https://docs.langchain.com/langsmith/home).

    """

    name: str | None
    """The name of the `Runnable`. Used for debugging and tracing."""

    def get_name(self, suffix: str | None = None, *, name: str | None = None) -> str:
        """Get the name of the `Runnable`.

        Args:
            suffix: An optional suffix to append to the name.
            name: An optional name to use instead of the `Runnable`'s name.

        Returns:
            The name of the `Runnable`.
        """
        # Resolution order: explicit `name` argument > the instance's `name`
        # attribute > the class name (unwrapping pydantic generics if needed).
        if name:
            name_ = name
        elif hasattr(self, "name") and self.name:
            name_ = self.name
        else:
            # Here we handle a case where the runnable subclass is also a pydantic
            # model.
            cls = self.__class__
            # Then it's a pydantic sub-class, and we have to check
            # whether it's a generic, and if so recover the original name.
            if (
                hasattr(
                    cls,
                    "__pydantic_generic_metadata__",
                )
                and "origin" in cls.__pydantic_generic_metadata__
                and cls.__pydantic_generic_metadata__["origin"] is not None
            ):
                name_ = cls.__pydantic_generic_metadata__["origin"].__name__
            else:
                name_ = cls.__name__

        if suffix:
            # CamelCase base names get a TitleCase suffix; otherwise append
            # the suffix snake_case-style with an underscore separator.
            if name_[0].isupper():
                return name_ + suffix.title()
            return name_ + "_" + suffix.lower()
        return name_

    @property
    def InputType(self) -> type[Input]:  # noqa: N802
        """Input type.

        The type of input this `Runnable` accepts specified as a type annotation.

        Raises:
            TypeError: If the 
input type cannot be inferred.
        """
        # First loop through all parent classes and if any of them is
        # a Pydantic model, we will pick up the generic parameterization
        # from that model via the __pydantic_generic_metadata__ attribute.
        for base in self.__class__.mro():
            if hasattr(base, "__pydantic_generic_metadata__"):
                metadata = base.__pydantic_generic_metadata__
                if (
                    "args" in metadata
                    and len(metadata["args"]) == _RUNNABLE_GENERIC_NUM_ARGS
                ):
                    return cast("type[Input]", metadata["args"][0])

        # If we didn't find a Pydantic model in the parent classes,
        # then loop through __orig_bases__. This corresponds to
        # Runnables that are not pydantic models.
        for cls in self.__class__.__orig_bases__:  # type: ignore[attr-defined]
            type_args = get_args(cls)
            if type_args and len(type_args) == _RUNNABLE_GENERIC_NUM_ARGS:
                return cast("type[Input]", type_args[0])

        msg = (
            f"Runnable {self.get_name()} doesn't have an inferable InputType. "
            "Override the InputType property to specify the input type."
        )
        raise TypeError(msg)

    @property
    def OutputType(self) -> type[Output]:  # noqa: N802
        """Output Type.

        The type of output this `Runnable` produces specified as a type annotation.

        Raises:
            TypeError: If the output type cannot be inferred.
        """
        # First loop through bases -- this handles generic pydantic models,
        # whose type arguments are recorded in __pydantic_generic_metadata__.
        for base in self.__class__.mro():
            if hasattr(base, "__pydantic_generic_metadata__"):
                metadata = base.__pydantic_generic_metadata__
                if (
                    "args" in metadata
                    and len(metadata["args"]) == _RUNNABLE_GENERIC_NUM_ARGS
                ):
                    return cast("type[Output]", metadata["args"][1])

        # Fall back to __orig_bases__ for non-pydantic Runnables.
        for cls in self.__class__.__orig_bases__:  # type: ignore[attr-defined]
            type_args = get_args(cls)
            if type_args and len(type_args) == _RUNNABLE_GENERIC_NUM_ARGS:
                return cast("type[Output]", type_args[1])

        msg = (
            f"Runnable {self.get_name()} doesn't have an inferable OutputType. "
            "Override the OutputType property to specify the output type."
        )
        raise TypeError(msg)

    @property
    def input_schema(self) -> type[BaseModel]:
        """The type of input this `Runnable` accepts specified as a Pydantic model."""
        return self.get_input_schema()

    def get_input_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        """Get a Pydantic model that can be used to validate input to the `Runnable`.

        `Runnable` objects that leverage the `configurable_fields` and
        `configurable_alternatives` methods will have a dynamic input schema that
        depends on which configuration the `Runnable` is invoked with.

        This method allows to get an input schema for a specific configuration.

        Args:
            config: A config to use when generating the schema.

        Returns:
            A Pydantic model that can be used to validate input.
        """
        _ = config
        root_type = self.InputType

        # If the input type is already a (non-generic-alias) pydantic model,
        # use it directly as the schema.
        if (
            inspect.isclass(root_type)
            and not isinstance(root_type, GenericAlias)
            and issubclass(root_type, BaseModel)
        ):
            return root_type

        return create_model_v2(
            self.get_name("Input"),
            root=root_type,
            # create model needs access to appropriate type annotations to be
            # able to construct the Pydantic model.
            # When we create the model, we pass information about the namespace
            # where the model is being created, so the type annotations can
            # be resolved correctly as well.
            # self.__class__.__module__ handles the case when the Runnable is
            # being sub-classed in a different module.
            module_name=self.__class__.__module__,
        )

    def get_input_jsonschema(
        self, config: RunnableConfig | None = None
    ) -> dict[str, Any]:
        """Get a JSON schema that represents the input to the `Runnable`.

        Args:
            config: A config to use when generating the schema.

        Returns:
            A JSON schema that represents the input to the `Runnable`.

        Example:
            ```python
            from 
langchain_core.runnables import RunnableLambda


            def add_one(x: int) -> int:
                return x + 1


            runnable = RunnableLambda(add_one)

            print(runnable.get_input_jsonschema())
            ```

        !!! version-added "Added in `langchain-core` 0.3.0"

        """
        return self.get_input_schema(config).model_json_schema()

    @property
    def output_schema(self) -> type[BaseModel]:
        """Output schema.

        The type of output this `Runnable` produces specified as a Pydantic model.
        """
        return self.get_output_schema()

    def get_output_schema(
        self,
        config: RunnableConfig | None = None,
    ) -> type[BaseModel]:
        """Get a Pydantic model that can be used to validate output to the `Runnable`.

        `Runnable` objects that leverage the `configurable_fields` and
        `configurable_alternatives` methods will have a dynamic output schema that
        depends on which configuration the `Runnable` is invoked with.

        This method allows to get an output schema for a specific configuration.

        Args:
            config: A config to use when generating the schema.

        Returns:
            A Pydantic model that can be used to validate output.
        """
        _ = config
        root_type = self.OutputType

        # If the output type is already a (non-generic-alias) pydantic model,
        # use it directly as the schema.
        if (
            inspect.isclass(root_type)
            and not isinstance(root_type, GenericAlias)
            and issubclass(root_type, BaseModel)
        ):
            return root_type

        return create_model_v2(
            self.get_name("Output"),
            root=root_type,
            # create model needs access to appropriate type annotations to be
            # able to construct the Pydantic model.
            # When we create the model, we pass information about the namespace
            # where the model is being created, so the type annotations can
            # be resolved correctly as well.
            # self.__class__.__module__ handles the case when the Runnable is
            # being sub-classed in a different module.
            module_name=self.__class__.__module__,
        )

    def get_output_jsonschema(
        self, config: RunnableConfig | None = None
    ) -> dict[str, Any]:
        """Get a JSON schema that represents the output of the `Runnable`.

        Args:
            config: A config to use when generating the schema.

        Returns:
            A JSON schema that represents the output of the `Runnable`.

        Example:
            ```python
            from langchain_core.runnables import RunnableLambda


            def add_one(x: int) -> int:
                return x + 1


            runnable = RunnableLambda(add_one)

            print(runnable.get_output_jsonschema())
            ```

        !!! version-added "Added in `langchain-core` 0.3.0"

        """
        return self.get_output_schema(config).model_json_schema()

    @property
    def config_specs(self) -> list[ConfigurableFieldSpec]:
        """List configurable fields for this `Runnable`."""
        return []

    def config_schema(self, *, include: Sequence[str] | None = None) -> type[BaseModel]:
        """The type of config this `Runnable` accepts specified as a Pydantic model.

        To mark a field as configurable, see the `configurable_fields`
        and `configurable_alternatives` methods.

        Args:
            include: A list of fields to include in the config schema.

        Returns:
            A Pydantic model that can be used to validate config.

        """
        include = include or []
        config_specs = self.config_specs
        # Build a nested "Configurable" model from the declared field specs,
        # or omit it entirely when there are none.
        configurable = (
            create_model_v2(
                "Configurable",
                field_definitions={
                    spec.id: (
                        spec.annotation,
                        Field(
                            spec.default, title=spec.name, description=spec.description
                        ),
                    )
                    for spec in config_specs
                },
            )
            if config_specs
            else None
        )

        # May need to create a typed dict instead to implement NotRequired!
        all_fields = {
            **({"configurable": (configurable, None)} if configurable else {}),
            **{
                field_name: (field_type, None)
                for field_name, field_type in get_type_hints(RunnableConfig).items()
                if field_name in [i for i in include if i != "configurable"]
            },
        }
        return create_model_v2(self.get_name("Config"), field_definitions=all_fields)

    def get_config_jsonschema(
        self, *, include: Sequence[str] | None = None
    ) -> dict[str, Any]:
        """Get a JSON schema that represents the config of the `Runnable`.

        Args:
            include: A list of fields to include in the config schema.

        Returns:
            A JSON schema that represents the config of the `Runnable`.

        !!! version-added "Added in `langchain-core` 0.3.0"

        """
        return self.config_schema(include=include).model_json_schema()

    def get_graph(self, config: RunnableConfig | None = None) -> Graph:
        """Return a graph representation of this `Runnable`."""
        # Import locally to prevent circular import
        from langchain_core.runnables.graph import Graph  # noqa: PLC0415

        graph = Graph()
        # Schema nodes may fail to build (TypeError from an un-inferable
        # Input/Output type); fall back to an empty placeholder model.
        try:
            input_node = graph.add_node(self.get_input_schema(config))
        except TypeError:
            input_node = graph.add_node(create_model_v2(self.get_name("Input")))
        runnable_node = graph.add_node(
            self, metadata=config.get("metadata") if config else None
        )
        try:
            output_node = graph.add_node(self.get_output_schema(config))
        except TypeError:
            output_node = graph.add_node(create_model_v2(self.get_name("Output")))
        graph.add_edge(input_node, runnable_node)
        graph.add_edge(runnable_node, output_node)
        return graph

    def get_prompts(
        self, config: RunnableConfig | None = None
    ) -> list[BasePromptTemplate]:
        """Return a list of prompts used by this `Runnable`."""
        # Import locally to prevent circular import
        from langchain_core.prompts.base import BasePromptTemplate  # noqa: PLC0415

        return [
            node.data
            for node in self.get_graph(config=config).nodes.values()
            if isinstance(node.data, BasePromptTemplate)
        ]

    def __or__(
        self,
        other: Runnable[Any, Other]
        | Callable[[Iterator[Any]], Iterator[Other]]
        | Callable[[AsyncIterator[Any]], AsyncIterator[Other]]
        | Callable[[Any], Other]
        | Mapping[str, Runnable[Any, Other] | Callable[[Any], Other] | Any],
    ) -> RunnableSerializable[Input, Other]:
        """Runnable "or" operator.

        Compose this `Runnable` with another 
object to create a
        `RunnableSequence`.

        Args:
            other: Another `Runnable` or a `Runnable`-like object.

        Returns:
            A new `Runnable`.
        """
        return RunnableSequence(self, coerce_to_runnable(other))

    def __ror__(
        self,
        other: Runnable[Other, Any]
        | Callable[[Iterator[Other]], Iterator[Any]]
        | Callable[[AsyncIterator[Other]], AsyncIterator[Any]]
        | Callable[[Other], Any]
        | Mapping[str, Runnable[Other, Any] | Callable[[Other], Any] | Any],
    ) -> RunnableSerializable[Other, Output]:
        """Runnable "reverse-or" operator.

        Compose this `Runnable` with another object to create a
        `RunnableSequence`.

        Args:
            other: Another `Runnable` or a `Runnable`-like object.

        Returns:
            A new `Runnable`.
        """
        # Reverse-or: `other` runs first, feeding its output into `self`.
        return RunnableSequence(coerce_to_runnable(other), self)

    def pipe(
        self,
        *others: Runnable[Any, Other] | Callable[[Any], Other],
        name: str | None = None,
    ) -> RunnableSerializable[Input, Other]:
        """Pipe `Runnable` objects.

        Compose this `Runnable` with `Runnable`-like objects to make a
        `RunnableSequence`.

        Equivalent to `RunnableSequence(self, *others)` or `self | others[0] | ...`

        Example:
            ```python
            from langchain_core.runnables import RunnableLambda


            def add_one(x: int) -> int:
                return x + 1


            def mul_two(x: int) -> int:
                return x * 2


            runnable_1 = RunnableLambda(add_one)
            runnable_2 = RunnableLambda(mul_two)
            sequence = runnable_1.pipe(runnable_2)
            # Or equivalently:
            # sequence = runnable_1 | runnable_2
            # sequence = RunnableSequence(first=runnable_1, last=runnable_2)
            sequence.invoke(1)
            await sequence.ainvoke(1)
            # -> 4

            sequence.batch([1, 2, 3])
            await sequence.abatch([1, 2, 3])
            # -> [4, 6, 8]
            ```

        Args:
            *others: Other `Runnable` or `Runnable`-like objects to compose
            name: An optional name for the resulting `RunnableSequence`.

        Returns:
            A new `Runnable`.
        """
        return RunnableSequence(self, *others, name=name)

    def pick(self, keys: str | list[str]) -> RunnableSerializable[Any, Any]:
        """Pick keys from the output `dict` of this `Runnable`.

        !!! example "Pick a single key"

            ```python
            import json

            from langchain_core.runnables import RunnableLambda, RunnableMap

            as_str = RunnableLambda(str)
            as_json = RunnableLambda(json.loads)
            chain = RunnableMap(str=as_str, json=as_json)

            chain.invoke("[1, 2, 3]")
            # -> {"str": "[1, 2, 3]", "json": [1, 2, 3]}

            json_only_chain = chain.pick("json")
            json_only_chain.invoke("[1, 2, 3]")
            # -> [1, 2, 3]
            ```

        !!! example "Pick a list of keys"

            ```python
            from typing import Any

            import json

            from langchain_core.runnables import RunnableLambda, RunnableMap

            as_str = RunnableLambda(str)
            as_json = RunnableLambda(json.loads)


            def as_bytes(x: Any) -> bytes:
                return bytes(x, "utf-8")


            chain = RunnableMap(
                str=as_str, json=as_json, bytes=RunnableLambda(as_bytes)
            )

            chain.invoke("[1, 2, 3]")
            # -> {"str": "[1, 2, 3]", "json": [1, 2, 3], "bytes": b"[1, 2, 3]"}

            json_and_bytes_chain = chain.pick(["json", "bytes"])
            json_and_bytes_chain.invoke("[1, 2, 3]")
            # -> {"json": [1, 2, 3], "bytes": b"[1, 2, 3]"}
            ```

        Args:
            keys: A key or list of keys to pick from the output dict.

        Returns:
            a new `Runnable`.

        """
        # Import locally to prevent circular import
        from langchain_core.runnables.passthrough import RunnablePick  # noqa: PLC0415

        return self | RunnablePick(keys)

    def assign(
        self,
        **kwargs: Runnable[dict[str, Any], Any]
        | Callable[[dict[str, Any]], Any]
        | Mapping[str, Runnable[dict[str, Any], Any] | Callable[[dict[str, Any]], Any]],
    ) -> RunnableSerializable[Any, Any]:
        """Assigns new fields to the `dict` output of this `Runnable`.

        ```python
        from langchain_core.language_models.fake import FakeStreamingListLLM
        from 
langchain_core.output_parsers import StrOutputParser
        from langchain_core.prompts import SystemMessagePromptTemplate
        from langchain_core.runnables import Runnable
        from operator import itemgetter

        prompt = (
            SystemMessagePromptTemplate.from_template("You are a nice assistant.")
            + "{question}"
        )
        model = FakeStreamingListLLM(responses=["foo-lish"])

        chain: Runnable = prompt | model | {"str": StrOutputParser()}

        chain_with_assign = chain.assign(hello=itemgetter("str") | model)

        print(chain_with_assign.input_schema.model_json_schema())
        # {'title': 'PromptInput', 'type': 'object', 'properties':
        {'question': {'title': 'Question', 'type': 'string'}}}
        print(chain_with_assign.output_schema.model_json_schema())
        # {'title': 'RunnableSequenceOutput', 'type': 'object', 'properties':
        {'str': {'title': 'Str',
        'type': 'string'}, 'hello': {'title': 'Hello', 'type': 'string'}}}
        ```

        Args:
            **kwargs: A mapping of keys to `Runnable` or `Runnable`-like objects
                that will be invoked with the entire output dict of this `Runnable`.

        Returns:
            A new `Runnable`.

        """
        # Import locally to prevent circular import
        from langchain_core.runnables.passthrough import RunnableAssign  # noqa: PLC0415

        return self | RunnableAssign(RunnableParallel[dict[str, Any]](kwargs))

    """ --- Public API --- """

    @abstractmethod
    def invoke(
        self,
        input: Input,
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> Output:
        """Transform a single input into an output.

        Args:
            input: The input to the `Runnable`.
            config: A config to use when invoking the `Runnable`.

                The config supports standard keys like `'tags'`, `'metadata'` for
                tracing purposes, `'max_concurrency'` for controlling how much work to
                do in parallel, and other keys.

                Please refer to `RunnableConfig` for more details.

        Returns:
            The output of the `Runnable`.
        """

    async def ainvoke(
        self,
        input: Input,
        config: RunnableConfig | None = None,
        **kwargs: Any,
    ) -> Output:
        """Transform a single input into an output.

        Args:
            input: The input to the `Runnable`.
            config: A config to use when invoking the `Runnable`.

                The config supports standard keys like `'tags'`, `'metadata'` for
                tracing purposes, `'max_concurrency'` for controlling how much work to
                do in parallel, and other keys.

                Please refer to `RunnableConfig` for more details.

        Returns:
            The output of the `Runnable`.
        """
        # Default async implementation: delegate the sync invoke to an executor.
        return await run_in_executor(config, self.invoke, input, config, **kwargs)

    def batch(
        self,
        inputs: list[Input],
        config: RunnableConfig | list[RunnableConfig] | None = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any | None,
    ) -> list[Output]:
        """Default implementation runs invoke in parallel using a thread pool executor.

        The default implementation of batch works well for IO bound runnables.

        Subclasses must override this method if they can batch more efficiently;
        e.g., if the underlying `Runnable` uses an API which supports a batch mode.

        Args:
            inputs: A list of inputs to the `Runnable`.
            config: A config to use when invoking the `Runnable`. 
The config supports
                standard keys like `'tags'`, `'metadata'` for
                tracing purposes, `'max_concurrency'` for controlling how much work
                to do in parallel, and other keys.

                Please refer to `RunnableConfig` for more details.
            return_exceptions: Whether to return exceptions instead of raising them.
            **kwargs: Additional keyword arguments to pass to the `Runnable`.

        Returns:
            A list of outputs from the `Runnable`.
        """
        if not inputs:
            return []

        configs = get_config_list(config, len(inputs))

        def invoke(input_: Input, config: RunnableConfig) -> Output | Exception:
            if return_exceptions:
                try:
                    return self.invoke(input_, config, **kwargs)
                except Exception as e:
                    return e
            else:
                return self.invoke(input_, config, **kwargs)

        # If there's only one input, don't bother with the executor
        if len(inputs) == 1:
            return cast("list[Output]", [invoke(inputs[0], configs[0])])

        with get_executor_for_config(configs[0]) as executor:
            return cast("list[Output]", list(executor.map(invoke, inputs, configs)))

    @overload
    def batch_as_completed(
        self,
        inputs: Sequence[Input],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: Literal[False] = False,
        **kwargs: Any,
    ) -> Iterator[tuple[int, Output]]: ...

    @overload
    def batch_as_completed(
        self,
        inputs: Sequence[Input],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: Literal[True],
        **kwargs: Any,
    ) -> Iterator[tuple[int, Output | Exception]]: ...

    def batch_as_completed(
        self,
        inputs: Sequence[Input],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any | None,
    ) -> Iterator[tuple[int, Output | Exception]]:
        """Run `invoke` in parallel on a list of inputs.

        Yields results as they complete.

        Args:
            inputs: A list of inputs to the `Runnable`.
            config: A config to use when invoking the `Runnable`.

                The config supports standard keys like `'tags'`, `'metadata'` for
                tracing purposes, `'max_concurrency'` for controlling how much work to
                do in parallel, and other keys.

                Please refer to `RunnableConfig` for more details.
            return_exceptions: Whether to return exceptions instead of raising them.
            **kwargs: Additional keyword arguments to pass to the `Runnable`.

        Yields:
            Tuples of the index of the input and the output from the `Runnable`.

        """
        if not inputs:
            return

        configs = get_config_list(config, len(inputs))

        def invoke(
            i: int, input_: Input, config: RunnableConfig
        ) -> tuple[int, Output | Exception]:
            if return_exceptions:
                try:
                    out: Output | Exception = self.invoke(input_, config, **kwargs)
                except Exception as e:
                    out = e
            else:
                out = self.invoke(input_, config, **kwargs)

            return (i, out)

        # Single input: skip the executor entirely.
        if len(inputs) == 1:
            yield invoke(0, inputs[0], configs[0])
            return

        with get_executor_for_config(configs[0]) as executor:
            futures = {
                executor.submit(invoke, i, input_, config)
                for i, (input_, config) in enumerate(zip(inputs, configs, strict=False))
            }

            try:
                # Drain futures as they finish so results are yielded in
                # completion order rather than submission order.
                while futures:
                    done, futures = wait(futures, return_when=FIRST_COMPLETED)
                    while done:
                        yield done.pop().result()
            finally:
                # Cancel any still-pending work if the consumer stops
                # iterating early.
                for future in futures:
                    future.cancel()

    async def abatch(
        self,
        inputs: list[Input],
        config: RunnableConfig | list[RunnableConfig] | None = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any | None,
    ) -> list[Output]:
        """Default implementation runs `ainvoke` in parallel using `asyncio.gather`.

        The default implementation of `batch` works well for IO bound runnables.

        Subclasses must override this method if they can batch more efficiently;
        e.g., if the underlying `Runnable` uses an API which supports a batch mode.

        Args:
            inputs: A list of inputs to the `Runnable`.
            config: A config to use when invoking the `Runnable`.

                The config supports standard keys like `'tags'`, `'metadata'` for
                tracing purposes, `'max_concurrency'` for controlling how much work to
                do in parallel, and other keys.

                Please refer to `RunnableConfig` for more details.
            return_exceptions: Whether to return exceptions instead of raising them.
            **kwargs: Additional keyword arguments to pass to the `Runnable`.

        Returns:
            A list of outputs from the `Runnable`.

        """
        if not inputs:
            return []

        configs = get_config_list(config, len(inputs))

        async def ainvoke(value: Input, config: RunnableConfig) -> Output | Exception:
            if return_exceptions:
                try:
                    return await self.ainvoke(value, config, **kwargs)
                except Exception as e:
                    return e
            else:
                return await self.ainvoke(value, config, **kwargs)

        # Concurrency is bounded by `max_concurrency` from the first config.
        coros = map(ainvoke, inputs, configs)
        return await gather_with_concurrency(configs[0].get("max_concurrency"), *coros)

    @overload
    def abatch_as_completed(
        self,
        inputs: Sequence[Input],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: Literal[False] = False,
        **kwargs: Any | None,
    ) -> AsyncIterator[tuple[int, Output]]: ...

    @overload
    def abatch_as_completed(
        self,
        inputs: Sequence[Input],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: Literal[True],
        **kwargs: Any | None,
    ) -> AsyncIterator[tuple[int, Output | Exception]]: ...

    async def abatch_as_completed(
        self,
        inputs: Sequence[Input],
        config: RunnableConfig | Sequence[RunnableConfig] | None = None,
        *,
        return_exceptions: bool = False,
        **kwargs: Any | None,
    ) -> AsyncIterator[tuple[int, Output | Exception]]:
        """Run `ainvoke` in parallel on a list of inputs.

        Yields results as they complete.

        
Args:1084 inputs: A list of inputs to the `Runnable`.1085 config: A config to use when invoking the `Runnable`.10861087 The config supports standard keys like `'tags'`, `'metadata'` for1088 tracing purposes, `'max_concurrency'` for controlling how much work to1089 do in parallel, and other keys.10901091 Please refer to `RunnableConfig` for more details.1092 return_exceptions: Whether to return exceptions instead of raising them.1093 **kwargs: Additional keyword arguments to pass to the `Runnable`.10941095 Yields:1096 A tuple of the index of the input and the output from the `Runnable`.10971098 """1099 if not inputs:1100 return11011102 configs = get_config_list(config, len(inputs))1103 # Get max_concurrency from first config, defaulting to None (unlimited)1104 max_concurrency = configs[0].get("max_concurrency") if configs else None1105 semaphore = asyncio.Semaphore(max_concurrency) if max_concurrency else None11061107 async def ainvoke_task(1108 i: int, input_: Input, config: RunnableConfig1109 ) -> tuple[int, Output | Exception]:1110 if return_exceptions:1111 try:1112 out: Output | Exception = await self.ainvoke(1113 input_, config, **kwargs1114 )1115 except Exception as e:1116 out = e1117 else:1118 out = await self.ainvoke(input_, config, **kwargs)1119 return (i, out)11201121 coros = [1122 gated_coro(semaphore, ainvoke_task(i, input_, config))1123 if semaphore1124 else ainvoke_task(i, input_, config)1125 for i, (input_, config) in enumerate(zip(inputs, configs, strict=False))1126 ]11271128 for coro in asyncio.as_completed(coros):1129 yield await coro11301131 def stream(1132 self,1133 input: Input,1134 config: RunnableConfig | None = None,1135 **kwargs: Any | None,1136 ) -> Iterator[Output]:1137 """Default implementation of `stream`, which calls `invoke`.11381139 Subclasses must override this method if they support streaming output.11401141 Args:1142 input: The input to the `Runnable`.1143 config: The config to use for the `Runnable`.1144 **kwargs: Additional 
keyword arguments to pass to the `Runnable`.11451146 Yields:1147 The output of the `Runnable`.11481149 """1150 yield self.invoke(input, config, **kwargs)11511152 async def astream(1153 self,1154 input: Input,1155 config: RunnableConfig | None = None,1156 **kwargs: Any | None,1157 ) -> AsyncIterator[Output]:1158 """Default implementation of `astream`, which calls `ainvoke`.11591160 Subclasses must override this method if they support streaming output.11611162 Args:1163 input: The input to the `Runnable`.1164 config: The config to use for the `Runnable`.1165 **kwargs: Additional keyword arguments to pass to the `Runnable`.11661167 Yields:1168 The output of the `Runnable`.11691170 """1171 yield await self.ainvoke(input, config, **kwargs)11721173 @overload1174 def astream_log(1175 self,1176 input: Any,1177 config: RunnableConfig | None = None,1178 *,1179 diff: Literal[True] = True,1180 with_streamed_output_list: bool = True,1181 include_names: Sequence[str] | None = None,1182 include_types: Sequence[str] | None = None,1183 include_tags: Sequence[str] | None = None,1184 exclude_names: Sequence[str] | None = None,1185 exclude_types: Sequence[str] | None = None,1186 exclude_tags: Sequence[str] | None = None,1187 **kwargs: Any,1188 ) -> AsyncIterator[RunLogPatch]: ...11891190 @overload1191 def astream_log(1192 self,1193 input: Any,1194 config: RunnableConfig | None = None,1195 *,1196 diff: Literal[False],1197 with_streamed_output_list: bool = True,1198 include_names: Sequence[str] | None = None,1199 include_types: Sequence[str] | None = None,1200 include_tags: Sequence[str] | None = None,1201 exclude_names: Sequence[str] | None = None,1202 exclude_types: Sequence[str] | None = None,1203 exclude_tags: Sequence[str] | None = None,1204 **kwargs: Any,1205 ) -> AsyncIterator[RunLog]: ...12061207 async def astream_log(1208 self,1209 input: Any,1210 config: RunnableConfig | None = None,1211 *,1212 diff: bool = True,1213 with_streamed_output_list: bool = True,1214 include_names: 
Sequence[str] | None = None,1215 include_types: Sequence[str] | None = None,1216 include_tags: Sequence[str] | None = None,1217 exclude_names: Sequence[str] | None = None,1218 exclude_types: Sequence[str] | None = None,1219 exclude_tags: Sequence[str] | None = None,1220 **kwargs: Any,1221 ) -> AsyncIterator[RunLogPatch] | AsyncIterator[RunLog]:1222 """Stream all output from a `Runnable`, as reported to the callback system.12231224 This includes all inner runs of LLMs, Retrievers, Tools, etc.12251226 Output is streamed as Log objects, which include a list of1227 Jsonpatch ops that describe how the state of the run has changed in each1228 step, and the final state of the run.12291230 The Jsonpatch ops can be applied in order to construct state.12311232 Args:1233 input: The input to the `Runnable`.1234 config: The config to use for the `Runnable`.1235 diff: Whether to yield diffs between each step or the current state.1236 with_streamed_output_list: Whether to yield the `streamed_output` list.1237 include_names: Only include logs with these names.1238 include_types: Only include logs with these types.1239 include_tags: Only include logs with these tags.1240 exclude_names: Exclude logs with these names.1241 exclude_types: Exclude logs with these types.1242 exclude_tags: Exclude logs with these tags.1243 **kwargs: Additional keyword arguments to pass to the `Runnable`.12441245 Yields:1246 A `RunLogPatch` or `RunLog` object.12471248 """1249 warn_deprecated(1250 since="1.3.3",1251 message=("astream_log is deprecated. 
Use astream instead."),1252 removal="2.0.0",1253 )1254 stream = LogStreamCallbackHandler(1255 auto_close=False,1256 include_names=include_names,1257 include_types=include_types,1258 include_tags=include_tags,1259 exclude_names=exclude_names,1260 exclude_types=exclude_types,1261 exclude_tags=exclude_tags,1262 _schema_format="original",1263 )12641265 # Mypy isn't resolving the overloads here1266 # Likely an issue b/c `self` is being passed through1267 # and it's can't map it to Runnable[Input,Output]?1268 async for item in _astream_log_implementation( # type: ignore[call-overload]1269 self,1270 input,1271 config,1272 diff=diff,1273 stream=stream,1274 with_streamed_output_list=with_streamed_output_list,1275 **kwargs,1276 ):1277 yield item12781279 @overload1280 def astream_events(1281 self,1282 input: Any,1283 config: RunnableConfig | None = None,1284 *,1285 version: Literal["v1", "v2"] = "v2",1286 include_names: Sequence[str] | None = None,1287 include_types: Sequence[str] | None = None,1288 include_tags: Sequence[str] | None = None,1289 exclude_names: Sequence[str] | None = None,1290 exclude_types: Sequence[str] | None = None,1291 exclude_tags: Sequence[str] | None = None,1292 **kwargs: Any,1293 ) -> AsyncIterator[StreamEvent]: ...12941295 @overload1296 def astream_events(1297 self,1298 input: Any,1299 config: RunnableConfig | None = None,1300 *,1301 version: Literal["v3"],1302 **kwargs: Any,1303 ) -> Awaitable[Any]: ...13041305 def astream_events(1306 self,1307 input: Any,1308 config: RunnableConfig | None = None,1309 *,1310 version: Literal["v1", "v2", "v3"] = "v2",1311 include_names: Sequence[str] | None = None,1312 include_types: Sequence[str] | None = None,1313 include_tags: Sequence[str] | None = None,1314 exclude_names: Sequence[str] | None = None,1315 exclude_types: Sequence[str] | None = None,1316 exclude_tags: Sequence[str] | None = None,1317 **kwargs: Any,1318 ) -> AsyncIterator[StreamEvent] | Awaitable[Any]:1319 """Generate a stream of events.13201321 Use 
to create an iterator over `StreamEvent` that provide real-time information1322 about the progress of the `Runnable`, including `StreamEvent` from intermediate1323 results.13241325 A `StreamEvent` is a dictionary with the following schema:13261327 - `event`: Event names are of the format:1328 `on_[runnable_type]_(start|stream|end)`.1329 - `name`: The name of the `Runnable` that generated the event.1330 - `run_id`: Randomly generated ID associated with the given execution of the1331 `Runnable` that emitted the event. A child `Runnable` that gets invoked as1332 part of the execution of a parent `Runnable` is assigned its own unique ID.1333 - `parent_ids`: The IDs of the parent runnables that generated the event. The1334 root `Runnable` will have an empty list. The order of the parent IDs is from1335 the root to the immediate parent. Only available for v2 version of the API.1336 The v1 version of the API will return an empty list.1337 - `tags`: The tags of the `Runnable` that generated the event.1338 - `metadata`: The metadata of the `Runnable` that generated the event.1339 - `data`: The data associated with the event. The contents of this field1340 depend on the type of event. See the table below for more details.13411342 Below is a table that illustrates some events that might be emitted by various1343 chains. Metadata fields have been omitted from the table for brevity.1344 Chain definitions have been included after the table.13451346 !!! 
note1347 This reference table is for the v2 version of the schema.13481349 | event | name | chunk | input | output |1350 | ---------------------- | -------------------- | ----------------------------------- | ------------------------------------------------- | --------------------------------------------------- |1351 | `on_chat_model_start` | `'[model name]'` | | `{"messages": [[SystemMessage, HumanMessage]]}` | |1352 | `on_chat_model_stream` | `'[model name]'` | `AIMessageChunk(content="hello")` | | |1353 | `on_chat_model_end` | `'[model name]'` | | `{"messages": [[SystemMessage, HumanMessage]]}` | `AIMessageChunk(content="hello world")` |1354 | `on_llm_start` | `'[model name]'` | | `{'input': 'hello'}` | |1355 | `on_llm_stream` | `'[model name]'` | `'Hello' ` | | |1356 | `on_llm_end` | `'[model name]'` | | `'Hello human!'` | |1357 | `on_chain_start` | `'format_docs'` | | | |1358 | `on_chain_stream` | `'format_docs'` | `'hello world!, goodbye world!'` | | |1359 | `on_chain_end` | `'format_docs'` | | `[Document(...)]` | `'hello world!, goodbye world!'` |1360 | `on_tool_start` | `'some_tool'` | | `{"x": 1, "y": "2"}` | |1361 | `on_tool_end` | `'some_tool'` | | | `{"x": 1, "y": "2"}` |1362 | `on_retriever_start` | `'[retriever name]'` | | `{"query": "hello"}` | |1363 | `on_retriever_end` | `'[retriever name]'` | | `{"query": "hello"}` | `[Document(...), ..]` |1364 | `on_prompt_start` | `'[template_name]'` | | `{"question": "hello"}` | |1365 | `on_prompt_end` | `'[template_name]'` | | `{"question": "hello"}` | `ChatPromptValue(messages: [SystemMessage, ...])` |13661367 In addition to the standard events, users can also dispatch custom events (see example below).13681369 Custom events will be only be surfaced with in the v2 version of the API!13701371 A custom event has following format:13721373 | Attribute | Type | Description |1374 | ----------- | ------ | --------------------------------------------------------------------------------------------------------- |1375 
| `name` | `str` | A user defined name for the event. |1376 | `data` | `Any` | The data associated with the event. This can be anything, though we suggest making it JSON serializable. |13771378 Here are declarations associated with the standard events shown above:13791380 `format_docs`:13811382 ```python1383 def format_docs(docs: list[Document]) -> str:1384 '''Format the docs.'''1385 return ", ".join([doc.page_content for doc in docs])138613871388 format_docs = RunnableLambda(format_docs)1389 ```13901391 `some_tool`:13921393 ```python1394 @tool1395 def some_tool(x: int, y: str) -> dict:1396 '''Some_tool.'''1397 return {"x": x, "y": y}1398 ```13991400 `prompt`:14011402 ```python1403 template = ChatPromptTemplate.from_messages(1404 [1405 ("system", "You are Cat Agent 007"),1406 ("human", "{question}"),1407 ]1408 ).with_config({"run_name": "my_template", "tags": ["my_template"]})1409 ```14101411 !!! example14121413 ```python1414 from langchain_core.runnables import RunnableLambda141514161417 async def reverse(s: str) -> str:1418 return s[::-1]141914201421 chain = RunnableLambda(func=reverse)14221423 events = [1424 event async for event in chain.astream_events("hello", version="v2")1425 ]14261427 # Will produce the following events1428 # (run_id, and parent_ids has been omitted for brevity):1429 [1430 {1431 "data": {"input": "hello"},1432 "event": "on_chain_start",1433 "metadata": {},1434 "name": "reverse",1435 "tags": [],1436 },1437 {1438 "data": {"chunk": "olleh"},1439 "event": "on_chain_stream",1440 "metadata": {},1441 "name": "reverse",1442 "tags": [],1443 },1444 {1445 "data": {"output": "olleh"},1446 "event": "on_chain_end",1447 "metadata": {},1448 "name": "reverse",1449 "tags": [],1450 },1451 ]1452 ```14531454 ```python title="Dispatch custom event"1455 from langchain_core.callbacks.manager import (1456 adispatch_custom_event,1457 )1458 from langchain_core.runnables import RunnableLambda, RunnableConfig1459 import asyncio146014611462 async def 
slow_thing(some_input: str, config: RunnableConfig) -> str:1463 \"\"\"Do something that takes a long time.\"\"\"1464 await asyncio.sleep(1) # Placeholder for some slow operation1465 await adispatch_custom_event(1466 "progress_event",1467 {"message": "Finished step 1 of 3"},1468 config=config # Must be included for python < 3.101469 )1470 await asyncio.sleep(1) # Placeholder for some slow operation1471 await adispatch_custom_event(1472 "progress_event",1473 {"message": "Finished step 2 of 3"},1474 config=config # Must be included for python < 3.101475 )1476 await asyncio.sleep(1) # Placeholder for some slow operation1477 return "Done"14781479 slow_thing = RunnableLambda(slow_thing)14801481 async for event in slow_thing.astream_events("some_input", version="v2"):1482 print(event)1483 ```14841485 Args:1486 input: The input to the `Runnable`.1487 config: The config to use for the `Runnable`.1488 version: The version of the schema to use. One of `'v1'`, `'v2'`,1489 or `'v3'`.14901491 Most callers should use `'v2'` (the default), which yields1492 `StreamEvent` dicts and supports custom events.14931494 `'v3'` selects the typed, content-block-centric streaming1495 protocol and is only supported on `Runnable` subclasses that1496 implement it (currently `BaseChatModel` and1497 `langgraph.CompiledGraph`); on a generic `Runnable` it raises1498 `NotImplementedError`. The `'v3'` API is in beta and may1499 change. See the subclass override (e.g.1500 `BaseChatModel.astream_events`) for the v3 return shape.15011502 `'v1'` is retained for backwards compatibility and will be1503 deprecated in `0.4.0`. 
Custom events are only surfaced in1504 `'v2'` / `'v3'`.1505 include_names: Only include events from `Runnable` objects with matching names.1506 include_types: Only include events from `Runnable` objects with matching types.1507 include_tags: Only include events from `Runnable` objects with matching tags.1508 exclude_names: Exclude events from `Runnable` objects with matching names.1509 exclude_types: Exclude events from `Runnable` objects with matching types.1510 exclude_tags: Exclude events from `Runnable` objects with matching tags.1511 **kwargs: Additional keyword arguments to pass to the `Runnable`.15121513 Yields:1514 An async stream of `StreamEvent`.15151516 Raises:1517 NotImplementedError: If the version is not `'v1'`, `'v2'`, or `'v3'`, or1518 if `'v3'` is requested on a `Runnable` that does not implement the v31519 streaming protocol.15201521 """ # noqa: E5011522 if version == "v3":1523 return self._astream_events_v3_unsupported()1524 return self._astream_events_v1_v2(1525 input,1526 config=config,1527 version=version,1528 include_names=include_names,1529 include_types=include_types,1530 include_tags=include_tags,1531 exclude_names=exclude_names,1532 exclude_types=exclude_types,1533 exclude_tags=exclude_tags,1534 **kwargs,1535 )15361537 async def _astream_events_v3_unsupported(self) -> Any:1538 """Coroutine that raises when v3 isn't implemented on this Runnable.15391540 Lets the public `astream_events(version="v3")` return an awaitable1541 whose error surfaces on `await`, matching the v3 contract on1542 subclasses that do implement the protocol.1543 """1544 msg = (1545 "astream_events(version='v3') is only supported on Runnable "1546 "subclasses that implement the v3 streaming protocol "1547 "(BaseChatModel, CompiledGraph). 
"1548 f"Got: {type(self).__name__}"1549 )1550 raise NotImplementedError(msg)15511552 async def _astream_events_v1_v2(1553 self,1554 input: Any,1555 config: RunnableConfig | None = None,1556 *,1557 version: Literal["v1", "v2"] = "v2",1558 include_names: Sequence[str] | None = None,1559 include_types: Sequence[str] | None = None,1560 include_tags: Sequence[str] | None = None,1561 exclude_names: Sequence[str] | None = None,1562 exclude_types: Sequence[str] | None = None,1563 exclude_tags: Sequence[str] | None = None,1564 **kwargs: Any,1565 ) -> AsyncIterator[StreamEvent]:1566 if version == "v2":1567 event_stream = _astream_events_implementation_v2(1568 self,1569 input,1570 config=config,1571 include_names=include_names,1572 include_types=include_types,1573 include_tags=include_tags,1574 exclude_names=exclude_names,1575 exclude_types=exclude_types,1576 exclude_tags=exclude_tags,1577 **kwargs,1578 )1579 elif version == "v1":1580 warn_deprecated(1581 since="1.3.3",1582 message=(1583 "astream_events version='v1' is deprecated. "1584 "Use version='v2' or astream instead."1585 ),1586 removal="2.0.0",1587 )1588 # First implementation, built on top of astream_log API1589 # This implementation will be deprecated as of 0.2.01590 event_stream = _astream_events_implementation_v1(1591 self,1592 input,1593 config=config,1594 include_names=include_names,1595 include_types=include_types,1596 include_tags=include_tags,1597 exclude_names=exclude_names,1598 exclude_types=exclude_types,1599 exclude_tags=exclude_tags,1600 **kwargs,1601 )1602 else:1603 msg = f"Unsupported version: {version!r}. 
Expected 'v1', 'v2', or 'v3'."1604 raise NotImplementedError(msg)16051606 async with aclosing(event_stream):1607 async for event in event_stream:1608 yield event16091610 @overload1611 def stream_events(1612 self,1613 input: Any,1614 config: RunnableConfig | None = None,1615 *,1616 version: Literal["v1", "v2"] = "v2",1617 include_names: Sequence[str] | None = None,1618 include_types: Sequence[str] | None = None,1619 include_tags: Sequence[str] | None = None,1620 exclude_names: Sequence[str] | None = None,1621 exclude_types: Sequence[str] | None = None,1622 exclude_tags: Sequence[str] | None = None,1623 **kwargs: Any,1624 ) -> Iterator[StreamEvent]: ...16251626 @overload1627 def stream_events(1628 self,1629 input: Any,1630 config: RunnableConfig | None = None,1631 *,1632 version: Literal["v3"],1633 **kwargs: Any,1634 ) -> Iterator[Any]: ...16351636 def stream_events(1637 self,1638 input: Any,1639 config: RunnableConfig | None = None,1640 *,1641 version: Literal["v1", "v2", "v3"] = "v2",1642 include_names: Sequence[str] | None = None,1643 include_types: Sequence[str] | None = None,1644 include_tags: Sequence[str] | None = None,1645 exclude_names: Sequence[str] | None = None,1646 exclude_types: Sequence[str] | None = None,1647 exclude_tags: Sequence[str] | None = None,1648 **kwargs: Any,1649 ) -> Iterator[StreamEvent] | Iterator[Any]:1650 """Generate a stream of events synchronously.16511652 Synchronous counterpart to `astream_events`. For `version='v3'`, subclasses1653 that implement the v3 streaming protocol (`BaseChatModel`, `CompiledGraph`)1654 override this method. All other versions and base-class calls raise1655 `NotImplementedError`.16561657 Args:1658 input: The input to the `Runnable`.1659 config: The config to use for the `Runnable`.1660 version: The version of the schema to use. `'v3'` requires a subclass1661 that implements the v3 streaming protocol. 
`'v1'` and `'v2'` are not1662 supported on the sync path.1663 include_names: Only include events from `Runnable` objects with matching1664 names.1665 include_types: Only include events from `Runnable` objects with matching1666 types.1667 include_tags: Only include events from `Runnable` objects with matching1668 tags.1669 exclude_names: Exclude events from `Runnable` objects with matching names.1670 exclude_types: Exclude events from `Runnable` objects with matching types.1671 exclude_tags: Exclude events from `Runnable` objects with matching tags.1672 **kwargs: Additional keyword arguments to pass to the `Runnable`.16731674 Raises:1675 NotImplementedError: Always. Subclasses override this method for supported1676 versions.16771678 """1679 # Base impl always raises; consume args so they don't trip ARG002.1680 del input, config, include_names, include_types, include_tags1681 del exclude_names, exclude_types, exclude_tags, kwargs1682 if version == "v3":1683 msg = (1684 "stream_events(version='v3') is only supported on Runnable subclasses "1685 "that implement the v3 streaming protocol "1686 "(BaseChatModel, CompiledGraph). "1687 f"Got: {type(self).__name__}"1688 )1689 raise NotImplementedError(msg)1690 msg = (1691 f"stream_events(version={version!r}) is not supported. 
"1692 "Use astream_events() for v1/v2, or stream_events(version='v3') "1693 "on a supported subclass."1694 )1695 raise NotImplementedError(msg)16961697 def transform(1698 self,1699 input: Iterator[Input],1700 config: RunnableConfig | None = None,1701 **kwargs: Any | None,1702 ) -> Iterator[Output]:1703 """Transform inputs to outputs.17041705 Default implementation of transform, which buffers input and calls `astream`.17061707 Subclasses must override this method if they can start producing output while1708 input is still being generated.17091710 Args:1711 input: An iterator of inputs to the `Runnable`.1712 config: The config to use for the `Runnable`.1713 **kwargs: Additional keyword arguments to pass to the `Runnable`.17141715 Yields:1716 The output of the `Runnable`.17171718 """1719 final: Input1720 got_first_val = False17211722 for ichunk in input:1723 # The default implementation of transform is to buffer input and1724 # then call stream.1725 # It'll attempt to gather all input into a single chunk using1726 # the `+` operator.1727 # If the input is not addable, then we'll assume that we can1728 # only operate on the last chunk,1729 # and we'll iterate until we get to the last chunk.1730 if not got_first_val:1731 final = ichunk1732 got_first_val = True1733 else:1734 try:1735 final = final + ichunk # type: ignore[operator]1736 except TypeError:1737 final = ichunk17381739 if got_first_val:1740 yield from self.stream(final, config, **kwargs)17411742 async def atransform(1743 self,1744 input: AsyncIterator[Input],1745 config: RunnableConfig | None = None,1746 **kwargs: Any | None,1747 ) -> AsyncIterator[Output]:1748 """Transform inputs to outputs.17491750 Default implementation of atransform, which buffers input and calls `astream`.17511752 Subclasses must override this method if they can start producing output while1753 input is still being generated.17541755 Args:1756 input: An async iterator of inputs to the `Runnable`.1757 config: The config to use for the 
`Runnable`.1758 **kwargs: Additional keyword arguments to pass to the `Runnable`.17591760 Yields:1761 The output of the `Runnable`.17621763 """1764 final: Input1765 got_first_val = False17661767 async for ichunk in input:1768 # The default implementation of transform is to buffer input and1769 # then call stream.1770 # It'll attempt to gather all input into a single chunk using1771 # the `+` operator.1772 # If the input is not addable, then we'll assume that we can1773 # only operate on the last chunk,1774 # and we'll iterate until we get to the last chunk.1775 if not got_first_val:1776 final = ichunk1777 got_first_val = True1778 else:1779 try:1780 final = final + ichunk # type: ignore[operator]1781 except TypeError:1782 final = ichunk17831784 if got_first_val:1785 async for output in self.astream(final, config, **kwargs):1786 yield output17871788 def bind(self, **kwargs: Any) -> Runnable[Input, Output]:1789 """Bind arguments to a `Runnable`, returning a new `Runnable`.17901791 Useful when a `Runnable` in a chain requires an argument that is not1792 in the output of the previous `Runnable` or included in the user input.17931794 Args:1795 **kwargs: The arguments to bind to the `Runnable`.17961797 Returns:1798 A new `Runnable` with the arguments bound.17991800 Example:1801 ```python1802 from langchain_ollama import ChatOllama1803 from langchain_core.output_parsers import StrOutputParser18041805 model = ChatOllama(model="llama3.1")18061807 # Without bind1808 chain = model | StrOutputParser()18091810 chain.invoke("Repeat quoted words exactly: 'One two three four five.'")1811 # Output is 'One two three four five.'18121813 # With bind1814 chain = model.bind(stop=["three"]) | StrOutputParser()18151816 chain.invoke("Repeat quoted words exactly: 'One two three four five.'")1817 # Output is 'One two'1818 ```1819 """1820 return RunnableBinding(bound=self, kwargs=kwargs, config={})18211822 def with_config(1823 self,1824 config: RunnableConfig | None = None,1825 # Sadly Unpack 
is not well-supported by mypy so this will have to be untyped1826 **kwargs: Any,1827 ) -> Runnable[Input, Output]:1828 """Bind config to a `Runnable`, returning a new `Runnable`.18291830 Args:1831 config: The config to bind to the `Runnable`.1832 **kwargs: Additional keyword arguments to pass to the `Runnable`.18331834 Returns:1835 A new `Runnable` with the config bound.18361837 """1838 return RunnableBinding(1839 bound=self,1840 config=cast(1841 "RunnableConfig",1842 {**(config or {}), **kwargs},1843 ),1844 kwargs={},1845 )18461847 def with_listeners(1848 self,1849 *,1850 on_start: Callable[[Run], None]1851 | Callable[[Run, RunnableConfig], None]1852 | None = None,1853 on_end: Callable[[Run], None]1854 | Callable[[Run, RunnableConfig], None]1855 | None = None,1856 on_error: Callable[[Run], None]1857 | Callable[[Run, RunnableConfig], None]1858 | None = None,1859 ) -> Runnable[Input, Output]:1860 """Bind lifecycle listeners to a `Runnable`, returning a new `Runnable`.18611862 The Run object contains information about the run, including its `id`,1863 `type`, `input`, `output`, `error`, `start_time`, `end_time`, and1864 any tags or metadata added to the run.18651866 Args:1867 on_start: Called before the `Runnable` starts running, with the `Run`1868 object.1869 on_end: Called after the `Runnable` finishes running, with the `Run`1870 object.1871 on_error: Called if the `Runnable` throws an error, with the `Run`1872 object.18731874 Returns:1875 A new `Runnable` with the listeners bound.18761877 Example:1878 ```python1879 from langchain_core.runnables import RunnableLambda1880 from langchain_core.tracers.schemas import Run18811882 import time188318841885 def test_runnable(time_to_sleep: int):1886 time.sleep(time_to_sleep)188718881889 def fn_start(run_obj: Run):1890 print("start_time:", run_obj.start_time)189118921893 def fn_end(run_obj: Run):1894 print("end_time:", run_obj.end_time)189518961897 chain = RunnableLambda(test_runnable).with_listeners(1898 on_start=fn_start, 
on_end=fn_end1899 )1900 chain.invoke(2)1901 ```1902 """1903 return RunnableBinding(1904 bound=self,1905 config_factories=[1906 lambda config: {1907 "callbacks": [1908 RootListenersTracer(1909 config=config,1910 on_start=on_start,1911 on_end=on_end,1912 on_error=on_error,1913 )1914 ],1915 }1916 ],1917 )19181919 def with_alisteners(1920 self,1921 *,1922 on_start: AsyncListener | None = None,1923 on_end: AsyncListener | None = None,1924 on_error: AsyncListener | None = None,1925 ) -> Runnable[Input, Output]:1926 """Bind async lifecycle listeners to a `Runnable`.19271928 Returns a new `Runnable`.19291930 The Run object contains information about the run, including its `id`,1931 `type`, `input`, `output`, `error`, `start_time`, `end_time`, and1932 any tags or metadata added to the run.19331934 Args:1935 on_start: Called asynchronously before the `Runnable` starts running,1936 with the `Run` object.1937 on_end: Called asynchronously after the `Runnable` finishes running,1938 with the `Run` object.1939 on_error: Called asynchronously if the `Runnable` throws an error,1940 with the `Run` object.19411942 Returns:1943 A new `Runnable` with the listeners bound.19441945 Example:1946 ```python1947 from langchain_core.runnables import RunnableLambda, Runnable1948 from datetime import datetime, timezone1949 import time1950 import asyncio195119521953 def format_t(timestamp: float) -> str:1954 return datetime.fromtimestamp(timestamp, tz=timezone.utc).isoformat()195519561957 async def test_runnable(time_to_sleep: int):1958 print(f"Runnable[{time_to_sleep}s]: starts at {format_t(time.time())}")1959 await asyncio.sleep(time_to_sleep)1960 print(f"Runnable[{time_to_sleep}s]: ends at {format_t(time.time())}")196119621963 async def fn_start(run_obj: Runnable):1964 print(f"on start callback starts at {format_t(time.time())}")1965 await asyncio.sleep(3)1966 print(f"on start callback ends at {format_t(time.time())}")196719681969 async def fn_end(run_obj: Runnable):1970 print(f"on end 
callback starts at {format_t(time.time())}")1971 await asyncio.sleep(2)1972 print(f"on end callback ends at {format_t(time.time())}")197319741975 runnable = RunnableLambda(test_runnable).with_alisteners(1976 on_start=fn_start, on_end=fn_end1977 )197819791980 async def concurrent_runs():1981 await asyncio.gather(runnable.ainvoke(2), runnable.ainvoke(3))198219831984 asyncio.run(concurrent_runs())1985 # Result:1986 # on start callback starts at 2025-03-01T07:05:22.875378+00:001987 # on start callback starts at 2025-03-01T07:05:22.875495+00:001988 # on start callback ends at 2025-03-01T07:05:25.878862+00:001989 # on start callback ends at 2025-03-01T07:05:25.878947+00:001990 # Runnable[2s]: starts at 2025-03-01T07:05:25.879392+00:001991 # Runnable[3s]: starts at 2025-03-01T07:05:25.879804+00:001992 # Runnable[2s]: ends at 2025-03-01T07:05:27.881998+00:001993 # on end callback starts at 2025-03-01T07:05:27.882360+00:001994 # Runnable[3s]: ends at 2025-03-01T07:05:28.881737+00:001995 # on end callback starts at 2025-03-01T07:05:28.882428+00:001996 # on end callback ends at 2025-03-01T07:05:29.883893+00:001997 # on end callback ends at 2025-03-01T07:05:30.884831+00:001998 ```1999 """2000 return RunnableBinding(
Findings
✓ No findings reported for this file.