Skip to content

Commit be7de42

Browse files
Serialize all the chains! (#761)
Co-authored-by: Harrison Chase <[email protected]>
1 parent e2a7fed commit be7de42

File tree

17 files changed

+478
-21
lines changed

17 files changed

+478
-21
lines changed

langchain/chains/api/base.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
from typing import Any, Dict, List, Optional
55

6-
from pydantic import BaseModel, root_validator
6+
from pydantic import BaseModel, Field, root_validator
77

88
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
99
from langchain.chains.base import Chain
@@ -18,7 +18,7 @@ class APIChain(Chain, BaseModel):
1818

1919
api_request_chain: LLMChain
2020
api_answer_chain: LLMChain
21-
requests_wrapper: RequestsWrapper
21+
requests_wrapper: RequestsWrapper = Field(exclude=True)
2222
api_docs: str
2323
question_key: str = "question" #: :meta private:
2424
output_key: str = "output" #: :meta private:
@@ -102,3 +102,7 @@ def from_llm_and_api_docs(
102102
api_docs=api_docs,
103103
**kwargs,
104104
)
105+
106+
@property
107+
def _chain_type(self) -> str:
108+
return "api_chain"

langchain/chains/combine_documents/map_reduce.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -168,3 +168,7 @@ def combine_docs(
168168
extra_return_dict = {}
169169
output, _ = self.combine_document_chain.combine_docs(result_docs, **kwargs)
170170
return output, extra_return_dict
171+
172+
@property
173+
def _chain_type(self) -> str:
174+
return "map_reduce_documents_chain"

langchain/chains/combine_documents/map_rerank.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -111,3 +111,7 @@ def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
111111
if self.return_intermediate_steps:
112112
extra_info["intermediate_steps"] = results
113113
return output[self.answer_key], extra_info
114+
115+
@property
116+
def _chain_type(self) -> str:
117+
return "map_rerank_documents_chain"

langchain/chains/combine_documents/refine.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,3 +113,7 @@ def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
113113
else:
114114
extra_return_dict = {}
115115
return res, extra_return_dict
116+
117+
@property
118+
def _chain_type(self) -> str:
119+
return "refine_documents_chain"

langchain/chains/combine_documents/stuff.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -83,3 +83,7 @@ def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
8383
inputs = self._get_inputs(docs, **kwargs)
8484
# Call predict on the LLM.
8585
return self.llm_chain.predict(**inputs), {}
86+
87+
@property
88+
def _chain_type(self) -> str:
89+
return "stuff_documents_chain"

langchain/chains/hyde/base.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,3 +69,7 @@ def from_llm(
6969
prompt = PROMPT_MAP[prompt_key]
7070
llm_chain = LLMChain(llm=llm, prompt=prompt)
7171
return cls(base_embeddings=base_embeddings, llm_chain=llm_chain)
72+
73+
@property
74+
def _chain_type(self) -> str:
75+
return "hyde_chain"

langchain/chains/llm_bash/base.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -73,3 +73,7 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
7373
else:
7474
raise ValueError(f"unknown format from LLM: {t}")
7575
return {self.output_key: output}
76+
77+
@property
78+
def _chain_type(self) -> str:
79+
return "llm_bash_chain"

langchain/chains/llm_checker/base.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,3 +97,7 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
9797
)
9898
output = question_to_checked_assertions_chain({"question": question})
9999
return {self.output_key: output["revised_statement"]}
100+
101+
@property
102+
def _chain_type(self) -> str:
103+
return "llm_checker_chain"

langchain/chains/llm_math/base.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -68,3 +68,7 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
6868
else:
6969
raise ValueError(f"unknown format from LLM: {t}")
7070
return {self.output_key: answer}
71+
72+
@property
73+
def _chain_type(self) -> str:
74+
return "llm_math_chain"

langchain/chains/llm_requests.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,9 @@ class LLMRequestsChain(Chain, BaseModel):
1818
"""Chain that hits a URL and then uses an LLM to parse results."""
1919

2020
llm_chain: LLMChain
21-
requests_wrapper: RequestsWrapper = Field(default_factory=RequestsWrapper)
21+
requests_wrapper: RequestsWrapper = Field(
22+
default_factory=RequestsWrapper, exclude=True
23+
)
2224
text_length: int = 8000
2325
requests_key: str = "requests_result" #: :meta private:
2426
input_key: str = "url" #: :meta private:
@@ -71,3 +73,7 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
7173
other_keys[self.requests_key] = soup.get_text()[: self.text_length]
7274
result = self.llm_chain.predict(**other_keys)
7375
return {self.output_key: result}
76+
77+
@property
78+
def _chain_type(self) -> str:
79+
return "llm_requests_chain"

0 commit comments

Comments (0)