Skip to content

Commit 0cfa614

Browse files
docs: add inline documentation for the Backend module (#1353)
Signed-off-by: Colin O'Sullivan <colin.osullivan1@ibm.com> Co-authored-by: Tomáš Dvořák <toomas2d@gmail.com>
1 parent b192568 commit 0cfa614

File tree

6 files changed

+832
-2
lines changed

6 files changed

+832
-2
lines changed

python/beeai_framework/backend/backend.py

Lines changed: 93 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,17 +7,110 @@
77

88

99
class Backend:
    """Bundles a chat model and an embedding model behind one interface.

    Chat and embedding models are frequently used side by side in AI
    applications; ``Backend`` packages the pair and exposes factory helpers
    so both can be created from provider identifiers in one step.

    Attributes:
        chat: Model used for conversation / text generation.
        embedding: Model used for turning text into vectors.

    Example:
        >>> # Create backend with specific models
        >>> from beeai_framework.backend import Backend
        >>> from beeai_framework.adapters.openai import OpenAIChatModel, OpenAIEmbeddingModel
        >>> backend = Backend(
        ...     chat=OpenAIChatModel("gpt-4"),
        ...     embedding=OpenAIEmbeddingModel("text-embedding-3-small")
        ... )
        >>> # Create backend from provider names
        >>> backend = Backend.from_name(chat="openai:gpt-4", embedding="openai:text-embedding-3-small")
        >>> # Create backend using same provider for both
        >>> backend = Backend.from_provider("openai")
    """

    def __init__(self, *, chat: ChatModel, embedding: EmbeddingModel) -> None:
        """Store the chat and embedding model pair.

        Args:
            chat: Chat model used for text generation.
            embedding: Embedding model used for text vectorization.

        Example:
            >>> from beeai_framework.adapters.openai import OpenAIChatModel, OpenAIEmbeddingModel
            >>> backend = Backend(
            ...     chat=OpenAIChatModel("gpt-4"),
            ...     embedding=OpenAIEmbeddingModel("text-embedding-3-small")
            ... )
        """
        # Keyword-only parameters keep call sites explicit about which
        # model plays which role.
        self.chat = chat
        self.embedding = embedding

    @staticmethod
    def from_name(*, chat: str | ProviderName, embedding: str | ProviderName) -> "Backend":
        """Build a Backend from string model identifiers.

        Delegates to ``ChatModel.from_name`` / ``EmbeddingModel.from_name``,
        so both models can be resolved without importing provider-specific
        classes.

        Args:
            chat: Chat model id, ``"provider:model"`` or just ``"provider"``
                (e.g. ``"openai:gpt-4"``, ``"anthropic:claude-3-opus"``, ``"ollama"``).
            embedding: Embedding model id in the same format
                (e.g. ``"openai:text-embedding-3-small"``, ``"ollama:nomic-embed-text"``).

        Returns:
            A Backend wrapping the two resolved models.

        Example:
            >>> backend = Backend.from_name(
            ...     chat="openai:gpt-4",
            ...     embedding="openai:text-embedding-3-small"
            ... )
            >>> backend = Backend.from_name(
            ...     chat="anthropic:claude-3-opus",
            ...     embedding="ollama:nomic-embed-text"
            ... )
        """
        chat_model = ChatModel.from_name(chat)
        embedding_model = EmbeddingModel.from_name(embedding)
        return Backend(chat=chat_model, embedding=embedding_model)

    @staticmethod
    def from_provider(name: str | ProviderName) -> "Backend":
        """Build a Backend using one provider for both models.

        Convenience wrapper around :meth:`from_name` that passes the same
        identifier for chat and embedding, so each provider's default model
        is used.

        Args:
            name: Provider name (e.g. ``"openai"``, ``"anthropic"``, ``"ollama"``).

        Returns:
            A Backend whose chat and embedding models come from ``name``.

        Example:
            >>> # Uses OpenAI's default chat and embedding models
            >>> backend = Backend.from_provider("openai")
            >>> # Uses Ollama's default chat and embedding models
            >>> backend = Backend.from_provider("ollama")
        """
        return Backend.from_name(chat=name, embedding=name)

    async def clone(self) -> "Backend":
        """Return an independent copy of this Backend.

        Both wrapped models are cloned via their own ``clone()`` coroutines,
        so mutating the copy does not affect the original.

        Returns:
            A new Backend holding the cloned models.

        Example:
            >>> original = Backend.from_provider("openai")
            >>> cloned = await original.clone()
            >>> # Modifications to cloned won't affect original
        """
        cloned_chat = await self.chat.clone()
        cloned_embedding = await self.embedding.clone()
        return Backend(chat=cloned_chat, embedding=cloned_embedding)

0 commit comments

Comments
 (0)