@@ -14,12 +14,18 @@ from replicate import Replicate
 # Initialize with REPLICATE_API_TOKEN env var by default
 replicate = Replicate()
 
-# Run a model
+# Create a model function
+flux = replicate.use("black-forest-labs/flux-schnell")
+
+# Call it like any Python function
+output = flux(prompt="astronaut on a horse")
+print(output)
+
+# Or use run() for one-off predictions
 output = replicate.run(
     "black-forest-labs/flux-schnell",
     input={"prompt": "astronaut on a horse"}
 )
-print(output)
 ```
 
 ## Client Initialization
@@ -66,9 +72,43 @@ asyncio.run(main())
 
 ## High-Level Methods
 
-### run() - Run a Model
+### use() - Create a Reusable Model Function (Recommended)
 
-The simplest way to run a model and get output.
+The most Pythonic way to interact with models. Creates a callable function for any model.
+
+```python
+# Create a model function
+sdxl = replicate.use("stability-ai/sdxl")
+
+# Call it like a regular function
+image = sdxl(prompt="a 19th century portrait of a wombat gentleman")
+
+# Use it multiple times with different inputs
+image1 = sdxl(prompt="a cat in a hat", negative_prompt="blurry, low quality")
+image2 = sdxl(prompt="a dog in sunglasses", num_outputs=4)
+
+# Works great with language models too
+llama = replicate.use("meta/llama-2-70b-chat")
+response = llama(
+    prompt="Write a haiku about Python programming",
+    temperature=0.7,
+    max_new_tokens=100
+)
+
+# Enable streaming for models that support it
+llama_stream = replicate.use("meta/llama-2-70b-chat", streaming=True)
+for chunk in llama_stream(prompt="Explain quantum computing"):
+    print(chunk, end="")
+
+# Can accept model references in various formats
+model = replicate.use("owner/name:version")  # Specific version
+model = replicate.use("owner/name")  # Latest version
+model = replicate.use("5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")  # Version ID
+```
+
+### run() - Run a Model Once
+
+Direct method to run a model and get output. Good for one-off predictions.
 
 ```python
 # Basic usage - returns output when complete
@@ -98,7 +138,7 @@ replicate.run("5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa"
 
 ### stream() - Stream Model Output
 
-For models that support streaming (like language models).
+For models that support streaming (like language models). Returns an iterator of output chunks.
 
 ```python
 # Stream text output
@@ -116,26 +156,10 @@ async for event in async_replicate.stream("meta/llama-2-70b-chat", input={"promp
     print(str(event), end="")
 ```
 
-### use() - Create a Reusable Model Function
-
-Experimental feature for creating reusable model functions.
-
-```python
-# Create a model function
-stable_diffusion = replicate.use("stability-ai/stable-diffusion")
-
-# Use it multiple times
-image1 = stable_diffusion(prompt="a cat in a hat")
-image2 = stable_diffusion(prompt="a dog in sunglasses")
-
-# With streaming models
-llama = replicate.use("meta/llama-2-70b-chat", streaming=True)
-for chunk in llama(prompt="Explain quantum computing"):
-    print(chunk, end="")
-```
-
 ### search() - Search Models
 
+Find models by keyword or description.
+
 ```python
 # Search for models
 results = replicate.search(query="image generation", limit=10)
0 commit comments