@@ -11,18 +11,17 @@ pip install replicate
 ```python
 import replicate
 
-# Create a model function
+# Create a model function for image generation
 banana = replicate.use("google/nano-banana")
 
-# Call it like any Python function
-output = banana(prompt="astronaut on a horse")
-print(output)
+# Call it - returns an image URL
+image_url = banana(prompt="astronaut on a horse")
+print(image_url)  # https://replicate.delivery/...
 
-# Or use run() for one-off predictions
-output = replicate.run(
-    "google/nano-banana",
-    input={"prompt": "astronaut on a horse"}
-)
+# Or for text generation
+claude = replicate.use("anthropic/claude-4-sonnet")
+text = claude(prompt="Write a haiku about Python")
+print(text)  # "Code flows like water..."
 ```
 
 ## Client Initialization
@@ -33,7 +32,7 @@ By default, the SDK uses the `REPLICATE_API_TOKEN` environment variable:
 import replicate
 
 # Uses REPLICATE_API_TOKEN from environment
-output = replicate.run("google/nano-banana", input={"prompt": "hello"})
+image_url = replicate.run("google/nano-banana", input={"prompt": "hello"})
 ```
 
 ### Custom Client Configuration
@@ -53,7 +52,7 @@ replicate = Replicate(
 )
 
 # Now use this configured client
-output = replicate.run("google/nano-banana", input={"prompt": "hello"})
+image_url = replicate.run("google/nano-banana", input={"prompt": "hello"})
 ```
 
 ### Asynchronous Client
@@ -66,11 +65,11 @@ import os
 async def main():
     # Can specify token explicitly if needed
    replicate = AsyncReplicate(bearer_token=os.environ.get("MY_REPLICATE_TOKEN"))
-    output = await replicate.run(
+    image_url = await replicate.run(
         "google/nano-banana",
         input={"prompt": "a watercolor painting"}
     )
-    print(output)
+    print(image_url)  # https://replicate.delivery/...
 
 asyncio.run(main())
 ```
@@ -82,28 +81,28 @@ asyncio.run(main())
 The most Pythonic way to interact with models. Creates a callable function for any model.
 
 ```python
-# Create a model function
+# Image generation - returns file URLs
 banana = replicate.use("google/nano-banana")
 
-# Call it like a regular function
-image = banana(prompt="a 19th century portrait of a wombat gentleman")
+# Simple call with just prompt (only required input)
+image_url = banana(prompt="a 19th century portrait of a wombat gentleman")
+print(image_url)  # Returns: https://replicate.delivery/...
 
-# Use it multiple times with different inputs
-image1 = banana(prompt="a cat in a hat", negative_prompt="blurry, low quality")
-image2 = banana(prompt="a dog in sunglasses", num_outputs=4)
+# Use it multiple times
+image1 = banana(prompt="a cat in a hat")
+image2 = banana(prompt="a dog in sunglasses")
 
-# Works great with language models too
+# Text generation - returns text string
 claude = replicate.use("anthropic/claude-4-sonnet")
-response = claude(
-    prompt="Write a haiku about Python programming",
-    temperature=0.7,
-    max_new_tokens=100
-)
 
-# Enable streaming for models that support it
+# Simple call with just prompt (only required input)
+text = claude(prompt="Write a haiku about Python programming")
+print(text)  # Returns: "Code flows like water..."
+
+# Enable streaming for text models
 claude_stream = replicate.use("anthropic/claude-4-sonnet", streaming=True)
 for chunk in claude_stream(prompt="Explain quantum computing"):
-    print(chunk, end="")
+    print(chunk, end="")  # Streams text chunks
 
 # Can accept model references in various formats
 model = replicate.use("owner/name:version")  # Specific version
@@ -116,24 +115,19 @@ model = replicate.use("5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9b
 Direct method to run a model and get output. Good for one-off predictions.
 
 ```python
-# Basic usage - returns output when complete
-output = replicate.run(
-    "google/nano-banana:39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b",
+# Image generation - returns a file URL
+image_url = replicate.run(
+    "google/nano-banana",
     input={"prompt": "a 19th century portrait of a wombat gentleman"}
 )
+print(image_url)  # https://replicate.delivery/...
 
-# With options
-output = replicate.run(
+# Text generation - returns text string
+text = replicate.run(
     "anthropic/claude-4-sonnet",
-    input={
-        "prompt": "Write a poem about machine learning",
-        "max_new_tokens": 500,
-        "temperature": 0.7
-    },
-    wait=30,  # Wait up to 30 seconds for completion (or True for unlimited)
-    use_file_output=True,  # Return files as FileOutput objects
-    file_encoding_strategy="base64"  # Encode input files as base64 (or "url")
+    input={"prompt": "Write a poem about machine learning"}
 )
+print(text)  # "In silicon valleys deep and wide..."
 
 # Model reference formats
 replicate.run("owner/name:version", input={})  # Specific version
@@ -146,19 +140,19 @@ replicate.run("5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa"
 For models that support streaming (like language models). Returns an iterator of output chunks.
 
 ```python
-# Stream text output
-for event in replicate.stream(
+# Stream text output (only for text models like Claude)
+for chunk in replicate.stream(
     "anthropic/claude-4-sonnet",
-    input={
-        "prompt": "Tell me a story about a robot",
-        "max_new_tokens": 1000
-    }
+    input={"prompt": "Tell me a story about a robot"}
 ):
-    print(str(event), end="")
+    print(chunk, end="")  # Prints each text chunk as it arrives
 
 # Async streaming
-async for event in async_replicate.stream("anthropic/claude-4-sonnet", input={"prompt": "Hello"}):
-    print(str(event), end="")
+async for chunk in async_replicate.stream(
+    "anthropic/claude-4-sonnet",
+    input={"prompt": "Hello"}
+):
+    print(chunk, end="")
 ```
 
 ### search() - Search Models
@@ -278,6 +272,7 @@ prediction = replicate.models.predictions.create(
     model_name="nano-banana",
     input={"prompt": "a beautiful landscape"}
 )
+# prediction.output will be an image URL when complete
 ```
 
 #### Model Examples
@@ -444,25 +439,24 @@ print(f"Webhook signing secret: {webhook_secret.key}")
 The SDK supports multiple ways to provide file inputs:
 
 ```python
-# File object
+# For models that accept image inputs (like Claude with vision)
 with open("input.jpg", "rb") as f:
-    output = replicate.run("model:version", input={"image": f})
+    text = replicate.run("anthropic/claude-4-sonnet", input={
+        "prompt": "What's in this image?",
+        "image": f
+    })
 
 # File path (automatically opened)
-output = replicate.run("model:version", input={"image": "path/to/image.jpg"})
+text = replicate.run("anthropic/claude-4-sonnet", input={
+    "prompt": "Describe this",
+    "image": "path/to/image.jpg"
+})
 
 # URL
-output = replicate.run("model:version", input={"image": "https://example.com/image.jpg"})
-
-# Base64 data URI
-output = replicate.run("model:version", input={"image": "data:image/jpeg;base64,..."})
-
-# Control encoding strategy
-output = replicate.run(
-    "model:version",
-    input={"image": file_obj},
-    file_encoding_strategy="base64"  # or "url" (uploads to Replicate)
-)
+text = replicate.run("anthropic/claude-4-sonnet", input={
+    "prompt": "Analyze this image",
+    "image": "https://example.com/image.jpg"
+})
 ```
 
 ### Output Files
@@ -472,19 +466,23 @@ File outputs are automatically converted to `FileOutput` objects:
 ```python
 from replicate.helpers import FileOutput
 
-output = replicate.run("model:version", input={"prompt": "generate an image"})
+# Image generation returns a file URL
+image_url = replicate.run("google/nano-banana", input={"prompt": "sunset over mountains"})
+print(f"Generated image: {image_url}")
+
+# Text generation returns a string
+text = replicate.run("anthropic/claude-4-sonnet", input={"prompt": "Tell me a joke"})
+print(f"Response: {text}")
 
-# If output is a FileOutput
-if isinstance(output, FileOutput):
+# When using FileOutput wrapper
+from replicate.helpers import FileOutput
+if isinstance(image_url, FileOutput):
     # Get the URL
-    print(f"File URL: {output.url}")
-
-    # Read the file content
-    content = output.read()
+    print(f"File URL: {image_url.url}")
 
     # Save to disk
     with open("output.jpg", "wb") as f:
-        for chunk in output:
+        for chunk in image_url:
             f.write(chunk)
 ```
490488
@@ -502,7 +500,7 @@ from replicate.exceptions import (
 )
 
 try:
-    output = replicate.run("model:version", input={"prompt": "test"})
+    image_url = replicate.run("google/nano-banana", input={"prompt": "test"})
 except ModelError as e:
     # Model execution failed
     print(f"Model error: {e}")
@@ -598,8 +596,8 @@ replicate = Replicate(
 )
 
 # Per-request timeout
-output = replicate.run(
-    "model:version",
+image_url = replicate.run(
+    "google/nano-banana",
     input={"prompt": "test"},
     wait=60  # Wait up to 60 seconds for completion
 )
@@ -629,28 +627,29 @@ from replicate import AsyncReplicate
 async def main():
     replicate = AsyncReplicate()
 
-    # Run a model
-    output = await replicate.run(
+    # Image generation
+    image_url = await replicate.run(
         "google/nano-banana",
         input={"prompt": "a futuristic city"}
     )
+    print(f"Image: {image_url}")
 
-    # Stream output
-    async for event in replicate.stream(
+    # Stream text generation
+    async for chunk in replicate.stream(
         "anthropic/claude-4-sonnet",
         input={"prompt": "Tell me a joke"}
     ):
-        print(event, end="")
+        print(chunk, end="")
 
     # Pagination
     async for model in replicate.models.list():
         print(model.name)
 
     # Concurrent requests
     tasks = [
-        replicate.run("model1", input={"prompt": "test1"}),
-        replicate.run("model2", input={"prompt": "test2"}),
-        replicate.run("model3", input={"prompt": "test3"})
+        replicate.run("google/nano-banana", input={"prompt": "cat"}),
+        replicate.run("google/nano-banana", input={"prompt": "dog"}),
+        replicate.run("anthropic/claude-4-sonnet", input={"prompt": "Hello"})
     ]
     results = await asyncio.gather(*tasks)
 
@@ -772,9 +771,9 @@ The new SDK uses a different API structure. Here's how to migrate:
 ```python
 import replicate
 
-# Run a model
-output = replicate.run(
-    "google/nano-banana:version",
+# Run a model - image generation returns URL
+image_url = replicate.run(
+    "google/nano-banana",
     input={"prompt": "a cat"}
 )
 
@@ -786,9 +785,9 @@ model = replicate.models.get("google/nano-banana")
 ```python
 import replicate
 
-# Run a model
-output = replicate.run(
-    "google/nano-banana:version",
+# Run a model - image generation returns URL
+image_url = replicate.run(
+    "google/nano-banana",
    input={"prompt": "a cat"}
 )
 
0 commit comments