
Commit 9fc966f

Rename spread to map
1 parent 519c5d6 commit 9fc966f

19 files changed: +150 -149 lines changed

docs/graph/beta/decisions.md

Lines changed: 1 addition & 1 deletion
@@ -420,6 +420,6 @@ _(This example is complete, it can be run "as is" — you'll need to add `import

 ## Next Steps

-- Learn about [parallel execution](parallel.md) with broadcasting and spreading
+- Learn about [parallel execution](parallel.md) with broadcasting and mapping
 - Understand [join nodes](joins.md) for aggregating parallel results
 - See the [API reference][pydantic_graph.beta.decision] for complete decision documentation

docs/graph/beta/index.md

Lines changed: 6 additions & 6 deletions
@@ -118,7 +118,7 @@ Every graph has:

 ## A More Complex Example

-Here's an example showcasing parallel execution with a spread operation:
+Here's an example showcasing parallel execution with a map operation:

 ```python {title="parallel_processing.py"}
 from dataclasses import dataclass
@@ -149,9 +149,9 @@ async def main():
     # Create a join to collect results
     collect_results = g.join(ListReducer[int])

-    # Build the graph with spread operation
+    # Build the graph with map operation
     g.add(
-        g.edge_from(g.start_node).spread().to(square),
+        g.edge_from(g.start_node).map().to(square),
         g.edge_from(square).to(collect_results),
         g.edge_from(collect_results).to(g.end_node),
     )
@@ -171,7 +171,7 @@ _(This example is complete, it can be run "as is" — you'll need to add `import
 In this example:

 1. The start node receives a list of integers
-2. The `.spread()` operation fans out each item to a separate parallel execution of the `square` step
+2. The `.map()` operation fans out each item to a separate parallel execution of the `square` step
 3. All results are collected back together using a [`ListReducer`][pydantic_graph.beta.join.ListReducer]
 4. The joined results flow to the end node

@@ -182,15 +182,15 @@ Explore the detailed documentation for each feature:
 - [**Steps**](steps.md) - Learn about step nodes and execution contexts
 - [**Joins**](joins.md) - Understand join nodes and reducer patterns
 - [**Decisions**](decisions.md) - Implement conditional branching
-- [**Parallel Execution**](parallel.md) - Master broadcasting and spreading
+- [**Parallel Execution**](parallel.md) - Master broadcasting and mapping

 ## Comparison with Original API

 The original graph API (documented in the [main graph page](../../graph.md)) uses a class-based approach with [`BaseNode`][pydantic_graph.nodes.BaseNode] subclasses. The beta API uses a builder pattern with decorated functions, which provides:

 **Advantages:**
 - More concise syntax for simple workflows
-- Explicit control over parallelism with spread/broadcast
+- Explicit control over parallelism with map/broadcast
 - Built-in reducers for common aggregation patterns
 - Easier to visualize complex data flows

docs/graph/beta/joins.md

Lines changed: 10 additions & 10 deletions
@@ -4,7 +4,7 @@ Join nodes synchronize and aggregate data from parallel execution paths. They us

 ## Overview

-When you use [parallel execution](parallel.md) (broadcasting or spreading), you often need to collect and combine the results. Join nodes serve this purpose by:
+When you use [parallel execution](parallel.md) (broadcasting or mapping), you often need to collect and combine the results. Join nodes serve this purpose by:

 1. Waiting for all parallel tasks to complete
 2. Aggregating their outputs using a [`Reducer`][pydantic_graph.beta.join.Reducer]
@@ -41,7 +41,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate_numbers),
-        g.edge_from(generate_numbers).spread().to(square),
+        g.edge_from(generate_numbers).map().to(square),
         g.edge_from(square).to(collect),
         g.edge_from(collect).to(g.end_node),
     )
@@ -88,7 +88,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate),
-        g.edge_from(generate).spread().to(to_string),
+        g.edge_from(generate).map().to(to_string),
         g.edge_from(to_string).to(collect),
         g.edge_from(collect).to(g.end_node),
     )
@@ -131,7 +131,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate_keys),
-        g.edge_from(generate_keys).spread().to(create_entry),
+        g.edge_from(generate_keys).map().to(create_entry),
         g.edge_from(create_entry).to(merge),
         g.edge_from(merge).to(g.end_node),
     )
@@ -180,7 +180,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate),
-        g.edge_from(generate).spread().to(accumulate),
+        g.edge_from(generate).map().to(accumulate),
         g.edge_from(accumulate).to(ignore),
         g.edge_from(ignore).to(get_total),
         g.edge_from(get_total).to(g.end_node),
@@ -240,7 +240,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate),
-        g.edge_from(generate).spread().to(identity),
+        g.edge_from(generate).map().to(identity),
         g.edge_from(identity).to(sum_join),
         g.edge_from(sum_join).to(g.end_node),
     )
@@ -311,7 +311,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate),
-        g.edge_from(generate).spread().to(process),
+        g.edge_from(generate).map().to(process),
         g.edge_from(process).to(metrics),
         g.edge_from(metrics).to(g.end_node),
     )
@@ -381,8 +381,8 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(source_a, source_b),
-        g.edge_from(source_a).spread().to(process_a),
-        g.edge_from(source_b).spread().to(process_b),
+        g.edge_from(source_a).map().to(process_a),
+        g.edge_from(source_b).map().to(process_b),
         g.edge_from(process_a).to(join_a),
         g.edge_from(process_b).to(join_b),
         g.edge_from(join_a).to(store_a),
@@ -429,6 +429,6 @@ This ensures proper synchronization even with nested parallel operations.

 ## Next Steps

-- Learn about [parallel execution](parallel.md) with broadcasting and spreading
+- Learn about [parallel execution](parallel.md) with broadcasting and mapping
 - Explore [conditional branching](decisions.md) with decision nodes
 - See the [API reference][pydantic_graph.beta.join] for complete reducer documentation

docs/graph/beta/parallel.md

Lines changed: 22 additions & 22 deletions
@@ -1,6 +1,6 @@
 # Parallel Execution

-The beta graph API provides two powerful mechanisms for parallel execution: **broadcasting** and **spreading**.
+The beta graph API provides two powerful mechanisms for parallel execution: **broadcasting** and **mapping**.

 ## Overview

@@ -67,7 +67,7 @@ All three steps receive the same input value (`10`) and execute in parallel.

 Spreading fans out elements from an iterable, processing each element in parallel:

-```python {title="basic_spread.py"}
+```python {title="basic_map.py"}
 from dataclasses import dataclass

 from pydantic_graph.beta import GraphBuilder, ListReducer, StepContext
@@ -94,7 +94,7 @@ async def main():
     # Spreading: each item in the list gets its own parallel execution
     g.add(
         g.edge_from(g.start_node).to(generate_list),
-        g.edge_from(generate_list).spread().to(square),
+        g.edge_from(generate_list).map().to(square),
         g.edge_from(square).to(collect),
         g.edge_from(collect).to(g.end_node),
     )
@@ -107,11 +107,11 @@ async def main():

 _(This example is complete, it can be run "as is" — you'll need to add `import asyncio; asyncio.run(main())` to run `main`)_

-### Using `add_spreading_edge()`
+### Using `add_mapping_edge()`

-The convenience method [`add_spreading_edge()`][pydantic_graph.beta.graph_builder.GraphBuilder.add_spreading_edge] provides a simpler syntax:
+The convenience method [`add_mapping_edge()`][pydantic_graph.beta.graph_builder.GraphBuilder.add_mapping_edge] provides a simpler syntax:

-```python {title="spreading_convenience.py"}
+```python {title="mapping_convenience.py"}
 from dataclasses import dataclass

 from pydantic_graph.beta import GraphBuilder, ListReducer, StepContext
@@ -136,7 +136,7 @@ async def main():
     collect = g.join(ListReducer[str])

     g.add(g.edge_from(g.start_node).to(generate_numbers))
-    g.add_spreading_edge(generate_numbers, stringify)
+    g.add_mapping_edge(generate_numbers, stringify)
     g.add(
         g.edge_from(stringify).to(collect),
         g.edge_from(collect).to(g.end_node),
@@ -152,9 +152,9 @@ _(This example is complete, it can be run "as is" — you'll need to add `import

 ## Empty Iterables

-When spreading an empty iterable, you can specify a `downstream_join_id` to ensure the join still executes:
+When mapping an empty iterable, you can specify a `downstream_join_id` to ensure the join still executes:

-```python {title="empty_spread.py"}
+```python {title="empty_map.py"}
 from dataclasses import dataclass

 from pydantic_graph.beta import GraphBuilder, ListReducer, StepContext
@@ -179,7 +179,7 @@ async def main():
     collect = g.join(ListReducer[int])

     g.add(g.edge_from(g.start_node).to(generate_empty))
-    g.add_spreading_edge(generate_empty, double, downstream_join_id=collect.id)
+    g.add_mapping_edge(generate_empty, double, downstream_join_id=collect.id)
     g.add(
         g.edge_from(double).to(collect),
         g.edge_from(collect).to(g.end_node),
@@ -195,11 +195,11 @@ _(This example is complete, it can be run "as is" — you'll need to add `import

 ## Nested Parallel Operations

-You can nest broadcasts and spreads for complex parallel patterns:
+You can nest broadcasts and maps for complex parallel patterns:

 ### Spread then Broadcast

-```python {title="spread_then_broadcast.py"}
+```python {title="map_then_broadcast.py"}
 from dataclasses import dataclass

 from pydantic_graph.beta import GraphBuilder, ListReducer, StepContext
@@ -230,7 +230,7 @@ async def main():
     g.add(
         g.edge_from(g.start_node).to(generate_list),
         # Spread the list, then broadcast each item to both steps
-        g.edge_from(generate_list).spread().to(add_one, add_two),
+        g.edge_from(generate_list).map().to(add_one, add_two),
         g.edge_from(add_one, add_two).to(collect),
         g.edge_from(collect).to(g.end_node),
     )
@@ -249,7 +249,7 @@ The result contains:

 ### Multiple Sequential Spreads

-```python {title="sequential_spreads.py"}
+```python {title="sequential_maps.py"}
 from dataclasses import dataclass

 from pydantic_graph.beta import GraphBuilder, ListReducer, StepContext
@@ -279,10 +279,10 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate_pairs),
-        # First spread: one task per tuple
-        g.edge_from(generate_pairs).spread().to(unpack_pair),
-        # Second spread: one task per number in each tuple
-        g.edge_from(unpack_pair).spread().to(stringify),
+        # First map: one task per tuple
+        g.edge_from(generate_pairs).map().to(unpack_pair),
+        # Second map: one task per number in each tuple
+        g.edge_from(unpack_pair).map().to(stringify),
         g.edge_from(stringify).to(collect),
         g.edge_from(collect).to(g.end_node),
     )
@@ -324,11 +324,11 @@ async def main():
     collect = g.join(ListReducer[str])

     g.add(g.edge_from(g.start_node).to(generate))
-    g.add_spreading_edge(
+    g.add_mapping_edge(
         generate,
         process,
-        pre_spread_label='before spread',
-        post_spread_label='after spread',
+        pre_map_label='before map',
+        post_map_label='after map',
     )
     g.add(
         g.edge_from(process).to(collect),
@@ -375,7 +375,7 @@ async def main():

     g.add(
         g.edge_from(g.start_node).to(generate),
-        g.edge_from(generate).spread().to(track_and_square),
+        g.edge_from(generate).map().to(track_and_square),
         g.edge_from(track_and_square).to(collect),
         g.edge_from(collect).to(g.end_node),
     )
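The `downstream_join_id` behaviour is the subtlest part of this rename, so here is a hedged sketch of the empty-iterable case based on the `empty_map.py` hunks above. As in the earlier sketch, the `g.step` decorator, `ctx.inputs` attribute, and `build()`/`run()` calls are assumptions not shown in this diff; only the edge-wiring lines are taken from the changed code.

```python
from pydantic_graph.beta import GraphBuilder, ListReducer, StepContext

g = GraphBuilder()  # constructor arguments, if any, are not shown in this diff


@g.step  # assumption: steps are registered via a decorator on the builder
async def generate_empty(ctx: StepContext[None, None, None]) -> list[int]:
    return []  # nothing to fan out


@g.step
async def double(ctx: StepContext[None, None, int]) -> int:
    return ctx.inputs * 2  # assumption: input exposed as `ctx.inputs`


async def main():
    collect = g.join(ListReducer[int])

    g.add(g.edge_from(g.start_node).to(generate_empty))
    # downstream_join_id tells the runtime which join must still fire even
    # though the empty list produces no mapped tasks (see the @@ -179 hunk).
    g.add_mapping_edge(generate_empty, double, downstream_join_id=collect.id)
    g.add(
        g.edge_from(double).to(collect),
        g.edge_from(collect).to(g.end_node),
    )

    graph = g.build()  # assumption: build()/run() entry points
    result = await graph.run(None)
    print(result)  # expected: [] — the join still produces a value
```

The same convenience method also takes the renamed `pre_map_label` / `post_map_label` keyword arguments, as the `@@ -324` hunk above shows.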

docs/graph/beta/steps.md

Lines changed: 1 addition & 1 deletion
@@ -345,6 +345,6 @@ async def returns_str(ctx: StepContext[MyState, None, None]) -> str:

 ## Next Steps

-- Learn about [parallel execution](parallel.md) with broadcasting and spreading
+- Learn about [parallel execution](parallel.md) with broadcasting and mapping
 - Understand [join nodes](joins.md) for aggregating parallel results
 - Explore [conditional branching](decisions.md) with decision nodes

pydantic_graph/pydantic_graph/beta/decision.py

Lines changed: 5 additions & 5 deletions
@@ -112,7 +112,7 @@ class DecisionBranch(Generic[SourceT]):
     path: Path
     """The execution path to follow when an input value matches this branch of a decision node.

-    This can include transforming, spreading, and broadcasting the output before sending to the next node or nodes.
+    This can include transforming, mapping, and broadcasting the output before sending to the next node or nodes.

     The path can also include position-aware labels which are used when generating mermaid diagrams."""

@@ -211,7 +211,7 @@ def transform(
             path_builder=self.path_builder.transform(func),
         )

-    def spread(
+    def map(
         self: DecisionBranchBuilder[StateT, DepsT, Iterable[T], SourceT, HandledT],
         *,
         fork_id: ForkID | None = None,
@@ -224,16 +224,16 @@ def spread(

         Args:
             fork_id: Optional ID for the fork, defaults to a generated value
-            downstream_join_id: Optional ID of a downstream join node which is involved when spreading empty iterables
+            downstream_join_id: Optional ID of a downstream join node which is involved when mapping empty iterables

         Returns:
-            A new DecisionBranchBuilder where spreading is performed prior to generating the final output.
+            A new DecisionBranchBuilder where mapping is performed prior to generating the final output.
         """
         return DecisionBranchBuilder(
             decision=self.decision,
             source=self.source,
             matches=self.matches,
-            path_builder=self.path_builder.spread(fork_id=fork_id, downstream_join_id=downstream_join_id),
+            path_builder=self.path_builder.map(fork_id=fork_id, downstream_join_id=downstream_join_id),
         )

     def label(self, label: str) -> DecisionBranchBuilder[StateT, DepsT, OutputT, SourceT, HandledT]:

pydantic_graph/pydantic_graph/beta/graph.py

Lines changed: 5 additions & 5 deletions
@@ -659,23 +659,23 @@ def _handle_path(self, path: Path, inputs: Any, fork_stack: ForkStack) -> Sequen
                 try:
                     iter(inputs)
                 except TypeError:
-                    raise RuntimeError(f'Cannot spread non-iterable value: {inputs!r}')
+                    raise RuntimeError(f'Cannot map non-iterable value: {inputs!r}')

                 node_run_id = NodeRunID(str(uuid.uuid4()))

-                # If the spread specifies a downstream join id, eagerly create a reducer for it
+                # If the map specifies a downstream join id, eagerly create a reducer for it
                 if item.downstream_join_id is not None:
                     join_node = self.graph.nodes[item.downstream_join_id]
                     assert isinstance(join_node, Join)
                     self._active_reducers[(item.downstream_join_id, node_run_id)] = join_node.create_reducer(), fork_stack

-                spread_tasks: list[GraphTask] = []
+                map_tasks: list[GraphTask] = []
                 for thread_index, input_item in enumerate(inputs):
                     item_tasks = self._handle_path(
                         path.next_path, input_item, fork_stack + (ForkStackItem(item.fork_id, node_run_id, thread_index),)
                     )
-                    spread_tasks += item_tasks
-                return spread_tasks
+                    map_tasks += item_tasks
+                return map_tasks
             elif isinstance(item, BroadcastMarker):
                 return [GraphTask(item.fork_id, inputs, fork_stack)]
             elif isinstance(item, TransformMarker):
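The `graph.py` hunk is a pure rename, but the control flow it touches is worth spelling out: a mapped edge rejects non-iterable inputs, registers the downstream join's reducer before iterating (so the join still fires for an empty iterable), and emits one task per element. Below is a library-free sketch of that behaviour; the function name and shape are illustrative only, not part of `pydantic_graph`.

```python
from typing import Any, Callable


def fan_out_and_join(inputs: Any, process: Callable[[Any], Any]) -> list[Any]:
    """Library-free mirror of the mapped-edge branch renamed in the hunk above."""
    try:
        iter(inputs)
    except TypeError:
        # Same failure mode and wording as the renamed error message.
        raise RuntimeError(f'Cannot map non-iterable value: {inputs!r}')

    # The real code registers the downstream join's reducer *before* iterating,
    # which is why the join still yields a result when the iterable is empty.
    collected: list[Any] = []

    # One task per element; the index below plays the role that
    # ForkStackItem's thread_index plays in the real implementation.
    for _thread_index, input_item in enumerate(inputs):
        collected.append(process(input_item))

    return collected


if __name__ == '__main__':
    print(fan_out_and_join([1, 2, 3], lambda x: x * x))  # [1, 4, 9]
    print(fan_out_and_join([], lambda x: x * x))  # [] — the join still produces a value
    # fan_out_and_join(42, lambda x: x)  # would raise: Cannot map non-iterable value: 42
```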

0 commit comments

Comments
 (0)