Commit c4b270b

AI add a variable which contains all input V variables
V0 = V[0] ...
1 parent 3467cf0 commit c4b270b

13 files changed: +165 -8 lines changed

README.md

Lines changed: 13 additions & 0 deletions
@@ -23,6 +23,13 @@ You can also get the node from comfy manager under the name of More math.
 - Vector Math: Support for List literals `[v1, v2, ...]` and operations between lists/scalars/tensors
 - Custom functions `funcname(variable,variable,...)->expression;` they can be used in any later defined custom function or in the expression. Shadowing inbuilt functions does not work. **Be careful with recursion. There is no stack limit. Got to 700 000 iterations before I got bored.**
 - Custom variables `varname=expression;` They can be used in any later assignment or in the final expression.
+- Support for **indexed assignment**: `a[i, j, ...] = expression;`. Supports multidimensional tensors and nested lists.
+  - **Scalar Filling**: If the assigned value has only 1 element (scalar, 1-element list/tensor), it fills the entire selected slice.
+  - **Rank Matching**: Automatically squeezes leading ones from the value to match the rank of the target slice (e.g., assigning a 4D tensor with `dim0=1` to a 3D slice).
+- **Available Variables**:
+  - `V0`, `V1`, ...: Individual input variables.
+  - `V`: A stacked tensor of all input variables (shape: `[num_variables, ...]`). Available when the input shapes match.
+  - `Vcnt` or `V_count`: Number of input variables.
 - Support for control flow statements including `if/else`, `while` loops, blocks `{}`, and `return` statements. `if`/`else`/`while` do not work like the ternary operator or other inbuilts: they collapse tensors and lists to a single value using `any`.
 - Support for a stack. The stack survives between field evaluations, but not between nodes or past the end of node execution.
   - Useful in the GuiderMath node to store variables between steps.
@@ -37,6 +44,10 @@ You can also get the node from comfy manager under the name of More math.
   - Modifications to existing variables persist to outer scope
 - **Return Statements**: `return [expression];`
   - Early return from functions or top-level expressions
+- **For Loops**: `for (variable in expression) statement`
+  - Iterates over elements of a list or a tensor (along dimension 0)
+- **Break/Continue**: `break;`, `continue;`
+  - Control loop execution (works in `while` and `for` loops)
 
 ## Operators
 
@@ -141,6 +152,8 @@ You can also get the node from comfy manager under the name of More math.
   - `k_expr` can be a math expression (using `kX`, `kY`, `kZ`) or a list literal.
 - `convolution(tensor, kw, [kh], [kd], k_expr)` or `conv`: Applies a convolution to `tensor`. Does not perform automatic permutations. Expects standard PyTorch layout `(Batch, Channel, Spatial...)`.
   - `k_expr` can be a math expression (using `kX`, `kY`, `kZ`) or a list literal.
+- **`get_value(tensor, position)`**: Retrieves a value from a tensor at the specified N-dimensional position (provided as a list or tensor). Uses the formula `pos0*strides[0] + pos1*strides[1] + ...` to find the linear index.
+- **`crop(tensor, position, size)`**: Extracts a sub-tensor of specified `size` starting at `position` (both provided as lists/tensors). Areas outside the input tensor are filled with zeros.
 
 - `permute(tensor, dims)` or `perm`: Rearranges the dimensions of the tensor. (e.g., `perm(a, [2, 3, 0, 1])`)
 - `reshape(tensor, shape)` or `rshp`: Reshapes the tensor to a new shape. (e.g., `rshp(a, [S0*S1, S2, S3])`)
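
The `V` / `Vcnt` variables documented above can be pictured with a small PyTorch sketch. This is illustrative only: the tensor shapes are made up, and the stacking behaviour is inferred from the commit message (`V0 = V[0] ...`) and the documented `[num_variables, ...]` shape.

```python
import torch

# Two hypothetical inputs with matching shapes (a requirement stated above).
V0 = torch.zeros(1, 4, 64, 64)
V1 = torch.ones(1, 4, 64, 64)

# The stacked variable described in the README: V[0] corresponds to V0, V[1] to V1.
V = torch.stack([V0, V1], dim=0)   # shape: [2, 1, 4, 64, 64], i.e. [num_variables, ...]
Vcnt = float(V.shape[0])           # 2.0, also exposed as V_count

assert torch.equal(V[0], V0) and Vcnt == 2.0
```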

more_math/AudioMathNode.py

Lines changed: 7 additions & 0 deletions
@@ -5,6 +5,7 @@
     as_tensor,
     normalize_to_common_shape,
     make_zero_like,
+    get_v_variable
 )
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 from comfy_api.latest import io

@@ -127,6 +128,12 @@ def execute(cls, V, F, Expression, length_mismatch="tile"):
             "batch_count": a_w.shape[0],
         } | generate_dim_variables(a_w) | V_norm_waveforms | sample_rates
 
+        v_stacked, v_cnt = get_v_variable(V_norm_waveforms, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         for k, val in F.items():
             variables[k] = val if val is not None else 0.0
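
The `get_v_variable` helper is imported from `.helper_functions`, but its implementation is not visible in this diff. Based on how it is called here (a dict of normalized inputs plus a `length_mismatch` mode in, a stacked tensor or `None` plus a count out), a minimal sketch of what it plausibly does is shown below. The real helper presumably also handles plain float inputs (see FloatMathNode further down) and the `length_mismatch` modes, which are only hinted at in the sketch.

```python
import torch

def get_v_variable(v_inputs: dict, length_mismatch: str = "tile"):
    """Hypothetical sketch: stack the V0, V1, ... inputs into one tensor.

    Returns (stacked_tensor_or_None, count). Stacking is skipped (None is
    returned) when the inputs cannot be reconciled into a common shape.
    """
    # Keep only the dynamic V* entries that actually hold tensors (V0, V1, ...).
    tensors = [v for k, v in sorted(v_inputs.items())
               if k.startswith("V") and isinstance(v, torch.Tensor)]
    if not tensors:
        return None, 0
    if any(t.shape != tensors[0].shape for t in tensors):
        # The real helper presumably resolves mismatches according to
        # `length_mismatch` ("tile", "error", ...); this sketch just bails out.
        return None, len(tensors)
    return torch.stack(tensors, dim=0), len(tensors)
```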

more_math/ConditioningMathNode.py

Lines changed: 13 additions & 1 deletion
@@ -1,6 +1,6 @@
 from unittest import result
 import torch
-from .helper_functions import generate_dim_variables, parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape, make_zero_like
+from .helper_functions import generate_dim_variables, parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape, make_zero_like, get_v_variable
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 from comfy_api.latest import io
 from antlr4 import InputStream, CommonTokenStream

@@ -135,6 +135,12 @@ def execute(cls, V, F, Expression, Expression_pi,batching, length_mismatch="tile
             "batch_count": a.shape[0],
         } | generate_dim_variables(a) | V_norm_tensors
 
+        v_stacked, v_cnt = get_v_variable(V_norm_tensors, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         for k, val in F.items():
             variables[k] = val if val is not None else 0.0

@@ -164,6 +170,12 @@ def execute(cls, V, F, Expression, Expression_pi,batching, length_mismatch="tile
             "batch_count": a_p.shape[0] if a_p.numel() > 0 else 0,
         } | generate_dim_variables(a_p) | V_norm_pooled
 
+        v_stacked, v_cnt = get_v_variable(V_norm_pooled, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables_pi["V"] = v_stacked
+            variables_pi["Vcnt"] = float(v_cnt)
+            variables_pi["V_count"] = float(v_cnt)
+
         for k, val in F.items():
             variables_pi[k] = val if val is not None else 0.0

more_math/FloatMathNode.py

Lines changed: 7 additions & 1 deletion
@@ -1,7 +1,7 @@
 from inspect import cleandoc
 import torch
 
-from .helper_functions import parse_expr
+from .helper_functions import parse_expr, get_v_variable
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 
 from comfy_api.latest import io

@@ -86,6 +86,12 @@ def execute(cls, FloatFunc, V):
         for k, val in V.items():
             variables[k] = val if val is not None else 0.0
 
+        v_stacked, v_cnt = get_v_variable(variables)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         tree = parse_expr(FloatFunc);
         # scalar execution
         # UnifiedMathVisitor expects variables and a shape. Shape [1] for scalar?
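
In the float node the dynamic inputs are plain scalars rather than tensors, and `get_v_variable` is called without a `length_mismatch` argument. Under the same assumptions as the sketch above, the stacked `V` here would simply be a 1-D tensor of the scalar inputs; the values below are made up for illustration.

```python
import torch

# Hypothetical scalar inputs as they would appear in the float node.
inputs = {"V0": 1.5, "V1": -2.0, "V2": 0.25}

# A 1-D stacked view of the scalars; Vcnt / V_count would be 3.0.
V = torch.tensor([inputs[k] for k in sorted(inputs)])  # tensor([ 1.5000, -2.0000,  0.2500])
Vcnt = float(V.shape[0])
```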

more_math/GuiderMathNode.py

Lines changed: 7 additions & 0 deletions
@@ -10,6 +10,7 @@
     parse_expr,
     make_zero_like,
     as_tensor,
+    get_v_variable
 )
 from comfy_api.latest import io
 import comfy.sampler_helpers

@@ -165,6 +166,12 @@ def setVars(self, x, sigma, seed, g_results):
             "c": g_results.get("V2", make_zero_like(eval_samples)),
             "d": g_results.get("V3", make_zero_like(eval_samples)),
         })
+
+        v_stacked, v_cnt = get_v_variable(g_results)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
 
         for k, v in self.F.items():
             variables[k] = v if v is not None else 0.0

more_math/ImageMathNode.py

Lines changed: 7 additions & 1 deletion
@@ -1,4 +1,4 @@
-from .helper_functions import generate_dim_variables, parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape, make_zero_like
+from .helper_functions import generate_dim_variables, parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape, make_zero_like, get_v_variable
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 from comfy_api.latest import io
 from antlr4 import InputStream, CommonTokenStream

@@ -128,6 +128,12 @@ def execute(cls, V, F, Expression, length_mismatch="error"):
         # Add all dynamic inputs
         variables.update(V_norm)
 
+        v_stacked, v_cnt = get_v_variable(V_norm, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         for k, val in F.items():
             variables[k] = val if val is not None else 0.0

more_math/LatentMathNode.py

Lines changed: 8 additions & 1 deletion
@@ -6,7 +6,8 @@
     parse_expr,
     as_tensor,
     normalize_to_common_shape,
-    make_zero_like
+    make_zero_like,
+    get_v_variable
 )
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 import torch

@@ -181,6 +182,12 @@ def execute(cls, V, F, Expression,batching, length_mismatch="tile") -> io.NodeOu
         # Add all dynamic inputs
         variables.update(V_norm_samples)
 
+        v_stacked, v_cnt = get_v_variable(V_norm_samples, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         for k, v in F.items():
             variables[k] = v if v is not None else 0.0

more_math/MaskMathNode.py

Lines changed: 7 additions & 1 deletion
@@ -1,4 +1,4 @@
-from .helper_functions import generate_dim_variables,parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape,make_zero_like
+from .helper_functions import generate_dim_variables,parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape,make_zero_like, get_v_variable
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 from comfy_api.latest import io
 from antlr4 import InputStream, CommonTokenStream

@@ -120,6 +120,12 @@ def execute(cls, V, F, Expression, length_mismatch="tile"):
             "batch_count": ae.shape[0],
         } | generate_dim_variables(ae)
 
+        v_stacked, v_cnt = get_v_variable(V_norm, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         # Add all dynamic inputs
         variables.update(V_norm)

more_math/NoiseMathNode.py

Lines changed: 7 additions & 1 deletion
@@ -1,4 +1,4 @@
-from .helper_functions import generate_dim_variables, as_tensor, parse_expr, getIndexTensorAlongDim, make_zero_like
+from .helper_functions import generate_dim_variables, as_tensor, parse_expr, getIndexTensorAlongDim, make_zero_like, get_v_variable
 from comfy_api.latest import io
 import torch
 from .Parser.MathExprParser import MathExprParser,InputStream,CommonTokenStream

@@ -123,6 +123,12 @@ def generate_noise(self, input_latent: torch.Tensor) -> torch.Tensor:
             "input_latent": samples,
         } | generate_dim_variables(samples) | vals | self.F
 
+        v_stacked, v_cnt = get_v_variable(vals)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         if time_dim is not None:
             F = getIndexTensorAlongDim(samples, time_dim)
             variables.update({"frame": F, "frame_count": frame_count})

more_math/VideoMathNode.py

Lines changed: 14 additions & 1 deletion
@@ -1,5 +1,5 @@
 import torch
-from .helper_functions import generate_dim_variables, parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape, make_zero_like
+from .helper_functions import generate_dim_variables, parse_expr, getIndexTensorAlongDim, as_tensor, normalize_to_common_shape, make_zero_like, get_v_variable
 from .Parser.UnifiedMathVisitor import UnifiedMathVisitor
 from comfy_api.latest import io
 from antlr4 import InputStream, CommonTokenStream

@@ -130,6 +130,12 @@ def execute(cls, V, F, Expression, Expression_pi, length_mismatch="tile"):
             "channel_count": ae.shape[3],
         } | generate_dim_variables(ae)
 
+        v_stacked, v_cnt = get_v_variable(V_norm, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         # Add all dynamic inputs
         variables.update(V_norm)
 

@@ -187,6 +193,13 @@ def execute(cls, V, F, Expression, Expression_pi, length_mismatch="tile"):
             "batch_count": a_w.shape[0],
         } | generate_dim_variables(a_w) | V_norm_waveforms | sample_rates
 
+        v_stacked, v_cnt = get_v_variable(V_norm_waveforms, length_mismatch=length_mismatch)
+        if v_stacked is not None:
+            # This 'variables' is the one for the second eval in VideoMathNode
+            variables["V"] = v_stacked
+            variables["Vcnt"] = float(v_cnt)
+            variables["V_count"] = float(v_cnt)
+
         for k, val in F.items():
             variables[k] = val if val is not None else 0.0
