Commit b32bdfb

clean up

1 parent f5aa742 commit b32bdfb

12 files changed: +39 −104 lines

Project.toml

Lines changed: 0 additions & 2 deletions
@@ -9,7 +9,6 @@ DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 OMEinsum = "ebe7aa44-baf0-506c-a96f-8464559b3922"
 Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
-PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a"
 PrettyTables = "08abe8d2-0d0c-5749-adfa-8a2ac140af0d"
 ProblemReductions = "899c297d-f7d2-4ebf-8815-a35996def416"
 StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
@@ -28,7 +27,6 @@ DocStringExtensions = "0.8.6, 0.9"
 LinearAlgebra = "1"
 OMEinsum = "0.8"
 Pkg = "1"
-PrecompileTools = "1"
 PrettyTables = "2"
 ProblemReductions = "0.3"
 StatsBase = "0.34"

examples/hard-core-lattice-gas/main.jl

Lines changed: 1 addition & 1 deletion
@@ -62,7 +62,7 @@ mars = marginals(pmodel)
 show_graph(SimpleGraph(graph), sites; vertex_colors=[(b = mars[[i]][2]; (1-b, 1-b, 1-b)) for i in 1:nv(graph)], texts=fill("", nv(graph)))
 # The can see the sites at the corner is more likely to be occupied.
 # To obtain two-site correlations, one can set the variables to query marginal probabilities manually.
-pmodel2 = TensorNetworkModel(problem, β; mars=[[e.src, e.dst] for e in edges(graph)])
+pmodel2 = TensorNetworkModel(problem, β; unity_tensors_labels = [[e.src, e.dst] for e in edges(graph)])
 mars = marginals(pmodel2);

 # We show the probability that both sites on an edge are not occupied
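
Editor's note: the change to this example is a pure keyword rename; positional arguments and behavior are unchanged. A minimal migration sketch, reusing the `problem`, `β`, and `graph` objects defined earlier in the example script:

    # old keyword (removed): mars = [[e.src, e.dst] for e in edges(graph)]
    pmodel2 = TensorNetworkModel(problem, β; unity_tensors_labels = [[e.src, e.dst] for e in edges(graph)])
    mars = marginals(pmodel2)   # Dict keyed by the label pairs, e.g. mars[[e.src, e.dst]]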

src/Core.jl

Lines changed: 10 additions & 69 deletions
@@ -47,16 +47,16 @@ Probabilistic modeling with a tensor network.
 ### Fields
 * `vars` are the degrees of freedom in the tensor network.
 * `code` is the tensor network contraction pattern.
-* `tensors` are the tensors fed into the tensor network, the leading tensors are unity tensors associated with `mars`.
+* `tensors` are the tensors fed into the tensor network, the leading tensors are unity tensors associated with `unity_tensors_labels`.
 * `evidence` is a dictionary used to specify degrees of freedom that are fixed to certain values.
-* `mars` is a vector, each element is a vector of variables to compute marginal probabilities.
+* `unity_tensors_idx` is a vector of indices of the unity tensors in the `tensors` array. Unity tensors are dummy tensors used to obtain the marginal probabilities.
 """
 struct TensorNetworkModel{LT, ET, MT <: AbstractArray}
     vars::Vector{LT}
     code::ET
     tensors::Vector{MT}
     evidence::Dict{LT, Int}
-    mars::Vector{Vector{LT}}
+    unity_tensors_idx::Vector{Int}
 end

 """
@@ -110,84 +110,25 @@ $(TYPEDSIGNATURES)
 * `evidence` is a dictionary of evidences, the values are integers start counting from 0.
 * `optimizer` is the tensor network contraction order optimizer, please check the package [`OMEinsumContractionOrders.jl`](https://github.com/TensorBFS/OMEinsumContractionOrders.jl) for available algorithms.
 * `simplifier` is some strategies for speeding up the `optimizer`, please refer the same link above.
-* `mars` is a list of marginal probabilities. It is all single variables by default, i.e. `[[1], [2], ..., [n]]`. One can also specify multi-variables, which may increase the computational complexity.
+* `unity_tensors_labels` is a list of labels for the unity tensors. It is all single variables by default, i.e. `[[1], [2], ..., [n]]`. One can also specify multi-variables, which may increase the computational complexity.
 """
 function TensorNetworkModel(
-    model::UAIModel;
+    model::UAIModel{ET, FT};
     openvars = (),
     evidence = Dict{Int,Int}(),
     optimizer = GreedyMethod(),
     simplifier = nothing,
-    mars = [[i] for i=1:model.nvars]
-)::TensorNetworkModel
-    return TensorNetworkModel(
-        1:(model.nvars),
-        model.cards,
-        model.factors;
-        openvars,
-        evidence,
-        optimizer,
-        simplifier,
-        mars
-    )
-end
-
-"""
-$(TYPEDSIGNATURES)
-"""
-function TensorNetworkModel(
-    vars::AbstractVector{LT},
-    cards::AbstractVector{Int},
-    factors::Vector{<:Factor{T}};
-    openvars = (),
-    evidence = Dict{LT, Int}(),
-    optimizer = GreedyMethod(),
-    simplifier = nothing,
-    mars = [[v] for v in vars]
-)::TensorNetworkModel where {T, LT}
-    # The 1st argument of `EinCode` is a vector of vector of labels for specifying the input tensors,
-    # The 2nd argument of `EinCode` is a vector of labels for specifying the output tensor,
-    # e.g.
-    # `EinCode([[1, 2], [2, 3]], [1, 3])` is the EinCode for matrix multiplication.
-    rawcode = EinCode([mars..., [[factor.vars...] for factor in factors]...], collect(LT, openvars)) # labels for vertex tensors (unity tensors) and edge tensors
-    tensors = Array{T}[[ones(T, [cards[i] for i in mar]...) for mar in mars]..., [t.vals for t in factors]...]
-    return TensorNetworkModel(collect(LT, vars), rawcode, tensors; evidence, optimizer, simplifier, mars)
-end
-
-"""
-$(TYPEDSIGNATURES)
-"""
-function TensorNetworkModel(
-    vars::AbstractVector{LT},
-    rawcode::EinCode,
-    tensors::Vector{<:AbstractArray};
-    evidence = Dict{LT, Int}(),
-    optimizer = GreedyMethod(),
-    simplifier = nothing,
-    mars = [[v] for v in vars]
-)::TensorNetworkModel where {LT}
+    unity_tensors_labels = [[i] for i=1:model.nvars]
+) where {ET, FT}
     # `optimize_code` optimizes the contraction order of a raw tensor network without a contraction order specified.
     # The 1st argument is the contraction pattern to be optimized (without contraction order).
     # The 2nd arugment is the size dictionary, which is a label-integer dictionary.
     # The 3rd and 4th arguments are the optimizer and simplifier that configures which algorithm to use and simplify.
+    rawcode = EinCode([unity_tensors_labels..., [[factor.vars...] for factor in model.factors]...], collect(Int, openvars)) # labels for vertex tensors (unity tensors) and edge tensors
+    tensors = Array{ET}[[ones(ET, [model.cards[i] for i in lb]...) for lb in unity_tensors_labels]..., [t.vals for t in model.factors]...]
    size_dict = OMEinsum.get_size_dict(getixsv(rawcode), tensors)
    code = optimize_code(rawcode, size_dict, optimizer, simplifier)
-    TensorNetworkModel(collect(LT, vars), code, tensors, evidence, mars)
-end
-
-"""
-$(TYPEDSIGNATURES)
-"""
-function TensorNetworkModel(
-    model::UAIModel{T}, code;
-    evidence = Dict{Int,Int}(),
-    mars = [[i] for i=1:model.nvars],
-    vars = [1:model.nvars...]
-)::TensorNetworkModel where{T}
-    @debug "constructing tensor network model from code"
-    tensors = Array{T}[[ones(T, [model.cards[i] for i in mar]...) for mar in mars]..., [t.vals for t in model.factors]...]
-
-    return TensorNetworkModel(vars, code, tensors, evidence, mars)
+    return TensorNetworkModel(collect(Int, 1:model.nvars), code, tensors, evidence, collect(Int, 1:length(unity_tensors_labels)))
 end

 """
src/TensorInference.jl

Lines changed: 0 additions & 9 deletions
@@ -52,13 +52,4 @@ include("mmap.jl")
 include("sampling.jl")
 include("cspmodels.jl")

-# import PrecompileTools
-# PrecompileTools.@setup_workload begin
-#     # Putting some things in `@setup_workload` instead of `@compile_workload` can reduce the size of the
-#     # precompile file and potentially make loading faster.
-#     PrecompileTools.@compile_workload begin
-#         include("../example/asia-network/main.jl")
-#     end
-# end
-
 end # module

src/cspmodels.jl

Lines changed: 6 additions & 4 deletions
@@ -28,10 +28,11 @@ Convert a constraint satisfiability problem (or energy model) to a probabilistic
 * `mars` is the list of variables to be marginalized.
 """
 function TensorNetworkModel(problem::ConstraintSatisfactionProblem, β::T; evidence::Dict=Dict{Int,Int}(),
-        optimizer=GreedyMethod(), openvars=Int[], simplifier=nothing, mars=[[l] for l in variables(problem)]) where T <: Real
+        optimizer=GreedyMethod(), openvars=Int[], simplifier=nothing, unity_tensors_labels = [[l] for l in variables(problem)]) where T <: Real
     tensors, ixs = generate_tensors(β, problem)
     factors = [Factor((ix...,), t) for (ix, t) in zip(ixs, tensors)]
-    return TensorNetworkModel(variables(problem), fill(num_flavors(problem), num_variables(problem)), factors; openvars, evidence, optimizer, simplifier, mars)
+    model = UAIModel(num_variables(problem), fill(num_flavors(problem), num_variables(problem)), factors)
+    return TensorNetworkModel(model; openvars, evidence, optimizer, simplifier, unity_tensors_labels)
 end

 """
@@ -47,8 +48,9 @@ The program will regenerate tensors from the problem, without repeated optimizin
 """
 function update_temperature(tnet::TensorNetworkModel, problem::ConstraintSatisfactionProblem, β::Real)
     tensors, ixs = generate_tensors(β, problem)
-    alltensors = [tnet.tensors[1:length(tnet.mars)]..., tensors...]
-    return TensorNetworkModel(tnet.vars, tnet.code, alltensors, tnet.evidence, tnet.mars)
+    @assert tnet.unity_tensors_idx == collect(1:length(tnet.unity_tensors_idx)) "The target tensor network can not be updated! Got `unity_tensors_idx = $(tnet.unity_tensors_idx)`"
+    alltensors = [tnet.tensors[tnet.unity_tensors_idx]..., tensors...]
+    return TensorNetworkModel(tnet.vars, tnet.code, alltensors, tnet.evidence, tnet.unity_tensors_idx)
 end

 function MMAPModel(problem::ConstraintSatisfactionProblem, β::Real;
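
Editor's note: because `update_temperature` now asserts that the unity tensors occupy the leading slots, models built through the default path stay updatable. A hedged usage sketch, borrowing the Petersen-graph setup from this commit's tests:

    using TensorInference, GenericTensorNetworks

    g = GenericTensorNetworks.Graphs.smallgraph(:petersen)
    problem = IndependentSet(g)
    tnet = TensorNetworkModel(problem, 2.0)          # contraction order optimized once
    tnet2 = update_temperature(tnet, problem, 3.0)   # tensors regenerated, order reused
    marginals(tnet2)                                 # marginals at the new temperature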

src/map.jl

Lines changed: 5 additions & 3 deletions
@@ -53,13 +53,15 @@ $(TYPEDSIGNATURES)
 Returns the largest log-probability and the most probable configuration.
 """
 function most_probable_config(tn::TensorNetworkModel; usecuda = false)::Tuple{Real, Vector}
-    expected_mars = [[l] for l in get_vars(tn)]
-    @assert tn.mars[1:length(expected_mars)] == expected_mars "To get the the most probable configuration, the leading elements of `tn.vars` must be `$expected_mars`"
+    ixs = OMEinsum.getixsv(tn.code)
+    unity_labels = ixs[tn.unity_tensors_idx]
+    indices = [findfirst(==([l]), unity_labels) for l in get_vars(tn)]
+    @assert !any(isnothing, indices) "To get the the most probable configuration, the unity tensors labels must include all variables"
     vars = get_vars(tn)
     tensors = map(t -> Tropical.(log.(t)), adapt_tensors(tn; usecuda, rescale = false))
     logp, grads = cost_and_gradient(tn.code, tensors)
     # use Array to convert CuArray to CPU arrays
-    return content(Array(logp)[]), map(k -> haskey(tn.evidence, vars[k]) ? tn.evidence[vars[k]] : argmax(grads[k]) - 1, 1:length(vars))
+    return content(Array(logp)[]), map(k -> haskey(tn.evidence, vars[k]) ? tn.evidence[vars[k]] : argmax(grads[tn.unity_tensors_idx[indices[k]]]) - 1, 1:length(vars))
 end

 """

src/mar.jl

Lines changed: 5 additions & 4 deletions
@@ -130,7 +130,7 @@ are their respective marginals. A marginal is a probability distribution over
 a subset of variables, obtained by integrating or summing over the remaining
 variables in the model. By default, the function returns the marginals of all
 individual variables. To specify which marginal variables to query, set the
-`mars` field when constructing a [`TensorNetworkModel`](@ref). Note that
+`unity_tensors_labels` field when constructing a [`TensorNetworkModel`](@ref). Note that
 the choice of marginal variables will affect the contraction order of the
 tensor network.

@@ -158,7 +158,7 @@ Dict{Vector{Int64}, Vector{Float64}} with 8 entries:
   [7] => [0.145092, 0.854908]
   [2] => [0.05, 0.95]

-julia> tn2 = TensorNetworkModel(model; evidence=Dict(1=>0), mars=[[2, 3], [3, 4]]);
+julia> tn2 = TensorNetworkModel(model; evidence=Dict(1=>0), unity_tensors_labels = [[2, 3], [3, 4]]);

 julia> marginals(tn2)
 Dict{Vector{Int64}, Matrix{Float64}} with 2 entries:
@@ -186,9 +186,10 @@ function marginals(tn::TensorNetworkModel; usecuda = false, rescale = true)::Dic
     # sometimes, the cost can overflow, then we need to rescale the tensors during contraction.
     cost, grads = cost_and_gradient(tn.code, adapt_tensors(tn; usecuda, rescale))
     @debug "cost = $cost"
+    ixs = OMEinsum.getixsv(tn.code)
     if rescale
-        return Dict(zip(tn.mars, LinearAlgebra.normalize!.(getfield.(grads[1:length(tn.mars)], :normalized_value), 1)))
+        return Dict(zip(ixs[tn.unity_tensors_idx], LinearAlgebra.normalize!.(getfield.(grads[1:length(tn.unity_tensors_idx)], :normalized_value), 1)))
     else
-        return Dict(zip(tn.mars, LinearAlgebra.normalize!.(grads[1:length(tn.mars)], 1)))
+        return Dict(zip(ixs[tn.unity_tensors_idx], LinearAlgebra.normalize!.(grads[1:length(tn.unity_tensors_idx)], 1)))
     end
 end
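
Editor's note: from the caller's side the returned dictionary is unchanged; it is keyed by the label vectors, now read back from the contraction code instead of a stored field. A sketch following the docstring example above (`model` is the one constructed there):

    tn2 = TensorNetworkModel(model; evidence = Dict(1 => 0), unity_tensors_labels = [[2, 3], [3, 4]])
    mars = marginals(tn2)   # Dict{Vector{Int64}, Matrix{Float64}} with 2 entries
    mars[[2, 3]]            # 2×2 joint marginal of variables 2 and 3, normalized to sum 1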

src/utils.jl

Lines changed: 1 addition & 1 deletion
@@ -356,7 +356,7 @@ function random_matrix_product_state(::Type{T}, n::Int, chi::Int, d::Int=2) wher
         optimize_code(DynamicEinCode(ixs, Int[]), OMEinsum.get_size_dict(ixs, tensors), GreedyMethod()),
         tensors,
         Dict{Int, Int}(),
-        Vector{Int}[[i] for i=1:n]
+        collect(1:n)
     )
 end
 random_matrix_product_state(n::Int, chi::Int, d::Int=2) = random_matrix_product_state(ComplexF64, n, chi, d)
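
Editor's note: the last positional field is now the vector of unity-tensor indices rather than their labels; since this helper creates one unity tensor per site and stores them first, `collect(1:n)` is the faithful translation. A hedged sketch of the resulting field (the helper is internal, hence qualified):

    mps = TensorInference.random_matrix_product_state(5, 4)   # 5 sites, bond dimension χ = 4
    mps.unity_tensors_idx == collect(1:5)                     # one leading unity tensor per site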

test/cspmodels.jl

Lines changed: 2 additions & 2 deletions
@@ -7,7 +7,7 @@ using GenericTensorNetworks
 β = 2.0
 g = GenericTensorNetworks.Graphs.smallgraph(:petersen)
 problem = IndependentSet(g)
-model = TensorNetworkModel(problem, β; mars=[[2, 3]])
+model = TensorNetworkModel(problem, β; unity_tensors_labels = [[2, 3]])
 mars = marginals(model)[[2, 3]]
 problem2 = IndependentSet(g)
 mars2 = TensorInference.normalize!(GenericTensorNetworks.solve(GenericTensorNetwork(problem2; openvertices=[2, 3]), PartitionFunction(β)), 1)
@@ -28,7 +28,7 @@ using GenericTensorNetworks

 β = 1.0
 problem = SpinGlass(g, -ones(Int, ne(g)), zeros(Int, nv(g)))
-model = TensorNetworkModel(problem, β; mars=[[2, 3]])
+model = TensorNetworkModel(problem, β; unity_tensors_labels = [[2, 3]])
 samples = sample(model, 100)
 @test sum(energy.(Ref(problem), samples))/100 <= -14
 end

test/map.jl

Lines changed: 2 additions & 4 deletions
@@ -2,16 +2,14 @@ using Test
 using OMEinsum
 using TensorInference

-@testset "load from code" begin
+@testset "load from model" begin
     model = problem_from_artifact("uai2014", "MAR", "Promedus", 14)

     tn1 = TensorNetworkModel(read_model(model);
         evidence=read_evidence(model),
         optimizer = TreeSA(ntrials = 3, niters = 2, βs = 1:0.1:80))

-    tn2 = TensorNetworkModel(read_model(model), tn1.code, evidence=read_evidence(model))
-
-    @test tn1.code == tn2.code
+    @test tn1 isa TensorNetworkModel
 end

 @testset "gradient-based tensor network solvers" begin
