Commit de58c60

fixed and restored testpartialconstraint
1 parent eb8be7d commit de58c60

4 files changed: +17 -42 lines changed


src/services/CalcFactor.jl

Lines changed: 10 additions & 37 deletions
@@ -504,7 +504,6 @@ function _beforeSolveCCW!(
 F_::Type{<:AbstractRelative},
 ccwl::CommonConvWrapper{F},
 variables::AbstractVector{<:DFGVariable},
-# destVarVals::AbstractVector,
 sfidx::Int,
 N::Integer;
 measurement = Vector{Tuple{}}(),
@@ -519,46 +518,27 @@ function _beforeSolveCCW!(
 end
 
 # in forward solve case, important to set which variable is being solved early in this sequence
+# set the 'solvefor' variable index -- i.e. which connected variable of the factor is being computed in this convolution.
 ccwl.varidx[] = sfidx
+# ccwl.varidx[] = findfirst(==(solvefor), getLabel.(variables))
 
-# TBD, order of fmd ccwl cf are a little weird and should be revised.
 # TODO, maxlen should parrot N (barring multi-/nullhypo issues)
-# set the 'solvefor' variable index -- i.e. which connected variable of the factor is being computed in this convolution.
-# ccwl.varidx[] = findfirst(==(solvefor), getLabel.(variables))
 # everybody use maxlen number of points in belief function estimation
 maxlen = maximum((N, length.(ccwl.varValsAll[])...,))
 
-# splice
-# should be type stable
+# splice, type stable
+# make deepcopy of destination variable since multiple approxConv type computations should happen from different factors to the same variable
 tvarv = tuple(
   map(s->getVal(s; solveKey), variables[1:sfidx-1])...,
-  deepcopy(ccwl.varValsAll[][sfidx]), # destVarVals = deepcopy(ccwl.varValsAll[][sfidx])
+  deepcopy(getVal(variables[sfidx]; solveKey)), # deepcopy(ccwl.varValsAll[][sfidx]),
   map(s->getVal(s; solveKey), variables[sfidx+1:end])...,
-)
-# not type-stable
-# varvals = [getVal(s; solveKey) for s in variables]
-# varvals[sfidx] = destVarVals
-# varvals_ = [varvals[1:sfidx-1]..., destVarVals, varvals[sfidx+1:end]...]
-# tvarv = tuple(varvals...)
-
-
-# @info "TYPES" typeof(ccwl.varValsAll[]) typeof(tvarv)
+)
 ccwl.varValsAll[] = tvarv
-# ccwl.varValsAll[] = map(s->getVal(s; solveKey), tuple(variables...))
-## PLAN B, make deep copy of ccwl.varValsAll[ccwl.varidx[]] just before the numerical solve
-
-# maxlen, ccwl.varidx[] = _updateParamVec(variables, solvefor, ccwl.varValsAll, N; solveKey)
-# # TODO, ensure all values (not just multihypothesis) is correctly used from here
-# for (i,varVal) in enumerate(_varValsAll)
-#   resize!(ccwl.varValsAll[i],length(varVal))
-#   ccwl.varValsAll[i][:] = varVal #TODO Kyk hierna: this looks like it will effectively result in vnd.val memory being overwritten
-# end
 
-# TODO better consolidation still possible
-# FIXME ON FIRE, what happens if this is a partial dimension factor? See #1246
-# FIXME, confirm this is hypo sensitive selection from variables, better to use double indevariablesng for clarity getDimension(ccw.fullvariables[hypoidx[ccwl.varidx[]]])
-xDim = getDimension(getVariableType(variables[ccwl.varidx[]])) # ccwl.varidx[]
-# ccwl.xDim = xDim
+
+# FIXME, confirm what happens when this is a partial dimension factor? See #1246
+# indexing over all possible hypotheses
+xDim = getDimension(getVariableType(variables[ccwl.varidx[]]))
 # TODO maybe refactor different type or api call?
 
 # setup the partial or complete decision variable dimensions for this ccwl object
@@ -567,7 +547,6 @@ function _beforeSolveCCW!(
 _setCCWDecisionDimsConv!(ccwl, xDim)
 
 # FIXME do not divert Mixture for sampling
-
 updateMeasurement!(ccwl, maxlen; needFreshMeasurements, measurement, _allowThreads=true)
 
 # used in ccw functor for AbstractRelativeMinimize
@@ -584,7 +563,6 @@ function _beforeSolveCCW!(
 F_::Type{<:AbstractPrior},
 ccwl::CommonConvWrapper{F},
 variables::AbstractVector{<:DFGVariable},
-# destVarVals::AbstractVector,
 sfidx::Int,
 N::Integer;
 measurement = Vector{Tuple{}}(),
@@ -594,12 +572,7 @@ function _beforeSolveCCW!(
 # FIXME, NEEDS TO BE CLEANED UP AND WORK ON MANIFOLDS PROPER
 
 ccwl.varidx[] = sfidx
-# fnc = ccwl.usrfnc!
-# sfidx = findfirst(getLabel.(variables) .== solvefor)
 @assert ccwl.varidx[] == 1 "Solving on Prior with CCW should have sfidx=1, priors are unary factors."
-# @assert sfidx == 1 "Solving on Prior with CCW should have sfidx=1, priors are unary factors."
-# ccwl.varidx[] = sfidx
-# sfidx = 1 # why hardcoded to 1, maybe for only the AbstractPrior case here
 
 # setup the partial or complete decision variable dimensions for this ccwl object
 # NOTE perhaps deconv has changed the decision variable list, so placed here during consolidation phase
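
Not part of the commit, just a standalone sketch of the "splice" pattern the hunk above settles on: a tuple of per-variable point vectors is assembled in which only the solve-for slot is a deepcopy, so the convolution can overwrite the destination points without mutating the values stored on the graph. Plain Vectors stand in for DFG variables; varvals, sfidx and the sizes are made up for illustration.

    # Three hypothetical variables with 100 two-dimensional points each.
    varvals = [[randn(2) for _ in 1:100] for _ in 1:3]
    sfidx   = 2   # index of the variable being solved for

    # Splice: reuse the other variables' point vectors, deepcopy only the destination.
    tvarv = tuple(
      varvals[1:sfidx-1]...,
      deepcopy(varvals[sfidx]),
      varvals[sfidx+1:end]...,
    )

    # The destination slot is fresh memory; the other slots alias the originals.
    @assert tvarv[sfidx] !== varvals[sfidx]
    @assert all(tvarv[i] === varvals[i] for i in eachindex(varvals) if i != sfidx)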

src/services/EvalFactor.jl

Lines changed: 1 addition & 1 deletion
@@ -390,7 +390,7 @@ function evalPotentialSpecific(
 end
 
 # return the found points, and info per coord
-return ccwl.varValsAll[][sfidx], ipc # same memory locazation as (destinationVarVals, ipc)
+return ccwl.varValsAll[][sfidx], ipc
 end
 
 
test/runtests.jl

Lines changed: 0 additions & 1 deletion
@@ -56,7 +56,6 @@ include("testDefaultDeconv.jl")
 
 include("testPartialFactors.jl")
 include("testPartialPrior.jl")
-@error "MUST RESTORE PARTIALCONSTRAINT TEST"
 include("testpartialconstraint.jl")
 include("testPartialNH.jl")
 include("testMixturePrior.jl")

test/testpartialconstraint.jl

Lines changed: 6 additions & 3 deletions
@@ -263,7 +263,8 @@ global v2, fg
 X2pts_ = getVal(v2)
 @cast X2pts[i,j] := X2pts_[j][i]
 # NOTE, SUPER IMPORTANT, predictbelief returns full dimension points (even if only partials are sent in for proposals)
-val_, = predictbelief(fg, v2, [f4], N=N)
+valB, = propagateBelief(fg, v2, [f4], N=N)
+val_ = getPoints(valB, false)
 @cast val[i,j] := val_[j][i]
 @show X2pts_[1]';
 @show val_[1]';
@@ -275,7 +276,8 @@ val_, = predictbelief(fg, v2, [f4], N=N)
 # partial pairwise
 X2pts_ = getVal(v2)
 @cast X2pts[i,j] := X2pts_[j][i]
-val_, = predictbelief(fg, v2, [f3], N=N)
+valB, = propagateBelief(fg, v2, [f3], N=N)
+val_ = getPoints(valB, false)
 @cast val[i,j] := val_[j][i]
 @test norm(X2pts[1,:] - val[1,:]) < 1e-10
 @test 0.0 < norm(X2pts[2,:] - val[2,:])
@@ -286,7 +288,8 @@ val2_ = getVal(v1)
 ##
 
 # combination of partials
-val_, = predictbelief(fg, v2, [f3;f4], N=N)
+valB, = propagateBelief(fg, v2, [f3;f4], N=N)
+val_ = getPoints(valB, false)
 @cast val[i,j] := val_[j][i]
 # plotKDE(kde!(val),levels=3)
 @test norm(Statistics.mean(val,dims=2)[1] .- [-20.0]) < 1
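
The three test-file hunks above all apply the same migration, worth spelling out once: predictbelief used to hand back the proposal points directly, while propagateBelief returns a belief object from which the points are pulled with getPoints. A usage sketch, assuming the fg, v2, f3 and f4 objects built earlier in testpartialconstraint.jl and the particle count N used throughout the test:

    # Old pattern (pre-commit): points returned directly.
    # val_, = predictbelief(fg, v2, [f3; f4], N=N)

    # New pattern (this commit): get a belief object, then extract its points.
    # The `false` flag appears to request full-dimension rather than partial points,
    # matching the behaviour the NOTE comment above attributes to predictbelief.
    valB, = propagateBelief(fg, v2, [f3; f4], N=N)
    val_  = getPoints(valB, false)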
