
Commit 5a0b7e8

Affie and dehann committed
Changes from worksession
Co-authored-by: Dehann Fourie <[email protected]>
1 parent 7149783 commit 5a0b7e8


10 files changed

+189
-144
lines changed


src/Deprecated.jl

Lines changed: 6 additions & 0 deletions
@@ -119,11 +119,17 @@ function solveGraphParametric2(
   return d, result, flatvar.idx, Σ
 end
 
+##==============================================================================
+## Deprecate code below before v0.35
+##==============================================================================
+
+@deprecate _prepCCW(w...;kw...) _createCCW(w...;kw...)
 
 ##==============================================================================
 ## Deprecate code below before v0.34
 ##==============================================================================
 
+
 # function CommonConvWrapper(
 #   usrfnc::T,
 #   fullvariables, #::Tuple ::Vector{<:DFGVariable};
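The new `@deprecate` line keeps old call sites working while the rename lands. Below is a minimal standalone sketch of the same forwarding pattern, using hypothetical function names rather than anything from this package:

# stand-in replacement function (hypothetical names, not part of this package)
_newName(x; scale = 1) = scale * x

# the old name forwards to the new one and emits a deprecation warning on use
@deprecate _oldName(w...; kw...) _newName(w...; kw...)

_oldName(21; scale = 2)   # deprecation warning, then returns 42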

src/Serialization/services/DispatchPackedConversions.jl

Lines changed: 3 additions & 3 deletions
@@ -37,12 +37,12 @@ function reconstFactorData(
   vars = map(f -> getVariable(dfg, f), varOrder)
   userCache = preambleCache(dfg, vars, usrfnc)
 
-  # TODO -- improve _prepCCW for hypotheses and certainhypo field recovery when deserializing
+  # TODO -- improve _createCCW for hypotheses and certainhypo field recovery when deserializing
   # reconstitute from stored data
   # FIXME, add threadmodel=threadmodel
   # FIXME https://github.com/JuliaRobotics/DistributedFactorGraphs.jl/issues/590#issuecomment-776838053
   # FIXME dont know what manifolds to use in ccw
-  ccw = _prepCCW(
+  ccw = _createCCW(
     vars,
     usrfnc;
     multihypo,
@@ -145,7 +145,7 @@ function rebuildFactorMetadata!(
   end
 
   #... Copying neighbor data into the factor?
-  # JT TODO it looks like this is already updated in getDefaultFactorData -> _prepCCW
+  # JT TODO it looks like this is already updated in getDefaultFactorData -> _createCCW
   # factormetadata.variableuserdata is deprecated, remove when removing deprecation
   # for i in 1:Threads.nthreads()
   #   ccw_new.fnc.cpt[i].factormetadata.variableuserdata = deepcopy(neighborUserData)

src/entities/FactorOperationalMemory.jl

Lines changed: 4 additions & 3 deletions
@@ -83,7 +83,7 @@ Related
 Base.@kwdef struct CommonConvWrapper{
   T <: AbstractFactor,
   VT <: Tuple,
-  NTP <: Tuple,
+  TP <: Tuple,
   CT,
   AM <: AbstractManifold,
   HP <: Union{Nothing, <:Distributions.Categorical{Float64, Vector{Float64}}},
@@ -98,8 +98,9 @@ Base.@kwdef struct CommonConvWrapper{
   fullvariables::VT
   # shortcuts to numerical containers
   """ Numerical containers for all connected variables. Hypo selection needs to be passed
-  to each hypothesis evaluation event on user function via CalcFactor, #1321 """
-  varValsAll::NTP
+  to each hypothesis evaluation event on user function via CalcFactor, #1321.
+  Points directly at the variable VND.val (not a deepcopy). """
+  varValsAll::TP
   """ dummy cache value to be deep copied later for each of the CalcFactor instances """
   dummyCache::CT = nothing
   # derived config parameters for this factor
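The docstring addition makes the aliasing explicit: `varValsAll` holds the variables' own point containers, not copies. A toy sketch of that semantics, using plain vectors instead of the package's VariableNodeData:

# two variables' point containers, standing in for VND.val (toy data)
vals_x1 = [1.0, 2.0]
vals_x2 = [3.0, 4.0]

varValsAll = (vals_x1, vals_x2)   # tuple of references, analogous to the struct field
varValsAll[1][1] = 99.0           # writing through the tuple mutates vals_x1 itself
@show vals_x1                     # [99.0, 2.0]

detached = deepcopy.(varValsAll)  # a deepcopy would be needed to protect the originals
detached[2][1] = 0.0
@show vals_x2                     # still [3.0, 4.0]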

src/services/ApproxConv.jl

Lines changed: 22 additions & 7 deletions
@@ -12,23 +12,36 @@ function approxConvBelief(
   skipSolve::Bool = false,
 )
   #
-  v1 = getVariable(dfg, target)
-  N = N == 0 ? getNumPts(v1; solveKey = solveKey) : N
+  v_trg = getVariable(dfg, target)
+  N = N == 0 ? getNumPts(v_trg; solveKey = solveKey) : N
+  # approxConv should push its result into duplicate memory destination, NOT the variable.VND.val itself. ccw.varValsAll always points directly to variable.VND.val
   # points and infoPerCoord
-  pts, ipc = evalFactor(dfg, fc, v1.label, measurement; solveKey, N, skipSolve, nullSurplus)
+
+  pts, ipc = evalFactor(
+    dfg,
+    fc,
+    v_trg.label,
+    measurement;
+    solveKey,
+    N,
+    skipSolve,
+    nullSurplus
+  )
 
   len = length(ipc)
   mask = 1e-14 .< abs.(ipc)
   partl = collect(1:len)[mask]
 
   # is the convolution infoPerCoord full or partial
-  if sum(mask) == len
+  res = if sum(mask) == len
     # not partial
-    return manikde!(getManifold(getVariable(dfg, target)), pts; partial = nothing)
+    manikde!(getManifold(getVariable(dfg, target)), pts; partial = nothing)
   else
     # is partial
-    return manikde!(getManifold(getVariable(dfg, target)), pts; partial = partl)
+    manikde!(getManifold(getVariable(dfg, target)), pts; partial = partl)
   end
+
+  return res
 end
 
 approxConv(w...; kw...) = getPoints(approxConvBelief(w...; kw...), false)
@@ -96,7 +109,9 @@ function approxConvBelief(
   neMsk = exists.(tfg, varLbls) .|> x -> xor(x, true)
   # put the non-existing variables into the temporary graph `tfg`
   # bring all the solveKeys too
-  addVariable!.(tfg, getVariable.(dfg, varLbls[neMsk]))
+  for v in getVariable.(dfg, varLbls[neMsk])
+    addVariable!(tfg, v.label, getVariableType(v))
+  end
   # variables adjacent to the shortest path should be initialized from dfg
   setdiff(varLbls, path[xor.(fctMsk, true)]) .|>
     x -> initVariable!(tfg, x, getBelief(dfg, x))
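For reference, the partial/full decision in approxConvBelief above keys off which coordinates carry information. A toy run of the same masking expressions, with made-up ipc values and no graph needed:

ipc   = [0.9, 0.0, 0.4]        # info per coordinate, as returned by evalFactor
len   = length(ipc)
mask  = 1e-14 .< abs.(ipc)     # coordinates with non-negligible information
partl = collect(1:len)[mask]   # candidate partial dimensions -> [1, 3]
sum(mask) == len               # false here, so the belief would be built as partial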

src/services/CalcFactor.jl

Lines changed: 55 additions & 63 deletions
@@ -220,58 +220,42 @@ Function returns with ARR[sfidx] pointing at newly allocated deepcopy of the
 existing values in getVal(Xi[.label==solvefor]).
 
 Notes
+- 2023Q2 intended use, only create VarValsAll the first time a factor added/reconstructed
 - Return values `sfidx` is the element in ARR where `Xi.label==solvefor` and
 - `maxlen` is length of all (possibly resampled) `ARR` contained particles.
 - `Xi` is order sensitive.
 - for initialization, solveFor = Nothing.
 - `P = getPointType(<:InferenceVariable)`
-
-DevNotes
-- FIXME ARR internally should become a NamedTuple
 """
-function _prepParamVec(
-  Xi::Vector{<:DFGVariable},
-  solvefor::Union{Nothing, Symbol},
-  N::Int = 0;
+function _createVarValsAll(
+  Xi::Vector{<:DFGVariable};
   solveKey::Symbol = :default,
 )
   #
-  # FIXME refactor to new NamedTuple instead
-  varParamsAll = getVal.(Xi; solveKey)
-  # Xi_labels = getLabel.(Xi)
-  sfidx = if isnothing(solvefor)
-    0
-  else
-    findfirst(==(solvefor), getLabel.(Xi))
-  end
-  # sfidx = isnothing(sfidx) ? 0 : sfidx
+  # Note, NamedTuple once upon a time created way too much recompile load on repeat solves, #1564
+  varValsAll = map(var_i->getVal(var_i; solveKey), tuple(Xi...))
 
-  # this line does nothing...
-  # maxlen = N # FIXME see #105
-
-  LEN = length.(varParamsAll)
-  maxlen = maximum([N; LEN])
+  # how many points
+  LEN = length.(varValsAll)
+  maxlen = maximum(LEN)
+  # NOTE, forcing maxlen to N results in errors (see test/testVariousNSolveSize.jl) see #105
+  # maxlen = N == 0 ? maxlen : N
 
+  # allow each variable to have a different number of points, which is resized during compute here
   # resample variables with too few kernels (manifolds points)
   SAMP = LEN .< maxlen
   for i = 1:length(Xi)
     if SAMP[i]
       Pr = getBelief(Xi[i], solveKey)
-      _resizePointsVector!(varParamsAll[i], Pr, maxlen)
+      _resizePointsVector!(varValsAll[i], Pr, maxlen)
     end
   end
 
   # TODO --rather define reusable memory for the proposal
   # we are generating a proposal distribution, not direct replacement for existing memory and hence the deepcopy.
-  if sfidx > 0
-    # FIXME, POSSIBLE SOURCE OF HUGE MEMORY CONSUMPTION ALLOCATION
-    varParamsAll[sfidx] = deepcopy(varParamsAll[sfidx])
-  end
+  # POSSIBLE SOURCE OF HUGE MEMORY CONSUMPTION ALLOCATION
 
-  varValsAll = tuple(varParamsAll...)
-  # FIXME, forcing maxlen to N results in errors (see test/testVariousNSolveSize.jl) see #105
-  # maxlen = N == 0 ? maxlen : N
-  return varValsAll, maxlen, sfidx
+  return varValsAll
 end
 
 """
@@ -352,10 +336,12 @@ end
 $SIGNATURES
 
 Notes
+- _createCCW is likely only used when adding or reconstructing a new factor in the graph,
+- else use _updateCCW
 - Can be called with `length(Xi)==0`
 """
-function _prepCCW(
-  Xi::Vector{<:DFGVariable},
+function _createCCW(
+  Xi::AbstractVector{<:DFGVariable},
   usrfnc::T;
   multihypo::Union{Nothing, <:Distributions.Categorical} = nothing,
   nullhypo::Real = 0.0,
@@ -378,7 +364,8 @@ function _prepCCW(
   end
 
   # TODO check no Anys, see #1321
-  _varValsAll, maxlen, sfidx = _prepParamVec(Xi, nothing, 0; solveKey) # Nothing for init.
+  # NOTE, _varValsAll is only a reference to the actual VND.val memory of each variable
+  _varValsAll = _createVarValsAll(Xi; solveKey)
 
   manifold = getManifold(usrfnc)
   # standard factor metadata
@@ -435,7 +422,7 @@ function _prepCCW(
   pttypes = getVariableType.(Xi) .|> getPointType
   PointType = 0 < length(pttypes) ? pttypes[1] : Vector{Float64}
   if !isconcretetype(PointType)
-    @warn "_prepCCW PointType is not concrete $PointType" maxlog=50
+    @warn "_createCCW PointType is not concrete $PointType" maxlog=50
   end
 
   # PointType[],
@@ -487,40 +474,43 @@ approximate convolution computations.
 DevNotes
 - TODO consolidate with others, see https://github.com/JuliaRobotics/IncrementalInference.jl/projects/6
 """
-function _updateCCW!(
+function _beforeSolveCCW!(
   F_::Type{<:AbstractRelative},
   ccwl::CommonConvWrapper{F},
-  Xi::AbstractVector{<:DFGVariable},
-  solvefor::Symbol,
+  variables::AbstractVector{<:DFGVariable},
+  # solvefor::Symbol,
   N::Integer;
   measurement = Vector{Tuple{}}(),
   needFreshMeasurements::Bool = true,
   solveKey::Symbol = :default,
 ) where {F <: AbstractFactor} # F might be Mixture
   #
-  if length(Xi) !== 0
+  if length(variables) !== 0
     nothing
   else
-    @debug("cannot prep ccw.param list with length(Xi)==0, see DFG #590")
+    @debug("cannot prep ccw.param list with length(variables)==0, see DFG #590")
   end
 
-  # FIXME, order of fmd ccwl cf are a little weird and should be revised.
-  # FIXME maxlen should parrot N (barring multi-/nullhypo issues)
-  _varValsAll, maxlen, sfidx = _prepParamVec(Xi, solvefor, N; solveKey)
+  # TBD, order of fmd ccwl cf are a little weird and should be revised.
+  # TODO, maxlen should parrot N (barring multi-/nullhypo issues)
+  # set the 'solvefor' variable index -- i.e. which connected variable of the factor is being computed in this convolution.
+  # ccwl.varidx[] = findfirst(==(solvefor), getLabel.(variables))
+  # everybody use maxlen number of points in belief function estimation
+  maxlen = maximum((N, length.(ccwl.varValsAll)...,))
 
-  # TODO, ensure all values (not just multihypothesis) is correctly used from here
-  for (i,varVal) in enumerate(_varValsAll)
-    resize!(ccwl.varValsAll[i],length(varVal))
-    ccwl.varValsAll[i][:] = varVal
-  end
+  ## PLAN B, make deep copy of ccwl.varValsAll[ccwl.varidx[]] just before the numerical solve
 
-  # set the 'solvefor' variable index -- i.e. which connected variable of the factor is being computed in this convolution.
-  ccwl.varidx[] = sfidx
+  # maxlen, ccwl.varidx[] = _updateParamVec(variables, solvefor, ccwl.varValsAll, N; solveKey)
+  # # TODO, ensure all values (not just multihypothesis) is correctly used from here
+  # for (i,varVal) in enumerate(_varValsAll)
+  #   resize!(ccwl.varValsAll[i],length(varVal))
+  #   ccwl.varValsAll[i][:] = varVal #TODO check this: this looks like it will effectively result in vnd.val memory being overwritten
+  # end
 
   # TODO better consolidation still possible
   # FIXME ON FIRE, what happens if this is a partial dimension factor? See #1246
-  # FIXME, confirm this is hypo sensitive selection from Xi, better to use double indexing for clarity getDimension(ccw.fullvariables[hypoidx[sfidx]])
-  xDim = getDimension(getVariableType(Xi[sfidx])) # ccwl.varidx[]
+  # FIXME, confirm this is hypo sensitive selection from variables, better to use double indexing for clarity getDimension(ccw.fullvariables[hypoidx[ccwl.varidx[]]])
+  xDim = getDimension(getVariableType(variables[ccwl.varidx[]])) # ccwl.varidx[]
   # ccwl.xDim = xDim
   # TODO maybe refactor different type or api call?
 
@@ -540,23 +530,25 @@ function _updateCCW!(
   # calculate new gradients
   # J = ccwl.gradients(measurement..., pts...)
 
-  return sfidx, maxlen
+  return maxlen
 end
 
-function _updateCCW!(
+function _beforeSolveCCW!(
   F_::Type{<:AbstractPrior},
   ccwl::CommonConvWrapper{F},
   Xi::AbstractVector{<:DFGVariable},
-  solvefor::Symbol,
+  # solvefor::Symbol,
   N::Integer;
   measurement = Vector{Tuple{}}(),
   needFreshMeasurements::Bool = true,
   solveKey::Symbol = :default,
 ) where {F <: AbstractFactor} # F might be Mixture
   # FIXME, NEEDS TO BE CLEANED UP AND WORK ON MANIFOLDS PROPER
   # fnc = ccwl.usrfnc!
-  sfidx = findfirst(getLabel.(Xi) .== solvefor)
-  @assert sfidx == 1 "Solving on Prior with CCW should have sfidx=1, priors are unary factors."
+  # sfidx = findfirst(getLabel.(Xi) .== solvefor)
+  @assert ccwl.varidx[] == 1 "Solving on Prior with CCW should have sfidx=1, priors are unary factors."
+  # @assert sfidx == 1 "Solving on Prior with CCW should have sfidx=1, priors are unary factors."
+  # ccwl.varidx[] = sfidx
   # sfidx = 1 # why hardcoded to 1, maybe for only the AbstractPrior case here
 
   # setup the partial or complete decision variable dimensions for this ccwl object
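The commented "PLAN B" note in the relative-factor method above sketches the intended alternative: since `ccwl.varValsAll` now aliases the variables' own memory, the solve-for slot would be deep copied just before the numerical solve. A rough illustration with toy containers, not the actual CCW type:

varValsAll = ([randn(2) for _ in 1:5], [randn(2) for _ in 1:5])
varidx     = 2                           # the 'solvefor' slot for this convolution

scratch = deepcopy(varValsAll[varidx])   # detached destination for solver updates
scratch[1] .= 0.0                        # solver writes land in the copy ...
@show varValsAll[varidx][1]              # ... while the original memory is untouched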
@@ -570,30 +562,30 @@ function _updateCCW!(
   # update ccwl.measurement values
   updateMeasurement!(ccwl, maxlen; needFreshMeasurements, measurement, _allowThreads=true)
 
-  return sfidx, maxlen
+  return maxlen
 end
 
 # TODO, can likely deprecate this
-function _updateCCW!(
+function _beforeSolveCCW!(
   ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
   Xi::AbstractVector{<:DFGVariable},
-  solvefor::Symbol,
+  # solvefor::Symbol,
   N::Integer;
   kw...,
 ) where {N_, F <: AbstractRelative, S, T}
   #
-  return _updateCCW!(F, ccwl, Xi, solvefor, N; kw...)
+  return _beforeSolveCCW!(F, ccwl, Xi, N; kw...)
 end
 
-function _updateCCW!(
+function _beforeSolveCCW!(
   ccwl::Union{CommonConvWrapper{F}, CommonConvWrapper{Mixture{N_, F, S, T}}},
   Xi::AbstractVector{<:DFGVariable},
-  solvefor::Symbol,
+  # solvefor::Symbol,
   N::Integer;
   kw...,
 ) where {N_, F <: AbstractPrior, S, T}
   #
-  return _updateCCW!(F, ccwl, Xi, solvefor, N; kw...)
+  return _beforeSolveCCW!(F, ccwl, Xi, N; kw...)
 end
 
 
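The `_beforeSolveCCW!` methods keep the established pattern of re-dispatching on the wrapped factor type: the outer method peels the factor type out of the wrapper and forwards it as a `::Type` argument, so relatives and priors take different preparation paths from one entry point. A self-contained sketch of that pattern with placeholder types (none of these are package types):

abstract type ToyFactor end
struct ToyRelative <: ToyFactor end
struct ToyPrior    <: ToyFactor end

struct ToyWrapper{F <: ToyFactor}
  usrfnc::F
end

# specialized preparation paths, selected by the factor type passed as ::Type
_prepare!(::Type{<:ToyRelative}, w::ToyWrapper) = "relative preparation"
_prepare!(::Type{<:ToyPrior},    w::ToyWrapper) = "prior preparation"

# single entry point peels F out of the wrapper and forwards
_prepare!(w::ToyWrapper{F}) where {F} = _prepare!(F, w)

@show _prepare!(ToyWrapper(ToyRelative()))   # "relative preparation"
@show _prepare!(ToyWrapper(ToyPrior()))      # "prior preparation"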
src/services/DeconvUtils.jl

Lines changed: 6 additions & 6 deletions
@@ -55,7 +55,7 @@ function approxDeconv(
   fctSmpls = deepcopy(measurement)
 
   # TODO assuming vector on only first container in measurement::Tuple
-  makeTarget = (i) -> measurement[i] # TODO does not support copy-primitive types like Float64, only Ref()
+  makeTarget = (smpidx) -> measurement[smpidx] # TODO does not support copy-primitive types like Float64, only Ref()
   # makeTarget = (i) -> view(measurement[1][i],:)
   # makeTarget = (i) -> view(measurement[1], :, i)
 
@@ -73,22 +73,22 @@
 
   for idx = 1:N
     # towards each particle in their own thread (not 100% ready yet, factors should be separate memory)
-    targeti_ = makeTarget(idx)
+    target_smpl = makeTarget(idx)
 
     # TODO must first resolve hypothesis selection before unrolling them -- deferred #1096
     resize!(ccw.activehypo, length(hyporecipe.activehypo[2][2]))
     ccw.activehypo[:] = hyporecipe.activehypo[2][2]
 
-    onehypo!, _ = _buildCalcFactorLambdaSample(ccw, idx, targeti_, measurement)
+    onehypo!, _ = _buildCalcFactorLambdaSample(ccw, idx, target_smpl, measurement)
     #
 
     # lambda with which to find best measurement values
-    hypoObj = (tgt) -> (targeti_ .= tgt; onehypo!())
+    hypoObj = (tgt) -> (target_smpl .= tgt; onehypo!())
 
     # find solution via SubArray view pointing to original memory location
     if fcttype isa AbstractManifoldMinimize
       sfidx = ccw.varidx[]
-      targeti_ .= _solveLambdaNumericMeas(
+      target_smpl .= _solveLambdaNumericMeas(
        fcttype,
        hypoObj,
        res_,
@@ -97,7 +97,7 @@
        islen1,
      )
    else
-      targeti_ .= _solveLambdaNumeric(fcttype, hypoObj, res_, measurement[idx], islen1)
+      target_smpl .= _solveLambdaNumeric(fcttype, hypoObj, res_, measurement[idx], islen1)
    end
  end
 
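The renamed `target_smpl` still works through the same write-then-evaluate closure. A toy version of that pattern, with an invented residual function standing in for the package's `onehypo!`:

target_smpl = [0.0, 0.0]                              # one measurement sample (aliased memory)
residual()  = sum(abs2, target_smpl .- [1.0, 2.0])    # stand-in for onehypo!()

hypoObj = (tgt) -> (target_smpl .= tgt; residual())   # write candidate in place, then evaluate

@show hypoObj([1.0, 2.0])   # 0.0, and target_smpl now holds [1.0, 2.0]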
