 function distmesh(fdist::Function,
                   fh::Union{Function,HUniform},
                   h::Number,
-                  setup::DistMeshSetup=DistMeshSetup();
+                  setup::AbstractDistMeshAlgorithm=DistMeshSetup();
                   origin=GeometryBasics.Point{3,Float64}(-1,-1,-1),
                   widths=GeometryBasics.Point{3,Float64}(2,2,2),
                   fix=nothing,
@@ -209,3 +209,177 @@ function distmesh(fdist::Function,
         end
     end
 end
+
+function distmesh(fdist::Function,
+                  fh,
+                  h::Number,
+                  setup::DistMeshQuality,
+                  origin,
+                  widths,
+                  fix,
+                  ::Val{stats},
+                  ::Type{VertType}) where {VertType, stats}
+
+    geps = 1e-1*h + setup.iso # parameter for filtering tets outside the bounds and for deciding which nodes count toward the max displacement
+
+    # static parameter info
+    non_uniform = isa(fh, Function) # so we can elide fh calls
+    have_fixed = !isa(fix, Nothing)
+
+    #ptol=.001; ttol=.1; L0mult=1+.4/2^(dim-1); deltat=.2; geps=1e-1*h;
+
+    # initialize Vertex Arrays
+    # the first N points in the array correspond to 'fix' points that do not move
+    if have_fixed
+        p = copy(fix)
+    else
+        p = VertType[]
+    end
+
+    pt_dists = map(fdist, p) # cache of signed distances at each point so we can minimize fdist calls
+
+    # add points to p based on the initial distribution
+    if setup.distribution === :regular
+        simplecubic!(fdist, p, pt_dists, h, setup.iso, origin, widths, VertType)
+    elseif setup.distribution === :packed
+        # face-centered cubic point distribution
+        facecenteredcubic!(fdist, p, pt_dists, h, setup.iso, origin, widths, VertType)
+    end
+
+    # initialize arrays
+    pair_set = Set{Tuple{Int32,Int32}}() # set used to ensure we have a unique set of edges
+    pair = Tuple{Int32,Int32}[] # edge indices (Int32 since we use TetGen)
+    dp = zeros(VertType, length(p)) # displacement at each node
+    bars = VertType[] # the vector of each edge
+    L = eltype(VertType)[] # length (norm) of each edge vector
+    non_uniform && (L0 = eltype(VertType)[]) # desired edge length computed by fh (edge length function)
+    t = GeometryBasics.SimplexFace{4,Int32}[] # tetrahedra indices from delaunay triangulation
+    maxmove = typemax(eltype(VertType)) # stores an iteration's max movement, used to trigger retriangulation
+
+    # arrays for tracking quality metrics
+    tris = NTuple{3,Int32}[] # triangles used for quality checks
+    triset = Set{NTuple{3,Int32}}() # set for triangles to ensure uniqueness
+    qualities = eltype(VertType)[]
+
+    # information on each iteration
+    statsdata = DistMeshStatistics()
+    lcount = 0 # iteration counter
+
+    @inbounds while true
+        # if the max movement was large, retriangulate
+        if maxmove>setup.ttol*h
+
+            delaunayn!(fdist, p, t, geps) # compute a new delaunay triangulation
+
+            tet_to_edges!(pair, pair_set, t) # Describe each edge by a unique pair of nodes
+
+            # resize arrays for new pair counts
+            resize!(bars, length(pair))
+            resize!(L, length(pair))
+            non_uniform && resize!(L0, length(pair))
+
+            stats && push!(statsdata.retriangulations, lcount)
+        end
+
+        # compute edge lengths (L) and adaptive edge lengths (L0)
+        # the Lp norm (p=3) is partially computed here
+        Lsum = zero(eltype(L))
+        L0sum = non_uniform ? zero(eltype(L0)) : length(pair)
+        for i in eachindex(pair)
+            pb = pair[i]
+            b1 = p[pb[1]]
+            b2 = p[pb[2]]
+            barvec = b1 - b2 # bar vector
+            bars[i] = barvec
+            L[i] = sqrt(sum(barvec.^2)) # length
+            non_uniform && (L0[i] = fh((b1+b2)./2))
+            Lsum = Lsum + L[i].^3
+            non_uniform && (L0sum = L0sum + L0[i].^3)
+        end
+
+        # zero out force at each node
+        for i in eachindex(dp)
+            dp[i] = zero(VertType)
+        end
+
+        # this is not hoisted correctly in the loop so we initialize here
+        # finish computing the Lp norm (p=3)
+        lscbrt = (1+(0.4/2^2))*cbrt(Lsum/L0sum)
+
+        # Move mesh points based on edge lengths L and forces F
+        for i in eachindex(pair)
+            L0_f = non_uniform ? L0[i].*lscbrt : lscbrt
+            # compute force vectors
+            F = L0_f-L[i]
+            # edges are not allowed to pull, only repel
+            if F > zero(eltype(L))
+                FBar = bars[i].*F./L[i]
+                # add the force vector to the node
+                b1 = pair[i][1]
+                b2 = pair[i][2]
+                dp[b1] = dp[b1] .+ FBar
+                dp[b2] = dp[b2] .- FBar
+            end
+        end
+
+        # Zero out forces on fix points
+        if have_fixed
+            for i in eachindex(fix)
+                dp[i] = zero(VertType)
+            end
+        end
+
+        # apply point forces and
+        # bring outside points back to the boundary
+        maxdp = typemin(eltype(VertType))
+        maxmove = typemin(eltype(VertType))
+        for i in eachindex(p)
+
+            p0 = p[i] # store original point location
+            p[i] = p[i].+setup.deltat.*dp[i] # apply displacements to points
+
+            # Check if we are verifiably within the bounds and use this value
+            # to avoid recomputing fdist. This increases performance greatly on
+            # complex distance functions and large node counts
+            move = sqrt(sum((p[i]-p0).^2)) # compute movement from the displacement
+            d_est = pt_dists[i] + move # apply the movement to the cached distance
+            d = d_est < -geps ? d_est : fdist(p[i]) # use the estimate if it is safely inside, otherwise compute the exact distance
+            pt_dists[i] = d # store distance
+
+            if d < -geps
+                maxdp = max(maxdp, setup.deltat*sqrt(sum(dp[i].^2)))
+            end
+
+            if d <= setup.iso
+                maxmove = max(move,maxmove)
+                continue
+            end
+
+            # bring points back to boundary if outside using central difference
+            p[i] = p[i] .- centraldiff(fdist,p[i]).*(d+setup.iso)
+            maxmove = max(sqrt(sum((p[i]-p0).^2)), maxmove)
+            pt_dists[i] = setup.iso # ideally the projected point now lies on the iso surface
+        end
+
+        # increment iteration counter
+        lcount = lcount + 1
+
+        # save iteration stats
+        if stats
+            push!(statsdata.maxmove,maxmove)
+            push!(statsdata.maxdp,maxdp)
+            triangle_qualities!(tris,triset,qualities,p,t)
+            sort!(qualities) # sort for median calc and robust summation
+            mine, maxe = extrema(qualities)
+            push!(statsdata.average_qual, sum(qualities)/length(qualities))
+            push!(statsdata.median_qual, qualities[round(Int,length(qualities)/2)])
+            push!(statsdata.minimum_qual, mine)
+            push!(statsdata.maximum_qual, maxe)
+        end
+
+        # Termination criterion
+        if maxdp<setup.ptol*h
+            return p, t, statsdata
+        end
+    end
+end
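
For orientation, here is a minimal usage sketch of the public entry point shown in the first hunk. The sphere distance function, the zero-argument HUniform() constructor, and the returned (points, tetrahedra, statistics) ordering are assumptions inferred from the signature and the return statement above, not confirmed API documentation.

using DistMesh

# signed distance to a sphere of radius 0.5 centered at the origin
# (illustrative geometry, not from this commit)
sphere(v) = sqrt(sum(v .^ 2)) - 0.5

# uniform sizing with target edge length h = 0.1 over the default box
# origin = (-1,-1,-1), widths = (2,2,2); HUniform() as a zero-argument
# marker and the returned tuple ordering are assumptions
points, tets, stats = distmesh(sphere, HUniform(), 0.1)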