1 parent 61f0fd9 commit 01a9225
.github/workflows/runtests.yml

```diff
@@ -7,7 +7,7 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        julia-version: ['1.6', '1.7', '~1.8.0-0']
+        julia-version: ['1.6', '1.7', '1.8']
         os: [ubuntu-latest]
         provider: ['mkl', 'fftw']
       fail-fast: false
```
src/CMBLensing.jl

```diff
@@ -84,7 +84,7 @@ import Random: randn!
 export
     @⌛, @show⌛, @ismain, @namedtuple, @repeated, @unpack, @cpu!, @gpu!, @cu!, @fwdmodel,
     animate, argmaxf_lnP, argmaxf_logpdf, AzFourier, BandPassOp, BaseDataSet, batch, batch_index, batch_length,
-    batch_map, batch_pmap, BlockDiagEquiRect, beamCℓs, CachedLenseFlow, camb, cov_to_Cℓ, cpu, Cℓ_2D,
+    batch_map, batch_pmap, BlockDiagEquiRect, beamCℓs, cache, CachedLenseFlow, camb, cov_to_Cℓ, cpu, Cℓ_2D,
     Cℓ_to_Cov, DataSet, DerivBasis, diag, Diagonal, DiagOp, dot, EBFourier, EBMap, expnorm,
     Field, FieldArray, fieldinfo, FieldMatrix, FieldOrOpArray, FieldOrOpMatrix, FieldOrOpRowVector,
     FieldOrOpVector, FieldRowVector, FieldTuple, FieldVector, FieldVector,
```
src/dataset.jl

```diff
@@ -329,7 +329,7 @@ function load_sim(;
 
     if Nbatch != nothing
         d = ds.d *= batch(ones(Int,Nbatch))
-        ds.L = alloc_cache(L(ϕ*batch(ones(Int,Nbatch))), ds.d)
+        ds.L = precompute!!(L(ϕ*batch(ones(Int,Nbatch))), ds.d)
     end
 
     return (;f, f̃, ϕ, d, ds, ds₀=ds(), Cℓ, proj)
```
src/deprecated.jl

```diff
@@ -1,8 +1,8 @@
 
 ### argmaxf_lnP
 
-argmaxf_lnP(ϕ::Field, ds::DataSet; kwargs...) = argmaxf_lnP(cache(ds.L(ϕ),ds.d), (;), ds; kwargs...)
-argmaxf_lnP(ϕ::Field, θ, ds::DataSet; kwargs...) = argmaxf_lnP(cache(ds.L(ϕ),ds.d), θ, ds; kwargs...)
+argmaxf_lnP(ϕ::Field, ds::DataSet; kwargs...) = argmaxf_lnP(precompute!!(ds.L(ϕ),ds.d), (;), ds; kwargs...)
+argmaxf_lnP(ϕ::Field, θ, ds::DataSet; kwargs...) = argmaxf_lnP(precompute!!(ds.L(ϕ),ds.d), θ, ds; kwargs...)
 
 function argmaxf_lnP(
     Lϕ,
@@ -107,3 +107,9 @@ Base.@deprecate white_noise(f::Field, rng::AbstractRNG) randn!(rng, f)
 ### Cℓs
 
 Base.@deprecate InterpolatedCℓs(args...) Cℓs(args...)
+
+
+### cache
+
+Base.@deprecate cache(L, f) precompute!!(L, f)
+Base.@deprecate cache!(L, f) precompute!!(L, f)
```
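For readers unfamiliar with the mechanism, `Base.@deprecate old new` keeps the old spelling working: it defines `old` as a thin method that emits a deprecation warning and forwards to `new`. A toy sketch of the same pattern, using made-up function names that are not part of CMBLensing:

```julia
# Toy illustration of Base.@deprecate; the function names here are invented
# for the example and are not CMBLensing API.
new_name(x) = x .+ 1                      # the replacement function
Base.@deprecate old_name(x) new_name(x)   # old_name(x) now forwards to new_name(x)

old_name([1, 2, 3])   # emits a deprecation warning (when depwarns are enabled)
                      # and returns [2, 3, 4]
```

So after this commit, existing calls to `cache(L, f)` and `cache!(L, f)` keep working but warn and forward to `precompute!!(L, f)`.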
src/lenseflow.jl

```diff
@@ -2,7 +2,7 @@
 abstract type LenseFlowOp{S<:ODESolver,T} <: FlowOpWithAdjoint{T} end
 
 # `L = LenseFlow(ϕ)` just creates a wrapper holding ϕ. Then when you do `L*f` or
-# `cache(L,f)` we create a CachedLenseFlow object which holds all the
+# `precompute!!(L,f)` we create a CachedLenseFlow object which holds all the
 # precomputed quantities and preallocated memory needed to do the lense.
 
 """
```
test/runtests.jl

```diff
@@ -540,7 +540,7 @@ end
         ## S0
         Cf = maybegpu(Cℓ_to_Cov(:I, proj, Cℓ.TT))
         @test (f = @inferred simulate(rng,Cf)) isa FlatS0
-        @test (Lϕ = cache(LenseFlow(ϕ),f)) isa CachedLenseFlow
+        @test (Lϕ = precompute!!(LenseFlow(ϕ),f)) isa CachedLenseFlow
         @test (@inferred Lϕ*f) isa FlatS0
         # adjoints
         f,g = simulate(rng,Cf),simulate(rng,Cf)
@@ -552,7 +552,7 @@ end
         # S2 lensing
         Cf = maybegpu(Cℓ_to_Cov(:P, proj, Cℓ.EE, Cℓ.BB))
         @test (f = @inferred simulate(rng,Cf)) isa FlatS2
         @test (@inferred Lϕ*f) isa FlatS2
@@ -574,7 +574,7 @@ end
 @testset "Posterior" begin
 
     Cℓ = camb()
-    L = LenseFlow{RK4Solver{7}}
+    L = LenseFlow(7)
     T = Float64
 
     @testset "Nside = $Nside" for Nside in Nsides_big
```