diff --git a/.github/workflows/Format.yml b/.github/workflows/Format.yml new file mode 100644 index 00000000..c718b1e1 --- /dev/null +++ b/.github/workflows/Format.yml @@ -0,0 +1,105 @@ +name: 'Format' + +on: + pull_request_target: + paths: ['**/*.jl'] + types: [opened, synchronize, reopened, ready_for_review] + +permissions: + contents: read + actions: write + pull-requests: write + +jobs: + runic: + runs-on: ubuntu-latest + if: github.event.pull_request.draft == false + steps: + - name: Check out repository + uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} + fetch-depth: 0 + + - name: Add upstream remote + run: | + git remote add upstream https://github.com/${{ github.repository }} + git fetch upstream + + - name: Setup Julia + uses: julia-actions/setup-julia@v2 + with: + version: '1' + arch: 'x64' + - uses: julia-actions/cache@v2 + + - name: Install Runic + run: | + julia --project=@runic -e 'using Pkg; Pkg.add("Runic")' + curl -o git-runic https://raw.githubusercontent.com/fredrikekre/Runic.jl/master/bin/git-runic + chmod +x git-runic + sudo mv git-runic /usr/local/bin + + - name: Run Runic + id: runic + run: | + set +e + MERGE_BASE=$(git merge-base upstream/${{ github.base_ref }} HEAD) || exit 1 + DIFF=$(git runic --diff $MERGE_BASE) + EXIT_CODE=$? + + echo "exit_code=$EXIT_CODE" >> $GITHUB_OUTPUT + echo "diff<<EOF" >> $GITHUB_OUTPUT + echo "$DIFF" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + # if Runic failed, bail out + [ $EXIT_CODE -eq 2 ] && exit 1 || exit 0 + + - name: Find comment + uses: peter-evans/find-comment@v3 + id: find-comment + with: + issue-number: ${{ github.event.pull_request.number }} + comment-author: 'github-actions[bot]' + body-includes: '' + + - name: Comment formatting suggestions + if: steps.runic.outputs.exit_code == 1 + uses: peter-evans/create-or-update-comment@v4 + with: + comment-id: ${{ steps.find-comment.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body: | + + + Your PR requires formatting changes to meet the project's style guidelines. + Please consider running [Runic](https://github.com/fredrikekre/Runic.jl) (`git runic ${{ github.base_ref }}`) to apply these changes. + <details>
+ <summary>Click here to view the suggested changes.</summary> + + ```diff + ${{ steps.runic.outputs.diff }} + ``` + + </details>
+ edit-mode: replace + + - name: Update stale comment + if: steps.runic.outputs.exit_code == 0 && steps.find-comment.outputs.comment-id + uses: peter-evans/create-or-update-comment@v4 + with: + comment-id: ${{ steps.find-comment.outputs.comment-id }} + issue-number: ${{ github.event.pull_request.number }} + body: | + + + Your PR no longer requires formatting changes. Thank you for your contribution! + edit-mode: replace + + # XXX: if Github ever supports allow-failure (actions/runner#2347) + #- name: Propagate exit code + # run: | + # exit ${{ steps.runic.outputs.exit_code }} \ No newline at end of file diff --git a/.typos.toml b/.typos.toml index f3194677..88843008 100644 --- a/.typos.toml +++ b/.typos.toml @@ -5,6 +5,7 @@ Shepard = "Shepard" arange = "arange" iy = "iy" nin = "nin" +tect = "tect" [files] extend-exclude = ["tutorials/*.pvsm","docs/paper/paper.bib"] \ No newline at end of file diff --git a/ext/Chmy_utils.jl b/ext/Chmy_utils.jl index 9fe42678..ed36b895 100644 --- a/ext/Chmy_utils.jl +++ b/ext/Chmy_utils.jl @@ -15,31 +15,31 @@ println("Loading Chmy-GMG tools") Creates a GMG `CartGrid` data structure from a `Chmy` grid object """ -function create_CartGrid(grid::StructuredGrid; ylevel=0.0) +function create_CartGrid(grid::StructuredGrid; ylevel = 0.0) - coord1D = Vector.(coords(grid, Vertex())) + coord1D = Vector.(coords(grid, Vertex())) coord1D_cen = Vector.(coords(grid, Center())) - N = length.(coord1D) - L = extent(grid, Vertex()) - X₁ = origin(grid, Vertex()) - Δ = spacing(grid) - ConstantΔ = false; + N = length.(coord1D) + L = extent(grid, Vertex()) + X₁ = origin(grid, Vertex()) + Δ = spacing(grid) + ConstantΔ = false if isa(grid, UniformGrid) ConstantΔ = true end - if ndims(grid)==2 + if ndims(grid) == 2 # we need a special treatment of this, as all GMG routines work with 3D coordinates - X₁ = (X₁[1], ylevel, X₁[2]) - L = (L[1], 0.0, L[2]) - Δ = (Δ[1], 0.0, Δ[2]) - N = (N[1],1,N[2]) + X₁ = (X₁[1], ylevel, X₁[2]) + L = (L[1], 0.0, L[2]) + Δ = (Δ[1], 0.0, Δ[2]) + N = (N[1], 1, N[2]) coord1D = (coord1D[1], [0.0], coord1D[2]) coord1D_cen = (coord1D_cen[1], [0.0], coord1D_cen[2]) end - Xₙ = X₁ .+ L - + Xₙ = X₁ .+ L - return CartGrid(ConstantΔ,N,Δ,L,X₁,Xₙ,coord1D, coord1D_cen) + + return CartGrid(ConstantΔ, N, Δ, L, X₁, Xₙ, coord1D, coord1D_cen) end # all functions to be ported @@ -47,7 +47,7 @@ function_names = (:add_box!, :add_sphere!, :add_ellipsoid!, :add_cylinder!, :add for fn in function_names - @eval begin + @eval begin """ $($fn)( Phase::Field, Temp::Field, @@ -56,22 +56,24 @@ for fn in function_names Sets `$($fn)` function for `Chmy` fields and grids. """ - function $fn( Phase::Field, - Temp::Field, - Grid::StructuredGrid; # required input - kwargs...) + function $fn( + Phase::Field, + Temp::Field, + Grid::StructuredGrid; # required input + kwargs... + ) CartGrid = create_CartGrid(Grid) cell = false - if all(location(Phase).==Center()) + if all(location(Phase) .== Center()) cell = true end - - return ($fn)(Phase, Temp, CartGrid; cell=cell, kwargs...) + + return ($fn)(Phase, Temp, CartGrid; cell = cell, kwargs...) end end - + end @@ -80,28 +82,30 @@ function_names = (:above_surface, :below_surface) for fn in function_names - @eval begin + @eval begin """ $($fn)( Grid::StructuredGrid, field::Field, DataSurface_Cart::CartData; kwargs...) Sets `$($fn)` function for `Chmy` grids and the field `field` which can be either on vertices or centers """ - function $fn( Grid::StructuredGrid, - field::Field, - DataSurface_Cart::CartData; - kwargs...) 
+ function $fn( + Grid::StructuredGrid, + field::Field, + DataSurface_Cart::CartData; + kwargs... + ) CartGrid = create_CartGrid(Grid) - + cell = false - if all(location(field).==Center()) + if all(location(field) .== Center()) cell = true end - return ($fn)(CartGrid, DataSurface_Cart; cell=cell, kwargs...) + return ($fn)(CartGrid, DataSurface_Cart; cell = cell, kwargs...) end end - + end diff --git a/ext/GLMakie_Visualisation.jl b/ext/GLMakie_Visualisation.jl index 6f130a35..3ace3601 100644 --- a/ext/GLMakie_Visualisation.jl +++ b/ext/GLMakie_Visualisation.jl @@ -29,24 +29,24 @@ All fields in the dataset can be explored, and if the optional parameter `Topogr Note that this requires orthogonal grids, so it will work with a `GeoData` set, or with an orthogonal `CartData` set. Note that you may have to use `project_CartData` to project it to orthogonal cartesian coordinates. """ -function visualise(Data::AbstractGeneralGrid; Topography=nothing, Topo_range=nothing) +function visualise(Data::AbstractGeneralGrid; Topography = nothing, Topo_range = nothing) - axis_equal = false; # in case we use x/y/z data in km, this is useful + axis_equal = false # in case we use x/y/z data in km, this is useful - if isa(Data,GeoData) - x = Data.lon.val[:,1,1]; xlab = "lon" - y = Data.lat.val[1,:,1]; ylab = "lat" - z = Data.depth.val[1,1,:]; zlab = "depth [km]" - orthogonal = true; - elseif isa(Data,CartData) - # Determine - x = Data.x.val[:,1,1]; xlab = "X [km]" - y = Data.y.val[1,:,1]; ylab = "Y [km]" - z = Data.z.val[1,1,:]; zlab = "Z [km]" + if isa(Data, GeoData) + x = Data.lon.val[:, 1, 1]; xlab = "lon" + y = Data.lat.val[1, :, 1]; ylab = "lat" + z = Data.depth.val[1, 1, :]; zlab = "depth [km]" + orthogonal = true + elseif isa(Data, CartData) + # Determine + x = Data.x.val[:, 1, 1]; xlab = "X [km]" + y = Data.y.val[1, :, 1]; ylab = "Y [km]" + z = Data.z.val[1, 1, :]; zlab = "Z [km]" axis_equal = true - if sum(abs.(Data.x.val[:,1,2] - Data.x.val[:,1,1]))>1e-10 + if sum(abs.(Data.x.val[:, 1, 2] - Data.x.val[:, 1, 1])) > 1.0e-10 orthogonal = false warning("Non-orthogonal CartData - can only show topography") else @@ -56,96 +56,105 @@ function visualise(Data::AbstractGeneralGrid; Topography=nothing, Topo_range=not else error("not yet implemented ") end - + if !axis_equal x_vec = 1:length(x) y_vec = 1:length(y) z_vec = 1:length(z) else - x_vec = range(x[1],x[end],length(x)); - y_vec = range(y[1],y[end],length(y)); - z_vec = range(z[1],z[end],length(z)); + x_vec = range(x[1], x[end], length(x)) + y_vec = range(y[1], y[end], length(y)) + z_vec = range(z[1], z[end], length(z)) end # determine width of axis - dx = (maximum(x) - minimum(x))/(length(x)-1); - dy = (maximum(y) - minimum(y))/(length(y)-1); - dz = (maximum(z) - minimum(z))/(length(z)-1); - + dx = (maximum(x) - minimum(x)) / (length(x) - 1) + dy = (maximum(y) - minimum(y)) / (length(y) - 1) + dz = (maximum(z) - minimum(z)) / (length(z) - 1) + if !isnothing(Topography) - if isa(Topography,GeoData) - x_topo = (Topography.lon.val .- x[1])/dx; - y_topo = (Topography.lat.val .- y[1])/dy; - z_topo = (Topography.depth.val .- z[1])/dz; - elseif isa(Topography,CartData) - x_topo = Topography.x.val; - y_topo = Topography.y.val; - z_topo = Topography.z.val; + if isa(Topography, GeoData) + x_topo = (Topography.lon.val .- x[1]) / dx + y_topo = (Topography.lat.val .- y[1]) / dy + z_topo = (Topography.depth.val .- z[1]) / dz + elseif isa(Topography, CartData) + x_topo = Topography.x.val + y_topo = Topography.y.val + z_topo = Topography.z.val end if 
isnothing(Topo_range) - # base - topo_max = round(maximum(ustrip.(Topography.fields.Topography)),digits=1) + # base + topo_max = round(maximum(ustrip.(Topography.fields.Topography)), digits = 1) Topo_range = (-topo_max, topo_max) end end - - data_names = keys(Data.fields) # Names of the fields + + data_names = keys(Data.fields) # Names of the fields data_selected = Observable(Symbol(data_names[1])) - data_string = @lift String($data_selected) + data_string = @lift String($data_selected) get_vol(f_name) = Data.fields[f_name] - vol = lift(get_vol, data_selected) + vol = lift(get_vol, data_selected) - fig = Figure(resolution = (2000,2000), fontsize=20) + fig = Figure(resolution = (2000, 2000), fontsize = 20) ax = LScene(fig[1, 1:2], scenekw = (camera = cam3d!, raw = false)) # Create sliders sgrid = SliderGrid( - fig[2, 2], - (label = xlab, range = 1:length(x_vec), - format = v -> string(round((v-1)*dx + x[1], digits = 2))), - (label = ylab, range = 1:length(y_vec), - format = v -> string(round((v-1)*dy + y[1], digits = 2))), - (label = zlab, range = 1:length(z_vec), - format = v -> string(round((v-1)*dz + z[1], digits = 2))), - (label = "Transparency topo", range = 0:.01:1), + fig[2, 2], + ( + label = xlab, range = 1:length(x_vec), + format = v -> string(round((v - 1) * dx + x[1], digits = 2)), + ), + ( + label = ylab, range = 1:length(y_vec), + format = v -> string(round((v - 1) * dy + y[1], digits = 2)), + ), + ( + label = zlab, range = 1:length(z_vec), + format = v -> string(round((v - 1) * dz + z[1], digits = 2)), + ), + (label = "Transparency topo", range = 0:0.01:1), ) - + # Create dropdown menus - menu_dataset = Menu(fig, options = [String.(data_names)...], default=String(data_selected[])) - menu_colormap = Menu(fig, options = ["roma","romaO","vik","turku","davos","batlow","tab10","tab20","bone"], - default="roma") + menu_dataset = Menu(fig, options = [String.(data_names)...], default = String(data_selected[])) + menu_colormap = Menu( + fig, options = ["roma", "romaO", "vik", "turku", "davos", "batlow", "tab10", "tab20", "bone"], + default = "roma" + ) # Colorbar limits - cmin = @lift round(minimum($vol), digits=2) - cmax = @lift round(maximum($vol), digits=2) + cmin = @lift round(minimum($vol), digits = 2) + cmax = @lift round(maximum($vol), digits = 2) cmin_str = @lift string($cmin) cmax_str = @lift string($cmax) - - cmin_box = Textbox(fig, stored_string = cmin_str,width = nothing) - cmax_box = Textbox(fig, stored_string = cmax_str,width = nothing) + + cmin_box = Textbox(fig, stored_string = cmin_str, width = nothing) + cmax_box = Textbox(fig, stored_string = cmax_str, width = nothing) iso_level = Observable([1.7]) - iso_alpha = Observable(0.5); - iso_box = Textbox(fig, stored_string ="$(iso_level[][1])",width = nothing) - iso_toggle = Toggle(fig, active = true); - iso_slide = Slider(fig, range = 0:.01:1) + iso_alpha = Observable(0.5) + iso_box = Textbox(fig, stored_string = "$(iso_level[][1])", width = nothing) + iso_toggle = Toggle(fig, active = true) + iso_slide = Slider(fig, range = 0:0.01:1) set_close_to!(iso_slide, iso_alpha[]) - Label(fig[3,1:4], " ", width = nothing) + Label(fig[3, 1:4], " ", width = nothing) fig[2, 1] = vgrid!( - hgrid!(Label(fig, "Dataset", width = nothing),menu_dataset), - hgrid!(Label(fig, "Colormap", width = nothing),menu_colormap), + hgrid!(Label(fig, "Dataset", width = nothing), menu_dataset), + hgrid!(Label(fig, "Colormap", width = nothing), menu_colormap), hgrid!(Label(fig, "Color axis limits", width = nothing), hgrid!(cmin_box, Label(fig, "-", width 
= 20), cmax_box)), - hgrid!(hgrid!(Label(fig, "Isovalue", width = nothing), iso_toggle), hgrid!(iso_box,Label(fig, "α: ", width = nothing),iso_slide)); - tellheight = false) + hgrid!(hgrid!(Label(fig, "Isovalue", width = nothing), iso_toggle), hgrid!(iso_box, Label(fig, "α: ", width = nothing), iso_slide)); + tellheight = false + ) lo = sgrid.layout nc = ncols(lo) - - # Note: volumeslices & GLMakie in general seems to have a bit of an issue with + + # Note: volumeslices & GLMakie in general seems to have a bit of an issue with # using real coordinates. In many cases the numerical values of lon/lat are much smaller than the depth values, # & not centered around zero. # @@ -153,63 +162,69 @@ function visualise(Data::AbstractGeneralGrid; Topography=nothing, Topo_range=not # and simply overwrite the names of the labels if orthogonal - plt = volumeslices!(ax, x_vec,y_vec,z_vec,vol, colorrange=(cmin,cmax), colormap=:roma) - iso = GLMakie.contour!(plt, x_vec, y_vec, z_vec, vol, levels = iso_level, alpha=iso_alpha, colormap=plt.attributes.colormap, colorrange=plt.attributes.colorrange) + plt = volumeslices!(ax, x_vec, y_vec, z_vec, vol, colorrange = (cmin, cmax), colormap = :roma) + iso = GLMakie.contour!(plt, x_vec, y_vec, z_vec, vol, levels = iso_level, alpha = iso_alpha, colormap = plt.attributes.colormap, colorrange = plt.attributes.colorrange) -# plt = volumeslices!(ax, reverse(x_vec),reverse(y_vec),z_vec,vol, colorrange=(cmin,cmax), colormap=:roma) -# iso = GLMakie.contour!(plt, reverse(x_vec), reverse(y_vec), z_vec, vol, levels = iso_level, alpha=iso_alpha, colormap=plt.attributes.colormap, colorrange=plt.attributes.colorrange) + # plt = volumeslices!(ax, reverse(x_vec),reverse(y_vec),z_vec,vol, colorrange=(cmin,cmax), colormap=:roma) + # iso = GLMakie.contour!(plt, reverse(x_vec), reverse(y_vec), z_vec, vol, levels = iso_level, alpha=iso_alpha, colormap=plt.attributes.colormap, colorrange=plt.attributes.colorrange) end topo_alpha = Observable(0.5) if !isnothing(Topography) - # in case topography is supplied - topo_surf = surface!(ax, x_topo[:,:,1], y_topo[:,:,1], z_topo[:,:,1], colormap=(:oleron, topo_alpha[]), color=ustrip.(Topography.fields.Topography[:,:,1]), colorrange=Topo_range, transparency = true) - cb_surf = Colorbar(fig[1, 4], topo_surf, vertical = true, label="Topography", height = Relative(0.6)) + # in case topography is supplied + topo_surf = surface!(ax, x_topo[:, :, 1], y_topo[:, :, 1], z_topo[:, :, 1], colormap = (:oleron, topo_alpha[]), color = ustrip.(Topography.fields.Topography[:, :, 1]), colorrange = Topo_range, transparency = true) + cb_surf = Colorbar(fig[1, 4], topo_surf, vertical = true, label = "Topography", height = Relative(0.6)) end if !axis_equal - xticks!(ax.scene, xtickrange=[0.;length(x)],xticklabels=["$(x[1])", "$(x[end])"]) - yticks!(ax.scene, ytickrange=[0.;length(y)],yticklabels=["$(y[1])", "$(y[end])"]) - zticks!(ax.scene, ztickrange=[0.;length(z)],zticklabels=["$(z[1])", "$(z[end])"]) + xticks!(ax.scene, xtickrange = [0.0;length(x)], xticklabels = ["$(x[1])", "$(x[end])"]) + yticks!(ax.scene, ytickrange = [0.0;length(y)], yticklabels = ["$(y[1])", "$(y[end])"]) + zticks!(ax.scene, ztickrange = [0.0;length(z)], zticklabels = ["$(z[1])", "$(z[end])"]) else - xticks!(ax.scene, xtickrange=[x[1];x[end]],xticklabels=["$(x[1])", "$(x[end])"]) - yticks!(ax.scene, ytickrange=[y[1];y[end]],yticklabels=["$(y[1])", "$(y[end])"]) - zticks!(ax.scene, ztickrange=[z[1];z[end]],zticklabels=["$(z[1])", "$(z[end])"]) + xticks!(ax.scene, xtickrange = [x[1];x[end]], 
xticklabels = ["$(x[1])", "$(x[end])"]) + yticks!(ax.scene, ytickrange = [y[1];y[end]], yticklabels = ["$(y[1])", "$(y[end])"]) + zticks!(ax.scene, ztickrange = [z[1];z[end]], zticklabels = ["$(z[1])", "$(z[end])"]) end xlabel!(ax.scene, xlab) ylabel!(ax.scene, ylab) zlabel!(ax.scene, zlab) - # - cb = Colorbar(fig[1, 3], plt, vertical = true, label=data_string, height = Relative(0.6)) + # + cb = Colorbar(fig[1, 3], plt, vertical = true, label = data_string, height = Relative(0.6)) # connect sliders to `volumeslices` update methods sl_yz, sl_xz, sl_xy, sl_alpha_topo = sgrid.sliders - on(sl_yz.value) do v; plt[:update_yz][](v) end - on(sl_xz.value) do v; plt[:update_xz][](v) end - on(sl_xy.value) do v; plt[:update_xy][](v) end + on(sl_yz.value) do v + plt[:update_yz][](v) + end + on(sl_xz.value) do v + plt[:update_xz][](v) + end + on(sl_xy.value) do v + plt[:update_xy][](v) + end if orthogonal - set_close_to!(sl_yz, .5length(x_vec)) - set_close_to!(sl_xz, .5length(y_vec)) - set_close_to!(sl_xy, .5length(z_vec)) + set_close_to!(sl_yz, 0.5length(x_vec)) + set_close_to!(sl_xz, 0.5length(y_vec)) + set_close_to!(sl_xy, 0.5length(z_vec)) end - set_close_to!(sl_alpha_topo, .5) - - # change color limits + set_close_to!(sl_alpha_topo, 0.5) + + # change color limits on(cmin_box.stored_string) do s ra = plt[:colorrange] - plt[:colorrange] = (parse(Float64,s), ra.val[2]) + plt[:colorrange] = (parse(Float64, s), ra.val[2]) end on(cmax_box.stored_string) do s ra = plt[:colorrange] - plt[:colorrange] = (ra.val[1], parse(Float64,s)) + plt[:colorrange] = (ra.val[1], parse(Float64, s)) end - # Change data + # Change data on(menu_dataset.selection) do s data_selected[] = Symbol(s) plt[:colorrange] = (cmin[], cmax[]) @@ -217,23 +232,23 @@ function visualise(Data::AbstractGeneralGrid; Topography=nothing, Topo_range=not cmax_box.displayed_string = cmax_str[] # update values - set_close_to!(sl_yz, sl_yz.value[]) - set_close_to!(sl_xz, sl_xz.value[]) - set_close_to!(sl_xy, sl_xy.value[]) - + set_close_to!(sl_yz, sl_yz.value[]) + set_close_to!(sl_xz, sl_xz.value[]) + set_close_to!(sl_xy, sl_xy.value[]) + end - - # Change colormap + + # Change colormap on(menu_colormap.selection) do s plt.colormap = s end - # Create isosurface? + # Create isosurface? on(iso_toggle.active) do s iso.visible = s end on(iso_box.stored_string) do s - iso_level[] = [parse(Float64,s)] + iso_level[] = [parse(Float64, s)] end on(iso_slide.value) do v iso_alpha[] = v @@ -243,15 +258,15 @@ function visualise(Data::AbstractGeneralGrid; Topography=nothing, Topo_range=not topo_alpha[] = v if !isnothing(Topography) # in case topography is supplied - topo_surf.attributes.colormap=(:oleron,topo_alpha[]) + topo_surf.attributes.colormap = (:oleron, topo_alpha[]) end end # add toggles to show/hide slices if orthogonal - hmaps = [plt[Symbol(:heatmap_, s)][] for s ∈ (:yz, :xz, :xy)] - toggles = [Toggle(lo[i, nc + 1], active = true) for i ∈ 1:4] + hmaps = [plt[Symbol(:heatmap_, s)][] for s in (:yz, :xz, :xy)] + toggles = [Toggle(lo[i, nc + 1], active = true) for i in 1:4] if !isnothing(Topography) map(zip([hmaps; topo_surf], toggles)) do (h, t) @@ -269,7 +284,7 @@ function visualise(Data::AbstractGeneralGrid; Topography=nothing, Topo_range=not display(fig) - + return nothing end @@ -277,11 +292,11 @@ end heatmap(x::GeoData, args...; field=:Topography, kwargs...) heatmap for a 2D GeoData object (surface) """ -function heatmap(x::GeoData, args...; field=:Topography, kwargs...) 
- @assert size(x.depth.val,3)==1 +function heatmap(x::GeoData, args...; field = :Topography, kwargs...) + @assert size(x.depth.val, 3) == 1 @assert hasfield(typeof(x.fields), field) - heatmap(x.lon.val[:,1], x.lat.val[1,:], ustrip.(x.fields[field][:,:,1]), args...; kwargs...) + return heatmap(x.lon.val[:, 1], x.lat.val[1, :], ustrip.(x.fields[field][:, :, 1]), args...; kwargs...) end @@ -289,13 +304,13 @@ end heatmap(x::GeoData, a::Array{_T,N}, args...; kwargs...) in-place heatmap for a 2D GeoData object (surface) with an array `a`. """ -function heatmap(x::GeoData, a::Array{_T,N}, args...; kwargs...) where{_T,N} - @assert size(x.depth.val,3)==1 +function heatmap(x::GeoData, a::Array{_T, N}, args...; kwargs...) where {_T, N} + @assert size(x.depth.val, 3) == 1 - if N==3 - heatmap(x.lon.val[:,1], x.lat.val[1,:], ustrip.(a[:,:,1]), args...; kwargs...) - elseif N==2 - heatmap(x.lon.val[:,1], x.lat.val[1,:], ustrip.(a), args...; kwargs...) + return if N == 3 + heatmap(x.lon.val[:, 1], x.lat.val[1, :], ustrip.(a[:, :, 1]), args...; kwargs...) + elseif N == 2 + heatmap(x.lon.val[:, 1], x.lat.val[1, :], ustrip.(a), args...; kwargs...) end end @@ -304,11 +319,11 @@ end heatmap(x::CartData, args...; field=:Topography, kwargs...) heatmap for a 2D CartData object (surface) """ -function heatmap(x::CartData, args...; field=:Topography, kwargs...) - @assert size(x.z.val,3)==1 +function heatmap(x::CartData, args...; field = :Topography, kwargs...) + @assert size(x.z.val, 3) == 1 @assert hasfield(typeof(x.fields), field) - heatmap(x.x.val[:,1], x.y.val[1,:], ustrip.(x.fields[field][:,:,1]), args...; kwargs...) + return heatmap(x.x.val[:, 1], x.y.val[1, :], ustrip.(x.fields[field][:, :, 1]), args...; kwargs...) end @@ -316,13 +331,13 @@ end heatmap(x::CartData, a::Array{_T,N}, args...; kwargs...) in-place heatmap for a 2D CartData object (surface) with an array `a`. """ -function heatmap(x::CartData, a::Array{_T,N}, args...; kwargs...) where{_T,N} - @assert size(x.z.val,3)==1 +function heatmap(x::CartData, a::Array{_T, N}, args...; kwargs...) where {_T, N} + @assert size(x.z.val, 3) == 1 - if N==3 - heatmap(x.x.val[:,1], x.y.val[1,:], ustrip.(a[:,:,1]), args...; kwargs...) - elseif N==2 - heatmap(x.x.val[:,1], x.y.val[1,:], ustrip.(a), args...; kwargs...) + return if N == 3 + heatmap(x.x.val[:, 1], x.y.val[1, :], ustrip.(a[:, :, 1]), args...; kwargs...) + elseif N == 2 + heatmap(x.x.val[:, 1], x.y.val[1, :], ustrip.(a), args...; kwargs...) end end @@ -332,11 +347,11 @@ end heatmap!(x::GeoData, args...; field=:Topography, kwargs...) in-place heatmap for a 2D GeoData object (surface), """ -function heatmap!(x::GeoData, args...; field=:Topography, kwargs...) - @assert size(x.depth.val,3)==1 +function heatmap!(x::GeoData, args...; field = :Topography, kwargs...) + @assert size(x.depth.val, 3) == 1 @assert hasfield(typeof(x.fields), field) - heatmap(x.lon.val[:,1], x.lat.val[1,:], ustrip.(x.fields[field][:,:,1]), args...; kwargs...) + return heatmap(x.lon.val[:, 1], x.lat.val[1, :], ustrip.(x.fields[field][:, :, 1]), args...; kwargs...) end @@ -344,13 +359,13 @@ end heatmap!(x::GeoData, a::Array{_T,N}, args...; kwargs...) in-place heatmap for a 2D GeoData object (surface) with an array `a`. """ -function heatmap!(x::GeoData, a::Array{_T,N}, args...; kwargs...) where{_T,N} - @assert size(x.depth.val,3)==1 +function heatmap!(x::GeoData, a::Array{_T, N}, args...; kwargs...) where {_T, N} + @assert size(x.depth.val, 3) == 1 - if N==3 - heatmap!(x.lon.val[:,1], x.lat.val[1,:], ustrip.(a[:,:,1]), args...; kwargs...) 
- elseif N==2 - heatmap!(x.lon.val[:,1], x.lat.val[1,:], ustrip.(a), args...; kwargs...) + return if N == 3 + heatmap!(x.lon.val[:, 1], x.lat.val[1, :], ustrip.(a[:, :, 1]), args...; kwargs...) + elseif N == 2 + heatmap!(x.lon.val[:, 1], x.lat.val[1, :], ustrip.(a), args...; kwargs...) end end @@ -359,19 +374,19 @@ end heatmap!(x::CartData, args...; field=:Topography, colorbar=false, kwargs...) in-place heatmap for a 2D CartData object (surface) """ -function heatmap!(x::CartData, args...; field=:Topography, colorbar=false, kwargs...) - @assert size(x.z.val,3)==1 +function heatmap!(x::CartData, args...; field = :Topography, colorbar = false, kwargs...) + @assert size(x.z.val, 3) == 1 @assert hasfield(typeof(x.fields), field) - data = ustrip.(x.fields[field][:,:,1]) + data = ustrip.(x.fields[field][:, :, 1]) - fig,ax,hm = heatmap!(x.x.val[:,1], x.y.val[1,:], data, args...; kwargs...) + fig, ax, hm = heatmap!(x.x.val[:, 1], x.y.val[1, :], data, args...; kwargs...) cb = nothing if colorbar - cb = Colorbar(fig[1,2], limits=extrema(filter(!isnan,x.fields[field])), label=field) + cb = Colorbar(fig[1, 2], limits = extrema(filter(!isnan, x.fields[field])), label = field) end - + return fig_out end @@ -379,15 +394,15 @@ end heatmap!(x::CartData, a::Array{_T,N}, args...; kwargs...) in-place heatmap for a 2D CartData object (surface) with an array `a`. """ -function heatmap!(x::CartData, a::Array{_T,N}, args...; kwargs...) where{_T,N} - @assert size(x.z.val,3)==1 +function heatmap!(x::CartData, a::Array{_T, N}, args...; kwargs...) where {_T, N} + @assert size(x.z.val, 3) == 1 - if N==3 - heatmap!(x.x.val[:,1], x.y.val[1,:], ustrip.(a[:,:,1]), args...; kwargs...) - elseif N==2 - heatmap!(x.x.val[:,1], x.y.val[1,:], ustrip.(a[:,:]), args...; kwargs...) + return if N == 3 + heatmap!(x.x.val[:, 1], x.y.val[1, :], ustrip.(a[:, :, 1]), args...; kwargs...) + elseif N == 2 + heatmap!(x.x.val[:, 1], x.y.val[1, :], ustrip.(a[:, :]), args...; kwargs...) end end -end \ No newline at end of file +end diff --git a/ext/GMT_utils.jl b/ext/GMT_utils.jl index 77878ec8..5b206dba 100644 --- a/ext/GMT_utils.jl +++ b/ext/GMT_utils.jl @@ -7,9 +7,9 @@ import GeophysicalModelGenerator: import_topo, import_GeoTIFF # Julia v1.9.0 does not load package extensions when their dependency is # loaded from the main environment. if VERSION >= v"1.9.1" - using GMT + using GMT else - using ..GMT + using ..GMT end using GeophysicalModelGenerator: lonlatdepth_grid, GeoData, UTMData, km, remove_NaN_surface! 
@@ -65,38 +65,38 @@ julia> write_paraview(Topo,"Topo_Alps") "Topo_Alps.vts" ``` """ -function import_topo(limits; file::String="@earth_relief_01m", maxattempts=5) - - # Correct if negative values are given (longitude coordinates that are west) - ind = findall(limits[1:2] .< 0); - - if (limits[1] < 0) && (limits[2] < 0) - limits[ind] .= 360 .+ limits[ind] - limits[1:2] = sort(limits[1:2]) - end - - # Download topo file - add a few attempts to do so - local G - attempt = 0 - while attempt < maxattempts - try - G = gmtread(file, limits=limits, grid=true); - break - catch - @warn "Failed downloading GMT topography on attempt $attempt/$maxattempts" - sleep(5) # wait a few sec +function import_topo(limits; file::String = "@earth_relief_01m", maxattempts = 5) + + # Correct if negative values are given (longitude coordinates that are west) + ind = findall(limits[1:2] .< 0) + + if (limits[1] < 0) && (limits[2] < 0) + limits[ind] .= 360 .+ limits[ind] + limits[1:2] = sort(limits[1:2]) end - attempt += 1 - end - (@isdefined G) || error("Could not download GMT topography data") - - # Transfer to GeoData - nx, ny = size(G.z,2), size(G.z,1) - Lon,Lat,Depth = lonlatdepth_grid(G.x[1:nx],G.y[1:ny],0); - @views Depth[:,:,1] = 1e-3*G.z'; - Topo = GeoData(Lon, Lat, Depth, (Topography=Depth*km,)) - - return Topo + + # Download topo file - add a few attempts to do so + local G + attempt = 0 + while attempt < maxattempts + try + G = gmtread(file, limits = limits, grid = true) + break + catch + @warn "Failed downloading GMT topography on attempt $attempt/$maxattempts" + sleep(5) # wait a few sec + end + attempt += 1 + end + (@isdefined G) || error("Could not download GMT topography data") + + # Transfer to GeoData + nx, ny = size(G.z, 2), size(G.z, 1) + Lon, Lat, Depth = lonlatdepth_grid(G.x[1:nx], G.y[1:ny], 0) + @views Depth[:, :, 1] = 1.0e-3 * G.z' + Topo = GeoData(Lon, Lat, Depth, (Topography = Depth * km,)) + + return Topo end """ @@ -115,7 +115,7 @@ julia> Topo = import_topo(lon=(-50, -40), lat=(-10,-5), file="@earth_relief_30s" ``` """ -import_topo(; lat=[37,49], lon=[4,20], file::String="@earth_relief_01m", maxattempts=5) = import_topo([lon[1],lon[2], lat[1], lat[2]], file=file, maxattempts=maxattempts) +import_topo(; lat = [37, 49], lon = [4, 20], file::String = "@earth_relief_01m", maxattempts = 5) = import_topo([lon[1], lon[2], lat[1], lat[2]], file = file, maxattempts = maxattempts) """ @@ -133,62 +133,62 @@ Optional keywords: - `constantDepth`: if true we will not warp the surface by z-values, but use a constant value instead - `removeNaN_z` : if true, we will remove NaN values from the z-dataset """ -function import_GeoTIFF(fname::String; fieldname=:layer1, negative=false, iskm=true, NorthernHemisphere=true, constantDepth=false, removeNaN_z=false, removeNaN_field=false) - G = gmtread(fname); - - # Transfer to GeoData - nx,ny = length(G.x)-1, length(G.y)-1 - Lon,Lat,Depth = lonlatdepth_grid(G.x[1:nx],G.y[1:ny],0); - if hasfield(typeof(G),:z) - Depth[:,:,1] = G.z'; - if negative - Depth[:,:,1] = -G.z'; +function import_GeoTIFF(fname::String; fieldname = :layer1, negative = false, iskm = true, NorthernHemisphere = true, constantDepth = false, removeNaN_z = false, removeNaN_field = false) + G = gmtread(fname) + + # Transfer to GeoData + nx, ny = length(G.x) - 1, length(G.y) - 1 + Lon, Lat, Depth = lonlatdepth_grid(G.x[1:nx], G.y[1:ny], 0) + if hasfield(typeof(G), :z) + Depth[:, :, 1] = G.z' + if negative + Depth[:, :, 1] = -G.z' + end + if iskm + Depth *= 1.0e-3 * km + end end - if iskm - Depth *= 
1e-3*km; + + # Create GeoData structure + data = zero(Lon) + if hasfield(typeof(G), :z) + data = Depth + + elseif hasfield(typeof(G), :image) + if length(size(G.image)) == 3 + data = permutedims(G.image, [2, 1, 3]) + elseif length(size(G.image)) == 2 + data[:, :, 1] = G.image' + end + + end + + if removeNaN_z + remove_NaN_surface!(Depth, Lon, Lat) + end + if removeNaN_field + remove_NaN_surface!(data, Lon, Lat) end - end - - # Create GeoData structure - data = zero(Lon) - if hasfield(typeof(G),:z) - data = Depth - - elseif hasfield(typeof(G),:image) - if length(size(G.image)) == 3 - data = permutedims(G.image,[2, 1, 3]); - elseif length(size(G.image)) == 2 - data[:,:,1] = G.image' + data_field = NamedTuple{(fieldname,)}((data,)) + + if constantDepth + Depth = zero(Lon) end - end - - if removeNaN_z - remove_NaN_surface!(Depth, Lon, Lat) - end - if removeNaN_field - remove_NaN_surface!(data, Lon, Lat) - end - data_field = NamedTuple{(fieldname,)}((data,)); - - if constantDepth - Depth = zero(Lon) - end - - if contains(G.proj4,"utm") - zone = parse(Int64,split.(split(G.proj4,"zone=")[2]," ")[1]); # retrieve UTM zone - data_GMT = UTMData(Lon, Lat, Depth, zone, NorthernHemisphere, data_field) - - elseif contains(G.proj4,"longlat") - data_GMT = GeoData(Lon, Lat, Depth, data_field) - - else - error("I'm sorry, I don't know how to handle this projection yet: $(G.proj4)\n + if contains(G.proj4, "utm") + zone = parse(Int64, split.(split(G.proj4, "zone=")[2], " ")[1]) # retrieve UTM zone + data_GMT = UTMData(Lon, Lat, Depth, zone, NorthernHemisphere, data_field) + + elseif contains(G.proj4, "longlat") + data_GMT = GeoData(Lon, Lat, Depth, data_field) + + else + error("I'm sorry, I don't know how to handle this projection yet: $(G.proj4)\n We recommend that you transfer your GeoTIFF to longlat by using QGIS \n Open the GeoTIFF there and Export -> Save As , while selecting \"EPSG:4326 - WGS 84\" projection.") - end + end - return data_GMT + return data_GMT end diff --git a/ext/Gmsh_utils.jl b/ext/Gmsh_utils.jl index 8fc0d800..128a6b09 100644 --- a/ext/Gmsh_utils.jl +++ b/ext/Gmsh_utils.jl @@ -14,24 +14,24 @@ Reads a Gmsh file and returns a `FEData` object with info about the mesh. 
`tag_n """ function import_Gmsh(fname::String) - mesh = GmshDiscreteModel(fname, renumber=false) + mesh = GmshDiscreteModel(fname, renumber = false) # Extract vertices - nverts = length(mesh.grid.node_coordinates); - dims = length(mesh.grid.node_coordinates[1]) - vertices = [mesh.grid.node_coordinates[n][i] for i=1:dims,n=1:nverts] + nverts = length(mesh.grid.node_coordinates) + dims = length(mesh.grid.node_coordinates[1]) + vertices = [mesh.grid.node_coordinates[n][i] for i in 1:dims,n in 1:nverts] # write coords as 1D double array nvertices_cell = length(mesh.grid.cell_node_ids[1]) - connectivity = [c[i] for i=1:nvertices_cell, c in mesh.grid.cell_node_ids] + connectivity = [c[i] for i in 1:nvertices_cell, c in mesh.grid.cell_node_ids] # extract tag of each of the tetrahedrons - regions, tag_names = cell_tags_from_gmsh(mesh) + regions, tag_names = cell_tags_from_gmsh(mesh) - cellfields = (regions=regions,) - fields = nothing + cellfields = (regions = regions,) + fields = nothing - return FEData(vertices,connectivity, fields, cellfields), tag_names + return FEData(vertices, connectivity, fields, cellfields), tag_names end @@ -41,29 +41,27 @@ end Returns a list with integers that are the tags for each of the cells """ function cell_tags_from_gmsh(mesh) - cell_entities = mesh.face_labeling.d_to_dface_to_entity[4] # volumetric entities + cell_entities = mesh.face_labeling.d_to_dface_to_entity[4] # volumetric entities cell_entities_unique = unique(cell_entities) - tag_unique = zeros(Int64,size(cell_entities_unique)) - - for i=1:length(cell_entities_unique) - for (n,tag) in enumerate(mesh.face_labeling.tag_to_entities) - if any(tag .== cell_entities_unique[i]) - tag_unique[i] = n - end + tag_unique = zeros(Int64, size(cell_entities_unique)) + + for i in 1:length(cell_entities_unique) + for (n, tag) in enumerate(mesh.face_labeling.tag_to_entities) + if any(tag .== cell_entities_unique[i]) + tag_unique[i] = n + end end end # create tags for cells - tags = zeros(Int64,length(cell_entities)) - for (i,entity) in enumerate(cell_entities_unique) - id = findall(cell_entities.==entity) + tags = zeros(Int64, length(cell_entities)) + for (i, entity) in enumerate(cell_entities_unique) + id = findall(cell_entities .== entity) tags[id] .= tag_unique[i] end - - return tags, mesh.face_labeling.tag_to_name -end - + return tags, mesh.face_labeling.tag_to_name +end -end \ No newline at end of file +end diff --git a/src/GeophysicalModelGenerator.jl b/src/GeophysicalModelGenerator.jl index 7d000e99..480771ca 100644 --- a/src/GeophysicalModelGenerator.jl +++ b/src/GeophysicalModelGenerator.jl @@ -6,12 +6,12 @@ using Base: String, show_index, Tuple, FieldDescStorage import GeoParams using .GeoParams export - @u_str, uconvert, upreffered, unit, ustrip, NoUnits, # Units - GeoUnit, GEO_units, SI_units, NO_units, AbstractGeoUnits, - Nondimensionalize, Nondimensionalize!, Dimensionalize, Dimensionalize!, - superscript, upreferred, GEO, SI, NONE, isDimensional, - km, m, cm, mm, Myrs, yr, s, MPa, Pa, Pas, K, C, kg, mol, - isDimensional, Value, NumValue, Unit, UnitValue + @u_str, uconvert, upreffered, unit, ustrip, NoUnits, # Units + GeoUnit, GEO_units, SI_units, NO_units, AbstractGeoUnits, + Nondimensionalize, Nondimensionalize!, Dimensionalize, Dimensionalize!, + superscript, upreferred, GEO, SI, NONE, isDimensional, + km, m, cm, mm, Myrs, yr, s, MPa, Pa, Pas, K, C, kg, mol, + isDimensional, Value, NumValue, Unit, UnitValue export ReadCSV_LatLon, meshgrid, voxel_grav @@ -23,7 +23,7 @@ export AbstractGeneralGrid using 
DelimitedFiles, Statistics # other packages -using WriteVTK, Colors, MeshIO, FileIO, Interpolations, Geodesy +using WriteVTK, Colors, MeshIO, FileIO, Interpolations, Geodesy export vtk_multiblock, vtk_save # Simplifies writing multiblock files export LLA diff --git a/src/IO.jl b/src/IO.jl index 59059481..be75073b 100644 --- a/src/IO.jl +++ b/src/IO.jl @@ -15,12 +15,12 @@ julia> Data_set = GeophysicalModelGenerator.GeoData(Lon3D,Lat3D,Depth3D,(Da julia> save_GMG("test",Data_set) ``` """ -function save_GMG(filename::String, data::Union{GeoData, CartData, UTMData}; dir=pwd()) - file_ext = joinpath(dir,filename*".jld2") +function save_GMG(filename::String, data::Union{GeoData, CartData, UTMData}; dir = pwd()) + file_ext = joinpath(dir, filename * ".jld2") jldsave(file_ext; data) return nothing -end +end """ load_GMG(filename::String, dir=pwd(); maxattempts=5) @@ -57,28 +57,26 @@ GeoData ``` """ -function load_GMG(filename::String, dir=pwd(); maxattempts=5) +function load_GMG(filename::String, dir = pwd(); maxattempts = 5) local_filename = "download_GMG_temp.jld2" - if contains(filename,"http") - file_ext = download_data(filename, local_filename, dir=dir, maxattempts=maxattempts) + if contains(filename, "http") + file_ext = download_data(filename, local_filename, dir = dir, maxattempts = maxattempts) else # local file - file_ext = joinpath(dir,filename*".jld2") + file_ext = joinpath(dir, filename * ".jld2") end # load data: - data = load_object(file_ext) + data = load_object(file_ext) # remove local temporary file - if contains(filename,"http") + if contains(filename, "http") rm(local_filename) end return data -end - - +end """ @@ -96,28 +94,28 @@ julia> download_data(url) ``` """ -function download_data(url::String, local_filename="temp.dat"; dir=pwd(), maxattempts=5) +function download_data(url::String, local_filename = "temp.dat"; dir = pwd(), maxattempts = 5) - if !contains(url,"http") + if !contains(url, "http") @warn "the url does not contain http; please double check that it worked" end #download remote file to a local temporary directory - file_ext = []; + file_ext = [] attempt = 0 - while attempt nmark_z = ParseValue_LaMEM_InputFile("SaltModels.dat","nmark_z",Int64, ar ``` """ -function ParseValue_LaMEM_InputFile(file,keyword,type; args::Union{String,Nothing}=nothing) +function ParseValue_LaMEM_InputFile(file, keyword, type; args::Union{String, Nothing} = nothing) value = nothing for line in eachline(file) line_strip = lstrip(line) # strip leading tabs/spaces # Strip comments - ind = findfirst("#", line) + ind = findfirst("#", line) if isnothing(ind) # no comments else - line_strip = line_strip[1:ind[1]-2]; + line_strip = line_strip[1:(ind[1] - 2)] end line_strip = rstrip(line_strip) # strip last tabs/spaces if startswith(line_strip, keyword) ind = findfirst("=", line_strip) - if type==String + if type == String value = split(line_strip)[3:end] else - value = parse.(type,split(line_strip)[3:end]) + value = parse.(type, split(line_strip)[3:end]) - if length(value)==1 - value=value[1]; + if length(value) == 1 + value = value[1] end end end @@ -147,18 +147,18 @@ end """ This parses a LaMEM command line argument string and checks if the keyword exists there """ -function ParseValue_CommandLineArgs(args,keyword,type, value) - args_vec = split(args,"-"*keyword) +function ParseValue_CommandLineArgs(args, keyword, type, value) + args_vec = split(args, "-" * keyword) - if length(args_vec)==2 + if length(args_vec) == 2 # we found the keyword args_vec_keyword = split(args_vec[2]) str = 
args_vec_keyword[1] # first block after keyword is what we want str_strip = replace(str, "," => " ") # in case we have an array of values value = parse.(type, split(str_strip)) # puts an array of values in a vector - if length(value)==1 - value=value[1]; + if length(value) == 1 + value = value[1] end end @@ -197,41 +197,41 @@ LaMEM Grid: ``` """ -function read_LaMEM_inputfile(file; args::Union{String,Nothing}=nothing ) +function read_LaMEM_inputfile(file; args::Union{String, Nothing} = nothing) # read information from file - nmark_x = ParseValue_LaMEM_InputFile(file,"nmark_x",Int64, args=args); - nmark_y = ParseValue_LaMEM_InputFile(file,"nmark_y",Int64, args=args); - nmark_z = ParseValue_LaMEM_InputFile(file,"nmark_z",Int64, args=args); + nmark_x = ParseValue_LaMEM_InputFile(file, "nmark_x", Int64, args = args) + nmark_y = ParseValue_LaMEM_InputFile(file, "nmark_y", Int64, args = args) + nmark_z = ParseValue_LaMEM_InputFile(file, "nmark_z", Int64, args = args) - nel_x = ParseValue_LaMEM_InputFile(file,"nel_x",Int64, args=args); - nel_y = ParseValue_LaMEM_InputFile(file,"nel_y",Int64, args=args); - nel_z = ParseValue_LaMEM_InputFile(file,"nel_z",Int64, args=args); + nel_x = ParseValue_LaMEM_InputFile(file, "nel_x", Int64, args = args) + nel_y = ParseValue_LaMEM_InputFile(file, "nel_y", Int64, args = args) + nel_z = ParseValue_LaMEM_InputFile(file, "nel_z", Int64, args = args) - coord_x = ParseValue_LaMEM_InputFile(file,"coord_x",Float64, args=args); - coord_y = ParseValue_LaMEM_InputFile(file,"coord_y",Float64, args=args); - coord_z = ParseValue_LaMEM_InputFile(file,"coord_z",Float64, args=args); + coord_x = ParseValue_LaMEM_InputFile(file, "coord_x", Float64, args = args) + coord_y = ParseValue_LaMEM_InputFile(file, "coord_y", Float64, args = args) + coord_z = ParseValue_LaMEM_InputFile(file, "coord_z", Float64, args = args) - nseg_x = ParseValue_LaMEM_InputFile(file,"nseg_x",Int64, args=args); - nseg_y = ParseValue_LaMEM_InputFile(file,"nseg_y",Int64, args=args); - nseg_z = ParseValue_LaMEM_InputFile(file,"nseg_z",Int64, args=args); + nseg_x = ParseValue_LaMEM_InputFile(file, "nseg_x", Int64, args = args) + nseg_y = ParseValue_LaMEM_InputFile(file, "nseg_y", Int64, args = args) + nseg_z = ParseValue_LaMEM_InputFile(file, "nseg_z", Int64, args = args) - bias_x = ParseValue_LaMEM_InputFile(file,"bias_x",Float64, args=args); - bias_y = ParseValue_LaMEM_InputFile(file,"bias_y",Float64, args=args); - bias_z = ParseValue_LaMEM_InputFile(file,"bias_z",Float64, args=args); + bias_x = ParseValue_LaMEM_InputFile(file, "bias_x", Float64, args = args) + bias_y = ParseValue_LaMEM_InputFile(file, "bias_y", Float64, args = args) + bias_z = ParseValue_LaMEM_InputFile(file, "bias_z", Float64, args = args) # compute information from file - W = coord_x[end]-coord_x[1]; - L = coord_y[end]-coord_y[1]; - H = coord_z[end]-coord_z[1]; + W = coord_x[end] - coord_x[1] + L = coord_y[end] - coord_y[1] + H = coord_z[end] - coord_z[1] - nel_x_tot = sum(nel_x); - nel_y_tot = sum(nel_y); - nel_z_tot = sum(nel_z); + nel_x_tot = sum(nel_x) + nel_y_tot = sum(nel_y) + nel_z_tot = sum(nel_z) - nump_x = nel_x_tot*nmark_x; - nump_y = nel_y_tot*nmark_y; - nump_z = nel_z_tot*nmark_z; + nump_x = nel_x_tot * nmark_x + nump_y = nel_y_tot * nmark_y + nump_z = nel_z_tot * nmark_z # Create 1D coordinate vectors (either regular or refined) xn, x = Create1D_grid_vector(coord_x, nel_x, nmark_x, nseg_x, bias_x) @@ -239,21 +239,23 @@ function read_LaMEM_inputfile(file; args::Union{String,Nothing}=nothing ) zn, z = 
Create1D_grid_vector(coord_z, nel_z, nmark_z, nseg_z, bias_z) # node grid - Xn,Yn,Zn = xyz_grid(xn, yn, zn); + Xn, Yn, Zn = xyz_grid(xn, yn, zn) # marker grid - X,Y,Z = xyz_grid(x, y, z); + X, Y, Z = xyz_grid(x, y, z) # finish Grid - Grid = LaMEM_grid( nmark_x, nmark_y, nmark_z, - nump_x, nump_y, nump_z, - nel_x_tot, nel_y_tot, nel_z_tot, - W, L, H, - coord_x, coord_y, coord_z, - x, y, z, - X, Y, Z, - xn, yn, zn, - Xn, Yn, Zn); + Grid = LaMEM_grid( + nmark_x, nmark_y, nmark_z, + nump_x, nump_y, nump_z, + nel_x_tot, nel_y_tot, nel_z_tot, + W, L, H, + coord_x, coord_y, coord_z, + x, y, z, + X, Y, Z, + xn, yn, zn, + Xn, Yn, Zn + ) return Grid end @@ -262,46 +264,46 @@ end Returns 1D coordinate vectors of grid points and of marker locations for a regular spacing """ function Create1D_grid_vector(coord::Vector{Float64}, nel::Int64, nmark::Int64, nseg::Union{Nothing, Int64}, bias::Union{Nothing, Float64}) - W = coord[end] - coord[1] - Δ = W / nel; - xn = range(coord[1], coord[end], length=nel+1); # coordinates of the normals to the cells + W = coord[end] - coord[1] + Δ = W / nel + xn = range(coord[1], coord[end], length = nel + 1) # coordinates of the normals to the cells - nump = nmark*nel - Δ_m = W / nump; - x = range(coord[1]+ Δ_m/2, coord[end] - Δ_m/2, length=nump); + nump = nmark * nel + Δ_m = W / nump + x = range(coord[1] + Δ_m / 2, coord[end] - Δ_m / 2, length = nump) return xn, x end """ Returns 1D coordinate vectors of grid points and of marker locations for a regular spacing """ -function Create1D_grid_vector(coord::Vector{T}, nel::Vector{I}, nmark::I, nseg::I, bias::Union{Nothing, T, Vector{T}}) where {T<:Float64, I<:Int64} +function Create1D_grid_vector(coord::Vector{T}, nel::Vector{I}, nmark::I, nseg::I, bias::Union{Nothing, T, Vector{T}}) where {T <: Float64, I <: Int64} if isnothing(bias) bias = ones(length(nel)) end - xn = make1DCoords(nseg, nel, coord, bias); - x = make1DMarkerCoords(xn, nmark); + xn = make1DCoords(nseg, nel, coord, bias) + x = make1DMarkerCoords(xn, nmark) return xn, x end function make1DMarkerCoords(xn::Array{Float64, 1}, nmark::Int64) # preallocate - nel = length(xn) - 1 - nump = nel * nmark; - x = zeros(Float64, nump); + nel = length(xn) - 1 + nump = nel * nmark + x = zeros(Float64, nump) # compute coordinates - for i = 1 : nel + for i in 1:nel # start of cell - x0 = xn[i]; + x0 = xn[i] # markers spacing inside cell - dx = (xn[i+1] - x0) / nmark; + dx = (xn[i + 1] - x0) / nmark # compute position - for j = 1 : nmark - x[nmark*i-(nmark-j)] = x0 + dx/2 + (j-1)*dx; + for j in 1:nmark + x[nmark * i - (nmark - j)] = x0 + dx / 2 + (j - 1) * dx end end @@ -310,20 +312,20 @@ end function make1DCoords(nseg::Int64, nel, coord::Array{Float64, 1}, bias) # preallocate - nel_tot = sum(nel); - x = zeros(Float64, nel_tot+1); + nel_tot = sum(nel) + x = zeros(Float64, nel_tot + 1) - for i = 1 : nseg + for i in 1:nseg # indices of this segment in the coordinate vector if i == 1 indE = nel[1] + 1 else - indE = sum(nel[1:i]) + 1; + indE = sum(nel[1:i]) + 1 end - indS = indE - nel[i]; + indS = indE - nel[i] # compute coordinates - x[indS:indE] = makeCoordSegment(coord[i], coord[i+1], nel[i], bias[i]); + x[indS:indE] = makeCoordSegment(coord[i], coord[i + 1], nel[i], bias[i]) end return x @@ -331,29 +333,29 @@ end function makeCoordSegment(xStart::Float64, xEnd::Float64, numCells::Int64, bias::Float64) # average cell size - avgSize = (xEnd - xStart) / numCells; + avgSize = (xEnd - xStart) / numCells # uniform case if bias == 1.0 - x = Array(xStart : avgSize : xEnd); - # 
non-uniform case + x = Array(xStart:avgSize:xEnd) + # non-uniform case else - x = zeros(Float64, numCells+1) + x = zeros(Float64, numCells + 1) # cell size limits - begSize = 2.0 * avgSize / (1.0 + bias); - endSize = bias * begSize; + begSize = 2.0 * avgSize / (1.0 + bias) + endSize = bias * begSize # cell size increment (negative for bias < 1) - dx = (endSize - begSize) / (numCells - 1); + dx = (endSize - begSize) / (numCells - 1) # generate coordinates - x[1] = xStart; - for i = 2 : numCells + 1 - x[i] = x[i-1] + begSize + (i-2)*dx; + x[1] = xStart + for i in 2:(numCells + 1) + x[i] = x[i - 1] + begSize + (i - 2) * dx end # overwrite last coordinate - x[end] = xEnd; + x[end] = xEnd end return x @@ -361,13 +363,13 @@ end # Print an overview of the LaMEM Grid struct: function Base.show(io::IO, d::LaMEM_grid) - println(io,"LaMEM Grid: ") - println(io," nel : ($(d.nel_x), $(d.nel_y), $(d.nel_z))") - println(io," marker/cell : ($(d.nmark_x), $(d.nmark_y), $(d.nmark_z))") - println(io," markers : ($(d.nump_x), $(d.nump_y), $(d.nump_z))") - println(io," x ϵ [$(d.coord_x[1]) : $(d.coord_x[end])]") - println(io," y ϵ [$(d.coord_y[1]) : $(d.coord_y[end])]") - println(io," z ϵ [$(d.coord_z[1]) : $(d.coord_z[end])]") + println(io, "LaMEM Grid: ") + println(io, " nel : ($(d.nel_x), $(d.nel_y), $(d.nel_z))") + println(io, " marker/cell : ($(d.nmark_x), $(d.nmark_y), $(d.nmark_z))") + println(io, " markers : ($(d.nump_x), $(d.nump_y), $(d.nump_z))") + println(io, " x ϵ [$(d.coord_x[1]) : $(d.coord_x[end])]") + println(io, " y ϵ [$(d.coord_y[1]) : $(d.coord_y[end])]") + return println(io, " z ϵ [$(d.coord_z[1]) : $(d.coord_z[end])]") end """ @@ -398,138 +400,141 @@ Writing LaMEM marker file -> ./markers/mdb.00000003.dat ``` """ -function save_LaMEM_markers_parallel(Grid::CartData; PartitioningFile=empty, directory="./markers", verbose=true, is64bit=false) +function save_LaMEM_markers_parallel(Grid::CartData; PartitioningFile = empty, directory = "./markers", verbose = true, is64bit = false) - x = ustrip.(Grid.x.val[:,1,1]); - y = ustrip.(Grid.y.val[1,:,1]); - z = ustrip.(Grid.z.val[1,1,:]); + x = ustrip.(Grid.x.val[:, 1, 1]) + y = ustrip.(Grid.y.val[1, :, 1]) + z = ustrip.(Grid.z.val[1, 1, :]) - if haskey(Grid.fields,:Phases) - Phases = Grid.fields[:Phases]; + if haskey(Grid.fields, :Phases) + Phases = Grid.fields[:Phases] else error("You must provide the field :Phases in the structure") end - if haskey(Grid.fields,:Temp) - Temp = Grid.fields[:Temp]; + if haskey(Grid.fields, :Temp) + Temp = Grid.fields[:Temp] else if verbose println("Field :Temp is not provided; setting it to zero") end - Temp = zeros(size(Phases)); + Temp = zeros(size(Phases)) end - if PartitioningFile==empty + if PartitioningFile == empty # in case we run this on 1 processor only - Nprocx = 1; - Nprocy = 1; - Nprocz = 1; - xc,yc,zc = x,y,z; + Nprocx = 1 + Nprocy = 1 + Nprocz = 1 + xc, yc, zc = x, y, z else - Nprocx,Nprocy,Nprocz, - xc,yc,zc, - nNodeX,nNodeY,nNodeZ = get_processor_partitioning(PartitioningFile, is64bit=is64bit) + Nprocx, Nprocy, Nprocz, + xc, yc, zc, + nNodeX, nNodeY, nNodeZ = get_processor_partitioning(PartitioningFile, is64bit = is64bit) if verbose - @show Nprocx,Nprocy,Nprocz, xc,yc,zc, nNodeX,nNodeY,nNodeZ + @show Nprocx, Nprocy, Nprocz, xc, yc, zc, nNodeX, nNodeY, nNodeZ end end - Nproc = Nprocx*Nprocy*Nprocz; - num, num_i, num_j, num_k = get_numscheme(Nprocx, Nprocy, Nprocz); + Nproc = Nprocx * Nprocy * Nprocz + num, num_i, num_j, num_k = get_numscheme(Nprocx, Nprocy, Nprocz) - xi,ix_start,ix_end = 
get_ind(x,xc,Nprocx); - yi,iy_start,iy_end = get_ind(y,yc,Nprocy); - zi,iz_start,iz_end = get_ind(z,zc,Nprocz); + xi, ix_start, ix_end = get_ind(x, xc, Nprocx) + yi, iy_start, iy_end = get_ind(y, yc, Nprocy) + zi, iz_start, iz_end = get_ind(z, zc, Nprocz) - x_start = ix_start[num_i[:]]; - y_start = iy_start[num_j[:]]; - z_start = iz_start[num_k[:]]; - x_end = ix_end[num_i[:]]; - y_end = iy_end[num_j[:]]; - z_end = iz_end[num_k[:]]; + x_start = ix_start[num_i[:]] + y_start = iy_start[num_j[:]] + z_start = iz_start[num_k[:]] + x_end = ix_end[num_i[:]] + y_end = iy_end[num_j[:]] + z_end = iz_end[num_k[:]] # Loop over all processors partition - for n=1:Nproc + for n in 1:Nproc # Extract coordinates for current processor - part_x = ustrip.(Grid.x.val[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]); - part_y = ustrip.(Grid.y.val[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]); - part_z = ustrip.(Grid.z.val[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]); - part_phs = Phases[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]; - part_T = Temp[x_start[n]:x_end[n],y_start[n]:y_end[n],z_start[n]:z_end[n]]; - num_particles = size(part_x,1)* size(part_x,2) * size(part_x,3); + part_x = ustrip.(Grid.x.val[x_start[n]:x_end[n], y_start[n]:y_end[n], z_start[n]:z_end[n]]) + part_y = ustrip.(Grid.y.val[x_start[n]:x_end[n], y_start[n]:y_end[n], z_start[n]:z_end[n]]) + part_z = ustrip.(Grid.z.val[x_start[n]:x_end[n], y_start[n]:y_end[n], z_start[n]:z_end[n]]) + part_phs = Phases[x_start[n]:x_end[n], y_start[n]:y_end[n], z_start[n]:z_end[n]] + part_T = Temp[x_start[n]:x_end[n], y_start[n]:y_end[n], z_start[n]:z_end[n]] + num_particles = size(part_x, 1) * size(part_x, 2) * size(part_x, 3) # Information vector per processor - num_prop = 5; # number of properties we save [x/y/z/phase/T] - lvec_info = num_particles; + num_prop = 5 # number of properties we save [x/y/z/phase/T] + lvec_info = num_particles - lvec_prtcls = zeros(Float64,num_prop*num_particles); + lvec_prtcls = zeros(Float64, num_prop * num_particles) - lvec_prtcls[1:num_prop:end] = part_x[:]; - lvec_prtcls[2:num_prop:end] = part_y[:]; - lvec_prtcls[3:num_prop:end] = part_z[:]; - lvec_prtcls[4:num_prop:end] = part_phs[:]; - lvec_prtcls[5:num_prop:end] = part_T[:]; + lvec_prtcls[1:num_prop:end] = part_x[:] + lvec_prtcls[2:num_prop:end] = part_y[:] + lvec_prtcls[3:num_prop:end] = part_z[:] + lvec_prtcls[4:num_prop:end] = part_phs[:] + lvec_prtcls[5:num_prop:end] = part_T[:] # Write output files - if ~isdir(directory); mkdir(directory); end # Create dir if not existent - fname = @sprintf "%s/mdb.%1.8d.dat" directory (n-1); # Name + if ~isdir(directory) + mkdir(directory) + end # Create dir if not existent + fname = @sprintf "%s/mdb.%1.8d.dat" directory (n - 1) # Name if verbose println("Writing LaMEM marker file -> $fname") # print info end - lvec_output = [lvec_info; lvec_prtcls]; # one vec with info about length + lvec_output = [lvec_info; lvec_prtcls] # one vec with info about length PetscBinaryWrite_Vec(fname, lvec_output) # Write PETSc vector as binary file end + return end # Internal routine to retrieve indices of local portion of the grid -function get_ind(x,xc,Nprocx) +function get_ind(x, xc, Nprocx) if Nprocx == 1 - xi = length(x); - ix_start = [1]; - ix_end = [length(x)]; + xi = length(x) + ix_start = [1] + ix_end = [length(x)] else - xi = zeros(Int64,Nprocx) - for k= 1:Nprocx - if k==1 - xi[k] = length(x[ (x .>=xc[k]) .& (x .<=xc[k+1]) ]); + xi = zeros(Int64, Nprocx) + for k in 1:Nprocx + if k 
== 1 + xi[k] = length(x[(x .>= xc[k]) .& (x .<= xc[k + 1])]) else - xi[k] = length(x[ (x.>xc[k]) .& (x.<=xc[k+1])]); + xi[k] = length(x[(x .> xc[k]) .& (x .<= xc[k + 1])]) end end - ix_start = cumsum( [0; xi[1:end-1]] ) .+ 1; - ix_end = cumsum(xi[1:end]); + ix_start = cumsum([0; xi[1:(end - 1)]]) .+ 1 + ix_end = cumsum(xi[1:end]) end - return xi,ix_start,ix_end + return xi, ix_start, ix_end end # Internal routine -function get_numscheme(Nprocx,Nprocy,Nprocz) - n = zeros(Int64, Nprocx*Nprocy*Nprocz) - nix = zeros(Int64, Nprocx*Nprocy*Nprocz) - njy = zeros(Int64, Nprocx*Nprocy*Nprocz) - nkz = zeros(Int64, Nprocx*Nprocy*Nprocz) - - num=0; - for k=1:Nprocz - for j=1:Nprocy - for i=1:Nprocx - num=num+1; - n[num] = num; - nix[num]= i; - njy[num]= j; - nkz[num]= k; +function get_numscheme(Nprocx, Nprocy, Nprocz) + n = zeros(Int64, Nprocx * Nprocy * Nprocz) + nix = zeros(Int64, Nprocx * Nprocy * Nprocz) + njy = zeros(Int64, Nprocx * Nprocy * Nprocz) + nkz = zeros(Int64, Nprocx * Nprocy * Nprocz) + + num = 0 + for k in 1:Nprocz + for j in 1:Nprocy + for i in 1:Nprocx + num = num + 1 + n[num] = num + nix[num] = i + njy[num] = j + nkz[num] = k end end end - return n,nix,njy,nkz + return n, nix, njy, nkz end @@ -543,15 +548,15 @@ Writes a vector `A` to disk, such that it can be read with `PetscBinaryRead` (wh function PetscBinaryWrite_Vec(filename, A) # Note: use "hton" to transfer to Big Endian type, which is what PETScBinaryRead expects - open(filename,"w+") do f - n = length(A); - nummark = A[1]; # number of markers + return open(filename, "w+") do f + n = length(A) + nummark = A[1] # number of markers - write(f,hton(Float64(1211214))); # header (not actually used) - write(f,hton(Float64(nummark))); # info about # of markers written + write(f, hton(Float64(1211214))) # header (not actually used) + write(f, hton(Float64(nummark))) # info about # of markers written - for i=2:n - write(f,hton(Float64(A[i]))); # Write data itself + for i in 2:n + write(f, hton(Float64(A[i]))) # Write data itself end end @@ -566,32 +571,32 @@ Reads a LaMEM processor partitioning file, used to create marker files, and retu By default this is done for a 32bit PETSc installation, which will fail if you actually use a 64bit version. 
""" -function get_processor_partitioning(filename; is64bit=false) +function get_processor_partitioning(filename; is64bit = false) if is64bit - typ=Int64 + typ = Int64 else - typ=Int32 + typ = Int32 end io = open(filename, "r") - nProcX = ntoh(read(io,typ)) - nProcY = ntoh(read(io,typ)) - nProcZ = ntoh(read(io,typ)) + nProcX = ntoh(read(io, typ)) + nProcY = ntoh(read(io, typ)) + nProcZ = ntoh(read(io, typ)) - nNodeX = ntoh(read(io,typ)) - nNodeY = ntoh(read(io,typ)) - nNodeZ = ntoh(read(io,typ)) + nNodeX = ntoh(read(io, typ)) + nNodeY = ntoh(read(io, typ)) + nNodeZ = ntoh(read(io, typ)) - iX = [ntoh(read(io,typ)) for i=1:nProcX+1]; - iY = [ntoh(read(io,typ)) for i=1:nProcY+1]; - iZ = [ntoh(read(io,typ)) for i=1:nProcZ+1]; + iX = [ntoh(read(io, typ)) for i in 1:(nProcX + 1)] + iY = [ntoh(read(io, typ)) for i in 1:(nProcY + 1)] + iZ = [ntoh(read(io, typ)) for i in 1:(nProcZ + 1)] - CharLength = ntoh(read(io,Float64)) - xcoor = [ntoh(read(io,Float64)) for i=1:nNodeX].*CharLength; - ycoor = [ntoh(read(io,Float64)) for i=1:nNodeY].*CharLength; - zcoor = [ntoh(read(io,Float64)) for i=1:nNodeZ].*CharLength; + CharLength = ntoh(read(io, Float64)) + xcoor = [ntoh(read(io, Float64)) for i in 1:nNodeX] .* CharLength + ycoor = [ntoh(read(io, Float64)) for i in 1:nNodeY] .* CharLength + zcoor = [ntoh(read(io, Float64)) for i in 1:nNodeZ] .* CharLength xc = xcoor[iX .+ 1] yc = ycoor[iY .+ 1] @@ -599,15 +604,13 @@ function get_processor_partitioning(filename; is64bit=false) close(io) - return nProcX,nProcY,nProcZ, - xc,yc,zc, - nNodeX,nNodeY,nNodeZ + return nProcX, nProcY, nProcZ, + xc, yc, zc, + nNodeX, nNodeY, nNodeZ end - - """ coord, Data_3D_Arrays, Name_Vec = read_data_VTR(fname) @@ -618,65 +621,65 @@ function read_data_VTR(fname, FullSize) file = open(fname, "r") header = true - num = 1; - CoordOffset = zeros(Int64,3); - Offset_Vec = []; - Name_Vec = []; Type_Vec = []; - NumComp_Vec = []; PieceExtent=[]; WholeExtent=[]; - while header==true - - line = readline(file) - line_strip = lstrip(line) + num = 1 + CoordOffset = zeros(Int64, 3) + Offset_Vec = [] + Name_Vec = []; Type_Vec = [] + NumComp_Vec = []; PieceExtent = []; WholeExtent = [] + while header == true + + line = readline(file) + line_strip = lstrip(line) if startswith(line_strip, "") # Read info where the coordinates are stored - Type, Name, NumberOfComponents, CoordOffset[1] = Parse_VTR_Line(readline(file)); num += 1 - Type, Name, NumberOfComponents, CoordOffset[2] = Parse_VTR_Line(readline(file)); num += 1 - Type, Name, NumberOfComponents, CoordOffset[3] = Parse_VTR_Line(readline(file)); num += 1 + Type, Name, NumberOfComponents, CoordOffset[1] = Parse_VTR_Line(readline(file)); num += 1 + Type, Name, NumberOfComponents, CoordOffset[2] = Parse_VTR_Line(readline(file)); num += 1 + Type, Name, NumberOfComponents, CoordOffset[3] = Parse_VTR_Line(readline(file)); num += 1 end if startswith(line_strip, "") - line_strip = lstrip(readline(file)) + line_strip = lstrip(readline(file)) while ~startswith(line_strip, "") - Type, Name, NumberOfComponents, Offset = Parse_VTR_Line(line_strip); num += 1 + Type, Name, NumberOfComponents, Offset = Parse_VTR_Line(line_strip); num += 1 - Offset_Vec = [Offset_Vec; Offset]; - Name_Vec = [Name_Vec; Name]; - Type_Vec = [Type_Vec; Type]; - NumComp_Vec = [NumComp_Vec; NumberOfComponents]; - line_strip = lstrip(readline(file)) + Offset_Vec = [Offset_Vec; Offset] + Name_Vec = [Name_Vec; Name] + Type_Vec = [Type_Vec; Type] + NumComp_Vec = [NumComp_Vec; NumberOfComponents] + line_strip = lstrip(readline(file)) end end 
if startswith(line_strip, "") - line_strip = lstrip(readline(file)) + line_strip = lstrip(readline(file)) while ~startswith(line_strip, "") - Type, Name, NumberOfComponents, Offset = Parse_VTR_Line(line_strip); num += 1 - - Offset_Vec = [Offset_Vec; Offset]; - Name_Vec = [Name_Vec; Name]; - Type_Vec = [Type_Vec; Type]; - NumComp_Vec = [NumComp_Vec; NumberOfComponents]; - line_strip = lstrip(readline(file)) - # if we have cell Data, for some reason we need to increment this by one. + Type, Name, NumberOfComponents, Offset = Parse_VTR_Line(line_strip); num += 1 + + Offset_Vec = [Offset_Vec; Offset] + Name_Vec = [Name_Vec; Name] + Type_Vec = [Type_Vec; Type] + NumComp_Vec = [NumComp_Vec; NumberOfComponents] + line_strip = lstrip(readline(file)) + # if we have cell Data, for some reason we need to increment this by one. PieceExtent[1:2:end] .+= 1 end end if startswith(line_strip, "1; ix_left = ix_left[2:end]; ix_right=ix_right[2:end]; end - if iy_left[1]>1; iy_left = iy_left[2:end]; iy_right=iy_right[2:end]; end - if iz_left[1]>1; iz_left = iz_left[2:end]; iz_right=iz_right[2:end]; end - - data3D_full[1:NumComp_Vec[i], ix_left, iy_left, iz_left] = data3D[1:NumComp_Vec[i],ix_right, iy_right, iz_right]; + ix_left = ix; ix_right = 1:length(ix_left) + iy_left = iy; iy_right = 1:length(iy_left) + iz_left = iz; iz_right = 1:length(iz_left) + if ix_left[1] > 1 + ix_left = ix_left[2:end]; ix_right = ix_right[2:end] + end + if iy_left[1] > 1 + iy_left = iy_left[2:end]; iy_right = iy_right[2:end] + end + if iz_left[1] > 1 + iz_left = iz_left[2:end]; iz_right = iz_right[2:end] + end + + data3D_full[1:NumComp_Vec[i], ix_left, iy_left, iz_left] = data3D[1:NumComp_Vec[i], ix_right, iy_right, iz_right] #data3D_full[1:NumComp_Vec[i], ix, iy, iz] = data3D; Data_3D_Arrays = [Data_3D_Arrays; data3D_full] end - i=length(Name_Vec); + i = length(Name_Vec) - if Type_Vec[i]=="UInt8" - data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints*NumComp_Vec[i]*sizeof(UInt8), DataType=UInt8) + if Type_Vec[i] == "UInt8" + data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints * NumComp_Vec[i] * sizeof(UInt8), DataType = UInt8) else - data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints*NumComp_Vec[i]*sizeof(Float32) ) + data3D = ReadBinaryData(file, start_bin, Offset_Vec[i], numPoints * NumComp_Vec[i] * sizeof(Float32)) end - data3D = getArray(data3D, PieceExtent, NumComp_Vec[i]); - data3D_full = zeros(Float64,NumComp_Vec[i],FullSize[1],FullSize[2],FullSize[3]) # Generate full d - data3D_full[1:NumComp_Vec[i], ix, iy, iz] = data3D[1:NumComp_Vec[i],1:length(ix),1:length(iy),1:length(iz)]; + data3D = getArray(data3D, PieceExtent, NumComp_Vec[i]) + data3D_full = zeros(Float64, NumComp_Vec[i], FullSize[1], FullSize[2], FullSize[3]) # Generate full d + data3D_full[1:NumComp_Vec[i], ix, iy, iz] = data3D[1:NumComp_Vec[i], 1:length(ix), 1:length(iy), 1:length(iz)] Data_3D_Arrays = [Data_3D_Arrays; data3D_full] return coord_x_full, coord_y_full, coord_z_full, Data_3D_Arrays, Name_Vec, NumComp_Vec, ix, iy, iz end - # Parses a line of a *.vtr file & retrieve Type/Name/NumberOfComponents/Offset - function Parse_VTR_Line(line) - line_strip = lstrip(line) +# Parses a line of a *.vtr file & retrieve Type/Name/NumberOfComponents/Offset +function Parse_VTR_Line(line) + line_strip = lstrip(line) # Retrieve Type if findfirst("type", line_strip) != nothing - id_start = findfirst("type", line_strip)[1]+6 + id_start = findfirst("type", line_strip)[1] + 6 - line_strip = line_strip[id_start:end] - id_end = 
findfirst("\"", line_strip)[1]-1 - Type = line_strip[1:id_end] - line_strip = line_strip[id_end:end] + line_strip = line_strip[id_start:end] + id_end = findfirst("\"", line_strip)[1] - 1 + Type = line_strip[1:id_end] + line_strip = line_strip[id_end:end] else - Type=nothing; + Type = nothing end # Retrieve Name if findfirst("Name", line_strip) != nothing - id_start = findfirst("Name", line_strip)[1]+6 - line_strip = line_strip[id_start:end] - id_end = findfirst("\"", line_strip)[1]-1 - Name = line_strip[1:id_end] - line_strip = line_strip[id_end:end] + id_start = findfirst("Name", line_strip)[1] + 6 + line_strip = line_strip[id_start:end] + id_end = findfirst("\"", line_strip)[1] - 1 + Name = line_strip[1:id_end] + line_strip = line_strip[id_end:end] else - Name=nothing + Name = nothing end # Retrieve number of components if findfirst("NumberOfComponents", line_strip) != nothing - id_start = findfirst("NumberOfComponents", line_strip)[1]+20 - line_strip = line_strip[id_start:end] - id_end = findfirst("\"", line_strip)[1]-1 - NumberOfComponents = parse(Int64,line_strip[1:id_end]) - line_strip = line_strip[id_end:end] + id_start = findfirst("NumberOfComponents", line_strip)[1] + 20 + line_strip = line_strip[id_start:end] + id_end = findfirst("\"", line_strip)[1] - 1 + NumberOfComponents = parse(Int64, line_strip[1:id_end]) + line_strip = line_strip[id_end:end] else - NumberOfComponents=nothing + NumberOfComponents = nothing end # Offset if findfirst("offset", line_strip) != nothing - id_start = findfirst("offset", line_strip)[1]+8 - line_strip = line_strip[id_start:end] - id_end = findfirst("\"", line_strip)[1]-1 - Offset = parse(Int64,line_strip[1:id_end]) + id_start = findfirst("offset", line_strip)[1] + 8 + line_strip = line_strip[id_start:end] + id_end = findfirst("\"", line_strip)[1] - 1 + Offset = parse(Int64, line_strip[1:id_end]) else - Offset=nothing; + Offset = nothing end return Type, Name, NumberOfComponents, Offset end function getArray(data, PieceExtent, NumComp) - data = reshape(data, (NumComp, PieceExtent[2]-PieceExtent[1]+1, PieceExtent[4]-PieceExtent[3]+1, PieceExtent[6]-PieceExtent[5]+1)) + data = reshape(data, (NumComp, PieceExtent[2] - PieceExtent[1] + 1, PieceExtent[4] - PieceExtent[3] + 1, PieceExtent[6] - PieceExtent[5] + 1)) return data end -function ReadBinaryData(file::IOStream, start_bin::Int64, Offset::Int64, BytesToRead; DataType=Float32) +function ReadBinaryData(file::IOStream, start_bin::Int64, Offset::Int64, BytesToRead; DataType = Float32) - seekstart(file); # go to start - skip(file, start_bin+Offset) # move to beginning of raw binary data - buffer = read(file,BytesToRead) # Read necesaary bytes - data = reinterpret(DataType,buffer) # Transfer to buffer + seekstart(file) # go to start + skip(file, start_bin + Offset) # move to beginning of raw binary data + buffer = read(file, BytesToRead) # Read necesaary bytes + data = reinterpret(DataType, buffer) # Transfer to buffer - data = Float64.(data[1:end]); # Transfer to Float64 + data = Float64.(data[1:end]) # Transfer to Float64 return data end @@ -833,44 +842,44 @@ ParaviewData fields: (:phase, :density, :visc_total, :visc_creep, :velocity, :pressure, :temperature, :dev_stress, :strain_rate, :j2_dev_stress, :j2_strain_rate, :plast_strain, :plast_dissip, :tot_displ, :yield, :moment_res, :cont_res) ``` """ -function read_data_PVTR(fname, dir) - file = open(joinpath(dir,fname), "r") +function read_data_PVTR(fname, dir) + file = open(joinpath(dir, fname), "r") header = true - num = 1; - FullSize= (1,1,1); - 
num_data_sets = 1; - Data_3D=[]; coord_x=[]; coord_y=[]; coord_z=[]; NumComp=[]; Names=[] - while header==true - - line = readline(file) - line_strip = lstrip(line) + num = 1 + FullSize = (1, 1, 1) + num_data_sets = 1 + Data_3D = []; coord_x = []; coord_y = []; coord_z = []; NumComp = []; Names = [] + while header == true + + line = readline(file) + line_strip = lstrip(line) if startswith(line_strip, "1 + if size(data, 1) > 1 Data_NamedTuple = NamedTuple{(Names1[i],)}((data_tuple,)) else Data_NamedTuple = NamedTuple{(Names1[i],)}((data_tuple[1],)) end Data_Array = [Data_Array; Data_NamedTuple] - num = num+NumComp[i]; + num = num + NumComp[i] end # Merge vector with tuples into a NamedTuple - fields = Data_Array[1]; - for i=2:length(Data_Array) + fields = Data_Array[1] + for i in 2:length(Data_Array) fields = merge(fields, Data_Array[i]) end # Create a ParaviewData struct from it. - X,Y,Z = xyz_grid(coord_x, coord_y, coord_z) - DataC = ParaviewData(X,Y,Z, fields); + X, Y, Z = xyz_grid(coord_x, coord_y, coord_z) + DataC = ParaviewData(X, Y, Z, fields) return DataC end @@ -934,34 +943,34 @@ This writes a topography file `Topo` for use in LaMEM, which should have size `( """ function save_LaMEM_topography(Topo::CartData, filename::String) - if (size(Topo.z.val,3) != 1) + if (size(Topo.z.val, 3) != 1) error("Not a valid `CartData' Topography file (size in 3rd dimension should be 1)") end - if !haskey(Topo.fields,:Topography) + if !haskey(Topo.fields, :Topography) error("The topography `CartData` structure requires a field :Topography") end # get grid properties - nx = Float64(size(Topo.fields.Topography,1)); - ny = Float64(size(Topo.fields.Topography,2)); - x0 = ustrip(Topo.x.val[1,1,1]); - y0 = ustrip(Topo.y.val[1,1,1]); + nx = Float64(size(Topo.fields.Topography, 1)) + ny = Float64(size(Topo.fields.Topography, 2)) + x0 = ustrip(Topo.x.val[1, 1, 1]) + y0 = ustrip(Topo.y.val[1, 1, 1]) # LaMEM wants a uniform grid, so interpolate if necessary - if length(unique(trunc.(diff(Topo.x.val[:,1,1]), digits=8))) > 1 || length(unique(trunc.(diff(Topo.y.val[1,:,1]), digits=8))) > 1 - x1 = ustrip(Topo.x.val[end,1,1]); - y1 = ustrip(Topo.y.val[1,end,1]); - dx = (x1-x0) / (nx-1); - dy = (y1-y0) / (ny-1); + if length(unique(trunc.(diff(Topo.x.val[:, 1, 1]), digits = 8))) > 1 || length(unique(trunc.(diff(Topo.y.val[1, :, 1]), digits = 8))) > 1 + x1 = ustrip(Topo.x.val[end, 1, 1]) + y1 = ustrip(Topo.y.val[1, end, 1]) + dx = (x1 - x0) / (nx - 1) + dy = (y1 - y0) / (ny - 1) - itp = LinearInterpolation((Topo.x.val[:,1,1], Topo.y.val[1,:,1]), ustrip.(Topo.fields.Topography[:,:,1])); - Topo_itp = [itp(x,y) for x in x0:dx:x1, y in y0:dy:y1]; + itp = LinearInterpolation((Topo.x.val[:, 1, 1], Topo.y.val[1, :, 1]), ustrip.(Topo.fields.Topography[:, :, 1])) + Topo_itp = [itp(x, y) for x in x0:dx:x1, y in y0:dy:y1] # Code the topograhic data into a vector Topo_vec = [ nx;ny;x0;y0;dx;dy; Topo_itp[:]] else - dx = ustrip(Topo.x.val[2,2,1]) - x0 - dy = ustrip(Topo.y.val[2,2,1]) - y0 + dx = ustrip(Topo.x.val[2, 2, 1]) - x0 + dy = ustrip(Topo.y.val[2, 2, 1]) - y0 # Code the topograhic data into a vector Topo_vec = [ nx;ny;x0;y0;dx;dy; ustrip.(Topo.fields.Topography[:])] end @@ -982,29 +991,29 @@ The directory where the LaMEM binary is can be specified; if not it is assumed t Likewise for the `mpiexec` directory (if not specified it is assumed to be available on the command line). 
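A hedged sketch of the surrounding workflow (topography file plus partitioning file); the model file, paths, and the `xyz_grid`/`CartData` construction are illustrative assumptions, not part of this change:

```julia
using GeophysicalModelGenerator

# Flat topography on a uniform grid; the field must be named :Topography
# and the array must have size (nx, ny, 1), as checked by save_LaMEM_topography
X, Y, Z = xyz_grid(-50:2.0:50, -40:2.0:40, 0.0)
Topo    = CartData(X, Y, Z, (Topography = Z,))
save_LaMEM_topography(Topo, "topography.dat")

# Generate a ProcessorPartitioning_*.bin file for a 4-rank run;
# "Model.dat" and the LaMEM binary directory are placeholders
create_partitioning_file("Model.dat", 4; LaMEM_dir = "/path/to/LaMEM/bin", verbose = true)
```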
""" -function create_partitioning_file(LaMEM_input::String,NumProc::Int64; LaMEM_dir::String=pwd(), LaMEM_options="", MPI_dir="", verbose=true) +function create_partitioning_file(LaMEM_input::String, NumProc::Int64; LaMEM_dir::String = pwd(), LaMEM_options = "", MPI_dir = "", verbose = true) # Create string to execute LaMEM - mpi_str = MPI_dir*"mpiexec -n $(NumProc) " - LaMEM_str = LaMEM_dir*"/"*"LaMEM -ParamFile "*LaMEM_input*" -mode save_grid " - str = mpi_str*LaMEM_str + mpi_str = MPI_dir * "mpiexec -n $(NumProc) " + LaMEM_str = LaMEM_dir * "/" * "LaMEM -ParamFile " * LaMEM_input * " -mode save_grid " + str = mpi_str * LaMEM_str - if verbose==true + if verbose == true println("Executing command: $str") end # Run - exit=run(`sh -c $str`, wait=false); + exit = run(`sh -c $str`, wait = false) # Retrieve newest file if success(exit) - files=readdir(glob"ProcessorPartitioning_*.bin") + files = readdir(glob"ProcessorPartitioning_*.bin") time_modified = zeros(length(files)) - for (i,file) in enumerate(files) + for (i, file) in enumerate(files) time_modified[i] = stat(file).mtime end - id = findall(time_modified.==maximum(time_modified)) # last modified - PartFile = files[id] - if verbose==true + id = findall(time_modified .== maximum(time_modified)) # last modified + PartFile = files[id] + if verbose == true println("Successfully generated PartitioningFile: $(PartFile[1])") end else @@ -1020,11 +1029,11 @@ end Returns 3D coordinate arrays """ -function coordinate_grids(Data::LaMEM_grid; cell=false) - X,Y,Z = Data.X, Data.Y, Data.Z +function coordinate_grids(Data::LaMEM_grid; cell = false) + X, Y, Z = Data.X, Data.Y, Data.Z if cell - X,Y,Z = average_q1(X),average_q1(Y), average_q1(Z) + X, Y, Z = average_q1(X), average_q1(Y), average_q1(Z) end - return X,Y,Z + return X, Y, Z end diff --git a/src/Paraview_collection.jl b/src/Paraview_collection.jl index a85efe50..31b701ef 100644 --- a/src/Paraview_collection.jl +++ b/src/Paraview_collection.jl @@ -17,7 +17,7 @@ Optional options - `file_extension`: file extension of the vtk files. Default is `.vts` but all `vt*` work. - `time`: Vector of the timesteps; if not specified, pseudo time steps are assigned. 
""" -function make_paraview_collection(; dir=pwd(), pvd_name=nothing, files=nothing, file_extension = ".vts", time = nothing) +function make_paraview_collection(; dir = pwd(), pvd_name = nothing, files = nothing, file_extension = ".vts", time = nothing) # if no files are given, use all vtm files in the directory curdir = pwd() diff --git a/src/Paraview_output.jl b/src/Paraview_output.jl index 1a78e3bf..e886d4f1 100644 --- a/src/Paraview_output.jl +++ b/src/Paraview_output.jl @@ -63,65 +63,65 @@ julia> write_paraview(Data_set, "test_Points", PointsData=true) ``` """ -function write_paraview(DataSet::ParaviewData, filename="test"; PointsData=false, pvd=nothing, time=nothing, directory=nothing, verbose=true) +function write_paraview(DataSet::ParaviewData, filename = "test"; PointsData = false, pvd = nothing, time = nothing, directory = nothing, verbose = true) # Error checking - if !(length(size(DataSet.x))==length(size(DataSet.y))==length(size(DataSet.z))) + if !(length(size(DataSet.x)) == length(size(DataSet.y)) == length(size(DataSet.z))) error("The X/Y/Z or Lon/Lat/Depth arrays should be 3 dimensional") end # Create directory if required if !isnothing(directory) mkpath(directory) - filename = joinpath(directory, filename); # add directory name to pathname + filename = joinpath(directory, filename) # add directory name to pathname end - # Create VT* file - if PointsData + # Create VT* file + if PointsData # in case we write a dataset with unconnected points (e.g., GPS data, EQ locations etc.) - npoints = length(DataSet.x) - cells = [MeshCell(VTKCellTypes.VTK_VERTEX, (i, )) for i = 1:npoints] - x = ustrip.(DataSet.x.val); x = x[:]; - y = ustrip.(DataSet.y.val); y = y[:]; - z = ustrip.(DataSet.z.val); z = z[:]; + npoints = length(DataSet.x) + cells = [MeshCell(VTKCellTypes.VTK_VERTEX, (i,)) for i in 1:npoints] + x = ustrip.(DataSet.x.val); x = x[:] + y = ustrip.(DataSet.y.val); y = y[:] + z = ustrip.(DataSet.z.val); z = z[:] - vtkfile = vtk_grid(filename, x,y,z, cells) + vtkfile = vtk_grid(filename, x, y, z, cells) else # for connected 3D grids, 2D planes or 1D lines - vtkfile = vtk_grid(filename, ustrip.(DataSet.x.val), ustrip.(DataSet.y.val), ustrip.(DataSet.z.val)) + vtkfile = vtk_grid(filename, ustrip.(DataSet.x.val), ustrip.(DataSet.y.val), ustrip.(DataSet.z.val)) end # Add data fields to VT* file - names = String.(collect(keys(DataSet.fields))); # this is how to retrieve the names of the data fields + names = String.(collect(keys(DataSet.fields))) # this is how to retrieve the names of the data fields for (index, name) in enumerate(names) - - if typeof(DataSet.fields[index])<: Tuple + + if typeof(DataSet.fields[index]) <: Tuple # if we do a tuple of velocities, it appears difficult to deal with units # This will require some more work unit_name = "" - Data = DataSet.fields[index] - if unit(Data[1][1])!=NoUnits + Data = DataSet.fields[index] + if unit(Data[1][1]) != NoUnits error("potential error as vector data fields have units; please save them with no units!") end else unit_name = unit(DataSet.fields[index][1]) - Data = ustrip.(DataSet.fields[index]) + Data = ustrip.(DataSet.fields[index]) end - - name_with_units = join([name," [$(unit_name)]"]); # add units to the name of the field - if PointsData - vtkfile[name_with_units, VTKPointData()] = Data[:]; + + name_with_units = join([name, " [$(unit_name)]"]) # add units to the name of the field + if PointsData + vtkfile[name_with_units, VTKPointData()] = Data[:] else - vtkfile[name_with_units] = Data; + vtkfile[name_with_units] = Data 
end end - outfiles = vtk_save(vtkfile); + outfiles = vtk_save(vtkfile) if verbose println("Saved file: $(outfiles[1])") end if !isnothing(pvd) - # Write movie + # Write movie pvd[time] = vtkfile end @@ -129,18 +129,18 @@ function write_paraview(DataSet::ParaviewData, filename="test"; PointsData=false end # Multiple dispatch such that we can also call the routine with GeoData input: -write_paraview(DataSet::GeoData, filename::Any; PointsData=false, pvd=nothing, time=nothing, directory=nothing) = write_paraview(convert(ParaviewData,DataSet), filename, PointsData=PointsData, pvd=pvd, time=time, directory=directory); +write_paraview(DataSet::GeoData, filename::Any; PointsData = false, pvd = nothing, time = nothing, directory = nothing) = write_paraview(convert(ParaviewData, DataSet), filename, PointsData = PointsData, pvd = pvd, time = time, directory = directory); """ write_paraview(DataSet::UTMData, filename::Any; PointsData=false, pvd=nothing, time=nothing, directory=nothing, verbose=true) Writes a `UTMData` structure to paraview. Note that this data is *not* transformed into an Earth-like framework, but remains cartesian instead. """ -function write_paraview(DataSet::UTMData, filename::Any; PointsData=false, pvd=nothing, time=nothing, directory=nothing, verbose=true) - +function write_paraview(DataSet::UTMData, filename::Any; PointsData = false, pvd = nothing, time = nothing, directory = nothing, verbose = true) + PVData = ParaviewData(DataSet.EW, DataSet.NS, DataSet.depth.val, DataSet.fields) - outfiles = write_paraview(PVData, filename, PointsData=PointsData, pvd=pvd, time=time, directory=directory, verbose=verbose); + outfiles = write_paraview(PVData, filename, PointsData = PointsData, pvd = pvd, time = time, directory = directory, verbose = verbose) return outfiles end @@ -149,16 +149,15 @@ end Writes a `CartData` structure to paraview. 
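A minimal sketch of the `CartData` method described above; the grid and field values are made up for illustration:

```julia
using GeophysicalModelGenerator

X, Y, Z = xyz_grid(0:1.0:10, 0:1.0:10, -20:1.0:0)
Data    = CartData(X, Y, Z, (Depth = Z,))

# Writes Data to a .vts file and prints the file name (verbose = true)
write_paraview(Data, "cart_model"; directory = "vtk_out", verbose = true)
```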
""" -function write_paraview(DataSet::CartData, filename::Any; PointsData=false, pvd=nothing, time=nothing, directory=nothing, verbose=true) - +function write_paraview(DataSet::CartData, filename::Any; PointsData = false, pvd = nothing, time = nothing, directory = nothing, verbose = true) + PVData = ParaviewData(DataSet.x.val, DataSet.y.val, DataSet.z.val, DataSet.fields) - outfiles = write_paraview(PVData, filename, PointsData=PointsData, pvd=pvd, time=time, directory=directory, verbose=verbose); + outfiles = write_paraview(PVData, filename, PointsData = PointsData, pvd = pvd, time = time, directory = directory, verbose = verbose) return outfiles end - """ pvd = movie_paraview(; name="Movie", pvd=pvd, Finalize::Bool=false, Initialize::Bool=true) @@ -180,94 +179,94 @@ end movie_paraview(pvd=movie, Finalize=true) ``` """ -function movie_paraview(; name="Movie", pvd=nothing, Finalize::Bool=false, Initialize::Bool=true) +function movie_paraview(; name = "Movie", pvd = nothing, Finalize::Bool = false, Initialize::Bool = true) if (Initialize) & !(Finalize) - pvd = paraview_collection(name) + pvd = paraview_collection(name) end if Finalize vtk_save(pvd) println("Saved PVD file") end - + return pvd end - + """ write_paraview(DataSet::Q1Data, filename="test"; directory=nothing, pvd=nothing, time=nothing, verbose=true) Writes a `Q1Data` dataset to disk, which has cell and vertex field """ -function write_paraview(DataSet::Q1Data, filename="test"; directory=nothing, pvd=nothing, time=nothing, verbose=true) +function write_paraview(DataSet::Q1Data, filename = "test"; directory = nothing, pvd = nothing, time = nothing, verbose = true) # Error checking - if !(length(size(DataSet.x))==length(size(DataSet.y))==length(size(DataSet.z))) + if !(length(size(DataSet.x)) == length(size(DataSet.y)) == length(size(DataSet.z))) error("The X/Y/Z should be 3 dimensional") end # Create directory if required if !isnothing(directory) mkpath(directory) - filename = joinpath(directory, filename); # add directory name to pathname + filename = joinpath(directory, filename) # add directory name to pathname end - # Create VT* file - vtkfile = vtk_grid(filename, ustrip.(DataSet.x.val), ustrip.(DataSet.y.val), ustrip.(DataSet.z.val)) - + # Create VT* file + vtkfile = vtk_grid(filename, ustrip.(DataSet.x.val), ustrip.(DataSet.y.val), ustrip.(DataSet.z.val)) + # Add vertex data fields to VT* file - names = String.(collect(keys(DataSet.fields))); # this is how to retrieve the names of the data fields + names = String.(collect(keys(DataSet.fields))) # this is how to retrieve the names of the data fields for (index, name) in enumerate(names) - - if typeof(DataSet.fields[index])<: Tuple + + if typeof(DataSet.fields[index]) <: Tuple # if we do a tuple of velocities, it appears difficult to deal with units # This will require some more work unit_name = "" - Data = DataSet.fields[index] - if unit(Data[1][1])!=NoUnits + Data = DataSet.fields[index] + if unit(Data[1][1]) != NoUnits error("potential error as vector data fields have units; please save them with no units!") end else unit_name = unit(DataSet.fields[index][1]) - Data = ustrip.(DataSet.fields[index]) + Data = ustrip.(DataSet.fields[index]) end - - name_with_units = join([name," [$(unit_name)]"]); # add units to the name of the field - #if PointsData - # vtkfile[name_with_units, VTKPointData()] = Data[:]; + + name_with_units = join([name, " [$(unit_name)]"]) # add units to the name of the field + #if PointsData + # vtkfile[name_with_units, VTKPointData()] = Data[:]; #else - 
vtkfile[name_with_units] = Data; + vtkfile[name_with_units] = Data #end end # Add cell data fields to VT* file - names = String.(collect(keys(DataSet.cellfields))); # this is how to retrieve the names of the data fields + names = String.(collect(keys(DataSet.cellfields))) # this is how to retrieve the names of the data fields for (index, name) in enumerate(names) - if typeof(DataSet.cellfields[index])<: Tuple + if typeof(DataSet.cellfields[index]) <: Tuple # if we do a tuple of velocities, it appears difficult to deal with units # This will require some more work unit_name = "" - Data = DataSet.cellfields[index] - if unit(Data[1][1])!=NoUnits + Data = DataSet.cellfields[index] + if unit(Data[1][1]) != NoUnits error("potential error as vector data fields have units; please save them with no units!") end else unit_name = unit(DataSet.cellfields[index][1]) - Data = ustrip.(DataSet.cellfields[index]) + Data = ustrip.(DataSet.cellfields[index]) end - name_with_units = join([name," [$(unit_name)]"]); # add units to the name of the field - vtkfile[name_with_units, VTKCellData()] = Data[:]; - + name_with_units = join([name, " [$(unit_name)]"]) # add units to the name of the field + vtkfile[name_with_units, VTKCellData()] = Data[:] + end - outfiles = vtk_save(vtkfile); + outfiles = vtk_save(vtkfile) if verbose println("Saved file: $(outfiles[1])") end if !isnothing(pvd) - # Write movie + # Write movie pvd[time] = vtkfile end @@ -275,93 +274,91 @@ function write_paraview(DataSet::Q1Data, filename="test"; directory=nothing, pvd end - - """ write_paraview(DataSet::FEData, filename="test"; directory=nothing, pvd=nothing, time=nothing, verbose=true) Writes a `FEData` dataset (general finite element) to disk, which has cell and vertex field """ -function write_paraview(DataSet::FEData, filename="test"; directory=nothing, pvd=nothing, time=nothing, verbose=true) +function write_paraview(DataSet::FEData, filename = "test"; directory = nothing, pvd = nothing, time = nothing, verbose = true) # Create directory if required if !isnothing(directory) mkpath(directory) - filename = joinpath(directory, filename); # add directory name to pathname + filename = joinpath(directory, filename) # add directory name to pathname end - + connectivity = DataSet.connectivity - if size(DataSet.connectivity,1) == 4 + if size(DataSet.connectivity, 1) == 4 celltype = VTKCellTypes.VTK_TETRA - elseif size(DataSet.connectivity,1) == 8 + elseif size(DataSet.connectivity, 1) == 8 celltype = VTKCellTypes.VTK_HEXAHEDRON # we need to reorder this as pTatin uses a different ordering than VTK - id_reorder = [1,2,4,3,5,6,8,7] - connectivity = connectivity[id_reorder,:] + id_reorder = [1, 2, 4, 3, 5, 6, 8, 7] + connectivity = connectivity[id_reorder, :] else error("This element is not yet implemented") end - - # Create VTU file + + # Create VTU file points = DataSet.vertices - cells = MeshCell[]; - for i = 1: size(connectivity,2) - push!(cells, MeshCell(celltype, connectivity[:,i])) + cells = MeshCell[] + for i in 1:size(connectivity, 2) + push!(cells, MeshCell(celltype, connectivity[:, i])) end vtkfile = vtk_grid(filename, points, cells) # Add vertex data fields to VT* file - names = String.(collect(keys(DataSet.fields))); # this is how to retrieve the names of the data fields + names = String.(collect(keys(DataSet.fields))) # this is how to retrieve the names of the data fields for (index, name) in enumerate(names) - - if typeof(DataSet.fields[index])<: Tuple + + if typeof(DataSet.fields[index]) <: Tuple # if we do a tuple of velocities, 
it appears difficult to deal with units # This will require some more work unit_name = "" - Data = DataSet.fields[index] - if unit(Data[1][1])!=NoUnits + Data = DataSet.fields[index] + if unit(Data[1][1]) != NoUnits error("potential error as vector data fields have units; please save them with no units!") end else unit_name = unit(DataSet.fields[index][1]) - Data = ustrip.(DataSet.fields[index]) + Data = ustrip.(DataSet.fields[index]) end - - name_with_units = join([name," [$(unit_name)]"]); # add units to the name of the field - vtkfile[name_with_units] = Data; + + name_with_units = join([name, " [$(unit_name)]"]) # add units to the name of the field + vtkfile[name_with_units] = Data end # Add cell data fields to VT* file - names = String.(collect(keys(DataSet.cellfields))); # this is how to retrieve the names of the data fields + names = String.(collect(keys(DataSet.cellfields))) # this is how to retrieve the names of the data fields for (index, name) in enumerate(names) - if typeof(DataSet.cellfields[index])<: Tuple + if typeof(DataSet.cellfields[index]) <: Tuple # if we do a tuple of velocities, it appears difficult to deal with units # This will require some more work unit_name = "" - Data = DataSet.cellfields[index] - if unit(Data[1][1])!=NoUnits + Data = DataSet.cellfields[index] + if unit(Data[1][1]) != NoUnits error("potential error as vector data fields have units; please save them with no units!") end else unit_name = unit(DataSet.cellfields[index][1]) - Data = ustrip.(DataSet.cellfields[index]) + Data = ustrip.(DataSet.cellfields[index]) end - name_with_units = join([name," [$(unit_name)]"]); # add units to the name of the field - vtkfile[name_with_units, VTKCellData()] = Data[:]; + name_with_units = join([name, " [$(unit_name)]"]) # add units to the name of the field + vtkfile[name_with_units, VTKCellData()] = Data[:] end - outfiles = vtk_save(vtkfile); + outfiles = vtk_save(vtkfile) if verbose println("Saved file: $(outfiles[1])") end if !isnothing(pvd) - # Write movie + # Write movie pvd[time] = vtkfile end return pvd -end \ No newline at end of file +end diff --git a/src/ProfileProcessing.jl b/src/ProfileProcessing.jl index ad0a7815..8930da9a 100644 --- a/src/ProfileProcessing.jl +++ b/src/ProfileProcessing.jl @@ -22,26 +22,26 @@ Structure that holds profile data (interpolated/projected on the profile) Structure to store cross section data """ mutable struct ProfileData - vertical :: Bool # vertical:true, horizontal:false - start_lonlat :: Union{Nothing, Tuple{Float64,Float64}} - end_lonlat :: Union{Nothing, Tuple{Float64,Float64}} - depth :: Union{Nothing, Float64} - VolData :: Union{Nothing, GeophysicalModelGenerator.GeoData} - SurfData :: Union{Nothing, NamedTuple} - PointData :: Union{Nothing, NamedTuple} - - function ProfileData(;kwargs...) # this constructor allows to define only certain fields and leave the others blank - K = new(true,nothing,nothing,nothing,nothing,nothing,nothing) + vertical::Bool # vertical:true, horizontal:false + start_lonlat::Union{Nothing, Tuple{Float64, Float64}} + end_lonlat::Union{Nothing, Tuple{Float64, Float64}} + depth::Union{Nothing, Float64} + VolData::Union{Nothing, GeophysicalModelGenerator.GeoData} + SurfData::Union{Nothing, NamedTuple} + PointData::Union{Nothing, NamedTuple} + + function ProfileData(; kwargs...) 
# this constructor allows to define only certain fields and leave the others blank + K = new(true, nothing, nothing, nothing, nothing, nothing, nothing) for (key, value) in kwargs # make sure that start and end point are given as tuples of Float64 - if key==Symbol("start_lonlat") - setfield!(K, key, convert(Tuple{Float64,Float64},Float64.(value))) + if key == Symbol("start_lonlat") + setfield!(K, key, convert(Tuple{Float64, Float64}, Float64.(value))) setfield!(K, :vertical, true) - elseif key==Symbol("end_lonlat") - setfield!(K, key, convert(Tuple{Float64,Float64},Float64.(value))) + elseif key == Symbol("end_lonlat") + setfield!(K, key, convert(Tuple{Float64, Float64}, Float64.(value))) setfield!(K, :vertical, true) - elseif key==Symbol("depth") - setfield!(K, key, convert(Float64,value)) + elseif key == Symbol("depth") + setfield!(K, key, convert(Float64, value)) setfield!(K, :vertical, false) else setfield!(K, key, value) @@ -84,24 +84,24 @@ Structure that stores info about a GMG Dataset, which is useful to collect a wid """ mutable struct GMG_Dataset - Name :: String # Name of the dataset - Type :: String # Volumetric, Surface, Point, Screenshot - DirName :: String # Directory name or url of dataset - active :: Bool # active in the GUI or not? + Name::String # Name of the dataset + Type::String # Volumetric, Surface, Point, Screenshot + DirName::String # Directory name or url of dataset + active::Bool # active in the GUI or not? - function GMG_Dataset(Name::String,Type::String,DirName::String,active::Bool=false) + function GMG_Dataset(Name::String, Type::String, DirName::String, active::Bool = false) Type = strip(Type) Name = strip(Name) DirName = strip(DirName) - if !any(occursin.(Type,["Volume","Surface","Point","Screenshot","Topography"])) + if !any(occursin.(Type, ["Volume", "Surface", "Point", "Screenshot", "Topography"])) error("Type should be either: Volume,Surface,Point,Topography or Screenshot. 
Is: $Type.") end - if DirName[end-4:end] == ".jld2" - DirName = DirName[1:end-5] + if DirName[(end - 4):end] == ".jld2" + DirName = DirName[1:(end - 5)] end - new(Name,Type,DirName,active) + return new(Name, Type, DirName, active) end end @@ -150,21 +150,21 @@ Here, the meaning of the variables is: """ function load_dataset_file(file_datasets::String) - datasets = readdlm(file_datasets,',',skipstart =1); # read information on datasets to be used from text file - n = size(datasets,1) + datasets = readdlm(file_datasets, ',', skipstart = 1) # read information on datasets to be used from text file + n = size(datasets, 1) # Deal with last column (in case it is not specified or not specified everywhere) - if size(datasets,2)==4 - active = datasets[:,4] - active = replace(active,""=>true) - active = Bool.(active) - elseif size(datasets,2)==3 - active = ones(Bool,n) + if size(datasets, 2) == 4 + active = datasets[:, 4] + active = replace(active, "" => true) + active = Bool.(active) + elseif size(datasets, 2) == 3 + active = ones(Bool, n) end Datasets = Vector{GMG_Dataset}() - for i=1:n - push!(Datasets, GMG_Dataset( String(datasets[i,1]), String(datasets[i,3]), String(datasets[i,2]), active[i])) + for i in 1:n + push!(Datasets, GMG_Dataset(String(datasets[i, 1]), String(datasets[i, 3]), String(datasets[i, 2]), active[i])) end return Datasets @@ -179,30 +179,30 @@ This loads all the active datasets in `Datasets`, and returns a NamedTuple with function load_GMG(Datasets::Vector{GMG_Dataset}) - DataPoint = NamedTuple(); - DataSurf = NamedTuple(); - DataScreenshot = NamedTuple(); - DataVol = NamedTuple(); - DataTopo = NamedTuple(); + DataPoint = NamedTuple() + DataSurf = NamedTuple() + DataScreenshot = NamedTuple() + DataVol = NamedTuple() + DataTopo = NamedTuple() for data in Datasets if data.active # load into NamedTuple (I'm sure this can be done more compact somehow..) loaded_data = load_GMG(data) - if data.Type=="Volume" - DataVol = merge(DataVol,loaded_data) - elseif data.Type=="Surface" - DataSurf = merge(DataSurf,loaded_data) - elseif data.Type=="Point" - DataPoint = merge(DataPoint,loaded_data) - elseif data.Type=="Screenshot" - DataScreenshot = merge(DataScreenshot,loaded_data) - elseif data.Type=="Topography" - DataTopo = merge(DataTopo,loaded_data) + if data.Type == "Volume" + DataVol = merge(DataVol, loaded_data) + elseif data.Type == "Surface" + DataSurf = merge(DataSurf, loaded_data) + elseif data.Type == "Point" + DataPoint = merge(DataPoint, loaded_data) + elseif data.Type == "Screenshot" + DataScreenshot = merge(DataScreenshot, loaded_data) + elseif data.Type == "Topography" + DataTopo = merge(DataTopo, loaded_data) end end end - Data = (Volume=DataVol, Surface=DataSurf, Point=DataPoint, Screenshot=DataScreenshot, Topography=DataTopo) + Data = (Volume = DataVol, Surface = DataSurf, Point = DataPoint, Screenshot = DataScreenshot, Topography = DataTopo) return Data end @@ -215,31 +215,39 @@ This takes different volumetric datasets (specified in `VolData`) & merges them You need to either provide the "reference" dataset within the NamedTuple (`dataset_preferred`), or the lat/lon/depth and dimensions of the new dataset. 
""" -function combine_vol_data(VolData::NamedTuple; lat=nothing, lon=nothing, depth=nothing, dims=(100,100,100), dataset_preferred = 1) +function combine_vol_data(VolData::NamedTuple; lat = nothing, lon = nothing, depth = nothing, dims = (100, 100, 100), dataset_preferred = 1) # Get dimensions of new Data_set i = dataset_preferred - if isnothing(lon); lon = extrema(VolData[i].lon.val); end - if isnothing(lat); lat = extrema(VolData[i].lat.val); end - if isnothing(depth); depth = extrema(VolData[i].depth.val); end - if isnothing(dims); dims = size(VolData[i].depth.val); end + if isnothing(lon) + lon = extrema(VolData[i].lon.val) + end + if isnothing(lat) + lat = extrema(VolData[i].lat.val) + end + if isnothing(depth) + depth = extrema(VolData[i].depth.val) + end + if isnothing(dims) + dims = size(VolData[i].depth.val) + end # Create reference dataset - lon1D = range(lon..., dims[1]) - lat1D = range(lat..., dims[2]) - z1D = range(depth..., dims[3]) - Lon,Lat,Z = xyz_grid(lon1D, lat1D, z1D); - DataSetRef = GeoData(Lon, Lat, Z, (Temporary=Z,)) + lon1D = range(lon..., dims[1]) + lat1D = range(lat..., dims[2]) + z1D = range(depth..., dims[3]) + Lon, Lat, Z = xyz_grid(lon1D, lat1D, z1D) + DataSetRef = GeoData(Lon, Lat, Z, (Temporary = Z,)) # Loop through all datasets DataSet_Names = String.(keys(VolData)) - for (i,DataSet) in enumerate(VolData) - DataSet_interp = interpolate_datafields(DataSet, Lon,Lat,Z) - names_fields = String.(keys(DataSet_interp.fields)) - for (j,name) in enumerate(names_fields) - name_new_field = DataSet_Names[i]*"_"*name # name of new field includes name of dataset + for (i, DataSet) in enumerate(VolData) + DataSet_interp = interpolate_datafields(DataSet, Lon, Lat, Z) + names_fields = String.(keys(DataSet_interp.fields)) + for (j, name) in enumerate(names_fields) + name_new_field = DataSet_Names[i] * "_" * name # name of new field includes name of dataset # Note: we use ustrip here, and thereby remove the values, as the cross-section routine made problems - DataSetRef = addfield(DataSetRef,name_new_field, ustrip.(DataSet_interp.fields[j])) + DataSetRef = addfield(DataSetRef, name_new_field, ustrip.(DataSet_interp.fields[j])) end end @@ -255,19 +263,19 @@ end Creates a cross-section through a volumetric 3D dataset `VolData` with the data supplied in `Profile`. 
`Depth_extent` can be the minimum & maximum depth for vertical profiles """ -function create_profile_volume!(Profile::ProfileData, VolData::AbstractGeneralGrid; DimsVolCross::NTuple=(100,100), Depth_extent=nothing) +function create_profile_volume!(Profile::ProfileData, VolData::AbstractGeneralGrid; DimsVolCross::NTuple = (100, 100), Depth_extent = nothing) if Profile.vertical # take a vertical cross section - cross_tmp = cross_section(VolData,dims=DimsVolCross, Start=Profile.start_lonlat,End=Profile.end_lonlat,Depth_extent=Depth_extent) # create the cross section + cross_tmp = cross_section(VolData, dims = DimsVolCross, Start = Profile.start_lonlat, End = Profile.end_lonlat, Depth_extent = Depth_extent) # create the cross section # flatten cross section and add this data to the structure - x_profile = flatten_cross_section(cross_tmp,Start=Profile.start_lonlat) - cross_tmp = addfield(cross_tmp,"x_profile",x_profile) + x_profile = flatten_cross_section(cross_tmp, Start = Profile.start_lonlat) + cross_tmp = addfield(cross_tmp, "x_profile", x_profile) else # take a horizontal cross section - cross_tmp = cross_section(VolData, Depth_level=Profile.depth, Interpolate=true, dims=DimsVolCross) + cross_tmp = cross_section(VolData, Depth_level = Profile.depth, Interpolate = true, dims = DimsVolCross) end Profile.VolData = cross_tmp # assign to Profile data structure @@ -276,27 +284,27 @@ end ### internal function to process surface data - contrary to the volume data, we here have to save lon/lat/depth pairs for every surface data set, so we create a NamedTuple of GeoData data sets -function create_profile_surface!(Profile::ProfileData, DataSet::NamedTuple; DimsSurfCross=(100,)) +function create_profile_surface!(Profile::ProfileData, DataSet::NamedTuple; DimsSurfCross = (100,)) num_datasets = length(DataSet) tmp = NamedTuple() # initialize empty one DataSetName = keys(DataSet) # Names of the datasets - for idata = 1:num_datasets + for idata in 1:num_datasets # load data set --> each data set is a single GeoData structure, so we'll only have to get the respective key to load the correct type data_tmp = DataSet[idata] if Profile.vertical # take a vertical cross section - data = cross_section_surface(data_tmp, dims=DimsSurfCross, Start=Profile.start_lonlat, End=Profile.end_lonlat) # create the cross section + data = cross_section_surface(data_tmp, dims = DimsSurfCross, Start = Profile.start_lonlat, End = Profile.end_lonlat) # create the cross section # flatten cross section and add this data to the structure - x_profile = flatten_cross_section(data,Start=Profile.start_lonlat) - data = addfield(data,"x_profile",x_profile) + x_profile = flatten_cross_section(data, Start = Profile.start_lonlat) + data = addfield(data, "x_profile", x_profile) # add the data set as a NamedTuple - data_NT = NamedTuple{(DataSetName[idata],)}((data,)) - tmp = merge(tmp,data_NT) + data_NT = NamedTuple{(DataSetName[idata],)}((data,)) + tmp = merge(tmp, data_NT) else # we do not have this implemented @@ -310,40 +318,40 @@ end ### function to process point data - contrary to the volume data, we here have to save lon/lat/depth pairs for every point data set -function create_profile_point!(Profile::ProfileData, DataSet::NamedTuple; section_width=50km) +function create_profile_point!(Profile::ProfileData, DataSet::NamedTuple; section_width = 50km) num_datasets = length(DataSet) tmp = NamedTuple() # initialize empty one DataSetName = keys(DataSet) # Names of the datasets - for idata = 1:num_datasets + for idata in 1:num_datasets # load 
data set --> each data set is a single GeoData structure, so we'll only have to get the respective key to load the correct type data_tmp = DataSet[idata] if Profile.vertical # take a vertical cross section - data = cross_section_points(data_tmp, Start=Profile.start_lonlat, End=Profile.end_lonlat, section_width = section_width) # create the cross section + data = cross_section_points(data_tmp, Start = Profile.start_lonlat, End = Profile.end_lonlat, section_width = section_width) # create the cross section if isnothing(data) # do nothing, as there is no data else # flatten cross section and add this data to the structure - x_profile = flatten_cross_section(data,Start=Profile.start_lonlat) - data = addfield(data,"x_profile",x_profile) + x_profile = flatten_cross_section(data, Start = Profile.start_lonlat) + data = addfield(data, "x_profile", x_profile) # add the data set as a NamedTuple - data_NT = NamedTuple{(DataSetName[idata],)}((data,)) - tmp = merge(tmp,data_NT) + data_NT = NamedTuple{(DataSetName[idata],)}((data,)) + tmp = merge(tmp, data_NT) Profile.PointData = tmp # assign to profile data structure end else # take a horizontal cross section - data = cross_section(data_tmp, Depth_level=Profile.depth, section_width = section_width) # create the cross section + data = cross_section(data_tmp, Depth_level = Profile.depth, section_width = section_width) # create the cross section if isnothing(data) # do nothing, as there is no data else # add the data set as a NamedTuple - data_NT = NamedTuple{(DataSetName[idata],)}((data,)) - tmp = merge(tmp,data_NT) + data_NT = NamedTuple{(DataSetName[idata],)}((data,)) + tmp = merge(tmp, data_NT) Profile.PointData = tmp # assign to profile data structure end end @@ -358,13 +366,13 @@ end Extracts data along a vertical or horizontal profile """ -function extract_ProfileData!(Profile::ProfileData,VolData::Union{Nothing,GeoData}=nothing, SurfData::NamedTuple=NamedTuple(), PointData::NamedTuple=NamedTuple(); DimsVolCross=(100,100),Depth_extent=nothing,DimsSurfCross=(100,),section_width=50km) +function extract_ProfileData!(Profile::ProfileData, VolData::Union{Nothing, GeoData} = nothing, SurfData::NamedTuple = NamedTuple(), PointData::NamedTuple = NamedTuple(); DimsVolCross = (100, 100), Depth_extent = nothing, DimsSurfCross = (100,), section_width = 50km) if !isnothing(VolData) - create_profile_volume!(Profile, VolData; DimsVolCross=DimsVolCross, Depth_extent=Depth_extent) + create_profile_volume!(Profile, VolData; DimsVolCross = DimsVolCross, Depth_extent = Depth_extent) end - create_profile_surface!(Profile, SurfData, DimsSurfCross=DimsSurfCross) - create_profile_point!(Profile, PointData, section_width=section_width) + create_profile_surface!(Profile, SurfData, DimsSurfCross = DimsSurfCross) + create_profile_point!(Profile, PointData, section_width = section_width) return nothing end @@ -375,13 +383,13 @@ This reads the picked profiles from disk and returns a vector of ProfileData function read_picked_profiles(ProfileCoordFile::String) profiles = Vector{ProfileData}() - profile_data = readdlm(ProfileCoordFile,skipstart=1,',') + profile_data = readdlm(ProfileCoordFile, skipstart = 1, ',') - for i=1:size(profile_data,1) - start_lonlat = (profile_data[i,2:3]...,) - end_lonlat = (profile_data[i,4:5]...,) - profile = ProfileData(start_lonlat=start_lonlat, end_lonlat=end_lonlat) - push!(profiles,profile) + for i in 1:size(profile_data, 1) + start_lonlat = (profile_data[i, 2:3]...,) + end_lonlat = (profile_data[i, 4:5]...,) + profile = ProfileData(start_lonlat = 
start_lonlat, end_lonlat = end_lonlat) + push!(profiles, profile) end return profiles @@ -393,16 +401,16 @@ end This is a convenience function (mostly for backwards compatibility with the MATLAB GUI) that loads the data from file & projects it onto a profile """ -function extract_ProfileData(ProfileCoordFile::String,ProfileNumber::Int64,DataSetFile::String; DimsVolCross=(100,100),DepthVol=nothing,DimsSurfCross=(100,),WidthPointProfile=50km) +function extract_ProfileData(ProfileCoordFile::String, ProfileNumber::Int64, DataSetFile::String; DimsVolCross = (100, 100), DepthVol = nothing, DimsSurfCross = (100,), WidthPointProfile = 50km) # read profile profile_list = read_picked_profiles(ProfileCoordFile) profile = profile_list[ProfileNumber] - println("lon start ", profile.start_lonlat[1]) - println("lat start ", profile.start_lonlat[2]) - println("lon end ", profile.end_lonlat[1]) - println("lat end ", profile.end_lonlat[2]) + println("lon start ", profile.start_lonlat[1]) + println("lat start ", profile.start_lonlat[2]) + println("lon end ", profile.end_lonlat[1]) + println("lat end ", profile.end_lonlat[2]) # read all datasets: Datasets_all = load_dataset_file(DataSetFile) @@ -414,9 +422,11 @@ function extract_ProfileData(ProfileCoordFile::String,ProfileNumber::Int64,DataS VolData_combined = combine_vol_data(VolData) # project data onto profile: - extract_ProfileData!(profile, VolData_combined, SurfData, PointData, - DimsVolCross=DimsVolCross, DimsSurfCross=DimsSurfCross, - Depth_extent=DepthVol, section_width=WidthPointProfile) + extract_ProfileData!( + profile, VolData_combined, SurfData, PointData, + DimsVolCross = DimsVolCross, DimsSurfCross = DimsSurfCross, + Depth_extent = DepthVol, section_width = WidthPointProfile + ) return profile end diff --git a/src/Setup_geometry.jl b/src/Setup_geometry.jl index 6fc7a653..0c87a6b2 100644 --- a/src/Setup_geometry.jl +++ b/src/Setup_geometry.jl @@ -11,13 +11,13 @@ import Base: show # These are routines that help to create input geometries, such as slabs with a given angle # -export add_box!, add_sphere!, add_ellipsoid!, add_cylinder!, add_layer!, add_polygon!, add_slab!, add_stripes!, add_volcano!, add_fault!, - make_volc_topo, - ConstantTemp, LinearTemp, HalfspaceCoolingTemp, SpreadingRateTemp, LithosphericTemp, LinearWeightedTemperature, - McKenzie_subducting_slab, - ConstantPhase, LithosphericPhases, - Trench, compute_slab_surface, - compute_thermal_structure, compute_phase +export add_box!, add_sphere!, add_ellipsoid!, add_cylinder!, add_layer!, add_polygon!, add_slab!, add_stripes!, add_volcano!, add_fault!, + make_volc_topo, + ConstantTemp, LinearTemp, HalfspaceCoolingTemp, SpreadingRateTemp, LithosphericTemp, LinearWeightedTemperature, + McKenzie_subducting_slab, + ConstantPhase, LithosphericPhases, + Trench, compute_slab_surface, + compute_thermal_structure, compute_phase """ ind2D = flatten_index_dimensions(Phase, ind_vec::Vector{CartesianIndex{3}}) @@ -25,8 +25,8 @@ export add_box!, add_sphere!, add_ellipsoid!, add_cylinder!, add_layer!, add_po This converts the indices to purely 2D indices if the array `phase` is 2D """ function flatten_index_dimensions(Phase, ind_vec::Vector{CartesianIndex{3}}) - if length(size(Phase))==2 - ind2D = Vector{CartesianIndex{2}}(undef,length(ind_vec)) + if length(size(Phase)) == 2 + ind2D = Vector{CartesianIndex{2}}(undef, length(ind_vec)) for (num, ind) in enumerate(ind_vec) ind2D[num] = CartesianIndex(ind[1], ind[3]) end @@ -42,9 +42,9 @@ end This converts the indices to purely 2D indices if the array 
`phase` is 2D """ -function flatten_index_dimensions(Phase::AbstractArray{T, N}, ind_vec::Array{Bool, 3}) where {T,N} - if N==2 - ind2D = Vector{CartesianIndex{2}}(undef,length(ind_vec)) +function flatten_index_dimensions(Phase::AbstractArray{T, N}, ind_vec::Array{Bool, 3}) where {T, N} + if N == 2 + ind2D = Vector{CartesianIndex{2}}(undef, length(ind_vec)) for (num, ind) in enumerate(ind_vec) ind2D[num] = CartesianIndex(ind[1], ind[3]) end @@ -108,26 +108,28 @@ julia> write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to para "LaMEM_ModelSetup.vts" ``` """ -function add_stripes!(Phase, Grid::AbstractGeneralGrid; # required input - stripAxes = (1,1,0), # activate stripes along dimensions x, y and z when set to 1 - stripeWidth = 0.2, # full width of a stripe - stripeSpacing = 1, # spacing between two stripes centers - Origin = nothing, # origin - StrikeAngle = 0, # strike - DipAngle = 0, # dip angle - phase = ConstantPhase(3), # phase to be striped - stripePhase = ConstantPhase(4), # stripe phase - cell = false ) # if true, Phase and Temp are defined on cell centers +function add_stripes!( + Phase, Grid::AbstractGeneralGrid; # required input + stripAxes = (1, 1, 0), # activate stripes along dimensions x, y and z when set to 1 + stripeWidth = 0.2, # full width of a stripe + stripeSpacing = 1, # spacing between two stripes centers + Origin = nothing, # origin + StrikeAngle = 0, # strike + DipAngle = 0, # dip angle + phase = ConstantPhase(3), # phase to be striped + stripePhase = ConstantPhase(4), # stripe phase + cell = false + ) # if true, Phase and Temp are defined on cell centers # warnings - if stripeWidth >= stripeSpacing/2.0 + if stripeWidth >= stripeSpacing / 2.0 print("WARNING: stripeWidth should be strictly < stripeSpacing/2.0, otherwise phase is overwritten by the stripePhase\n") elseif sum(stripAxes .== 0) == 3 print("WARNING: at least one axis should be set to 1 e.g. 
stripAxes = (1,0,0), otherwise no stripes will be added\n") end # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # sets origin if isnothing(Origin) @@ -135,35 +137,34 @@ function add_stripes!(Phase, Grid::AbstractGeneralGrid; # require end # Perform rotation of 3D coordinates: - Xrot = X .- Origin[1]; - Yrot = Y .- Origin[2]; - Zrot = Z .- Origin[3]; + Xrot = X .- Origin[1] + Yrot = Y .- Origin[2] + Zrot = Z .- Origin[3] - Rot3D!(Xrot,Yrot,Zrot, StrikeAngle, DipAngle) + Rot3D!(Xrot, Yrot, Zrot, StrikeAngle, DipAngle) - ph_ind = findall(Phase .== phase.phase); + ph_ind = findall(Phase .== phase.phase) ind = Int64[] if stripAxes[1] == 1 - indX = findall( abs.(Xrot[ph_ind] .% stripeSpacing) .<= stripeWidth/2.0); - ind = vcat(ind,indX); + indX = findall(abs.(Xrot[ph_ind] .% stripeSpacing) .<= stripeWidth / 2.0) + ind = vcat(ind, indX) end if stripAxes[2] == 1 - indY = findall( abs.(Yrot[ph_ind] .% stripeSpacing) .<= stripeWidth/2.0); - ind = vcat(ind,indY); + indY = findall(abs.(Yrot[ph_ind] .% stripeSpacing) .<= stripeWidth / 2.0) + ind = vcat(ind, indY) end if stripAxes[3] == 1 - indZ = findall( abs.(Zrot[ph_ind] .% stripeSpacing) .<= stripeWidth/2.0); - ind = vcat(ind,indZ); + indZ = findall(abs.(Zrot[ph_ind] .% stripeSpacing) .<= stripeWidth / 2.0) + ind = vcat(ind, indZ) end - Phase[ph_ind[ind]] .= stripePhase.phase; + Phase[ph_ind[ind]] .= stripePhase.phase return nothing end - """ add_box!(Phase, Temp, Grid::AbstractGeneralGrid; xlim::Tuple = (20,100), [ylim::Tuple = (1,10)], zlim::Tuple = (10,80), Origin=nothing, StrikeAngle=0, DipAngle=0, @@ -223,25 +224,27 @@ julia> write_paraview(Grid,"LaMEM_ModelSetup") # Save model to paraview "LaMEM_ModelSetup.vts" ``` """ -function add_box!(Phase, Temp, Grid::AbstractGeneralGrid; # required input - xlim::Tuple = (20,100), ylim=nothing, zlim::Tuple = (10,80), # limits of the box - Origin=nothing, StrikeAngle=0, DipAngle=0, # origin & dip/strike - phase = ConstantPhase(1), # Sets the phase number(s) in the box - T=nothing, # Sets the thermal structure (various functions are available) - cell=false ) # if true, Phase and Temp are defined on cell centers +function add_box!( + Phase, Temp, Grid::AbstractGeneralGrid; # required input + xlim::Tuple = (20, 100), ylim = nothing, zlim::Tuple = (10, 80), # limits of the box + Origin = nothing, StrikeAngle = 0, DipAngle = 0, # origin & dip/strike + phase = ConstantPhase(1), # Sets the phase number(s) in the box + T = nothing, # Sets the thermal structure (various functions are available) + cell = false + ) # if true, Phase and Temp are defined on cell centers # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # ensure that the input arrays have the correct size #@assert size(X) == size(Phase) == size(Temp) # Limits of block - if ylim==nothing + if ylim == nothing ylim = (minimum(Y), maximum(Y)) end - if Origin==nothing + if Origin == nothing Origin = (xlim[1], ylim[1], zlim[2]) # upper-left corner end @@ -253,25 +256,27 @@ function add_box!(Phase, Temp, Grid::AbstractGeneralGrid; # required input end # Perform rotation of 3D coordinates: - Xrot = X .- Origin[1]; - Yrot = Y .- Origin[2]; - Zrot = Z .- Origin[3]; + Xrot = X .- Origin[1] + Yrot = Y .- Origin[2] + Zrot = Z .- Origin[3] - Rot3D!(Xrot,Yrot,Zrot, StrikeAngle, DipAngle) + Rot3D!(Xrot, Yrot, Zrot, StrikeAngle, DipAngle) # Set phase number & thermal structure in the full domain ztop = 
maximum(zlim) - Origin[3] zbot = minimum(zlim) - Origin[3] - ind = findall( (Xrot .>= (minimum(xlim) - Origin[1])) .& (Xrot .<= (maximum(xlim) - Origin[1])) .& - (Yrot .>= (minimum(ylim) - Origin[2])) .& (Yrot .<= (maximum(ylim) - Origin[2])) .& - (Zrot .>= zbot) .& (Zrot .<= ztop) ) + ind = findall( + (Xrot .>= (minimum(xlim) - Origin[1])) .& (Xrot .<= (maximum(xlim) - Origin[1])) .& + (Yrot .>= (minimum(ylim) - Origin[2])) .& (Yrot .<= (maximum(ylim) - Origin[2])) .& + (Zrot .>= zbot) .& (Zrot .<= ztop) + ) ind_flat = flatten_index_dimensions(Phase, ind) if !isempty(ind_flat) - # Compute thermal structure accordingly. See routines below for different options + # Compute thermal structure accordingly. See routines below for different options if T != nothing - if isa(T,LithosphericTemp) + if isa(T, LithosphericTemp) Phase[ind_flat] = compute_phase(Phase[ind_flat], Temp[ind_flat], Xrot[ind], Yrot[ind], Zrot[ind], phase) end Temp[ind_flat] = compute_thermal_structure(Temp[ind_flat], Xrot[ind], Yrot[ind], Zrot[ind], Phase[ind_flat], T) @@ -341,17 +346,19 @@ julia> write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to para "LaMEM_ModelSetup.vts" ``` """ -function add_layer!(Phase, Temp, Grid::AbstractGeneralGrid; # required input - xlim=nothing, ylim=nothing, zlim=nothing, # limits of the layer - phase = ConstantPhase(1), # Sets the phase number(s) in the box - T=nothing, # Sets the thermal structure (various functions are available) - cell = false ) # if true, Phase and Temp are defined on cell centers +function add_layer!( + Phase, Temp, Grid::AbstractGeneralGrid; # required input + xlim = nothing, ylim = nothing, zlim = nothing, # limits of the layer + phase = ConstantPhase(1), # Sets the phase number(s) in the box + T = nothing, # Sets the thermal structure (various functions are available) + cell = false + ) # if true, Phase and Temp are defined on cell centers # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # Limits of block - if isnothing(xlim)==isnothing(ylim)==isnothing(zlim) + if isnothing(xlim) == isnothing(ylim) == isnothing(zlim) error("You need to specify at least one of the limits (xlim, ylim, zlim)") end @@ -366,10 +373,11 @@ function add_layer!(Phase, Temp, Grid::AbstractGeneralGrid; # required input end # Set phase number & thermal structure in the full domain - ind = findall( (X .>= (xlim[1])) .& (X .<= (xlim[2])) .& - (Y .>= (ylim[1])) .& (Y .<= (ylim[2])) .& - (Z .>= (zlim[1])) .& (Z .<= (zlim[2])) - ) + ind = findall( + (X .>= (xlim[1])) .& (X .<= (xlim[2])) .& + (Y .>= (ylim[1])) .& (Y .<= (ylim[2])) .& + (Z .>= (zlim[1])) .& (Z .<= (zlim[2])) + ) ind_flat = flatten_index_dimensions(Phase, ind) @@ -387,9 +395,6 @@ function add_layer!(Phase, Temp, Grid::AbstractGeneralGrid; # required input end - - - """ add_sphere!(Phase, Temp, Grid::AbstractGeneralGrid; cen::Tuple = (0,0,-1), radius::Number, phase = ConstantPhase(1). 
@@ -433,16 +438,18 @@ julia> write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to para "LaMEM_ModelSetup.vts" ``` """ -function add_sphere!(Phase, Temp, Grid::AbstractGeneralGrid; # required input - cen::Tuple = (0,0,-1), radius::Number, # center and radius of the sphere - phase = ConstantPhase(1), # Sets the phase number(s) in the sphere - T=nothing, cell=false ) # Sets the thermal structure (various functions are available) +function add_sphere!( + Phase, Temp, Grid::AbstractGeneralGrid; # required input + cen::Tuple = (0, 0, -1), radius::Number, # center and radius of the sphere + phase = ConstantPhase(1), # Sets the phase number(s) in the sphere + T = nothing, cell = false + ) # Sets the thermal structure (various functions are available) # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # Set phase number & thermal structure in the full domain - ind = findall(((X .- cen[1]).^2 + (Y .- cen[2]).^2 + (Z .- cen[3]).^2).^0.5 .< radius) + ind = findall(((X .- cen[1]) .^ 2 + (Y .- cen[2]) .^ 2 + (Z .- cen[3]) .^ 2) .^ 0.5 .< radius) ind_flat = flatten_index_dimensions(Phase, ind) @@ -504,33 +511,39 @@ julia> write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to para "LaMEM_ModelSetup.vts" ``` """ -function add_ellipsoid!(Phase, Temp, Grid::AbstractGeneralGrid; # required input - cen::Tuple = (-1,-1,-1), axes::Tuple = (0.2,0.1,0.5), # center and semi-axes of the ellpsoid - Origin=nothing, StrikeAngle=0, DipAngle=0, # origin & dip/strike - phase = ConstantPhase(1), # Sets the phase number(s) in the box - T=nothing, cell=false ) # Sets the thermal structure (various functions are available) - - if Origin==nothing +function add_ellipsoid!( + Phase, Temp, Grid::AbstractGeneralGrid; # required input + cen::Tuple = (-1, -1, -1), axes::Tuple = (0.2, 0.1, 0.5), # center and semi-axes of the ellpsoid + Origin = nothing, StrikeAngle = 0, DipAngle = 0, # origin & dip/strike + phase = ConstantPhase(1), # Sets the phase number(s) in the box + T = nothing, cell = false + ) # Sets the thermal structure (various functions are available) + + if Origin == nothing Origin = cen # center end # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # Perform rotation of 3D coordinates: - Xrot = X .- Origin[1]; - Yrot = Y .- Origin[2]; - Zrot = Z .- Origin[3]; + Xrot = X .- Origin[1] + Yrot = Y .- Origin[2] + Zrot = Z .- Origin[3] - Rot3D!(Xrot,Yrot,Zrot, StrikeAngle, DipAngle) + Rot3D!(Xrot, Yrot, Zrot, StrikeAngle, DipAngle) # Set phase number & thermal structure in the full domain - x2 = axes[1]^2 - y2 = axes[2]^2 - z2 = axes[3]^2 + x2 = axes[1]^2 + y2 = axes[2]^2 + z2 = axes[3]^2 cenRot = cen .- Origin - ind = findall((((Xrot .- cenRot[1]).^2)./x2 + ((Yrot .- cenRot[2]).^2)./y2 + - ((Zrot .- cenRot[3]).^2)./z2) .^0.5 .<= 1) + ind = findall( + ( + ((Xrot .- cenRot[1]) .^ 2) ./ x2 + ((Yrot .- cenRot[2]) .^ 2) ./ y2 + + ((Zrot .- cenRot[3]) .^ 2) ./ z2 + ) .^ 0.5 .<= 1 + ) ind_flat = flatten_index_dimensions(Phase, ind) @@ -591,33 +604,35 @@ julia> write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to para "LaMEM_ModelSetup.vts" ``` """ -function add_cylinder!(Phase, Temp, Grid::AbstractGeneralGrid; # required input - base::Tuple = (-1,-1,-1.5), cap::Tuple = (-1,-1,-0.5), radius::Number, # center and radius of the sphere - phase = ConstantPhase(1), # Sets the phase number(s) in the sphere - T=nothing, cell=false ) # Sets the thermal structure 
(various functions are available) +function add_cylinder!( + Phase, Temp, Grid::AbstractGeneralGrid; # required input + base::Tuple = (-1, -1, -1.5), cap::Tuple = (-1, -1, -0.5), radius::Number, # center and radius of the sphere + phase = ConstantPhase(1), # Sets the phase number(s) in the sphere + T = nothing, cell = false + ) # Sets the thermal structure (various functions are available) # axis vector of cylinder axVec = cap .- base - ax2 = (axVec[1]^2 + axVec[2]^2 + axVec[3]^2) + ax2 = (axVec[1]^2 + axVec[2]^2 + axVec[3]^2) # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # distance between grid points and cylinder base - dx_b = X .- base[1] - dy_b = Y .- base[2] - dz_b = Z .- base[3] + dx_b = X .- base[1] + dy_b = Y .- base[2] + dz_b = Z .- base[3] # find normalized parametric coordinate of a point-axis projection - t = (axVec[1] .* dx_b .+ axVec[2] .* dy_b .+ axVec[3] .* dz_b) ./ ax2 + t = (axVec[1] .* dx_b .+ axVec[2] .* dy_b .+ axVec[3] .* dz_b) ./ ax2 # find distance vector between point and axis - dx = dx_b .- t.*axVec[1] - dy = dy_b .- t.*axVec[2] - dz = dz_b .- t.*axVec[3] + dx = dx_b .- t .* axVec[1] + dy = dy_b .- t .* axVec[2] + dz = dz_b .- t .* axVec[3] # Set phase number & thermal structure in the full domain - ind = findall((t .>= 0.0) .& (t .<= 1.0) .& ((dx.^2 + dy.^2 + dz.^2).^0.5 .<= radius)) + ind = findall((t .>= 0.0) .& (t .<= 1.0) .& ((dx .^ 2 + dy .^ 2 + dz .^ 2) .^ 0.5 .<= radius)) ind_flat = flatten_index_dimensions(Phase, ind) @@ -635,11 +650,11 @@ function add_cylinder!(Phase, Temp, Grid::AbstractGeneralGrid; # required input end # Internal function that rotates the coordinates -function Rot3D!(X,Y,Z, StrikeAngle, DipAngle) +function Rot3D!(X, Y, Z, StrikeAngle, DipAngle) # precompute trigonometric functions (expensive!) 
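`add_cylinder!` above classifies a point by projecting it onto the cap-base axis (normalized parameter `t`) and measuring the remaining perpendicular distance. The same test for a single point, as a self-contained sketch; the helper name is chosen here for illustration and is not part of the package:

```julia
# Point-in-cylinder test mirroring the logic of add_cylinder! above
function in_cylinder(p::NTuple{3, Float64}, base::NTuple{3, Float64}, cap::NTuple{3, Float64}, radius::Float64)
    ax  = cap .- base                 # axis vector of the cylinder
    ax2 = sum(abs2, ax)               # |axis|^2
    d_b = p .- base                   # vector from the base to the point
    t   = sum(ax .* d_b) / ax2        # normalized projection onto the axis
    d   = d_b .- t .* ax              # component perpendicular to the axis
    return 0.0 <= t <= 1.0 && sqrt(sum(abs2, d)) <= radius
end

in_cylinder((-1.0, -1.0, -1.0), (-1.0, -1.0, -1.5), (-1.0, -1.0, -0.5), 0.5)  # true: on the axis, halfway up
in_cylinder(( 0.0,  0.0, -1.0), (-1.0, -1.0, -1.5), (-1.0, -1.0, -0.5), 0.5)  # false: too far from the axis
```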
- sindStrikeAngle, cosStrikeAngle = sincosd(StrikeAngle) - sinDipAngle, cosDipAngle = sincosd(-DipAngle) # note the minus here to be consistent with the earlier version of the code + sindStrikeAngle, cosStrikeAngle = sincosd(StrikeAngle) + sinDipAngle, cosDipAngle = sincosd(-DipAngle) # note the minus here to be consistent with the earlier version of the code for i in eachindex(X) X[i], Y[i], Z[i] = Rot3D(X[i], Y[i], Z[i], cosStrikeAngle, sindStrikeAngle, cosDipAngle, sinDipAngle) end @@ -648,7 +663,6 @@ function Rot3D!(X,Y,Z, StrikeAngle, DipAngle) end - """ add_polygon!(Phase, Temp, Grid::AbstractGeneralGrid; xlim=(), ylim::Tuple = (0.0,0.8), zlim=(), phase = ConstantPhase(1), T=nothing, cell=false ) @@ -691,10 +705,12 @@ julia> write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to para ``` """ -function add_polygon!(Phase, Temp, Grid::AbstractGeneralGrid; # required input - xlim=(), ylim::Tuple = (0.0,0.8), zlim=(), # limits of the box - phase = ConstantPhase(1), # Sets the phase number(s) in the box - T=nothing, cell=false ) # Sets the thermal structure (various functions are available) +function add_polygon!( + Phase, Temp, Grid::AbstractGeneralGrid; # required input + xlim = (), ylim::Tuple = (0.0, 0.8), zlim = (), # limits of the box + phase = ConstantPhase(1), # Sets the phase number(s) in the box + T = nothing, cell = false + ) # Sets the thermal structure (various functions are available) xlim_ = Float64.(collect(xlim)) @@ -703,25 +719,25 @@ function add_polygon!(Phase, Temp, Grid::AbstractGeneralGrid; # required input # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) - ind = zeros(Bool,size(X)) - ind_slice = zeros(Bool,size(X[:,1,:])) + ind = zeros(Bool, size(X)) + ind_slice = zeros(Bool, size(X[:, 1, :])) # find points within the polygon, only in 2D - for i = 1:size(Y)[2] - if Y[1,i,1] >= ylim_[1] && Y[1,i,1]<=ylim_[2] - inpolygon!(ind_slice, xlim_,zlim_, X[:,i,:], Z[:,i,:]) - ind[:,i,:] = ind_slice + for i in 1:size(Y)[2] + if Y[1, i, 1] >= ylim_[1] && Y[1, i, 1] <= ylim_[2] + inpolygon!(ind_slice, xlim_, zlim_, X[:, i, :], Z[:, i, :]) + ind[:, i, :] = ind_slice else - ind[:,i,:] = zeros(size(X[:,1,:])) + ind[:, i, :] = zeros(size(X[:, 1, :])) end end if !isempty(ind) # Compute thermal structure accordingly. See routines below for different options if T != nothing - Temp[ind] = compute_thermal_structure(Temp[ind], X[ind], Y[ind], Z[ind], Phase[ind], T) + Temp[ind] = compute_thermal_structure(Temp[ind], X[ind], Y[ind], Z[ind], Phase[ind], T) end # Set the phase. Different routines are available for that - see below. @@ -736,16 +752,16 @@ end Perform rotation for a point in 3D space """ -function Rot3D(X::_T,Y::_T,Z::_T, cosStrikeAngle::_T, sindStrikeAngle::_T, cosDipAngle::_T, sinDipAngle::_T) where _T<:Number +function Rot3D(X::_T, Y::_T, Z::_T, cosStrikeAngle::_T, sindStrikeAngle::_T, cosDipAngle::_T, sinDipAngle::_T) where {_T <: Number} # rotation matrixes #roty = [cosd(-DipAngle) 0 sind(-DipAngle) ; 0 1 0 ; -sind(-DipAngle) 0 cosd(-DipAngle)]; - roty = @SMatrix [cosDipAngle 0 sinDipAngle ; 0 1 0 ; -sinDipAngle 0 cosDipAngle]; # note that dip-angle is changed from before! - rotz = @SMatrix [cosStrikeAngle -sindStrikeAngle 0 ; sindStrikeAngle cosStrikeAngle 0 ; 0 0 1] + roty = @SMatrix [cosDipAngle 0 sinDipAngle ; 0 1 0 ; -sinDipAngle 0 cosDipAngle] # note that dip-angle is changed from before! 
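In `add_polygon!` above, `xlim` and `zlim` hold the polygon vertices in the x-z plane while `ylim` gives the y-range over which that 2D polygon is extruded (the point-in-polygon test runs slice by slice in y). A hedged usage sketch with placeholder vertices, reusing the `Phases`/`Temp`/`Grid` setup assumed earlier:

```julia
# Extrude a triangle defined in the x-z plane over 0 <= y <= 0.8
# (vertex coordinates are placeholders, not values from this PR)
add_polygon!(Phases, Temp, Grid;
    xlim  = (0.0, 1.0, 2.0),     # x-coordinates of the polygon vertices
    zlim  = (0.0, -1.0, 0.0),    # z-coordinates of the polygon vertices
    ylim  = (0.0, 0.8),          # y-extent of the extrusion
    phase = ConstantPhase(3))
```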
+ rotz = @SMatrix [cosStrikeAngle -sindStrikeAngle 0 ; sindStrikeAngle cosStrikeAngle 0 ; 0 0 1] - CoordVec = @SVector [X, Y, Z] - CoordRot = rotz*CoordVec; - CoordRot = roty*CoordRot; + CoordVec = @SVector [X, Y, Z] + CoordRot = rotz * CoordVec + CoordRot = roty * CoordRot return CoordRot[1], CoordRot[2], CoordRot[3] end @@ -783,29 +799,30 @@ Optional Parameters - background - this allows loading in a topography and only adding the volcano on top (also allows stacking of several cones to get a volcano with different slopes) """ function add_volcano!( - Phases, - Temp, - Grid::CartData; - volcanic_phase = 1, - center = (0,0,0), - height = 0.0, - radius = 0.0, - crater = 0.0, - base = 0.0, - background = nothing, - T = HalfspaceCoolingTemp(Age=0) -) - H = make_volc_topo(Grid; - center = center, - height = height, - radius = radius, - crater = crater, - base = base, + Phases, + Temp, + Grid::CartData; + volcanic_phase = 1, + center = (0, 0, 0), + height = 0.0, + radius = 0.0, + crater = 0.0, + base = 0.0, + background = nothing, + T = HalfspaceCoolingTemp(Age = 0) + ) + H = make_volc_topo( + Grid; + center = center, + height = height, + radius = radius, + crater = crater, + base = base, background = background ) - ni = size(Grid.x) - ind = fill(false, ni...) + ni = size(Grid.x) + ind = fill(false, ni...) depth = similar(Grid.z.val) for k in axes(ind, 3) @@ -885,102 +902,105 @@ julia> write_paraview(Topo,"VolcanoTopo") # Save topography to paravie Saved file: VolcanoTopo.vts ``` """ -function make_volc_topo(Grid::LaMEM_grid; - center::Array{Float64, 1}, - height::Float64, - radius::Float64, - crater=0.0, - base=0.0, - background=nothing) +function make_volc_topo( + Grid::LaMEM_grid; + center::Array{Float64, 1}, + height::Float64, + radius::Float64, + crater = 0.0, + base = 0.0, + background = nothing + ) # create nondimensionalization object - CharUnits = SI_units(length=1000m); + CharUnits = SI_units(length = 1000m) # get node grid - X = Grid.Xn[:,:,1]; - Y = Grid.Yn[:,:,1]; - nx = size(X,1); - ny = size(X,2); + X = Grid.Xn[:, :, 1] + Y = Grid.Yn[:, :, 1] + nx = size(X, 1) + ny = size(X, 2) # compute radial distance to volcano center - DX = X .- center[1] - DY = Y .- center[2] - RD = (DX.^2 .+ DY.^2).^0.5 + DX = X .- center[1] + DY = Y .- center[2] + RD = (DX .^ 2 .+ DY .^ 2) .^ 0.5 # get radial distance from crater rim RD .-= crater # find position relative to crater rim - dr = radius - crater - pos = (-RD ./ dr .+ 1) + dr = radius - crater + pos = (-RD ./ dr .+ 1) ## assign topography - H = zeros(Float64, (nx,ny)) + H = zeros(Float64, (nx, ny)) # check if there is a background supplied if background === nothing - H .= base + H .= base else background = nondimensionalize(background, CharUnits) if size(background) == size(X) H .= background - elseif size(background) == size(reshape(X,nx,ny,1)) - H .= background[:,:,1] + elseif size(background) == size(reshape(X, nx, ny, 1)) + H .= background[:, :, 1] else error("Size of background must be ", string(nx), "x", string(ny)) end end - ind = findall(x->0.0<=x<1.0, pos) - H[ind] .= pos[ind] .* (height-base) .+ base - ind = findall(x->x>= 1.0, pos) + ind = findall(x -> 0.0 <= x < 1.0, pos) + H[ind] .= pos[ind] .* (height - base) .+ base + ind = findall(x -> x >= 1.0, pos) H[ind] .= height # dimensionalize Topo = dimensionalize(H, km, CharUnits) # build and return CartData - return CartData(reshape(X,nx,ny,1), reshape(Y,nx,ny,1), reshape(Topo,nx,ny,1), (Topography=reshape(Topo,nx,ny,1),)) + return CartData(reshape(X, nx, ny, 1), reshape(Y, nx, ny, 
1), reshape(Topo, nx, ny, 1), (Topography = reshape(Topo, nx, ny, 1),)) end -function make_volc_topo(Grid::CartData; - center = (0,0,0), - height = 0.0, - radius = 0.0, - crater = 0.0, - base = 0.0, - background = nothing -) +function make_volc_topo( + Grid::CartData; + center = (0, 0, 0), + height = 0.0, + radius = 0.0, + crater = 0.0, + base = 0.0, + background = nothing + ) # get node grid - X = @views Grid.x.val[:,:,1] - Y = @views Grid.y.val[:,:,1] - nx = size(X, 1) - ny = size(X, 2) - pos = similar(X) + X = @views Grid.x.val[:, :, 1] + Y = @views Grid.y.val[:, :, 1] + nx = size(X, 1) + ny = size(X, 2) + pos = similar(X) for i in eachindex(pos) # compute radial distance to volcano center - DX = X[i] - center[1] - DY = Y[i] - center[2] - RD = √(DX^2 + DY^2) + DX = X[i] - center[1] + DY = Y[i] - center[2] + RD = √(DX^2 + DY^2) # get radial distance from crater rim - RD -= crater + RD -= crater # find position relative to crater rim - dr = radius - crater - pos[i] = -RD / dr + 1 + dr = radius - crater + pos[i] = -RD / dr + 1 end ## assign topography - H = zeros(nx,ny) + H = zeros(nx, ny) # check if there is a background supplied if background === nothing - H .= base + H .= base else # background = nondimensionalize(background, CharUnits) if size(background) == size(X) H .= background - elseif size(background) == size(reshape(X,nx,ny,1)) - H .= @views background[:,:,1] + elseif size(background) == size(reshape(X, nx, ny, 1)) + H .= @views background[:, :, 1] else error("Size of background must be ", nx, "x", ny) end @@ -1036,12 +1056,12 @@ Parameters end function compute_thermal_structure(Temp, X, Y, Z, Phase, s::LinearTemp) - @unpack Ttop, Tbot = s + @unpack Ttop, Tbot = s - dz = Z[end]-Z[1]; - dT = Tbot - Ttop + dz = Z[end] - Z[1] + dT = Tbot - Ttop - Temp = abs.(Z./dz).*dT .+ Ttop + Temp = abs.(Z ./ dz) .* dT .+ Ttop return Temp end @@ -1061,22 +1081,22 @@ Parameters @with_kw_noshow mutable struct HalfspaceCoolingTemp <: AbstractThermalStructure Tsurface = 0 # top T Tmantle = 1350 # bottom T - Age = 60 # thermal age of plate [in Myrs] + Age = 60 # thermal age of plate [in Myrs] Adiabat = 0 # Adiabatic gradient in K/km end function compute_thermal_structure(Temp, X, Y, Z, Phase, s::HalfspaceCoolingTemp) - @unpack Tsurface, Tmantle, Age, Adiabat = s + @unpack Tsurface, Tmantle, Age, Adiabat = s - kappa = 1e-6; - SecYear = 3600*24*365 - dz = Z[end]-Z[1]; - ThermalAge = Age*1e6*SecYear; + kappa = 1.0e-6 + SecYear = 3600 * 24 * 365 + dz = Z[end] - Z[1] + ThermalAge = Age * 1.0e6 * SecYear - MantleAdiabaticT = Tmantle .+ Adiabat*abs.(Z); # Adiabatic temperature of mantle + MantleAdiabaticT = Tmantle .+ Adiabat * abs.(Z) # Adiabatic temperature of mantle for i in eachindex(Temp) - Temp[i] = (Tsurface .- Tmantle)*erfc((abs.(Z[i])*1e3)./(2*sqrt(kappa*ThermalAge))) + MantleAdiabaticT[i]; + Temp[i] = (Tsurface .- Tmantle) * erfc((abs.(Z[i]) * 1.0e3) ./ (2 * sqrt(kappa * ThermalAge))) + MantleAdiabaticT[i] end return Temp end @@ -1107,42 +1127,42 @@ Note: the thermal age at the mid oceanic ridge is set to 1 year to avoid divisio MORside = "left" # side of box where the MOR is located SpreadingVel = 3 # spreading velocity [cm/yr] AgeRidge = 0 # Age of the ridge [Myrs] - maxAge = 60 # maximum thermal age of plate [Myrs] + maxAge = 60 # maximum thermal age of plate [Myrs] end function compute_thermal_structure(Temp, X, Y, Z, Phase, s::SpreadingRateTemp) - @unpack Tsurface, Tmantle, Adiabat, MORside, SpreadingVel, AgeRidge, maxAge = s - - kappa = 1e-6; - SecYear = 3600*24*365 - dz = Z[end]-Z[1]; - - 
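For a single depth, the half-space cooling profile used in `compute_thermal_structure(..., ::HalfspaceCoolingTemp)` above reduces to T(z) = (Tsurface - Tmantle) * erfc(|z| * 10^3 / (2 * sqrt(kappa * age))) + Tmantle + Adiabat * |z|, with z in km and the age converted to seconds. A self-contained numeric sketch of that same formula (the defaults mirror the struct defaults shown above, and `SpecialFunctions.erfc` is the same special function the code relies on):

```julia
using SpecialFunctions: erfc

# Half-space cooling temperature at depth z [km] for a plate of thermal age Age [Myr]
# (same expression as the HalfspaceCoolingTemp branch above; illustrative helper only)
function halfspace_T(z_km; Tsurface = 0.0, Tmantle = 1350.0, Age = 60.0, Adiabat = 0.0, kappa = 1.0e-6)
    SecYear    = 3600 * 24 * 365
    ThermalAge = Age * 1.0e6 * SecYear                 # thermal age in seconds
    Tadiabatic = Tmantle + Adiabat * abs(z_km)         # adiabatic mantle temperature
    return (Tsurface - Tmantle) * erfc(abs(z_km) * 1.0e3 / (2 * sqrt(kappa * ThermalAge))) + Tadiabatic
end

halfspace_T(0.0)       # ≈ 0.0    (surface temperature)
halfspace_T(-300.0)    # ≈ 1350.0 (erfc term has decayed; mantle temperature)
```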
MantleAdiabaticT = Tmantle .+ Adiabat*abs.(Z); # Adiabatic temperature of mantle - - if MORside=="left" - Distance = X .- X[1,1,1]; - elseif MORside=="right" - Distance = X[end,1,1] .- X; - elseif MORside=="front" - Distance = Y .- Y[1,1,1]; - elseif MORside=="back" - Distance = Y[1,end,1] .- Y; + @unpack Tsurface, Tmantle, Adiabat, MORside, SpreadingVel, AgeRidge, maxAge = s + + kappa = 1.0e-6 + SecYear = 3600 * 24 * 365 + dz = Z[end] - Z[1] + + MantleAdiabaticT = Tmantle .+ Adiabat * abs.(Z) # Adiabatic temperature of mantle + + if MORside == "left" + Distance = X .- X[1, 1, 1] + elseif MORside == "right" + Distance = X[end, 1, 1] .- X + elseif MORside == "front" + Distance = Y .- Y[1, 1, 1] + elseif MORside == "back" + Distance = Y[1, end, 1] .- Y else error("unknown side") end for i in eachindex(Temp) - ThermalAge = abs(Distance[i]*1e3*1e2)/SpreadingVel + AgeRidge*1e6; # Thermal age in years - if ThermalAge>maxAge*1e6 - ThermalAge = maxAge*1e6 + ThermalAge = abs(Distance[i] * 1.0e3 * 1.0e2) / SpreadingVel + AgeRidge * 1.0e6 # Thermal age in years + if ThermalAge > maxAge * 1.0e6 + ThermalAge = maxAge * 1.0e6 end - ThermalAge = ThermalAge*SecYear; - if ThermalAge==0 - ThermalAge = 1e-6 # doesn't like zero + ThermalAge = ThermalAge * SecYear + if ThermalAge == 0 + ThermalAge = 1.0e-6 # doesn't like zero end - Temp[i] = (Tsurface .- Tmantle)*erfc((abs.(Z[i])*1e3)./(2*sqrt(kappa*ThermalAge))) + MantleAdiabaticT[i]; + Temp[i] = (Tsurface .- Tmantle) * erfc((abs.(Z[i]) * 1.0e3) ./ (2 * sqrt(kappa * ThermalAge))) + MantleAdiabaticT[i] end return Temp end @@ -1195,12 +1215,12 @@ struct Thermal_parameters{A} ρCp::A H::A function Thermal_parameters(ni) - ρ = zeros(ni) - Cp = zeros(ni) - k = zeros(ni) - ρCp = zeros(ni) - H = zeros(ni) - new{typeof(ρ)}(ρ,Cp,k,ρCp,H) + ρ = zeros(ni) + Cp = zeros(ni) + k = zeros(ni) + ρCp = zeros(ni) + H = zeros(ni) + return new{typeof(ρ)}(ρ, Cp, k, ρCp, H) end end @@ -1209,159 +1229,160 @@ function compute_thermal_structure(Temp, X, Y, Z, Phase, s::LithosphericTemp) dtfac, nz, rheology = s # Create 1D depth profile within the box - z = LinRange(round(maximum(Z)),round(minimum(Z)),nz) # [km] - z = @. z*1e3 # [m] - dz = z[2] - z[1] # Gride resolution + z = LinRange(round(maximum(Z)), round(minimum(Z)), nz) # [km] + z = @. z * 1.0e3 # [m] + dz = z[2] - z[1] # Gride resolution # Initialize 1D arrays for explicit solver - T = zeros(nz) - phase = Int64.(zeros(nz)) + T = zeros(nz) + phase = Int64.(zeros(nz)) # Assign phase id from Phase to 1D phase array - phaseid = (minimum(Phase):1:maximum(Phase)) - ztop = round(maximum(Z[findall(Phase .== phaseid[1])])) - zlayer = zeros(length(phaseid)) - for i = 1:length(phaseid) + phaseid = (minimum(Phase):1:maximum(Phase)) + ztop = round(maximum(Z[findall(Phase .== phaseid[1])])) + zlayer = zeros(length(phaseid)) + for i in 1:length(phaseid) # Calculate layer thickness from Phase array - zlayer[i] = round(minimum(Z[findall(Phase .== phaseid[i])])) - zlayer[i] = zlayer[i]*1.0e3 + zlayer[i] = round(minimum(Z[findall(Phase .== phaseid[i])])) + zlayer[i] = zlayer[i] * 1.0e3 end - for i = 1:length(phaseid) + for i in 1:length(phaseid) # Assign phase ids - ind = findall((z .>= zlayer[i]) .& (z .<= ztop)) - phase[ind] .= phaseid[i] - ztop = zlayer[i] + ind = findall((z .>= zlayer[i]) .& (z .<= ztop)) + phase[ind] .= phaseid[i] + ztop = zlayer[i] end # Setup initial T-profile - Tpot = Tpot + 273.15 # Potential temp [K] - Tsurface = Tsurface + 273.15 # Surface temperature [ K ] - T = @. 
Tpot + abs.(z./1.0e3)*dTadi # Initial T-profile [ K ] - T[1] = Tsurface + Tpot = Tpot + 273.15 # Potential temp [K] + Tsurface = Tsurface + 273.15 # Surface temperature [ K ] + T = @. Tpot + abs.(z ./ 1.0e3) * dTadi # Initial T-profile [ K ] + T[1] = Tsurface - args = (;) - thermal_parameters = Thermal_parameters(nz) + args = (;) + thermal_parameters = Thermal_parameters(nz) ## Update thermal parameters ======================================== # - compute_density!(thermal_parameters.ρ,rheology,phase,args) - compute_heatcapacity!(thermal_parameters.Cp,rheology,phase,args) - compute_conductivity!(thermal_parameters.k,rheology,phase,args) - thermal_parameters.ρCp .= @. thermal_parameters.Cp * thermal_parameters.ρ - compute_radioactive_heat!(thermal_parameters.H,rheology,phase,args) + compute_density!(thermal_parameters.ρ, rheology, phase, args) + compute_heatcapacity!(thermal_parameters.Cp, rheology, phase, args) + compute_conductivity!(thermal_parameters.k, rheology, phase, args) + thermal_parameters.ρCp .= @. thermal_parameters.Cp * thermal_parameters.ρ + compute_radioactive_heat!(thermal_parameters.H, rheology, phase, args) # Thermal diffusivity [ m^2/s ] - κ = maximum(thermal_parameters.k) / + κ = maximum(thermal_parameters.k) / minimum(thermal_parameters.ρ) / minimum(thermal_parameters.Cp) ## =================================================================== # ## Time stability criterion ========================================= # - tfac = 60.0*60.0*24.0*365.25 # Seconds per year - age = age*1.0e6*tfac # Age in seconds - dtexp = dz^2.0/2.0/κ # Stability criterion for explicit - dt = dtfac*dtexp # [s] - nit = Int64(ceil(age/dt)) # Number of iterations - time = zeros(nit) # Time array - - for i = 1:nit + tfac = 60.0 * 60.0 * 24.0 * 365.25 # Seconds per year + age = age * 1.0e6 * tfac # Age in seconds + dtexp = dz^2.0 / 2.0 / κ # Stability criterion for explicit + dt = dtfac * dtexp # [s] + nit = Int64(ceil(age / dt)) # Number of iterations + time = zeros(nit) # Time array + + for i in 1:nit if i > 1 - time[i] = time[i-1] + dt + time[i] = time[i - 1] + dt end SolveDiff1Dexplicit_vary!( T, thermal_parameters, - ubound,lbound, - utbf,ltbf, + ubound, lbound, + utbf, ltbf, dz, - dt) + dt + ) end - interp_linear_T = linear_interpolation(-z./1.0e3, T.-273.15) # create interpolation object + interp_linear_T = linear_interpolation(-z ./ 1.0e3, T .- 273.15) # create interpolation object Temp = interp_linear_T(-Z) return Temp end function SolveDiff1Dexplicit_vary!( - T, - thermal_parameters, - ubound,lbound, - utbf,ltbf, - di, - dt -) - nz = length(T) - T0 = T + T, + thermal_parameters, + ubound, lbound, + utbf, ltbf, + di, + dt + ) + nz = length(T) + T0 = T if ubound == "const" - T[1] = T0[1] + T[1] = T0[1] elseif ubound == "flux" - kB = (thermal_parameters.k[2] + thermal_parameters.k[1])/2.0 - kA = (thermal_parameters.k[1] + thermal_parameters.k[1])/2.0 - a = (dt*(kA + kB)) / (di^2.0 * thermal_parameters.ρCp[1]) - b = 1 - (dt*(kA + kB)) / (di^2.0 * thermal_parameters.ρCp[1]) - c = (dt*2.0*utbf)/(di * thermal_parameters.ρCp[1]) - T[1] = a*T0[2] + b*T0[1] + c + - thermal_parameters.H[1]*dt/thermal_parameters.ρCp[1] + kB = (thermal_parameters.k[2] + thermal_parameters.k[1]) / 2.0 + kA = (thermal_parameters.k[1] + thermal_parameters.k[1]) / 2.0 + a = (dt * (kA + kB)) / (di^2.0 * thermal_parameters.ρCp[1]) + b = 1 - (dt * (kA + kB)) / (di^2.0 * thermal_parameters.ρCp[1]) + c = (dt * 2.0 * utbf) / (di * thermal_parameters.ρCp[1]) + T[1] = a * T0[2] + b * T0[1] + c + + thermal_parameters.H[1] * dt / 
thermal_parameters.ρCp[1] end if lbound == "const" - T[nz] = T0[nz] + T[nz] = T0[nz] elseif lbound == "flux" - kB = (thermal_parameters.k[nz] + thermal_parameters.k[nz])/2.0 - kA = (thermal_parameters.k[nz] + thermal_parameters.k[nz-1])/2.0 - a = (dt*(kA + kB)) / (di^2.0 * thermal_parameters.ρCp[nz]) - b = 1 - (dt*(kA + kB)) / (di^2.0 * thermal_parameters.ρCp[nz]) - c = -(dt*2.0*ltbf) / (di * thermal_parameters.ρCp[nz]) - T[nz] = a*T0[nz-1] + b*T0[nz] + c + kB = (thermal_parameters.k[nz] + thermal_parameters.k[nz]) / 2.0 + kA = (thermal_parameters.k[nz] + thermal_parameters.k[nz - 1]) / 2.0 + a = (dt * (kA + kB)) / (di^2.0 * thermal_parameters.ρCp[nz]) + b = 1 - (dt * (kA + kB)) / (di^2.0 * thermal_parameters.ρCp[nz]) + c = -(dt * 2.0 * ltbf) / (di * thermal_parameters.ρCp[nz]) + T[nz] = a * T0[nz - 1] + b * T0[nz] + c end - kAi = @. (thermal_parameters.k[1:end-2] + thermal_parameters.k[2:end-1])/2.0 - kBi = @. (thermal_parameters.k[2:end-1] + thermal_parameters.k[3:end])/2.0 - ai = @. (kBi*dt)/(di^2.0*thermal_parameters.ρCp[2:end-1]) - bi = @. 1.0 - (dt*(kAi + kBi))/(di^2.0*thermal_parameters.ρCp[2:end-1]) - ci = @. (kAi*dt)/(di^2.0*thermal_parameters.ρCp[2:end-1]) - T[2:end-1] = @. ai*T0[3:end] + bi*T0[2:end-1] + ci*T0[1:end-2] + - thermal_parameters.H[2:end-1]*dt/thermal_parameters.ρCp[2:end-1] + kAi = @. (thermal_parameters.k[1:(end - 2)] + thermal_parameters.k[2:(end - 1)]) / 2.0 + kBi = @. (thermal_parameters.k[2:(end - 1)] + thermal_parameters.k[3:end]) / 2.0 + ai = @. (kBi * dt) / (di^2.0 * thermal_parameters.ρCp[2:(end - 1)]) + bi = @. 1.0 - (dt * (kAi + kBi)) / (di^2.0 * thermal_parameters.ρCp[2:(end - 1)]) + ci = @. (kAi * dt) / (di^2.0 * thermal_parameters.ρCp[2:(end - 1)]) + T[2:(end - 1)] = @. ai * T0[3:end] + bi * T0[2:(end - 1)] + ci * T0[1:(end - 2)] + + thermal_parameters.H[2:(end - 1)] * dt / thermal_parameters.ρCp[2:(end - 1)] return T end function example_CLrheology(; - ρM=3.0e3, # Density [ kg/m^3 ] - CpM=1.0e3, # Specific heat capacity [ J/kg/K ] - kM=2.3, # Thermal conductivity [ W/m/K ] - HM=0.0, # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] - ρUC=2.7e3, # Density [ kg/m^3 ] - CpUC=1.0e3, # Specific heat capacity [ J/kg/K ] - kUC=3.0, # Thermal conductivity [ W/m/K ] - HUC=617.0e-12, # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] - ρLC=2.9e3, # Density [ kg/m^3 ] - CpLC=1.0e3, # Specific heat capacity [ J/kg/K ] - kLC=2.0, # Thermal conductivity [ W/m/K ] - HLC=43.0e-12, # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] -) + ρM = 3.0e3, # Density [ kg/m^3 ] + CpM = 1.0e3, # Specific heat capacity [ J/kg/K ] + kM = 2.3, # Thermal conductivity [ W/m/K ] + HM = 0.0, # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] + ρUC = 2.7e3, # Density [ kg/m^3 ] + CpUC = 1.0e3, # Specific heat capacity [ J/kg/K ] + kUC = 3.0, # Thermal conductivity [ W/m/K ] + HUC = 617.0e-12, # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] + ρLC = 2.9e3, # Density [ kg/m^3 ] + CpLC = 1.0e3, # Specific heat capacity [ J/kg/K ] + kLC = 2.0, # Thermal conductivity [ W/m/K ] + HLC = 43.0e-12, # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] + ) rheology = ( # Name = "UpperCrust", SetMaterialParams(; - Phase = 1, - Density = ConstantDensity(; ρ=ρUC), - HeatCapacity = ConstantHeatCapacity(; Cp=CpUC), - Conductivity = ConstantConductivity(; k=kUC), - RadioactiveHeat = ConstantRadioactiveHeat(; H_r=HUC*ρUC), # [H] = W/m^3 + Phase = 1, + Density = ConstantDensity(; ρ = ρUC), + HeatCapacity = ConstantHeatCapacity(; Cp = CpUC), + 
Conductivity = ConstantConductivity(; k = kUC), + RadioactiveHeat = ConstantRadioactiveHeat(; H_r = HUC * ρUC), # [H] = W/m^3 ), # Name = "LowerCrust", SetMaterialParams(; - Phase = 2, - Density = ConstantDensity(; ρ=ρLC), - HeatCapacity = ConstantHeatCapacity(; Cp=CpLC), - Conductivity = ConstantConductivity(; k=kLC), - RadioactiveHeat = ConstantRadioactiveHeat(; H_r=HLC*ρLC), # [H] = W/m^3 + Phase = 2, + Density = ConstantDensity(; ρ = ρLC), + HeatCapacity = ConstantHeatCapacity(; Cp = CpLC), + Conductivity = ConstantConductivity(; k = kLC), + RadioactiveHeat = ConstantRadioactiveHeat(; H_r = HLC * ρLC), # [H] = W/m^3 ), # Name = "LithosphericMantle", SetMaterialParams(; - Phase = 3, - Density = ConstantDensity(; ρ=ρM), - HeatCapacity = ConstantHeatCapacity(; Cp=CpM), - Conductivity = ConstantConductivity(; k=kM), - RadioactiveHeat = ConstantRadioactiveHeat(; H_r=HM*ρM), # [H] = W/m^3 + Phase = 3, + Density = ConstantDensity(; ρ = ρM), + HeatCapacity = ConstantHeatCapacity(; Cp = CpM), + Conductivity = ConstantConductivity(; k = kM), + RadioactiveHeat = ConstantRadioactiveHeat(; H_r = HM * ρM), # [H] = W/m^3 ), ) return rheology @@ -1389,7 +1410,6 @@ function compute_phase(Phase, Temp, X, Y, Z, s::ConstantPhase) end - """ LithosphericPhases(Layers=[10 20 15], Phases=[1 2 3 4], Tlab=nothing ) @@ -1403,9 +1423,9 @@ Parameters """ @with_kw_noshow mutable struct LithosphericPhases <: AbstractPhaseNumber - Layers = [10., 20., 15.] - Phases = [1, 2 , 3, 4] - Tlab = nothing + Layers = [10.0, 20.0, 15.0] + Phases = [1, 2, 3, 4] + Tlab = nothing end @@ -1429,22 +1449,22 @@ Parameters - Ztop - Vertical coordinate of top of model box - Grid - Grid structure (usually obtained with read_LaMEM_inputfile) """ -function compute_phase(Phase, Temp, X, Y, Z, s::LithosphericPhases; Ztop=0) - @unpack Layers, Phases, Tlab = s +function compute_phase(Phase, Temp, X, Y, Z, s::LithosphericPhases; Ztop = 0) + @unpack Layers, Phases, Tlab = s Phase .= Phases[end] - for i = 1 : length(Layers) - Zbot = Ztop-Layers[i] - ind = findall( ( Z .>= Zbot) .& (Z .<= Ztop) ); + for i in 1:length(Layers) + Zbot = Ztop - Layers[i] + ind = findall((Z .>= Zbot) .& (Z .<= Ztop)) Phase[ind] .= Phases[i] - Ztop = Zbot + Ztop = Zbot end # set phase to mantle if requested if Tlab != nothing - ind = findall(Temp .> Tlab) + ind = findall(Temp .> Tlab) Phase[ind] .= Phases[end] end @@ -1452,7 +1472,7 @@ function compute_phase(Phase, Temp, X, Y, Z, s::LithosphericPhases; Ztop=0) end # allow AbstractGeneralGrid instead of Z and Ztop -compute_phase(Phase, Temp, Grid::LaMEM_grid, s::LithosphericPhases) = compute_phase(Phase, Temp, Grid.X, Grid.Y, Grid.Z, s::LithosphericPhases, Ztop=maximum(Grid.coord_z)) +compute_phase(Phase, Temp, Grid::LaMEM_grid, s::LithosphericPhases) = compute_phase(Phase, Temp, Grid.X, Grid.Y, Grid.Z, s::LithosphericPhases, Ztop = maximum(Grid.coord_z)) """ @@ -1472,11 +1492,11 @@ Parameters """ @with_kw_noshow mutable struct McKenzie_subducting_slab <: AbstractThermalStructure Tsurface::Float64 = 20.0 # top T - Tmantle::Float64 = 1350.0 # bottom T - Adiabat::Float64 = 0.4 # Adiabatic gradient in K/km - v_cm_yr::Float64 = 2.0 # velocity of subduction [cm/yr] - κ::Float64 = 1e-6 # Thermal diffusivity [m2/s] - it::Int64 = 36 # number of harmonic summation (look Mckenzie formula) + Tmantle::Float64 = 1350.0 # bottom T + Adiabat::Float64 = 0.4 # Adiabatic gradient in K/km + v_cm_yr::Float64 = 2.0 # velocity of subduction [cm/yr] + κ::Float64 = 1.0e-6 # Thermal diffusivity [m2/s] + it::Int64 = 36 # number of harmonic 
summation (look Mckenzie formula) end """ @@ -1496,34 +1516,34 @@ Parameters - `Phase`: Phase array - `s`: `McKenzie_subducting_slab` """ -function compute_thermal_structure(Temp, X, Y, Z,Phase, s::McKenzie_subducting_slab) +function compute_thermal_structure(Temp, X, Y, Z, Phase, s::McKenzie_subducting_slab) @unpack Tsurface, Tmantle, Adiabat, v_cm_yr, κ, it = s # Thickness of the layer: - Thickness = (maximum(Z)-minimum(Z)); - Zshift = Z .- Z[end] # McKenzie model is defined with Z = 0 at the bottom of the slab + Thickness = (maximum(Z) - minimum(Z)) + Zshift = Z .- Z[end] # McKenzie model is defined with Z = 0 at the bottom of the slab # Convert subduction velocity from cm/yr -> m/s; - convert_velocity = 1/(100.0*365.25*60.0*60.0*24.0); - v_s = v_cm_yr*convert_velocity; + convert_velocity = 1 / (100.0 * 365.25 * 60.0 * 60.0 * 24.0) + v_s = v_cm_yr * convert_velocity # calculate the thermal Reynolds number - Re = (v_s*Thickness*1000)/2/κ; # factor 1000 to transfer Thickness from km to m + Re = (v_s * Thickness * 1000) / 2 / κ # factor 1000 to transfer Thickness from km to m # McKenzie model - sc = 1/Thickness - σ = ones(size(Temp)); + sc = 1 / Thickness + σ = ones(size(Temp)) # Dividi et impera - for i=1:it - a = (-1.0).^(i)./(i.*pi) - b = (Re .- (Re.^2 .+ i^2.0 .* pi^2.0).^(0.5)) .*X .*sc; - c = sin.(i .*pi .*(1 .- abs.(Zshift .*sc))) ; - e = exp.(b); - σ .+= 2*a.*e.*c + for i in 1:it + a = (-1.0) .^ (i) ./ (i .* pi) + b = (Re .- (Re .^ 2 .+ i^2.0 .* pi^2.0) .^ (0.5)) .* X .* sc + c = sin.(i .* pi .* (1 .- abs.(Zshift .* sc))) + e = exp.(b) + σ .+= 2 * a .* e .* c end - Temp .= Tsurface .+ (Tmantle-Tsurface).*σ; - Temp .= Temp + (Adiabat*abs.(Z)) + Temp .= Tsurface .+ (Tmantle - Tsurface) .* σ + Temp .= Temp + (Adiabat * abs.(Z)) return Temp end @@ -1544,12 +1564,12 @@ Parameters """ @with_kw_noshow mutable struct LinearWeightedTemperature <: AbstractThermalStructure - w_min::Float64 = 0.0; - w_max::Float64 = 1.0; - crit_dist::Float64 = 100.0; - dir::Symbol =:X; - F1::AbstractThermalStructure = ConstantTemp(); - F2::AbstractThermalStructure = ConstantTemp(); + w_min::Float64 = 0.0 + w_max::Float64 = 1.0 + crit_dist::Float64 = 100.0 + dir::Symbol = :X + F1::AbstractThermalStructure = ConstantTemp() + F2::AbstractThermalStructure = ConstantTemp() end """ @@ -1571,35 +1591,35 @@ can be used to smooth the temperature field from continent ocean: - then modify F. """ function compute_thermal_structure(Temp, X, Y, Z, Phase, s::LinearWeightedTemperature) - @unpack w_min, w_max, crit_dist,dir = s; - @unpack F1, F2 = s; + @unpack w_min, w_max, crit_dist, dir = s + @unpack F1, F2 = s if dir === :X - dist = X; - elseif dir ===:Y - dist = Y; + dist = X + elseif dir === :Y + dist = Y else - dist = Z; + dist = Z end # compute the 1D thermal structures - Temp1 = zeros(size(Temp)); - Temp2 = zeros(size(Temp)); - Temp1 = compute_thermal_structure(Temp1, X, Y, Z, Phase, F1); - Temp2 = compute_thermal_structure(Temp2, X, Y, Z, Phase, F2); + Temp1 = zeros(size(Temp)) + Temp2 = zeros(size(Temp)) + Temp1 = compute_thermal_structure(Temp1, X, Y, Z, Phase, F1) + Temp2 = compute_thermal_structure(Temp2, X, Y, Z, Phase, F2) # Compute the weights - weight = w_min .+(w_max-w_min) ./(crit_dist) .*(dist) + weight = w_min .+ (w_max - w_min) ./ (crit_dist) .* (dist) - ind_1 = findall(weight .>w_max); - ind_2 = findall(weight . 
w_max) + ind_2 = findall(weight .< w_min) # Change the weight - weight[ind_1] .= w_max; - weight[ind_2] .= w_min; + weight[ind_1] .= w_max + weight[ind_2] .= w_min # Average temperature - Temp .= Temp1 .*(1.0 .- weight) + Temp2 .* weight; + Temp .= Temp1 .* (1.0 .- weight) + Temp2 .* weight return Temp end @@ -1644,37 +1664,37 @@ Parameters """ @with_kw_noshow mutable struct Trench{Nseg} <: AbstractTrenchSlab - Start::NTuple{Nseg,Float64} = (0.0,0.0) # Start (x,y) coordinates of trench (in mapview) - End::NTuple{Nseg,Float64} = (0.0,1.0) # End (x,y) coordinates of trench (in mapview) + Start::NTuple{Nseg, Float64} = (0.0, 0.0) # Start (x,y) coordinates of trench (in mapview) + End::NTuple{Nseg, Float64} = (0.0, 1.0) # End (x,y) coordinates of trench (in mapview) n_seg::Int64 = 50 # number of segments in downdip direction - Length:: Float64 = 400.0 # length of the slab - Thickness:: Float64 = 100.0 # thickness of the slab - Lb:: Float64 = 200.0 # Length at which all the bending is happening (Lb<=Length) + Length::Float64 = 400.0 # length of the slab + Thickness::Float64 = 100.0 # thickness of the slab + Lb::Float64 = 200.0 # Length at which all the bending is happening (Lb<=Length) θ_max::Float64 = 45.0 # max bending angle, (must be converted into radians) direction::Float64 = -1.0 # Direction of the bending angle (-1= left to right or 1.0=right to left) - d_decoupling:: Float64 = 100 # decoupling depth of the slab + d_decoupling::Float64 = 100 # decoupling depth of the slab type_bending::Symbol = :Ribe # Mode Ribe | Linear | Customize WeakzoneThickness::Float64 = 0.0 # Thickness of the weakzone WeakzonePhase::Int64 = 5 # Phase of the weak zone end -function show(io::IO, g::Trench{Nseg}) where Nseg - println(io,"Trench{$Nseg}, $(g.n_seg) segments") - println(io," Trench [Start/End] : $(g.Start) - $(g.End) [km]") - println(io," Slab [Thickness] : $(g.Thickness) km") - println(io," Slab [Length] : $(g.Length) km") - println(io," Bending length [Lb] : $(g.Lb) km") - println(io," Max. angle [θ_max] : $(g.θ_max)ᵒ") - if g.direction==-1.0 - println(io," Dip [direction] : left to right [$(g.direction)]") +function show(io::IO, g::Trench{Nseg}) where {Nseg} + println(io, "Trench{$Nseg}, $(g.n_seg) segments") + println(io, " Trench [Start/End] : $(g.Start) - $(g.End) [km]") + println(io, " Slab [Thickness] : $(g.Thickness) km") + println(io, " Slab [Length] : $(g.Length) km") + println(io, " Bending length [Lb] : $(g.Lb) km") + println(io, " Max. 
angle [θ_max] : $(g.θ_max)ᵒ") + if g.direction == -1.0 + println(io, " Dip [direction] : left to right [$(g.direction)]") else - println(io," Dip [direction] : right to left [$(g.direction)]") + println(io, " Dip [direction] : right to left [$(g.direction)]") end - println(io," Depth [d_decoupling]: $(g.d_decoupling) km") - println(io," Bending [type_bending]: $(g.type_bending)") - println(io," [WeakzoneThickness] : $(g.WeakzoneThickness) km") - if g.WeakzoneThickness>0 - println(io," Weakzone phase : $(g.WeakzonePhase)") + println(io, " Depth [d_decoupling]: $(g.d_decoupling) km") + println(io, " Bending [type_bending]: $(g.type_bending)") + println(io, " [WeakzoneThickness] : $(g.WeakzoneThickness) km") + if g.WeakzoneThickness > 0 + println(io, " Weakzone phase : $(g.WeakzonePhase)") end return nothing @@ -1697,57 +1717,57 @@ Next, it compute the coordinates assuming that the trench is at 0.0, and assumin """ function compute_slab_surface(trench::Trench) - @unpack Thickness, Length, n_seg, Lb, θ_max, type_bending, direction, WeakzoneThickness = trench; + @unpack Thickness, Length, n_seg, Lb, θ_max, type_bending, direction, WeakzoneThickness = trench # Convert θ_max into radians - θ_max *= π / 180; + θ_max *= π / 180 # Allocate the top, mid and bottom surface - Top = zeros(n_seg+1,2); - Bottom = zeros(n_seg+1,2); - WeakZone = zeros(n_seg+1,2); - Bottom[1,2] = -Thickness; - WeakZone[1,2] = WeakzoneThickness; - MidS = zeros(n_seg+1,2); - MidS[1,2] = -Thickness/2; + Top = zeros(n_seg + 1, 2) + Bottom = zeros(n_seg + 1, 2) + WeakZone = zeros(n_seg + 1, 2) + Bottom[1, 2] = -Thickness + WeakZone[1, 2] = WeakzoneThickness + MidS = zeros(n_seg + 1, 2) + MidS[1, 2] = -Thickness / 2 # Initialize the length. - l = 0.0; # initial length - it = 1; # iteration + l = 0.0 # initial length + it = 1 # iteration - dl = Length/n_seg; # dl - while lLb + if l > Lb θ = θ_max elseif type === :Ribe # Compute theta - θ = θ_max*l^2*((3*Lb-2*l))/(Lb^3); + θ = θ_max * l^2 * ((3 * Lb - 2 * l)) / (Lb^3) elseif type === :Linear # Compute the actual angle - θ = l*(θ_max-0)/(Lb); + θ = l * (θ_max - 0) / (Lb) end return θ end @@ -1785,64 +1805,65 @@ end Function that finds the perpendicular distance to the top and bottom of the slab `d`, and the current length of the slab `l`. 
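The `:Ribe` branch of `compute_bending_angle!` above evaluates θ(l) = θ_max * l^2 * (3*Lb - 2*l) / Lb^3, which ramps smoothly from 0 at the trench to θ_max at l = Lb and stays constant beyond. A quick self-contained check with the `Trench` defaults (θ_max = 45, Lb = 200); the helper name is illustrative:

```julia
# Ribe-type bending angle (same polynomial as compute_bending_angle! above)
ribe_angle(l; θ_max = 45.0, Lb = 200.0) = l > Lb ? θ_max : θ_max * l^2 * (3Lb - 2l) / Lb^3

ribe_angle(0.0)      # 0.0  -> slab is flat at the trench
ribe_angle(100.0)    # 22.5 -> half of θ_max at l = Lb/2
ribe_angle(250.0)    # 45.0 -> fully bent for l > Lb
```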
""" -function find_slab_distance!(ls, d, X,Y,Z, Top, Bottom, trench::Trench) - @unpack Thickness, Length, n_seg, Start, End, direction = trench; +function find_slab_distance!(ls, d, X, Y, Z, Top, Bottom, trench::Trench) + @unpack Thickness, Length, n_seg, Start, End, direction = trench # Perform rotation of 3D coordinates along the angle from Start -> End: - Xrot = X .- Start[1]; - Yrot = Y .- Start[2]; + Xrot = X .- Start[1] + Yrot = Y .- Start[2] - StrikeAngle = -atand((End[2]-Start[2])/(End[1]-Start[1])) - Rot3D!(Xrot,Yrot,Z, StrikeAngle, 0.0) + StrikeAngle = -atand((End[2] - Start[2]) / (End[1] - Start[1])) + Rot3D!(Xrot, Yrot, Z, StrikeAngle, 0.0) - xb = Rot3D(End[1]-Start[1],End[2]-Start[2], 0.0, cosd(StrikeAngle), sind(StrikeAngle), 1.0, 0.0) + xb = Rot3D(End[1] - Start[1], End[2] - Start[2], 0.0, cosd(StrikeAngle), sind(StrikeAngle), 1.0, 0.0) # dl - dl = trench.Length/n_seg; + dl = trench.Length / n_seg l = 0 # length at the trench position - D = @SVector [Top[1,2], Bottom[1,2], Bottom[1,2],Top[1,2] ] + D = @SVector [Top[1, 2], Bottom[1, 2], Bottom[1, 2], Top[1, 2]] # Construct the slab - for i = 1:(n_seg-1) - ln = l+dl; + for i in 1:(n_seg - 1) + ln = l + dl - pa = (Top[i,1], Top[i,2]); # D = 0 | L = l - pb = (Bottom[i,1], Bottom[i,2]); # D = -Thickness | L=l + pa = (Top[i, 1], Top[i, 2]) # D = 0 | L = l + pb = (Bottom[i, 1], Bottom[i, 2]) # D = -Thickness | L=l - pc = (Bottom[i+1,1],Bottom[i+1,2]); # D = -Thickness |L=L+dl - pd = (Top[i+1,1],Top[i+1,2]) # D = 0| L = L+dl + pc = (Bottom[i + 1, 1], Bottom[i + 1, 2]) # D = -Thickness |L=L+dl + pd = (Top[i + 1, 1], Top[i + 1, 2]) # D = 0| L = L+dl # Create the polygon - poly_y = @SVector [pa[1],pb[1],pc[1],pd[1]]; - poly_z = @SVector [pa[2],pb[2],pc[2],pd[2]]; + poly_y = @SVector [pa[1], pb[1], pc[1], pd[1]] + poly_z = @SVector [pa[2], pb[2], pc[2], pd[2]] # find a sub set of particles - ymin,ymax = extrema(poly_y); - zmin,zmax = extrema(poly_z); + ymin, ymax = extrema(poly_y) + zmin, zmax = extrema(poly_z) - ind_s = findall(0.0.<= Xrot.<= xb[1] .&& ymin .<= Yrot .<= ymax .&& zmin .<= Z .<= zmax); + ind_s = findall(0.0 .<= Xrot .<= xb[1] .&& ymin .<= Yrot .<= ymax .&& zmin .<= Z .<= zmax) # Find the particles - yp = Yrot[ind_s]; - zp = Z[ind_s]; + yp = Yrot[ind_s] + zp = Z[ind_s] # Initialize the ind that are going to be used by inpoly - ind = zeros(Bool,size(zp)); - inpolygon!(ind,poly_y,poly_z,yp,zp); # determine whether points are inside the polygon or not + ind = zeros(Bool, size(zp)) + inpolygon!(ind, poly_y, poly_z, yp, zp) # determine whether points are inside the polygon or not # indexes of the segment ind_seg = ind_s[ind] # Loop over the chosen particles and interpolate the current value of L and D. for ip in ind_seg - point_ = (Yrot[ip], Z[ip]); - d[ip] = -distance_to_linesegment(point_, pa, pd) - ls[ip] = distance_to_linesegment(point_, pb, pa) + l + point_ = (Yrot[ip], Z[ip]) + d[ip] = -distance_to_linesegment(point_, pa, pd) + ls[ip] = distance_to_linesegment(point_, pb, pa) + l end #Update l - l = ln; + l = ln end + return end @@ -1851,33 +1872,33 @@ end Computes the distance normal distance from a point `p` to a line segment defined by the points `v` and `w`. """ -function distance_to_linesegment(p::NTuple{2,_T}, v::NTuple{2,_T}, w::NTuple{2,_T}) where _T<:Number +function distance_to_linesegment(p::NTuple{2, _T}, v::NTuple{2, _T}, w::NTuple{2, _T}) where {_T <: Number} dx = w[1] - v[1] dy = w[2] - v[2] - l2 = dx*dx + dy*dy # i.e. |w-v|^2 - avoid a sqrt + l2 = dx * dx + dy * dy # i.e. 
|w-v|^2 - avoid a sqrt if l2 == 0.0 dx = p[1] - v[1] dy = p[2] - v[2] - return sqrt(dx*dx + dy*dy) # v == w case + return sqrt(dx * dx + dy * dy) # v == w case end # Consider the line extending the segment, parameterized as v + t (w - v). # We find projection of point p onto the line. # It falls where t = [(p-v) . (w-v)] / |w-v|^2 - t = ((p[1] - v[1])*dx + (p[2] - v[2])*dy) / l2 + t = ((p[1] - v[1]) * dx + (p[2] - v[2]) * dy) / l2 if t < 0.0 dx = p[1] - v[1] dy = p[2] - v[2] - return sqrt(dx*dx + dy*dy) # Beyond the 'v' end of the segment + return sqrt(dx * dx + dy * dy) # Beyond the 'v' end of the segment elseif t > 1.0 dx = p[1] - w[1] dy = p[2] - w[2] - return sqrt(dx*dx + dy*dy) # Beyond the 'w' end of the segment + return sqrt(dx * dx + dy * dy) # Beyond the 'w' end of the segment end projection_x = v[1] + t * dx projection_y = v[2] + t * dy dx = p[1] - projection_x dy = p[2] - projection_y - return sqrt(dx*dx + dy*dy) + return sqrt(dx * dx + dy * dy) end """ @@ -1914,48 +1935,50 @@ julia> add_slab!(Phase, Temp, Cart, trench, phase = phase, T = TsHC) ``` """ -function add_slab!(Phase, Temp, Grid::AbstractGeneralGrid, trench::Trench; # required input +function add_slab!( + Phase, Temp, Grid::AbstractGeneralGrid, trench::Trench; # required input phase::AbstractPhaseNumber = ConstantPhase(1), # Sets the phase number(s) in the slab - T::Union{AbstractThermalStructure,Nothing} = nothing, cell=false ) # Sets the thermal structure (various functions are available), + T::Union{AbstractThermalStructure, Nothing} = nothing, cell = false + ) # Sets the thermal structure (various functions are available), # Retrieve 3D data arrays for the grid - X,Y,Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # Compute top and bottom of the slab - Top,Bottom, WeakZone = compute_slab_surface(trench); + Top, Bottom, WeakZone = compute_slab_surface(trench) # Find the distance to the slab (along & perpendicular) - d = fill(NaN,size(Grid)); # -> d = distance perpendicular to the slab - ls = fill(NaN,size(Grid)); # -> l = length from the trench along the slab - find_slab_distance!(ls, d, X,Y,Z, Top, Bottom, trench); + d = fill(NaN, size(Grid)) # -> d = distance perpendicular to the slab + ls = fill(NaN, size(Grid)) # -> l = length from the trench along the slab + find_slab_distance!(ls, d, X, Y, Z, Top, Bottom, trench) # Function to fill up the temperature and the phase. - ind = findall((-trench.Thickness .<= d .<= 0.0)); + ind = findall((-trench.Thickness .<= d .<= 0.0)) if !isempty(ind) if isa(T, LinearWeightedTemperature) - l_decouplingind = findall(Top[:,2].<=-trench.d_decoupling); + l_decouplingind = findall(Top[:, 2] .<= -trench.d_decoupling) if !isempty(l_decouplingind) - l_decoupling = Top[l_decouplingind[1],1]; - T.crit_dist = abs(l_decoupling); + l_decoupling = Top[l_decouplingind[1], 1] + T.crit_dist = abs(l_decoupling) end end # Compute thermal structure accordingly. 
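`distance_to_linesegment` above clamps the projection parameter `t` to the segment, so points beyond either endpoint get the distance to that endpoint rather than to the infinite line. Two doctest-style calls of the function as defined above (values chosen purely for illustration):

```julia
distance_to_linesegment((1.0, 1.0), (0.0, 0.0), (2.0, 0.0))  # 1.0: projection falls inside the segment
distance_to_linesegment((3.0, 1.0), (0.0, 0.0), (2.0, 0.0))  # ≈ 1.414: beyond the 'w' end, distance to (2, 0)
```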
See routines below for different options {Future: introducing the length along the trench for having lateral varying properties along the trench} if !isnothing(T) - Temp[ind] = compute_thermal_structure(Temp[ind], ls[ind], Y[ind], d[ind], Phase[ind], T); + Temp[ind] = compute_thermal_structure(Temp[ind], ls[ind], Y[ind], d[ind], Phase[ind], T) end # Set the phase Phase[ind] = compute_phase(Phase[ind], Temp[ind], ls[ind], Y[ind], d[ind], phase) # Add a weak zone on top of the slab (indicated by a phase number but not by temperature) - if trench.WeakzoneThickness>0.0 - d_weakzone = fill(NaN,size(Grid)); # -> d = distance perpendicular to the slab - ls_weakzone = fill(NaN,size(Grid)); # -> l = length from the trench along the slab - find_slab_distance!(ls_weakzone, d_weakzone, X,Y,Z, WeakZone, Top, trench); + if trench.WeakzoneThickness > 0.0 + d_weakzone = fill(NaN, size(Grid)) # -> d = distance perpendicular to the slab + ls_weakzone = fill(NaN, size(Grid)) # -> l = length from the trench along the slab + find_slab_distance!(ls_weakzone, d_weakzone, X, Y, Z, WeakZone, Top, trench) - ind = findall( (-trench.WeakzoneThickness .<= d_weakzone .<= 0.0) .& (Z .>-trench.d_decoupling) ); + ind = findall((-trench.WeakzoneThickness .<= d_weakzone .<= 0.0) .& (Z .> -trench.d_decoupling)) Phase[ind] .= trench.WeakzonePhase end end @@ -2001,21 +2024,21 @@ add_fault!(Phase, Temp, Grid; ``` """ function add_fault!( - Phase, - Temp, - Grid::AbstractGeneralGrid; - Start=(20,100), - End=(10,80), - Fault_thickness=10.0, - Depth_extent=nothing, - DipAngle=0e0, - phase=ConstantPhase(1), - T=nothing, - cell=false -) + Phase, + Temp, + Grid::AbstractGeneralGrid; + Start = (20, 100), + End = (10, 80), + Fault_thickness = 10.0, + Depth_extent = nothing, + DipAngle = 0.0e0, + phase = ConstantPhase(1), + T = nothing, + cell = false + ) # Extract the coordinates - X, Y, Z = coordinate_grids(Grid, cell=cell) + X, Y, Z = coordinate_grids(Grid, cell = cell) # Calculate the direction vector from Start to End direction = (End[1] - Start[1], End[2] - Start[2]) @@ -2028,20 +2051,20 @@ function add_fault!( # Create a mask for the fault region fault_mask = falses(size(X)) - for k in 1:size(Z, 3), j in 1:size(Y, 2), i in 1:size(X, 1) - # Rotate the point using the dip angle - x_rot, y_rot, z_rot = Rot3D(X[i, j, k], Y[i, j, k], Z[i, j, k], 1.0, 0.0, cosd(DipAngle), sind(DipAngle)) - - # Calculate the projection of the rotated point onto the fault line - projection_length = (x_rot - Start[1]) * unit_direction[1] + (y_rot - Start[2]) * unit_direction[2] - if 0 ≤ projection_length ≤ length - # Calculate the perpendicular distance to the fault line - perpendicular_distance = abs((x_rot - Start[1]) * unit_direction[2] - (y_rot - Start[2]) * unit_direction[1]) - if perpendicular_distance ≤ fault_half_thickness - fault_mask[i, j, k] = true - end + for k in 1:size(Z, 3), j in 1:size(Y, 2), i in 1:size(X, 1) + # Rotate the point using the dip angle + x_rot, y_rot, z_rot = Rot3D(X[i, j, k], Y[i, j, k], Z[i, j, k], 1.0, 0.0, cosd(DipAngle), sind(DipAngle)) + + # Calculate the projection of the rotated point onto the fault line + projection_length = (x_rot - Start[1]) * unit_direction[1] + (y_rot - Start[2]) * unit_direction[2] + if 0 ≤ projection_length ≤ length + # Calculate the perpendicular distance to the fault line + perpendicular_distance = abs((x_rot - Start[1]) * unit_direction[2] - (y_rot - Start[2]) * unit_direction[1]) + if perpendicular_distance ≤ fault_half_thickness + fault_mask[i, j, k] = true end end + end ind = 
findall(fault_mask) diff --git a/src/WaterFlow.jl b/src/WaterFlow.jl index 20633cd0..775f6b85 100644 --- a/src/WaterFlow.jl +++ b/src/WaterFlow.jl @@ -10,15 +10,15 @@ export waterflows Computes the spacing with central differences """ -function spacing(lon,lat) +function spacing(lon, lat) dlon = zeros(size(lon.val)[1:2]) dlat = zeros(size(lat.val)[1:2]) - @views dlon[2:end-1,:] = (lon.val[3:end,:,1] - lon.val[1:end-2,:,1])/2 - dlon[1,:] = dlon[2,:] - dlon[end,:] = dlon[end-1,:] - dlat[:,2:end-1] = (lat.val[:,3:end,1] - lat.val[:,1:end-2,1])/2 - dlat[:,1] = dlat[:,2] - dlat[:,end] = dlat[:,end-1] + @views dlon[2:(end - 1), :] = (lon.val[3:end, :, 1] - lon.val[1:(end - 2), :, 1]) / 2 + dlon[1, :] = dlon[2, :] + dlon[end, :] = dlon[end - 1, :] + dlat[:, 2:(end - 1)] = (lat.val[:, 3:end, 1] - lat.val[:, 1:(end - 2), 1]) / 2 + dlat[:, 1] = dlat[:, 2] + dlat[:, end] = dlat[:, end - 1] return dlon, dlat end @@ -29,11 +29,11 @@ Returns the cell area for a Topographic dataset in m² (required for upstream ar """ function cell_area(Topo::GeoData) - proj = ProjectionPoint(Lon=mean(Topo.lon.val[:]), Lat=mean(Topo.lat.val[:])) + proj = ProjectionPoint(Lon = mean(Topo.lon.val[:]), Lat = mean(Topo.lat.val[:])) Topo_cart = convert2CartData(Topo, proj) dx, dy = spacing(Topo_cart.x, Topo_cart.y) - area_m2 = dx.*dy*1e6 + area_m2 = dx .* dy * 1.0e6 return area_m2 end @@ -68,29 +68,31 @@ GeoData ``` """ -function waterflows(Topo::GeoData, flowdir_fn= WhereTheWaterFlows.d8dir_feature; feedback_fn=nothing, drain_pits=true, bnd_as_sink=true, rainfall=nothing, minsize=300) +function waterflows(Topo::GeoData, flowdir_fn = WhereTheWaterFlows.d8dir_feature; feedback_fn = nothing, drain_pits = true, bnd_as_sink = true, rainfall = nothing, minsize = 300) cellarea = cell_area(Topo) cellarea_m2 = cellarea if !isnothing(rainfall) - @assert typeof(rainfall) == Array{Float64,2} + @assert typeof(rainfall) == Array{Float64, 2} cellarea = rainfall end - dem = Topo.depth.val[:,:,1] + dem = Topo.depth.val[:, :, 1] - ni = size(Topo.depth.val) - area = zeros(ni) - slen = zeros(Int64, ni) - dir = zeros(Int8, ni) - nout = zeros(Int8, ni) - nin = zeros(Int8, ni) - c = zeros(Int64, ni) + ni = size(Topo.depth.val) + area = zeros(ni) + slen = zeros(Int64, ni) + dir = zeros(Int8, ni) + nout = zeros(Int8, ni) + nin = zeros(Int8, ni) + c = zeros(Int64, ni) - area[:,:,1], slen[:,:,1], dir[:,:,1], nout[:,:,1], nin[:,:,1], sinks, pits, c[:,:,1], bnds = waterflows(dem, cellarea, flowdir_fn; - feedback_fn=feedback_fn, drain_pits=drain_pits, bnd_as_sink=bnd_as_sink) + area[:, :, 1], slen[:, :, 1], dir[:, :, 1], nout[:, :, 1], nin[:, :, 1], sinks, pits, c[:, :, 1], bnds = waterflows( + dem, cellarea, flowdir_fn; + feedback_fn = feedback_fn, drain_pits = drain_pits, bnd_as_sink = bnd_as_sink + ) - catchment_large = prune_catchments(c, minsize; val=0) + catchment_large = prune_catchments(c, minsize; val = 0) _, id_max = findmax(area) largest_catchment = catchment_large .== catchment_large[id_max] @@ -100,6 +102,6 @@ function waterflows(Topo::GeoData, flowdir_fn= WhereTheWaterFlows.d8dir_feature; log10_area = log10.(area) log10_largest_area = log10.(largest_area) - Topo_water = addfield(Topo,(;area, slen, dir, nout, nin, c, cellarea_m2, catchment_large, log10_area, largest_catchment, largest_area, log10_largest_area)) + Topo_water = addfield(Topo, (; area, slen, dir, nout, nin, c, cellarea_m2, catchment_large, log10_area, largest_catchment, largest_area, log10_largest_area)) return Topo_water, sinks, pits, bnds end diff --git a/src/data_import.jl 
b/src/data_import.jl index 711bec1d..72952caa 100644 --- a/src/data_import.jl +++ b/src/data_import.jl @@ -11,94 +11,94 @@ export screenshot_to_GeoData, screenshot_to_CartData, screenshot_to_UTMData, get # import CSV data using standard library functions # here we assume that the data is indeed comma separated and that comments are preceded with a "#" -function ReadCSV_LatLon(filename::AbstractString,DepthCon::AbstractString) +function ReadCSV_LatLon(filename::AbstractString, DepthCon::AbstractString) # import data from file with coordinates given in lat/lon/depth format and additional data given in additional columns # the idea here is to assign the data to a structure of the type GeoData which will then be used for further processing - data,hdr = readdlm(filename,',', Float64,'\n'; header=true, skipblanks=true, comments=true, comment_char='#') + data, hdr = readdlm(filename, ',', Float64, '\n'; header = true, skipblanks = true, comments = true, comment_char = '#') # initialize array of structures to store the data # while doing so, separate the unit from the variable name - ndata = size(data,1) # number of entries - nfields = size(data,2) # number of fields + ndata = size(data, 1) # number of entries + nfields = size(data, 2) # number of fields # declare some variables as local, otherwise they are not known outside of the following for loop local LonData local LatData local DepthData local vals_range - vals_range = zeros(Int64,nfields-3) - ivals = 1; + vals_range = zeros(Int64, nfields - 3) + ivals = 1 # get the fields for lon/lat/depth - for ifield = 1:nfields - if occursin("lon",hdr[ifield]) - lon_ind = ifield; - varname = GetVariableName(hdr[ifield])# get variable name - varunit = GetVariableUnit(hdr[ifield])# get variable unit - LonData = data[1:end,ifield] - elseif occursin("lat",hdr[ifield]) - lat_ind = ifield; - varname = GetVariableName(hdr[ifield])# get variable name - varunit = GetVariableUnit(hdr[ifield])# get variable unit - LatData = data[1:end,ifield] - elseif occursin("depth",hdr[ifield]) + for ifield in 1:nfields + if occursin("lon", hdr[ifield]) + lon_ind = ifield + varname = GetVariableName(hdr[ifield]) # get variable name + varunit = GetVariableUnit(hdr[ifield]) # get variable unit + LonData = data[1:end, ifield] + elseif occursin("lat", hdr[ifield]) + lat_ind = ifield + varname = GetVariableName(hdr[ifield]) # get variable name + varunit = GetVariableUnit(hdr[ifield]) # get variable unit + LatData = data[1:end, ifield] + elseif occursin("depth", hdr[ifield]) # ISSUE: WE DEFINE DEPTH AS NEGATIVE, BUT HOW DO WE SET THAT? 
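`ReadCSV_LatLon` above identifies columns by header substrings (`lon`, `lat`, `depth`), reads units from brackets, and flips the depth sign when `DepthCon == "positive"`. A sketch of a hypothetical input file and call, assuming the helper is reachable from your session (it is an internal routine, so it may need to be qualified with the package module name):

```julia
# Hypothetical CSV for ReadCSV_LatLon: first line is the header (no leading '#'),
# units in brackets, depth given positive-down -> DepthCon = "positive"
csv = """
lon (deg),lat (deg),depth (km),Vs (km/s)
12.0,43.0,10.0,3.4
12.1,43.0,12.0,3.5
"""
write("example_tomo.csv", csv)
data = ReadCSV_LatLon("example_tomo.csv", "positive")   # GeoData with the Vs column as an extra field
```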
# WE COULD ADD A FLAG THAT INDICATES THE DEPTH CONVENTION AND # TREAT IT ACCORDINGLY - depth_ind = ifield; - varname = GetVariableName(hdr[ifield])# get variable name - varunit = GetVariableUnit(hdr[ifield])# get variable unit + depth_ind = ifield + varname = GetVariableName(hdr[ifield]) # get variable name + varunit = GetVariableUnit(hdr[ifield]) # get variable unit # take care of positive or negative convection for depth (here we use negative) - if cmp(DepthCon,"positive")==0 # if depth is given as positive values, convert to negative - DepthData = -1*data[1:end,ifield] - elseif cmp(DepthCon,"negative")==0 - DepthData = data[1:end,ifield] + if cmp(DepthCon, "positive") == 0 # if depth is given as positive values, convert to negative + DepthData = -1 * data[1:end, ifield] + elseif cmp(DepthCon, "negative") == 0 + DepthData = data[1:end, ifield] else # default behaviour assumes that dpeth is negative - DepthData = data[1:end,ifield] + DepthData = data[1:end, ifield] end # if depth is given in m, convert to km - if cmp(varunit,"m")==0 - DepthData = DepthData./1e3; + if cmp(varunit, "m") == 0 + DepthData = DepthData ./ 1.0e3 end else vals_range[ivals] = ifield - ivals = ivals+1 + ivals = ivals + 1 end end # create named tuple for additional data - tmp_hdr = hdr[vals_range]; - tmp_data = data[1:end,vals_range]; + tmp_hdr = hdr[vals_range] + tmp_data = data[1:end, vals_range] - nhdr = size(tmp_hdr,1) + nhdr = size(tmp_hdr, 1) tmp_vec = Vector{Vector{Float64}}(undef, nhdr) # this is used for later tuple creation, I haven't found a better way around - for ihdr = 1:nhdr + for ihdr in 1:nhdr # take care of the header strings - varname = GetVariableName(tmp_hdr[ihdr])# get variable name - varunit = GetVariableUnit(tmp_hdr[ihdr])# get variable unit - if cmp(varunit,"%")==0 - tmp_hdr[ihdr] = string(varname,"_percentage") + varname = GetVariableName(tmp_hdr[ihdr]) # get variable name + varunit = GetVariableUnit(tmp_hdr[ihdr]) # get variable unit + if cmp(varunit, "%") == 0 + tmp_hdr[ihdr] = string(varname, "_percentage") else - tmp_hdr[ihdr] = string(varname,"_",varunit) + tmp_hdr[ihdr] = string(varname, "_", varunit) end # take care of the matrix columns - tmp_vec[ihdr] = tmp_data[1:end,ihdr]; + tmp_vec[ihdr] = tmp_data[1:end, ihdr] end - hdr_tpl = Tuple(Symbol(x) for x in tmp_hdr) # convert header to tuple - data_tpl = Tuple.(tmp_vec for i in size(tmp_vec,1)) # convert data to tuple + hdr_tpl = Tuple(Symbol(x) for x in tmp_hdr) # convert header to tuple + data_tpl = Tuple.(tmp_vec for i in size(tmp_vec, 1)) # convert data to tuple tmp = NamedTuple{hdr_tpl}(data_tpl) println(typeof(tmp)) # initialize data structure - importdata = GeoData(LonData,LatData,DepthData,tmp) + importdata = GeoData(LonData, LatData, DepthData, tmp) # assign data to output return importdata @@ -110,13 +110,13 @@ function GetVariableName(inputstring::SubString{String}) inputstring = String(inputstring) # assume that if the string contains a unit, it is given in brackets (),[],{} indfirst = nothing - iloop = 1 - str2find = ["(","[","{"] + iloop = 1 + str2find = ["(", "[", "{"] # find first occurrence of one of the brackets while isnothing(indfirst) - indfirst = findfirst(str2find[iloop],inputstring) + indfirst = findfirst(str2find[iloop], inputstring) iloop = iloop + 1 - if iloop>length(str2find) + if iloop > length(str2find) break end end @@ -125,7 +125,7 @@ function GetVariableName(inputstring::SubString{String}) if isnothing(indfirst) return inputstring else - indfirst = indfirst[1]-1 + indfirst = indfirst[1] - 1 return 
inputstring[1:indfirst] end end @@ -135,13 +135,13 @@ function GetVariableUnit(inputstring::SubString{String}) inputstring = String(inputstring) # assume that if the string contains a unit, it is given in brackets (),[],{} indfirst = nothing - iloop = 1; - firststr2find = ["(","[","{"] - laststr2find = [")","]","}"] + iloop = 1 + firststr2find = ["(", "[", "{"] + laststr2find = [")", "]", "}"] while isnothing(indfirst) - indfirst = findfirst(firststr2find[iloop],inputstring) + indfirst = findfirst(firststr2find[iloop], inputstring) iloop = iloop + 1 - if iloop>length(firststr2find) + if iloop > length(firststr2find) break end end @@ -149,16 +149,15 @@ function GetVariableUnit(inputstring::SubString{String}) if isnothing(indfirst) return "none" else - indlast = findfirst(laststr2find[iloop-1],inputstring) - indfirst = indfirst[1]+1 - indlast = indlast[1]-1 + indlast = findfirst(laststr2find[iloop - 1], inputstring) + indfirst = indfirst[1] + 1 + indlast = indlast[1] - 1 return inputstring[indfirst:indlast] end end - """ screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight=nothing, Corner_UpperLeft=nothing, Cartesian=false, UTM=false, UTMzone, isnorth=true, fieldname::Symbol=:colors) @@ -170,14 +169,14 @@ The lower right and upper left corners can be specified optionally (to take non- *Note*: if your data is in `UTM` coordinates you also need to provide the `UTMzone` and whether we are on the northern hemisphere or not (`isnorth`). """ -function screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight=nothing, Corner_UpperLeft=nothing, Cartesian=false, UTM=false, UTMzone=nothing, isnorth::Bool=true, fieldname::Symbol=:colors) +function screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight = nothing, Corner_UpperLeft = nothing, Cartesian = false, UTM = false, UTMzone = nothing, isnorth::Bool = true, fieldname::Symbol = :colors) - img = load(filename) # load image + img = load(filename) # load image # Define lon/lat/depth of lower left corner # try to determine if this is a horizontal profile or not - if abs(Corner_UpperRight[3]-Corner_LowerLeft[3])>0.0 + if abs(Corner_UpperRight[3] - Corner_LowerLeft[3]) > 0.0 DepthProfile = true else DepthProfile = false @@ -186,11 +185,11 @@ function screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperR # We should be able to either define 4 corners or only 2 and reconstruct the other two from the if isnothing(Corner_LowerRight) || isnothing(Corner_UpperLeft) if DepthProfile - Corner_LowerRight = (Corner_UpperRight[1], Corner_UpperRight[2], Corner_LowerLeft[3]) - Corner_UpperLeft = (Corner_LowerLeft[1], Corner_LowerLeft[2], Corner_UpperRight[3]) + Corner_LowerRight = (Corner_UpperRight[1], Corner_UpperRight[2], Corner_LowerLeft[3]) + Corner_UpperLeft = (Corner_LowerLeft[1], Corner_LowerLeft[2], Corner_UpperRight[3]) else - Corner_LowerRight = (Corner_UpperRight[1], Corner_LowerLeft[2], Corner_LowerLeft[3]) - Corner_UpperLeft = (Corner_LowerLeft[1], Corner_UpperRight[2], Corner_UpperRight[3]) + Corner_LowerRight = (Corner_UpperRight[1], Corner_LowerLeft[2], Corner_LowerLeft[3]) + Corner_UpperLeft = (Corner_LowerLeft[1], Corner_UpperRight[2], Corner_UpperRight[3]) end end @@ -198,10 +197,10 @@ function screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperR if Cartesian println("Extracting CartData from: $(filename)") println(" └ Corners: x y z") - println(" └ lower left = ($(rpad( Corner_LowerLeft[1],7)), 
$(rpad( Corner_LowerLeft[2],7)), $(rpad( Corner_LowerLeft[3],7)))") - println(" └ lower right = ($(rpad(Corner_LowerRight[1],7)), $(rpad(Corner_LowerRight[2],7)), $(rpad(Corner_LowerRight[3],7)))") - println(" └ upper left = ($(rpad( Corner_UpperLeft[1],7)), $(rpad( Corner_UpperLeft[2],7)), $(rpad( Corner_UpperLeft[3],7)))") - println(" └ upper right = ($(rpad(Corner_UpperRight[1],7)), $(rpad(Corner_UpperRight[2],7)), $(rpad(Corner_UpperRight[3],7)))") + println(" └ lower left = ($(rpad(Corner_LowerLeft[1], 7)), $(rpad(Corner_LowerLeft[2], 7)), $(rpad(Corner_LowerLeft[3], 7)))") + println(" └ lower right = ($(rpad(Corner_LowerRight[1], 7)), $(rpad(Corner_LowerRight[2], 7)), $(rpad(Corner_LowerRight[3], 7)))") + println(" └ upper left = ($(rpad(Corner_UpperLeft[1], 7)), $(rpad(Corner_UpperLeft[2], 7)), $(rpad(Corner_UpperLeft[3], 7)))") + println(" └ upper right = ($(rpad(Corner_UpperRight[1], 7)), $(rpad(Corner_UpperRight[2], 7)), $(rpad(Corner_UpperRight[3], 7)))") end if UTM if isnothing(UTMzone) @@ -209,42 +208,42 @@ function screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperR end println("Extracting UTMData from: $(filename)") if isnorth - println(" UTM Zone $(UTMzone) Northern Hemisphere") + println(" UTM Zone $(UTMzone) Northern Hemisphere") else - println(" UTM Zone $(UTMzone) Southern Hemisphere") + println(" UTM Zone $(UTMzone) Southern Hemisphere") end println(" └ Corners: E-W (x) | N-S (y) | depth (z)") - println(" └ lower left = ($(rpad( Corner_LowerLeft[1],7)), $(rpad( Corner_LowerLeft[2],7)), $(rpad( Corner_LowerLeft[3],7)))") - println(" └ lower right = ($(rpad(Corner_LowerRight[1],7)), $(rpad(Corner_LowerRight[2],7)), $(rpad(Corner_LowerRight[3],7)))") - println(" └ upper left = ($(rpad( Corner_UpperLeft[1],7)), $(rpad( Corner_UpperLeft[2],7)), $(rpad( Corner_UpperLeft[3],7)))") - println(" └ upper right = ($(rpad(Corner_UpperRight[1],7)), $(rpad(Corner_UpperRight[2],7)), $(rpad(Corner_UpperRight[3],7)))") + println(" └ lower left = ($(rpad(Corner_LowerLeft[1], 7)), $(rpad(Corner_LowerLeft[2], 7)), $(rpad(Corner_LowerLeft[3], 7)))") + println(" └ lower right = ($(rpad(Corner_LowerRight[1], 7)), $(rpad(Corner_LowerRight[2], 7)), $(rpad(Corner_LowerRight[3], 7)))") + println(" └ upper left = ($(rpad(Corner_UpperLeft[1], 7)), $(rpad(Corner_UpperLeft[2], 7)), $(rpad(Corner_UpperLeft[3], 7)))") + println(" └ upper right = ($(rpad(Corner_UpperRight[1], 7)), $(rpad(Corner_UpperRight[2], 7)), $(rpad(Corner_UpperRight[3], 7)))") end if (!Cartesian) && (!UTM) println("Extracting GeoData from: $(filename)") println(" └ Corners: lon lat depth") - println(" └ lower left = ($(rpad( Corner_LowerLeft[1],7)), $(rpad( Corner_LowerLeft[2],7)), $(rpad( Corner_LowerLeft[3],7)))") - println(" └ lower right = ($(rpad(Corner_LowerRight[1],7)), $(rpad(Corner_LowerRight[2],7)), $(rpad(Corner_LowerRight[3],7)))") - println(" └ upper left = ($(rpad( Corner_UpperLeft[1],7)), $(rpad( Corner_UpperLeft[2],7)), $(rpad( Corner_UpperLeft[3],7)))") - println(" └ upper right = ($(rpad(Corner_UpperRight[1],7)), $(rpad(Corner_UpperRight[2],7)), $(rpad(Corner_UpperRight[3],7)))") + println(" └ lower left = ($(rpad(Corner_LowerLeft[1], 7)), $(rpad(Corner_LowerLeft[2], 7)), $(rpad(Corner_LowerLeft[3], 7)))") + println(" └ lower right = ($(rpad(Corner_LowerRight[1], 7)), $(rpad(Corner_LowerRight[2], 7)), $(rpad(Corner_LowerRight[3], 7)))") + println(" └ upper left = ($(rpad(Corner_UpperLeft[1], 7)), $(rpad(Corner_UpperLeft[2], 7)), $(rpad(Corner_UpperLeft[3], 7)))") + println(" └ upper right = 
($(rpad(Corner_UpperRight[1], 7)), $(rpad(Corner_UpperRight[2], 7)), $(rpad(Corner_UpperRight[3], 7)))") end # Reconstruct the 4 corners into a matrix - i = 1; Corners_lon = [Corner_UpperLeft[i] Corner_UpperRight[i]; Corner_LowerLeft[i] Corner_LowerRight[i]; ] - i = 2; Corners_lat = [Corner_UpperLeft[i] Corner_UpperRight[i]; Corner_LowerLeft[i] Corner_LowerRight[i]; ] - i = 3; Corners_depth = [Corner_UpperLeft[i] Corner_UpperRight[i]; Corner_LowerLeft[i] Corner_LowerRight[i]; ] + i = 1; Corners_lon = [Corner_UpperLeft[i] Corner_UpperRight[i]; Corner_LowerLeft[i] Corner_LowerRight[i]; ] + i = 2; Corners_lat = [Corner_UpperLeft[i] Corner_UpperRight[i]; Corner_LowerLeft[i] Corner_LowerRight[i]; ] + i = 3; Corners_depth = [Corner_UpperLeft[i] Corner_UpperRight[i]; Corner_LowerLeft[i] Corner_LowerRight[i]; ] - # i = 1; Corners_lon = [Corner_LowerLeft[i] Corner_LowerRight[i]; Corner_UpperLeft[i] Corner_UpperRight[i];] - # i = 2; Corners_lat = [Corner_LowerLeft[i] Corner_LowerRight[i]; Corner_UpperLeft[i] Corner_UpperRight[i];] - # i = 3; Corners_depth = [Corner_LowerLeft[i] Corner_LowerRight[i]; Corner_UpperLeft[i] Corner_UpperRight[i];] + # i = 1; Corners_lon = [Corner_LowerLeft[i] Corner_LowerRight[i]; Corner_UpperLeft[i] Corner_UpperRight[i];] + # i = 2; Corners_lat = [Corner_LowerLeft[i] Corner_LowerRight[i]; Corner_UpperLeft[i] Corner_UpperRight[i];] + # i = 3; Corners_depth = [Corner_LowerLeft[i] Corner_LowerRight[i]; Corner_UpperLeft[i] Corner_UpperRight[i];] # Extract the colors from the grid - img_RGB = convert.(RGB, img) # convert to RGB data + img_RGB = convert.(RGB, img) # convert to RGB data # extract the red-green-blue values from the image - r = zeros(size(img_RGB)) - g = zeros(size(img_RGB)) - b = zeros(size(img_RGB)) + r = zeros(size(img_RGB)) + g = zeros(size(img_RGB)) + b = zeros(size(img_RGB)) for i in eachindex(g) r[i] = Float64(img_RGB[i].r) g[i] = Float64(img_RGB[i].g) @@ -252,35 +251,35 @@ function screenshot_to_GeoData(filename::String, Corner_LowerLeft, Corner_UpperR end # Construct depth, lon and lat 2D grids from the corner points through linear interpolation - grid_size = size(r) - xs = [1,grid_size[1]]; - zs = [1,grid_size[2]]; - interp_linear_lon = linear_interpolation((xs, zs), Corners_lon) # create interpolation object - interp_linear_lat = linear_interpolation((xs, zs), Corners_lat) # create interpolation object - interp_linear_depth = linear_interpolation((xs, zs), Corners_depth) # create interpolation object + grid_size = size(r) + xs = [1, grid_size[1]] + zs = [1, grid_size[2]] + interp_linear_lon = linear_interpolation((xs, zs), Corners_lon) # create interpolation object + interp_linear_lat = linear_interpolation((xs, zs), Corners_lat) # create interpolation object + interp_linear_depth = linear_interpolation((xs, zs), Corners_depth) # create interpolation object # Interpolate - X_int,Y_int,Depth = xyz_grid(1:grid_size[1],1:grid_size[2],0) - X = interp_linear_lon.(X_int, Y_int); - Y = interp_linear_lat.(X_int, Y_int); - Depth = interp_linear_depth.(X_int, Y_int); + X_int, Y_int, Depth = xyz_grid(1:grid_size[1], 1:grid_size[2], 0) + X = interp_linear_lon.(X_int, Y_int) + Y = interp_linear_lat.(X_int, Y_int) + Depth = interp_linear_depth.(X_int, Y_int) # Transfer to 3D arrays (check if needed or not; if yes, redo error message in struct routine) - red = zeros(size(Depth)); red[:,:,1] = r; - green = zeros(size(Depth)); green[:,:,1] = g; - blue = zeros(size(Depth)); blue[:,:,1] = b; + red = zeros(size(Depth)); red[:, :, 1] = r + green = zeros(size(Depth)); 
green[:, :, 1] = g + blue = zeros(size(Depth)); blue[:, :, 1] = b # Create GeoData structure - NOTE: RGB data must be 2D matrixes, not 3D! - color_data = NamedTuple{(fieldname,)}(((red,green,blue),)); + color_data = NamedTuple{(fieldname,)}(((red, green, blue),)) if Cartesian - data_Image = CartData(X, Y, Depth, color_data) + data_Image = CartData(X, Y, Depth, color_data) end if UTM - data_Image = UTMData(X, Y, Depth, UTMzone, isnorth, color_data) + data_Image = UTMData(X, Y, Depth, UTMzone, isnorth, color_data) end if (!Cartesian) && (!UTM) - data_Image = GeoData(X, Y, Depth, color_data) + data_Image = GeoData(X, Y, Depth, color_data) end return data_Image end @@ -291,11 +290,11 @@ end Does the same as `screenshot_to_GeoData`, but returns a `CartData` structure """ -function screenshot_to_CartData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight=nothing, Corner_UpperLeft=nothing, fieldname::Symbol=:colors) +function screenshot_to_CartData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight = nothing, Corner_UpperLeft = nothing, fieldname::Symbol = :colors) # first create a GeoData struct - Data_Cart = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight=Corner_LowerRight, Corner_UpperLeft=Corner_UpperLeft, Cartesian=true, fieldname=fieldname) + Data_Cart = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight = Corner_LowerRight, Corner_UpperLeft = Corner_UpperLeft, Cartesian = true, fieldname = fieldname) return Data_Cart @@ -307,11 +306,11 @@ end Does the same as `screenshot_to_GeoData`, but returns for UTM data Note that you have to specify the `UTMzone` and `isnorth` """ -function screenshot_to_UTMData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight=nothing, Corner_UpperLeft=nothing, UTMzone::Int64=nothing, isnorth::Bool=true, fieldname::Symbol=:colors) +function screenshot_to_UTMData(filename::String, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight = nothing, Corner_UpperLeft = nothing, UTMzone::Int64 = nothing, isnorth::Bool = true, fieldname::Symbol = :colors) - # first create a GeoData struct - Data_UTM = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight=Corner_LowerRight, Corner_UpperLeft=Corner_UpperLeft, Cartesian=false, UTM=true, UTMzone=UTMzone, isnorth=isnorth, fieldname=fieldname) - return Data_UTM + # first create a GeoData struct + Data_UTM = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight; Corner_LowerRight = Corner_LowerRight, Corner_UpperLeft = Corner_UpperLeft, Cartesian = false, UTM = true, UTMzone = UTMzone, isnorth = isnorth, fieldname = fieldname) + return Data_UTM end """ @@ -322,32 +321,32 @@ Extracts longitude, latitude, depth and magnitude from a QuakeML file that has b function getlonlatdepthmag_QuakeML(filename::String) # The QuakeML format consists of a tree with quite a lot of branches, so we have to traverse it to quite some extent to get the desired values # using LightXML: extension??? 
- xdoc = parse_file(filename); # parse the whole file - xroot =root(xdoc); - catalogues = get_elements_by_tagname(xroot,"eventParameters"); - catalogue = catalogues[1]; - events = get_elements_by_tagname(catalogue,"event"); # now those are all events - num_events = size(events,1); + xdoc = parse_file(filename) # parse the whole file + xroot = root(xdoc) + catalogues = get_elements_by_tagname(xroot, "eventParameters") + catalogue = catalogues[1] + events = get_elements_by_tagname(catalogue, "event") # now those are all events + num_events = size(events, 1) # allocate, lat,lon,depth,magnitude - lon = zeros(num_events,1); - lat = zeros(num_events,1); - depth = zeros(num_events,1); - mag = zeros(num_events,1); + lon = zeros(num_events, 1) + lat = zeros(num_events, 1) + depth = zeros(num_events, 1) + mag = zeros(num_events, 1) # now loop over the events and assign the respective values - for ievent = 1:num_events - tmp_event = events[ievent]; - origin = get_elements_by_tagname(events[ievent], "origin"); - magnitude = get_elements_by_tagname(events[ievent], "magnitude"); + for ievent in 1:num_events + tmp_event = events[ievent] + origin = get_elements_by_tagname(events[ievent], "origin") + magnitude = get_elements_by_tagname(events[ievent], "magnitude") # this is a bit dirty, if you find a better/cleaner way, be my guest... - lon[ievent] = parse(Float64,string(collect(child_nodes(collect(child_elements(get_elements_by_tagname(origin[1], "longitude")[1]))[1]))[1])) - lat[ievent] = parse(Float64,string(collect(child_nodes(collect(child_elements(get_elements_by_tagname(origin[1], "latitude")[1]))[1]))[1])) - depth[ievent] = parse(Float64,string(collect(child_nodes(collect(child_elements(get_elements_by_tagname(origin[1], "depth")[1]))[1]))[1])) - mag[ievent] = parse(Float64,string(collect(child_nodes(get_elements_by_tagname(get_elements_by_tagname(magnitude[1],"mag")[1],"value")[1]))[1])); + lon[ievent] = parse(Float64, string(collect(child_nodes(collect(child_elements(get_elements_by_tagname(origin[1], "longitude")[1]))[1]))[1])) + lat[ievent] = parse(Float64, string(collect(child_nodes(collect(child_elements(get_elements_by_tagname(origin[1], "latitude")[1]))[1]))[1])) + depth[ievent] = parse(Float64, string(collect(child_nodes(collect(child_elements(get_elements_by_tagname(origin[1], "depth")[1]))[1]))[1])) + mag[ievent] = parse(Float64, string(collect(child_nodes(get_elements_by_tagname(get_elements_by_tagname(magnitude[1], "mag")[1], "value")[1]))[1])) end - - Data_ISC = GeoData(lon,lat,-1*depth/1e3,(Magnitude=mag,Depth=-1*depth/1e3*km)); + + Data_ISC = GeoData(lon, lat, -1 * depth / 1.0e3, (Magnitude = mag, Depth = -1 * depth / 1.0e3 * km)) return Data_ISC end diff --git a/src/data_types.jl b/src/data_types.jl index 67df1796..3a84dc20 100644 --- a/src/data_types.jl +++ b/src/data_types.jl @@ -3,10 +3,10 @@ import Base: show, size, extrema -export GeoData, ParaviewData, UTMData, CartData, Q1Data, FEData, - lonlatdepth_grid, xyz_grid, velocity_spherical_to_cartesian!, - convert2UTMzone, convert2CartData, convert2FEData, ProjectionPoint, - coordinate_grids, create_CartGrid, CartGrid, flip +export GeoData, ParaviewData, UTMData, CartData, Q1Data, FEData, + lonlatdepth_grid, xyz_grid, velocity_spherical_to_cartesian!, + convert2UTMzone, convert2CartData, convert2FEData, ProjectionPoint, + coordinate_grids, create_CartGrid, CartGrid, flip """ @@ -22,12 +22,12 @@ export GeoData, ParaviewData, UTMData, CartData, Q1Data, FEData, Structure that holds the coordinates of a point that is used to project a 
data set from Lon/Lat to a Cartesian grid and vice-versa. """ struct ProjectionPoint - Lat :: Float64 - Lon :: Float64 - EW :: Float64 - NS :: Float64 - zone :: Int64 - isnorth :: Bool + Lat::Float64 + Lon::Float64 + EW::Float64 + NS::Float64 + zone::Int64 + isnorth::Bool end """ @@ -35,12 +35,12 @@ end Defines a projection point used for map projections, by specifying latitude and longitude """ -function ProjectionPoint(; Lat=49.9929, Lon=8.2473) +function ProjectionPoint(; Lat = 49.9929, Lon = 8.2473) # Default = Mainz (center of universe) - x_lla = LLA(Lat, Lon, 0.0); # Lat/Lon/Alt of geodesy package + x_lla = LLA(Lat, Lon, 0.0) # Lat/Lon/Alt of geodesy package x_utmz = UTMZ(x_lla, wgs84) # UTMZ of - ProjectionPoint(Lat, Lon, x_utmz.x, x_utmz.y, Int64(x_utmz.zone), x_utmz.isnorth) + return ProjectionPoint(Lat, Lon, x_utmz.x, x_utmz.y, Int64(x_utmz.zone), x_utmz.isnorth) end """ @@ -51,10 +51,10 @@ Defines a projection point used for map projections, by specifying UTM coordinat """ function ProjectionPoint(EW::Float64, NS::Float64, Zone::Int64, isnorth::Bool) - x_utmz = UTMZ(EW,NS,0.0,Zone, isnorth) # UTMZ of - x_lla = LLA(x_utmz, wgs84); # Lat/Lon/Alt of geodesy package + x_utmz = UTMZ(EW, NS, 0.0, Zone, isnorth) # UTMZ of + x_lla = LLA(x_utmz, wgs84) # Lat/Lon/Alt of geodesy package - ProjectionPoint(x_lla.lat, x_lla.lon, EW, NS, Zone, isnorth) + return ProjectionPoint(x_lla.lat, x_lla.lon, EW, NS, Zone, isnorth) end @@ -145,59 +145,59 @@ GeoData ``` """ struct GeoData <: AbstractGeneralGrid - lon :: GeoUnit - lat :: GeoUnit - depth :: GeoUnit - fields :: NamedTuple - atts :: Dict + lon::GeoUnit + lat::GeoUnit + depth::GeoUnit + fields::NamedTuple + atts::Dict # Ensure that the data is of the correct format - function GeoData(lon,lat,depth,fields,atts=nothing) + function GeoData(lon, lat, depth, fields, atts = nothing) # check depth & convert it to units of km in case no units are given or it has different length units - if unit.(depth[1])==NoUnits - depth = depth*km # in case depth has no dimensions + if unit.(depth[1]) == NoUnits + depth = depth * km # in case depth has no dimensions end - depth = uconvert.(km,depth) # convert to km + depth = uconvert.(km, depth) # convert to km depth = GeoUnit(depth) # convert to GeoUnit structure with units of km if isa(lat, StepRangeLen) - lat = Vector(lat); + lat = Vector(lat) end if isa(lon, StepRangeLen) - lon = Vector(lon); + lon = Vector(lon) end - + # Check ordering of the arrays in case of 3D -- the check is not bullet proof for now - if sum(size(lon).>1)==3 - if maximum(abs.(diff(lon,dims=2)))>maximum(abs.(diff(lon,dims=1))) || maximum(abs.(diff(lon,dims=3)))>maximum(abs.(diff(lon,dims=1))) - @warn ("It appears that the lon array has a wrong ordering") - end - if maximum(abs.(diff(lat,dims=1)))>maximum(abs.(diff(lat,dims=2))) || maximum(abs.(diff(lat,dims=3)))>maximum(abs.(diff(lat,dims=2))) - @warn ("It appears that the lat array has a wrong ordering") - end + if sum(size(lon) .> 1) == 3 + if maximum(abs.(diff(lon, dims = 2))) > maximum(abs.(diff(lon, dims = 1))) || maximum(abs.(diff(lon, dims = 3))) > maximum(abs.(diff(lon, dims = 1))) + @warn ("It appears that the lon array has a wrong ordering") + end + if maximum(abs.(diff(lat, dims = 1))) > maximum(abs.(diff(lat, dims = 2))) || maximum(abs.(diff(lat, dims = 3))) > maximum(abs.(diff(lat, dims = 2))) + @warn ("It appears that the lat array has a wrong ordering") + end end # fields should be a NamedTuple. 
In case we simply provide an array, lets transfer it accordingly - if !(typeof(fields)<: NamedTuple) - if (typeof(fields)<: Tuple) - if length(fields)==1 - fields = (DataSet1=first(fields),) # The field is a tuple; create a NamedTuple from it + if !(typeof(fields) <: NamedTuple) + if (typeof(fields) <: Tuple) + if length(fields) == 1 + fields = (DataSet1 = first(fields),) # The field is a tuple; create a NamedTuple from it else error("Please employ a NamedTuple as input, rather than a Tuple") # out of luck end else - fields = (DataSet1=fields,) + fields = (DataSet1 = fields,) end end - DataField = fields[1]; - if typeof(DataField)<: Tuple - DataField = DataField[1]; # in case we have velocity vectors as input + DataField = fields[1] + if typeof(DataField) <: Tuple + DataField = DataField[1] # in case we have velocity vectors as input end - if !(size(lon)==size(lat)==size(depth)==size(DataField)) + if !(size(lon) == size(lat) == size(depth) == size(DataField)) error("The size of Lon/Lat/Depth and the Fields should all be the same!") end @@ -206,14 +206,14 @@ struct GeoData <: AbstractGeneralGrid atts = Dict("note" => "No attributes were given to this dataset") else # check if a dict was given - if !(typeof(atts)<: Dict) + if !(typeof(atts) <: Dict) error("Attributes should be given as Dict!") end end - return new(lon,lat,depth,fields,atts) + return new(lon, lat, depth, fields, atts) - end + end end size(d::GeoData) = size(d.lon.val) @@ -221,29 +221,29 @@ extrema(d::GeoData) = [extrema(d.lon); extrema(d.lat); extrema(d.depth)] # Print an overview of the Geodata struct: function Base.show(io::IO, d::GeoData) - println(io,"GeoData ") - println(io," size : $(size(d.lon))") - println(io," lon ϵ [ $(first(d.lon.val)) : $(last(d.lon.val))]") - println(io," lat ϵ [ $(first(d.lat.val)) : $(last(d.lat.val))]") - if any(isnan.(NumValue(d.depth))) - z_vals = extrema(d.depth.val[isnan.(d.depth.val).==false]) - println(io," depth ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") + println(io, "GeoData ") + println(io, " size : $(size(d.lon))") + println(io, " lon ϵ [ $(first(d.lon.val)) : $(last(d.lon.val))]") + println(io, " lat ϵ [ $(first(d.lat.val)) : $(last(d.lat.val))]") + if any(isnan.(NumValue(d.depth))) + z_vals = extrema(d.depth.val[isnan.(d.depth.val) .== false]) + println(io, " depth ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") else z_vals = extrema(d.depth.val) - println(io," depth ϵ [ $(z_vals[1]) : $(z_vals[2])]") + println(io, " depth ϵ [ $(z_vals[1]) : $(z_vals[2])]") end - println(io," fields : $(keys(d.fields))") + println(io, " fields : $(keys(d.fields))") # Only print attributes if we have non-default attributes - if any( propertynames(d) .== :atts) + return if any(propertynames(d) .== :atts) show_atts = true - if haskey(d.atts,"note") - if d.atts["note"]=="No attributes were given to this dataset" + if haskey(d.atts, "note") + if d.atts["note"] == "No attributes were given to this dataset" show_atts = false end end if show_atts - println(io," attributes: $(keys(d.atts))") + println(io, " attributes: $(keys(d.atts))") end end end @@ -264,7 +264,7 @@ GeoData fields : (:Z,) ``` """ -GeoData(lld::Tuple) = GeoData(lld[1],lld[2],lld[3],(Z=lld[3],)) +GeoData(lld::Tuple) = GeoData(lld[1], lld[2], lld[3], (Z = lld[3],)) """ @@ -279,39 +279,39 @@ julia> Data_cart = convert(ParaviewData, Data_set) ``` """ mutable struct ParaviewData <: AbstractGeneralGrid - x :: GeoUnit - y :: GeoUnit - z :: GeoUnit - fields :: NamedTuple + x::GeoUnit + y::GeoUnit + z::GeoUnit + fields::NamedTuple end 
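For orientation, a minimal sketch of how these two structures are typically used together (grid extents and the `Depthdata` field name are arbitrary illustrations; the package name is assumed from the GMG context):

```julia
using GeophysicalModelGenerator                                    # assumed package exporting these types

Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (-300:25:0)km)    # 3D lon/lat/depth arrays
Data_set  = GeoData(Lon, Lat, Depth, (Depthdata = Depth,))         # attach a named field
Data_cart = convert(ParaviewData, Data_set)                        # Cartesian ECEF coordinates in km
```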
size(d::ParaviewData) = size(d.x.val) # Print an overview of the ParaviewData struct: function Base.show(io::IO, d::ParaviewData) - println(io,"ParaviewData ") - println(io," size : $(size(d.x))") - println(io," x ϵ [ $(first(d.x.val)) : $(last(d.x.val))]") - println(io," y ϵ [ $(first(d.y.val)) : $(last(d.y.val))]") - if any(isnan.(NumValue(d.z))) - z_vals = extrema(d.z.val[isnan.(d.z.val).==false]) - println(io," z ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") + println(io, "ParaviewData ") + println(io, " size : $(size(d.x))") + println(io, " x ϵ [ $(first(d.x.val)) : $(last(d.x.val))]") + println(io, " y ϵ [ $(first(d.y.val)) : $(last(d.y.val))]") + if any(isnan.(NumValue(d.z))) + z_vals = extrema(d.z.val[isnan.(d.z.val) .== false]) + println(io, " z ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") else z_vals = extrema(d.z.val) - println(io," z ϵ [ $(z_vals[1]) : $(z_vals[2])]") + println(io, " z ϵ [ $(z_vals[1]) : $(z_vals[2])]") end - println(io," fields: $(keys(d.fields))") + println(io, " fields: $(keys(d.fields))") # Only print attributes if we have non-default attributes - if any( propertynames(d) .== :atts) + return if any(propertynames(d) .== :atts) show_atts = true - if haskey(d.atts,"note") - if d.atts["note"]=="No attributes were given to this dataset" + if haskey(d.atts, "note") + if d.atts["note"] == "No attributes were given to this dataset" show_atts = false end end if show_atts - println(io," attributes: $(keys(d.atts))") + println(io, " attributes: $(keys(d.atts))") end end @@ -321,17 +321,17 @@ end function Base.convert(::Type{ParaviewData}, d::GeoData) # Utilize the Geodesy.jl package & use the Cartesian Earth-Centered-Earth-Fixed (ECEF) coordinate system - lon = Array(ustrip.(d.lon.val)); - lat = Array(ustrip.(d.lat.val)); - LLA_Data = LLA.(lat,lon, Array(ustrip.(d.depth.val))*1000); # convert to LLA from Geodesy package - X,Y,Z = zeros(size(lon)), zeros(size(lon)), zeros(size(lon)); + lon = Array(ustrip.(d.lon.val)) + lat = Array(ustrip.(d.lat.val)) + LLA_Data = LLA.(lat, lon, Array(ustrip.(d.depth.val)) * 1000) # convert to LLA from Geodesy package + X, Y, Z = zeros(size(lon)), zeros(size(lon)), zeros(size(lon)) # convert to cartesian ECEF reference frame. 
Note that we use kilometers and the wgs84 for i in eachindex(X) data_xyz = ECEF(LLA_Data[i], wgs84) - X[i] = data_xyz.x/1e3; - Y[i] = data_xyz.y/1e3; - Z[i] = data_xyz.z/1e3; + X[i] = data_xyz.x / 1.0e3 + Y[i] = data_xyz.y / 1.0e3 + Z[i] = data_xyz.z / 1.0e3 end @@ -342,7 +342,7 @@ function Base.convert(::Type{ParaviewData}, d::GeoData) # In case any of the fields in the tuple has length 3, it is assumed to be a vector, so transfer it field_names = keys(d.fields) - for i=1:length(d.fields) + for i in 1:length(d.fields) if typeof(d.fields[i]) <: Tuple if length(d.fields[i]) == 3 # the tuple has length 3, which is therefore assumed to be a velocity vector @@ -357,12 +357,10 @@ function Base.convert(::Type{ParaviewData}, d::GeoData) end - - return ParaviewData(GeoUnit(X),GeoUnit(Y),GeoUnit(Z),d.fields) + return ParaviewData(GeoUnit(X), GeoUnit(Y), GeoUnit(Z), d.fields) end - """ UTMData(EW::Any, NS:Any, depth::GeoUnit, UTMZone::Int, NorthernHemisphere=true, fields::NamedTuple) @@ -413,59 +411,59 @@ julia> write_paraview(Data_set1, "Data_set1") ``` """ struct UTMData <: AbstractGeneralGrid - EW :: GeoUnit - NS :: GeoUnit - depth :: GeoUnit - zone :: Any - northern :: Any - fields :: NamedTuple - atts :: Dict + EW::GeoUnit + NS::GeoUnit + depth::GeoUnit + zone::Any + northern::Any + fields::NamedTuple + atts::Dict # Ensure that the data is of the correct format - function UTMData(EW,NS,depth,zone,northern,fields,atts=nothing) + function UTMData(EW, NS, depth, zone, northern, fields, atts = nothing) # check depth & convert it to units of km in case no units are given or it has different length units - if unit.(depth)[1]==NoUnits - depth = depth*m # in case depth has no dimensions + if unit.(depth)[1] == NoUnits + depth = depth * m # in case depth has no dimensions end - depth = uconvert.(m,depth) # convert to meters + depth = uconvert.(m, depth) # convert to meters depth = GeoUnit(depth) # convert to GeoUnit structure with units of meters # Check ordering of the arrays in case of 3D - if sum(size(EW).>1)==3 - if maximum(abs.(diff(EW,dims=2)))>maximum(abs.(diff(EW,dims=1))) || maximum(abs.(diff(EW,dims=3)))>maximum(abs.(diff(EW,dims=1))) + if sum(size(EW) .> 1) == 3 + if maximum(abs.(diff(EW, dims = 2))) > maximum(abs.(diff(EW, dims = 1))) || maximum(abs.(diff(EW, dims = 3))) > maximum(abs.(diff(EW, dims = 1))) @warn "It appears that the EW array has a wrong ordering" end - if maximum(abs.(diff(NS,dims=1)))>maximum(abs.(diff(NS,dims=2))) || maximum(abs.(diff(NS,dims=3)))>maximum(abs.(diff(NS,dims=2))) + if maximum(abs.(diff(NS, dims = 1))) > maximum(abs.(diff(NS, dims = 2))) || maximum(abs.(diff(NS, dims = 3))) > maximum(abs.(diff(NS, dims = 2))) @warn "It appears that the NS array has a wrong ordering" end end # fields should be a NamedTuple. 
In case we simply provide an array, lets transfer it accordingly - if !(typeof(fields)<: NamedTuple) - if (typeof(fields)<: Tuple) - if length(fields)==1 - fields = (DataSet1=first(fields),) # The field is a tuple; create a NamedTuple from it + if !(typeof(fields) <: NamedTuple) + if (typeof(fields) <: Tuple) + if length(fields) == 1 + fields = (DataSet1 = first(fields),) # The field is a tuple; create a NamedTuple from it else error("Please employ a NamedTuple as input, rather than a Tuple") # out of luck end else - fields = (DataSet1=fields,) + fields = (DataSet1 = fields,) end end - DataField = fields[1]; - if typeof(DataField)<: Tuple - DataField = DataField[1]; # in case we have velocity vectors as input + DataField = fields[1] + if typeof(DataField) <: Tuple + DataField = DataField[1] # in case we have velocity vectors as input end - if !(size(EW)==size(NS)==size(depth)==size(DataField)) + if !(size(EW) == size(NS) == size(depth) == size(DataField)) error("The size of EW/NS/Depth and the Fields should all be the same!") end - if length(zone)==1 - zone = ones(Int64,size(EW))*zone - northern = ones(Bool,size(EW))*northern + if length(zone) == 1 + zone = ones(Int64, size(EW)) * zone + northern = ones(Bool, size(EW)) * northern end # take care of attributes @@ -474,14 +472,14 @@ struct UTMData <: AbstractGeneralGrid atts = Dict("note" => "No attributes were given to this dataset") else # check if a dict was given - if !(typeof(atts)<: Dict) + if !(typeof(atts) <: Dict) error("Attributes should be given as Dict!") end end - return new(EW,NS,depth,zone,northern, fields,atts) + return new(EW, NS, depth, zone, northern, fields, atts) - end + end end size(d::UTMData) = size(d.EW.val) @@ -489,36 +487,36 @@ extrema(d::UTMData) = [extrema(d.EW.val); extrema(d.NS.val); extrema(d.depth.val # Print an overview of the UTMData struct: function Base.show(io::IO, d::UTMData) - println(io,"UTMData ") + println(io, "UTMData ") if d.northern[1] - println(io," UTM zone : $(minimum(d.zone))-$(maximum(d.zone)) North") + println(io, " UTM zone : $(minimum(d.zone))-$(maximum(d.zone)) North") else - println(io," UTM zone : $(minimum(d.zone))-$(maximum(d.zone)) South") + println(io, " UTM zone : $(minimum(d.zone))-$(maximum(d.zone)) South") end - println(io," size : $(size(d.EW))") - println(io," EW ϵ [ $(first(d.EW.val)) : $(last(d.EW.val))]") - println(io," NS ϵ [ $(first(d.NS.val)) : $(last(d.NS.val))]") + println(io, " size : $(size(d.EW))") + println(io, " EW ϵ [ $(first(d.EW.val)) : $(last(d.EW.val))]") + println(io, " NS ϵ [ $(first(d.NS.val)) : $(last(d.NS.val))]") - if any(isnan.(NumValue(d.depth))) - z_vals = extrema(d.depth.val[isnan.(d.depth.val).==false]) - println(io," depth ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaNs") + if any(isnan.(NumValue(d.depth))) + z_vals = extrema(d.depth.val[isnan.(d.depth.val) .== false]) + println(io, " depth ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaNs") else z_vals = extrema(d.depth.val) - println(io," depth ϵ [ $(z_vals[1]) : $(z_vals[2])]") + println(io, " depth ϵ [ $(z_vals[1]) : $(z_vals[2])]") end - println(io," fields : $(keys(d.fields))") + println(io, " fields : $(keys(d.fields))") # Only print attributes if we have non-default attributes - if any( propertynames(d) .== :atts) + return if any(propertynames(d) .== :atts) show_atts = true - if haskey(d.atts,"note") - if d.atts["note"]=="No attributes were given to this dataset" + if haskey(d.atts, "note") + if d.atts["note"] == "No attributes were given to this dataset" show_atts = false end end if show_atts - 
println(io," attributes: $(keys(d.atts))") + println(io, " attributes: $(keys(d.atts))") end end end @@ -528,33 +526,33 @@ Converts a `UTMData` structure to a `GeoData` structure """ function Base.convert(::Type{GeoData}, d::UTMData) - Lat = zeros(size(d.EW)); - Lon = zeros(size(d.EW)); + Lat = zeros(size(d.EW)) + Lon = zeros(size(d.EW)) for i in eachindex(d.EW.val) # Use functions of the Geodesy package to convert to LLA - utmz_i = UTMZ(d.EW.val[i],d.NS.val[i],Float64(ustrip.(d.depth.val[i])),d.zone[i],d.northern[i]) - lla_i = LLA(utmz_i,wgs84) - lon = lla_i.lon; - # if lon<0; lon = 360+lon; end # as GMT expects this + utmz_i = UTMZ(d.EW.val[i], d.NS.val[i], Float64(ustrip.(d.depth.val[i])), d.zone[i], d.northern[i]) + lla_i = LLA(utmz_i, wgs84) + lon = lla_i.lon + # if lon<0; lon = 360+lon; end # as GMT expects this Lat[i] = lla_i.lat Lon[i] = lon end # handle the case where an old GeoData structure is converted - if any( propertynames(d) .== :atts) - atts = d.atts; + if any(propertynames(d) .== :atts) + atts = d.atts else atts = Dict("note" => "No attributes were given to this dataset") # assign the default end depth = d.depth.val - if d.depth[1].unit==m - depth = depth/1000 + if d.depth[1].unit == m + depth = depth / 1000 end - return GeoData(Lon,Lat,depth,d.fields,atts) + return GeoData(Lon, Lat, depth, d.fields, atts) end @@ -563,32 +561,32 @@ Converts a `GeoData` structure to a `UTMData` structure """ function Base.convert(::Type{UTMData}, d::GeoData) - EW = zeros(size(d.lon)); - NS = zeros(size(d.lon)); - depth = zeros(size(d.lon)); - zone = zeros(Int64,size(d.lon)); - northern = zeros(Bool,size(d.lon)); + EW = zeros(size(d.lon)) + NS = zeros(size(d.lon)) + depth = zeros(size(d.lon)) + zone = zeros(Int64, size(d.lon)) + northern = zeros(Bool, size(d.lon)) for i in eachindex(d.lon.val) # Use functions of the Geodesy package to convert to LLA - lla_i = LLA(d.lat.val[i],d.lon.val[i],Float64(ustrip.(d.depth.val[i])*1e3)) - utmz_i = UTMZ(lla_i, wgs84) + lla_i = LLA(d.lat.val[i], d.lon.val[i], Float64(ustrip.(d.depth.val[i]) * 1.0e3)) + utmz_i = UTMZ(lla_i, wgs84) EW[i] = utmz_i.x NS[i] = utmz_i.y depth[i] = utmz_i.z - zone[i] = utmz_i.zone; + zone[i] = utmz_i.zone northern[i] = utmz_i.isnorth end # handle the case where an old GeoData structure is converted - if any( propertynames(d) .== :atts) - atts = d.atts; + if any(propertynames(d) .== :atts) + atts = d.atts else atts = Dict("note" => "No attributes were given to this dataset") # assign the default end - return UTMData(EW,NS,depth,zone, northern, d.fields, atts) + return UTMData(EW, NS, depth, zone, northern, d.fields, atts) end @@ -598,21 +596,21 @@ end This flips the data in the structure in a certain dimension (default is z [3]) """ -function flip(Data::GeoData, dimension=3) +function flip(Data::GeoData, dimension = 3) - depth = reverse(Data.depth.val,dims=dimension)*Data.depth.unit # flip depth - lon = reverse(Data.lon.val,dims=dimension)*Data.lon.unit # flip - lat = reverse(Data.lat.val,dims=dimension)*Data.lat.unit # flip + depth = reverse(Data.depth.val, dims = dimension) * Data.depth.unit # flip depth + lon = reverse(Data.lon.val, dims = dimension) * Data.lon.unit # flip + lat = reverse(Data.lat.val, dims = dimension) * Data.lat.unit # flip # flip fields - fields = Data.fields; - name_keys = keys(fields) - for ifield = 1:length(fields) - dat = reverse(fields[ifield],dims=dimension); # flip direction + fields = Data.fields + name_keys = keys(fields) + for ifield in 1:length(fields) + dat = reverse(fields[ifield], dims = 
dimension) # flip direction fields = merge(fields, [name_keys[ifield] => dat]) # replace in existing NTuple end - return GeoData(lon,lat,depth, fields) + return GeoData(lon, lat, depth, fields) end @@ -626,36 +624,35 @@ Converts a `GeoData` structure to fixed UTM zone, around a given `ProjectionPoin """ function convert2UTMzone(d::GeoData, proj::ProjectionPoint) - EW = zeros(size(d.lon)); - NS = zeros(size(d.lon)); - zone = zeros(Int64,size(d.lon)); - northern = zeros(Bool,size(d.lon)); - trans = UTMfromLLA(proj.zone, proj.isnorth, wgs84) + EW = zeros(size(d.lon)) + NS = zeros(size(d.lon)) + zone = zeros(Int64, size(d.lon)) + northern = zeros(Bool, size(d.lon)) + trans = UTMfromLLA(proj.zone, proj.isnorth, wgs84) for i in eachindex(d.lon.val) # Use functions of the Geodesy package to convert to LLA - lla_i = LLA(d.lat.val[i],d.lon.val[i],Float64(ustrip.(d.depth.val[i])*1e3)) - utm_i = trans(lla_i) + lla_i = LLA(d.lat.val[i], d.lon.val[i], Float64(ustrip.(d.depth.val[i]) * 1.0e3)) + utm_i = trans(lla_i) EW[i] = utm_i.x NS[i] = utm_i.y - zone[i] = proj.zone; + zone[i] = proj.zone northern[i] = proj.isnorth end # handle the case where an old GeoData structure is converted - if any( propertynames(d) .== :atts) - atts = d.atts; + if any(propertynames(d) .== :atts) + atts = d.atts else atts = Dict("note" => "No attributes were given to this dataset") # assign the default end - return UTMData(EW,NS,d.depth.val,zone, northern, d.fields,atts) + return UTMData(EW, NS, d.depth.val, zone, northern, d.fields, atts) end - """ CartData(x::Any, y::Any, z::GeoUnit, fields::NamedTuple) @@ -708,49 +705,49 @@ which would allow visualizing this in paraview in the usual manner: """ struct CartData <: AbstractGeneralGrid - x :: GeoUnit - y :: GeoUnit - z :: GeoUnit - fields :: NamedTuple - atts :: Dict + x::GeoUnit + y::GeoUnit + z::GeoUnit + fields::NamedTuple + atts::Dict # Ensure that the data is of the correct format - function CartData(x,y,z,fields,atts=nothing) + function CartData(x, y, z, fields, atts = nothing) # Check ordering of the arrays in case of 3D - if sum(size(x).>1)==3 - if maximum(abs.(diff(x,dims=2)))>maximum(abs.(diff(x,dims=1))) || maximum(abs.(diff(x,dims=3)))>maximum(abs.(diff(x,dims=1))) + if sum(size(x) .> 1) == 3 + if maximum(abs.(diff(x, dims = 2))) > maximum(abs.(diff(x, dims = 1))) || maximum(abs.(diff(x, dims = 3))) > maximum(abs.(diff(x, dims = 1))) @warn "It appears that the x-array has a wrong ordering" end - if maximum(abs.(diff(y,dims=1)))>maximum(abs.(diff(y,dims=2))) || maximum(abs.(diff(y,dims=3)))>maximum(abs.(diff(y,dims=2))) + if maximum(abs.(diff(y, dims = 1))) > maximum(abs.(diff(y, dims = 2))) || maximum(abs.(diff(y, dims = 3))) > maximum(abs.(diff(y, dims = 2))) @warn "It appears that the y-array has a wrong ordering" end end # check depth & convert it to units of km in case no units are given or it has different length units - x = convert!(x,km) - y = convert!(y,km) - z = convert!(z,km) + x = convert!(x, km) + y = convert!(y, km) + z = convert!(z, km) # fields should be a NamedTuple. 
In case we simply provide an array, lets transfer it accordingly - if !(typeof(fields)<: NamedTuple) - if (typeof(fields)<: Tuple) - if length(fields)==1 - fields = (DataSet1=first(fields),) # The field is a tuple; create a NamedTuple from it + if !(typeof(fields) <: NamedTuple) + if (typeof(fields) <: Tuple) + if length(fields) == 1 + fields = (DataSet1 = first(fields),) # The field is a tuple; create a NamedTuple from it else error("Please employ a NamedTuple as input, rather than a Tuple") # out of luck end else - fields = (DataSet1=fields,) + fields = (DataSet1 = fields,) end end - DataField = fields[1]; - if typeof(DataField)<: Tuple - DataField = DataField[1]; # in case we have velocity vectors as input + DataField = fields[1] + if typeof(DataField) <: Tuple + DataField = DataField[1] # in case we have velocity vectors as input end - if !(size(x)==size(y)==size(z)==size(DataField)) + if !(size(x) == size(y) == size(z) == size(DataField)) error("The size of x/y/z and the Fields should all be the same!") end @@ -760,14 +757,14 @@ struct CartData <: AbstractGeneralGrid atts = Dict("note" => "No attributes were given to this dataset") else # check if a dict was given - if !(typeof(atts)<: Dict) + if !(typeof(atts) <: Dict) error("Attributes should be given as Dict!") end end - return new(x,y,z,fields,atts) + return new(x, y, z, fields, atts) - end + end end size(d::CartData) = size(d.x.val) @@ -775,32 +772,32 @@ extrema(d::CartData) = [extrema(d.x.val); extrema(d.y.val); extrema(d.z.val)] # Print an overview of the UTMData struct: function Base.show(io::IO, d::CartData) - println(io,"CartData ") - println(io," size : $(size(d.x))") - println(io," x ϵ [ $(minimum(d.x.val)) : $(maximum(d.x.val))]") - println(io," y ϵ [ $(minimum(d.y.val)) : $(maximum(d.y.val))]") - - if any(isnan.(NumValue(d.z))) - z_vals = extrema(d.z.val[isnan.(d.z.val).==false]) - println(io," z ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") + println(io, "CartData ") + println(io, " size : $(size(d.x))") + println(io, " x ϵ [ $(minimum(d.x.val)) : $(maximum(d.x.val))]") + println(io, " y ϵ [ $(minimum(d.y.val)) : $(maximum(d.y.val))]") + + if any(isnan.(NumValue(d.z))) + z_vals = extrema(d.z.val[isnan.(d.z.val) .== false]) + println(io, " z ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") else z_vals = extrema(d.z.val) - println(io," z ϵ [ $(z_vals[1]) : $(z_vals[2])]") + println(io, " z ϵ [ $(z_vals[1]) : $(z_vals[2])]") end - println(io," fields : $(keys(d.fields))") + println(io, " fields : $(keys(d.fields))") # Only print attributes if we have non-default attributes - if any( propertynames(d) .== :atts) + return if any(propertynames(d) .== :atts) show_atts = true - if haskey(d.atts,"note") - if d.atts["note"]=="No attributes were given to this dataset" + if haskey(d.atts, "note") + if d.atts["note"] == "No attributes were given to this dataset" show_atts = false end end if show_atts - println(io," attributes: $(keys(d.atts))") + println(io, " attributes: $(keys(d.atts))") end end end @@ -821,8 +818,7 @@ CartData attributes: ["note"] ``` """ -CartData(xyz::Tuple) = CartData(xyz[1],xyz[2],xyz[3],(Z=xyz[3],)) - +CartData(xyz::Tuple) = CartData(xyz[1], xyz[2], xyz[3], (Z = xyz[3],)) """ @@ -832,8 +828,10 @@ This transfers a `CartData` dataset to a `UTMData` dataset, that has a single UT """ function convert2UTMzone(d::CartData, proj::ProjectionPoint) - return UTMData(ustrip.(d.x.val).*1e3 .+ proj.EW,ustrip.(d.y.val).*1e3 .+ proj.NS, - ustrip.(d.z.val).*1e3,proj.zone, proj.isnorth, d.fields, d.atts) + return UTMData( + 
ustrip.(d.x.val) .* 1.0e3 .+ proj.EW, ustrip.(d.y.val) .* 1.0e3 .+ proj.NS, + ustrip.(d.z.val) .* 1.0e3, proj.zone, proj.isnorth, d.fields, d.atts + ) end @@ -843,15 +841,17 @@ Converts a `UTMData` structure to a `CartData` structure, which essentially tran """ function convert2CartData(d::UTMData, proj::ProjectionPoint) - # handle the case where an old structure is converted - if any( propertynames(d) .== :atts) - atts = d.atts; - else - atts = Dict("note" => "No attributes were given to this dataset") # assign the default - end + # handle the case where an old structure is converted + if any(propertynames(d) .== :atts) + atts = d.atts + else + atts = Dict("note" => "No attributes were given to this dataset") # assign the default + end - return CartData( (ustrip.(d.EW.val) .- proj.EW)./1e3, (ustrip.(d.NS.val) .- proj.NS)./1e3, - ustrip.(d.depth.val)./1e3, d.fields,atts) + return CartData( + (ustrip.(d.EW.val) .- proj.EW) ./ 1.0e3, (ustrip.(d.NS.val) .- proj.NS) ./ 1.0e3, + ustrip.(d.depth.val) ./ 1.0e3, d.fields, atts + ) end @@ -861,9 +861,11 @@ Converts a `GeoData` structure to a `CartData` structure, which essentially tran """ function convert2CartData(d::GeoData, proj::ProjectionPoint) - d_UTM = convert2UTMzone(d,proj) - return CartData( (ustrip.(d_UTM.EW.val) .- proj.EW)./1e3, (ustrip.(d_UTM.NS.val) .- proj.NS)./1e3, - ustrip.(d_UTM.depth.val), d_UTM.fields,d_UTM.atts) + d_UTM = convert2UTMzone(d, proj) + return CartData( + (ustrip.(d_UTM.EW.val) .- proj.EW) ./ 1.0e3, (ustrip.(d_UTM.NS.val) .- proj.NS) ./ 1.0e3, + ustrip.(d_UTM.depth.val), d_UTM.fields, d_UTM.atts + ) end """ @@ -901,35 +903,35 @@ julia> size(Lon) """ function lonlatdepth_grid(Lon::Any, Lat::Any, Depth::Any) - nLon = length(Lon) - nLat = length(Lat) - nDepth = length(Depth) + nLon = length(Lon) + nLat = length(Lat) + nDepth = length(Depth) - if nLon==nLat==nDepth==1 + if nLon == nLat == nDepth == 1 error("Cannot use this routine for a 3D point (no need to create a grid in that case") end - if maximum([length(size(Lon)), length(size(Lat)), length(size(Depth))])>1 + if maximum([length(size(Lon)), length(size(Lat)), length(size(Depth))]) > 1 error("You can only give 1D vectors or numbers as input") end - Lon3D = zeros(nLon,nLat,nDepth); - Lat3D = zeros(nLon,nLat,nDepth); - Depth3D = zeros(nLon,nLat,nDepth); + Lon3D = zeros(nLon, nLat, nDepth) + Lat3D = zeros(nLon, nLat, nDepth) + Depth3D = zeros(nLon, nLat, nDepth) - for i=1:nLon - for j=1:nLat - for k=1:nDepth - Lon3D[i,j,k] = ustrip.(Lon[i]); - Lat3D[i,j,k] = ustrip.(Lat[j]); - Depth3D[i,j,k] = ustrip.(Depth[k]); + for i in 1:nLon + for j in 1:nLat + for k in 1:nDepth + Lon3D[i, j, k] = ustrip.(Lon[i]) + Lat3D[i, j, k] = ustrip.(Lat[j]) + Depth3D[i, j, k] = ustrip.(Depth[k]) end end end # Add dimensions back - Lon3D = Lon3D*unit( Lon[1]) - Lat3D = Lat3D*unit( Lat[1]) - Depth3D = Depth3D*unit(Depth[1]) + Lon3D = Lon3D * unit(Lon[1]) + Lat3D = Lat3D * unit(Lat[1]) + Depth3D = Depth3D * unit(Depth[1]) return Lon3D, Lat3D, Depth3D end @@ -949,7 +951,7 @@ julia> size(X) See `lonlatdepth_grid` for more examples. 
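A typical pattern is to wrap the result directly into a `CartData` set (a minimal sketch; the ranges are arbitrary):

```julia
X, Y, Z = xyz_grid(0:10, 0:5, -10:0)     # three 3D arrays of size (11, 6, 11)
Data    = CartData(X, Y, Z, (Z = Z,))    # use the z-coordinate itself as a field
```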
""" function xyz_grid(X_vec::Any, Y_vec::Any, Z_vec::Any) - return X,Y,Z = lonlatdepth_grid(X_vec,Y_vec,Z_vec) + return X, Y, Z = lonlatdepth_grid(X_vec, Y_vec, Z_vec) end @@ -967,35 +969,38 @@ function velocity_spherical_to_cartesian!(Data::GeoData, Velocity::Tuple) # Note: This is partly based on scripts originally written by Tobias Baumann, Uni Mainz for i in eachindex(Data.lat.val) - az = Data.lon.val[i]; - el = Data.lat.val[i]; + az = Data.lon.val[i] + el = Data.lat.val[i] - R = [-sind(az) -sind(el)*cosd(az) cosd(el)*cosd(az); - cosd(az) -sind(el)*sind(az) cosd(el)*sind(az); - 0.0 cosd(el) sind(el) ]; + R = [ + -sind(az) -sind(el) * cosd(az) cosd(el) * cosd(az); + cosd(az) -sind(el) * sind(az) cosd(el) * sind(az); + 0.0 cosd(el) sind(el) + ] - V_sph = [Velocity[1][i]; Velocity[2][i]; Velocity[3][i] ]; + V_sph = [Velocity[1][i]; Velocity[2][i]; Velocity[3][i] ] # Normalize spherical velocity - V_mag = sum(sqrt.(V_sph.^2)); # magnitude - V_norm = V_sph/V_mag + V_mag = sum(sqrt.(V_sph .^ 2)) # magnitude + V_norm = V_sph / V_mag - V_xyz_norm = R*V_norm; - V_xyz = V_xyz_norm.*V_mag; # scale with magnitude + V_xyz_norm = R * V_norm + V_xyz = V_xyz_norm .* V_mag # scale with magnitude # in-place saving of rotated velocity - Velocity[1][i] = V_xyz[1]; - Velocity[2][i] = V_xyz[2]; - Velocity[3][i] = V_xyz[3]; + Velocity[1][i] = V_xyz[1] + Velocity[2][i] = V_xyz[2] + Velocity[3][i] = V_xyz[3] end + return end # Internal function that converts arrays to a GeoUnit with certain units -function convert!(d,u) - if unit.(d)[1]==NoUnits - d = d*u # in case it has no dimensions +function convert!(d, u) + if unit.(d)[1] == NoUnits + d = d * u # in case it has no dimensions end - d = uconvert.(u,d) # convert to u + d = uconvert.(u, d) # convert to u d = GeoUnit(d) # convert to GeoUnit structure with units of u return d @@ -1005,38 +1010,38 @@ end out = average_q1(d::Array) 3D linear averaging of a 3D array """ -function average_q1(d::Array) +function average_q1(d::Array) # we are using multidimensional iterations in julia here following https://julialang.org/blog/2016/02/iteration/ - out = zeros(eltype(d),size(d) .- 1) + out = zeros(eltype(d), size(d) .- 1) R = CartesianIndices(out) Ifirst, Ilast = first(R), last(R) I1 = oneunit(Ifirst) for I in R n, s = 0, zero(eltype(out)) - for J in max(Ifirst, I):min(Ilast + I1, I+I1) + for J in max(Ifirst, I):min(Ilast + I1, I + I1) s += d[J] n += 1 end - out[I] = s/n + out[I] = s / n end return out -end +end """ X,Y,Z = coordinate_grids(Data::CartData; cell=false) Returns 3D coordinate arrays """ -function coordinate_grids(Data::CartData; cell=false) - X,Y,Z = NumValue(Data.x), NumValue(Data.y), NumValue(Data.z) +function coordinate_grids(Data::CartData; cell = false) + X, Y, Z = NumValue(Data.x), NumValue(Data.y), NumValue(Data.z) if cell - X,Y,Z = average_q1(X),average_q1(Y), average_q1(Z) + X, Y, Z = average_q1(X), average_q1(Y), average_q1(Z) end - return X,Y,Z + return X, Y, Z end """ @@ -1044,14 +1049,14 @@ end Returns 3D coordinate arrays """ -function coordinate_grids(Data::GeoData; cell=false) - X,Y,Z = NumValue(Data.lon), NumValue(Data.lat), NumValue(Data.depth) +function coordinate_grids(Data::GeoData; cell = false) + X, Y, Z = NumValue(Data.lon), NumValue(Data.lat), NumValue(Data.depth) if cell - X,Y,Z = average_q1(X),average_q1(Y), average_q1(Z) + X, Y, Z = average_q1(X), average_q1(Y), average_q1(Z) end - return X,Y,Z + return X, Y, Z end """ @@ -1059,15 +1064,15 @@ end Returns 3D coordinate arrays """ -function coordinate_grids(Data::UTMData; 
cell=false) +function coordinate_grids(Data::UTMData; cell = false) - X,Y,Z = NumValue(Data.EW), NumValue(Data.NS), NumValue(Data.depth) + X, Y, Z = NumValue(Data.EW), NumValue(Data.NS), NumValue(Data.depth) if cell - X,Y,Z = average_q1(X),average_q1(Y), average_q1(Z) + X, Y, Z = average_q1(X), average_q1(Y), average_q1(Z) end - return X,Y,Z + return X, Y, Z end """ @@ -1075,13 +1080,13 @@ end Returns 3D coordinate arrays """ -function coordinate_grids(Data::ParaviewData; cell=false) - X,Y,Z = xyz_grid(NumValue(Data.x), NumValue(Data.y), NumValue(Data.z)) +function coordinate_grids(Data::ParaviewData; cell = false) + X, Y, Z = xyz_grid(NumValue(Data.x), NumValue(Data.y), NumValue(Data.z)) if cell - X,Y,Z = average_q1(X),average_q1(Y), average_q1(Z) + X, Y, Z = average_q1(X), average_q1(Y), average_q1(Z) end - - return X,Y,Z + + return X, Y, Z end @@ -1089,14 +1094,14 @@ end Structure that holds data for an orthogonal cartesian grid, which can be described with 1D vectors """ struct CartGrid{FT, D} <: AbstractGeneralGrid - ConstantΔ :: Bool # Constant spacing (true in all cases for now) - N :: NTuple{D,Int} # Number of grid points in every direction - Δ :: NTuple{D,FT} # (constant) spacing in every direction - L :: NTuple{D,FT} # Domain size - min :: NTuple{D,FT} # start of the grid in every direction - max :: NTuple{D,FT} # end of the grid in every direction - coord1D :: NTuple{D,Vector{FT}} # Tuple with 1D vectors in all directions - coord1D_cen :: NTuple{D,Vector{FT}} # Tuple with 1D vectors of center points in all directions + ConstantΔ::Bool # Constant spacing (true in all cases for now) + N::NTuple{D, Int} # Number of grid points in every direction + Δ::NTuple{D, FT} # (constant) spacing in every direction + L::NTuple{D, FT} # Domain size + min::NTuple{D, FT} # start of the grid in every direction + max::NTuple{D, FT} # end of the grid in every direction + coord1D::NTuple{D, Vector{FT}} # Tuple with 1D vectors in all directions + coord1D_cen::NTuple{D, Vector{FT}} # Tuple with 1D vectors of center points in all directions end size(d::CartGrid) = d.N @@ -1143,11 +1148,11 @@ CartGrid{Float64, 2} """ function create_CartGrid(; - size=(), - x = nothing, z = nothing, y = nothing, - extent = nothing, - CharDim = nothing -) + size = (), + x = nothing, z = nothing, y = nothing, + extent = nothing, + CharDim = nothing + ) if isa(size, Number) size = (size,) # transfer to tuple @@ -1156,74 +1161,76 @@ function create_CartGrid(; extent = (extent,) end N = size - dim = length(N) + dim = length(N) # Specify domain by length in every direction if !isnothing(extent) - x,y,z = nothing, nothing, nothing - x = (0., extent[1]) - if dim>1 - z = (-extent[2], 0.0) # vertical direction (negative) + x, y, z = nothing, nothing, nothing + x = (0.0, extent[1]) + if dim > 1 + z = (-extent[2], 0.0) # vertical direction (negative) end - if dim>2 - y = (0., extent[3]) + if dim > 2 + y = (0.0, extent[3]) end end FT = typeof(x[1]) - if dim==1 + if dim == 1 x = FT.(x) L = (x[2] - x[1],) - X₁= (x[1], ) - elseif dim==2 - x,z = FT.(x), FT.(z) + X₁ = (x[1],) + elseif dim == 2 + x, z = FT.(x), FT.(z) L = (x[2] - x[1], z[2] - z[1]) - X₁= (x[1], z[1]) + X₁ = (x[1], z[1]) else - x,y,z = FT.(x), FT.(y), FT.(z) + x, y, z = FT.(x), FT.(y), FT.(z) L = (x[2] - x[1], y[2] - y[1], z[2] - z[1]) - X₁= (x[1], y[1], z[1]) + X₁ = (x[1], y[1], z[1]) end - Xₙ = X₁ .+ L - Δ = L ./ (N .- 1) + Xₙ = X₁ .+ L + Δ = L ./ (N .- 1) # nondimensionalize if !isnothing(CharDim) - X₁, Xₙ, Δ, L = GeoUnit.(X₁), GeoUnit.(Xₙ), GeoUnit.(Δ), GeoUnit.(L) + X₁, 
Xₙ, Δ, L = GeoUnit.(X₁), GeoUnit.(Xₙ), GeoUnit.(Δ), GeoUnit.(L) - X₁ = ntuple( i -> nondimensionalize(X₁[i], CharDim), dim) - Xₙ = ntuple( i -> nondimensionalize(Xₙ[i], CharDim), dim) - Δ = ntuple( i -> nondimensionalize(Δ[i], CharDim), dim) - L = ntuple( i -> nondimensionalize(L[i], CharDim), dim) + X₁ = ntuple(i -> nondimensionalize(X₁[i], CharDim), dim) + Xₙ = ntuple(i -> nondimensionalize(Xₙ[i], CharDim), dim) + Δ = ntuple(i -> nondimensionalize(Δ[i], CharDim), dim) + L = ntuple(i -> nondimensionalize(L[i], CharDim), dim) - X₁, Xₙ, Δ, L = NumValue.(X₁), NumValue.(Xₙ), NumValue.(Δ), NumValue.(L) + X₁, Xₙ, Δ, L = NumValue.(X₁), NumValue.(Xₙ), NumValue.(Δ), NumValue.(L) end # Generate 1D coordinate arrays of vertices in all directions - coord1D=() - for idim=1:dim - coord1D = (coord1D..., Vector(range(X₁[idim], Xₙ[idim]; length = N[idim] ))) + coord1D = () + for idim in 1:dim + coord1D = (coord1D..., Vector(range(X₁[idim], Xₙ[idim]; length = N[idim]))) end # Generate 1D coordinate arrays centers in all directionbs - coord1D_cen=() - for idim=1:dim - coord1D_cen = (coord1D_cen..., Vector(range(X₁[idim]+Δ[idim]/2, Xₙ[idim]-Δ[idim]/2; length = N[idim]-1 ))) + coord1D_cen = () + for idim in 1:dim + coord1D_cen = (coord1D_cen..., Vector(range(X₁[idim] + Δ[idim] / 2, Xₙ[idim] - Δ[idim] / 2; length = N[idim] - 1))) end - ConstantΔ = true; - return CartGrid(ConstantΔ,N,Δ,L,X₁,Xₙ,coord1D, coord1D_cen) + ConstantΔ = true + return CartGrid(ConstantΔ, N, Δ, L, X₁, Xₙ, coord1D, coord1D_cen) end # view grid object function show(io::IO, g::CartGrid{FT, DIM}) where {FT, DIM} - print(io, "CartGrid{$FT, $DIM} \n", - " size: $(g.N) \n", - " length: $(g.L) \n", - " domain: $(domain_string(g)) \n", - " grid spacing Δ: $(g.Δ) \n") + return print( + io, "CartGrid{$FT, $DIM} \n", + " size: $(g.N) \n", + " length: $(g.L) \n", + " domain: $(domain_string(g)) \n", + " grid spacing Δ: $(g.Δ) \n" + ) end @@ -1231,17 +1238,17 @@ end function domain_string(grid::CartGrid{FT, DIM}) where {FT, DIM} xₗ, xᵣ = grid.coord1D[1][1], grid.coord1D[1][end] - if DIM>1 + if DIM > 1 yₗ, yᵣ = grid.coord1D[2][1], grid.coord1D[2][end] end - if DIM>2 + if DIM > 2 zₗ, zᵣ = grid.coord1D[3][1], grid.coord1D[3][end] end - if DIM==1 + if DIM == 1 return "x ∈ [$xₗ, $xᵣ]" - elseif DIM==2 + elseif DIM == 2 return "x ∈ [$xₗ, $xᵣ], z ∈ [$yₗ, $yᵣ]" - elseif DIM==3 + elseif DIM == 3 return "x ∈ [$xₗ, $xᵣ], y ∈ [$yₗ, $yᵣ], z ∈ [$zₗ, $zᵣ]" end end @@ -1252,23 +1259,23 @@ end Returns 3D coordinate arrays """ -function coordinate_grids(Data::CartGrid; cell=false) +function coordinate_grids(Data::CartGrid; cell = false) x_vec = NumValue(Data.coord1D[1]) y_vec = NumValue(Data.coord1D[2]) z_vec = NumValue(Data.coord1D[3]) if cell - x_vec = (x_vec[2:end] + x_vec[1:end-1])/2 - z_vec = (z_vec[2:end] + z_vec[1:end-1])/2 - if length(y_vec)>1 - y_vec = (y_vec[2:end] + y_vec[1:end-1])/2 + x_vec = (x_vec[2:end] + x_vec[1:(end - 1)]) / 2 + z_vec = (z_vec[2:end] + z_vec[1:(end - 1)]) / 2 + if length(y_vec) > 1 + y_vec = (y_vec[2:end] + y_vec[1:(end - 1)]) / 2 end end - - X,Y,Z = xyz_grid(x_vec, y_vec, z_vec) - return X,Y,Z + X, Y, Z = xyz_grid(x_vec, y_vec, z_vec) + + return X, Y, Z end """ @@ -1276,76 +1283,74 @@ end Returns a CartData set given a cartesian grid `Grid` and `fields` defined on that grid. 
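For example (a minimal sketch; grid size, extent and the `Temperature` field name are arbitrary):

```julia
Grid = create_CartGrid(size = (10, 20), x = (0.0, 10.0), z = (-10.0, 0.0))
Data = CartData(Grid, (Temperature = rand(10, 20),))   # 2D fields are reshaped to 3D internally
```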
""" -function CartData(Grid::CartGrid, fields::NamedTuple; y_val=0.0) - if length(Grid.N)==3 - X,Y,Z = xyz_grid(Grid.coord1D[1], Grid.coord1D[2], Grid.coord1D[3]) # 3D grid - elseif length(Grid.N)==2 - X,Y,Z = xyz_grid(Grid.coord1D[1], y_val, Grid.coord1D[2]) # 2D grid +function CartData(Grid::CartGrid, fields::NamedTuple; y_val = 0.0) + if length(Grid.N) == 3 + X, Y, Z = xyz_grid(Grid.coord1D[1], Grid.coord1D[2], Grid.coord1D[3]) # 3D grid + elseif length(Grid.N) == 2 + X, Y, Z = xyz_grid(Grid.coord1D[1], y_val, Grid.coord1D[2]) # 2D grid # the fields need to be reshaped from 2D to 3D arrays; we replace them in the NamedTuple as follows names = keys(fields) - for ifield = 1:length(names) - dat = reshape(fields[ifield],Grid.N[1],1,Grid.N[2]); # reshape into 3D form + for ifield in 1:length(names) + dat = reshape(fields[ifield], Grid.N[1], 1, Grid.N[2]) # reshape into 3D form fields = merge(fields, [names[ifield] => dat]) end end - return CartData(X,Y,Z, fields) + return CartData(X, Y, Z, fields) end - - """ Holds a Q1 Finite Element Data set with vertex and cell data. The specified coordinates are the ones of the vertices. """ struct Q1Data <: AbstractGeneralGrid - x :: GeoUnit - y :: GeoUnit - z :: GeoUnit - fields :: NamedTuple - cellfields :: NamedTuple - atts :: Dict + x::GeoUnit + y::GeoUnit + z::GeoUnit + fields::NamedTuple + cellfields::NamedTuple + atts::Dict # Ensure that the data is of the correct format - function Q1Data(x,y,z,fields,cellfields, atts=nothing) + function Q1Data(x, y, z, fields, cellfields, atts = nothing) # Check ordering of the arrays in case of 3D - if sum(size(x).>1)==3 - if maximum(abs.(diff(x,dims=2)))>maximum(abs.(diff(x,dims=1))) || maximum(abs.(diff(x,dims=3)))>maximum(abs.(diff(x,dims=1))) + if sum(size(x) .> 1) == 3 + if maximum(abs.(diff(x, dims = 2))) > maximum(abs.(diff(x, dims = 1))) || maximum(abs.(diff(x, dims = 3))) > maximum(abs.(diff(x, dims = 1))) @warn "It appears that the x-array has a wrong ordering" end - if maximum(abs.(diff(y,dims=1)))>maximum(abs.(diff(y,dims=2))) || maximum(abs.(diff(y,dims=3)))>maximum(abs.(diff(y,dims=2))) + if maximum(abs.(diff(y, dims = 1))) > maximum(abs.(diff(y, dims = 2))) || maximum(abs.(diff(y, dims = 3))) > maximum(abs.(diff(y, dims = 2))) @warn "It appears that the y-array has a wrong ordering" end end # check depth & convert it to units of km in case no units are given or it has different length units - x = convert!(x,km) - y = convert!(y,km) - z = convert!(z,km) + x = convert!(x, km) + y = convert!(y, km) + z = convert!(z, km) # fields should be a NamedTuple. 
In case we simply provide an array, lets transfer it accordingly - if !(typeof(fields)<: NamedTuple) - if (typeof(fields)<: Tuple) - if length(fields)==1 - fields = (DataSet1=first(fields),) # The field is a tuple; create a NamedTuple from it + if !(typeof(fields) <: NamedTuple) + if (typeof(fields) <: Tuple) + if length(fields) == 1 + fields = (DataSet1 = first(fields),) # The field is a tuple; create a NamedTuple from it else error("Please employ a NamedTuple as input, rather than a Tuple") # out of luck end else - fields = (DataSet1=fields,) + fields = (DataSet1 = fields,) end end - DataField = fields[1]; - if typeof(DataField)<: Tuple - DataField = DataField[1]; # in case we have velocity vectors as input + DataField = fields[1] + if typeof(DataField) <: Tuple + DataField = DataField[1] # in case we have velocity vectors as input end - if !(size(x)==size(y)==size(z)==size(DataField)) + if !(size(x) == size(y) == size(z) == size(DataField)) error("The size of x/y/z and the vertex fields should all be the same!") end @@ -1355,14 +1360,14 @@ struct Q1Data <: AbstractGeneralGrid atts = Dict("note" => "No attributes were given to this dataset") else # check if a dict was given - if !(typeof(atts)<: Dict) + if !(typeof(atts) <: Dict) error("Attributes should be given as Dict!") end end - return new(x,y,z,fields,cellfields,atts) + return new(x, y, z, fields, cellfields, atts) - end + end end size(d::Q1Data) = size(d.x.val) .- 1 # size of mesh @@ -1370,31 +1375,31 @@ extrema(d::Q1Data) = [extrema(d.x.val); extrema(d.y.val); extrema(d.z.val)] # Print an overview of the Q1Data struct: function Base.show(io::IO, d::Q1Data) - println(io,"Q1Data ") - println(io," size : $(size(d))") - println(io," x ϵ [ $(minimum(d.x.val)) : $(maximum(d.x.val))]") - println(io," y ϵ [ $(minimum(d.y.val)) : $(maximum(d.y.val))]") - - if any(isnan.(NumValue(d.z))) - z_vals = extrema(d.z.val[isnan.(d.z.val).==false]) - println(io," z ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") + println(io, "Q1Data ") + println(io, " size : $(size(d))") + println(io, " x ϵ [ $(minimum(d.x.val)) : $(maximum(d.x.val))]") + println(io, " y ϵ [ $(minimum(d.y.val)) : $(maximum(d.y.val))]") + + if any(isnan.(NumValue(d.z))) + z_vals = extrema(d.z.val[isnan.(d.z.val) .== false]) + println(io, " z ϵ [ $(z_vals[1]) : $(z_vals[2])]; has NaN's") else z_vals = extrema(d.z.val) - println(io," z ϵ [ $(z_vals[1]) : $(z_vals[2])]") + println(io, " z ϵ [ $(z_vals[1]) : $(z_vals[2])]") end - println(io," fields : $(keys(d.fields))") - println(io," cellfields : $(keys(d.cellfields))") + println(io, " fields : $(keys(d.fields))") + println(io, " cellfields : $(keys(d.cellfields))") # Only print attributes if we have non-default attributes - if any( propertynames(d) .== :atts) + return if any(propertynames(d) .== :atts) show_atts = true - if haskey(d.atts,"note") - if d.atts["note"]=="No attributes were given to this dataset" + if haskey(d.atts, "note") + if d.atts["note"] == "No attributes were given to this dataset" show_atts = false end end if show_atts - println(io," attributes: $(keys(d.atts))") + println(io, " attributes: $(keys(d.atts))") end end end @@ -1416,7 +1421,7 @@ CartData attributes: ["note"] ``` """ -Q1Data(xyz::Tuple) = Q1Data(xyz[1],xyz[2],xyz[3],(Z=xyz[3],),NamedTuple()) +Q1Data(xyz::Tuple) = Q1Data(xyz[1], xyz[2], xyz[3], (Z = xyz[3],), NamedTuple()) """ @@ -1432,58 +1437,62 @@ Parameters - `cellfields` with the fields of the cells """ -struct FEData{dim, points_per_cell} - vertices :: Array{Float64} - connectivity :: Array{Int64} - 
fields :: NamedTuple - cellfields :: NamedTuple +struct FEData{dim, points_per_cell} + vertices::Array{Float64} + connectivity::Array{Int64} + fields::NamedTuple + cellfields::NamedTuple # Ensure that the data is of the correct format - function FEData(vertices,connectivity,fields=nothing,cellfields=nothing) - if isnothing(fields); fields = NamedTuple(); end - if isnothing(cellfields); cellfields = NamedTuple(); end + function FEData(vertices, connectivity, fields = nothing, cellfields = nothing) + if isnothing(fields) + fields = NamedTuple() + end + if isnothing(cellfields) + cellfields = NamedTuple() + end - dim = size(vertices,1) - points_per_cell = size(connectivity,1) - if points_per_cell>size(connectivity,2) + dim = size(vertices, 1) + points_per_cell = size(connectivity, 1) + if points_per_cell > size(connectivity, 2) println("# of points_per_cell > size(connectivity,2). Are you sure the ordering is ok?") end - if dim>size(vertices,2) + if dim > size(vertices, 2) println("# of dims > size(vertices,2). Are you sure the ordering is ok?") end - - return new{dim,points_per_cell}(vertices,connectivity,fields,cellfields) - end + + return new{dim, points_per_cell}(vertices, connectivity, fields, cellfields) + end end # Print an overview of the FEData struct: function Base.show(io::IO, d::FEData{dim, points_per_cell}) where {dim, points_per_cell} - println(io,"FEData{$dim,$points_per_cell} ") - println(io," elements : $(size(d.connectivity,2))") - println(io," vertices : $(size(d.vertices,2))") - println(io," x ϵ [ $(minimum(d.vertices,dims=2)[1]) : $(maximum(d.vertices,dims=2)[1])]") - println(io," y ϵ [ $(minimum(d.vertices,dims=2)[2]) : $(maximum(d.vertices,dims=2)[2])]") - println(io," z ϵ [ $(minimum(d.vertices,dims=2)[3]) : $(maximum(d.vertices,dims=2)[3])]") - println(io," fields : $(keys(d.fields))") - println(io," cellfields : $(keys(d.cellfields))") + println(io, "FEData{$dim,$points_per_cell} ") + println(io, " elements : $(size(d.connectivity, 2))") + println(io, " vertices : $(size(d.vertices, 2))") + println(io, " x ϵ [ $(minimum(d.vertices, dims = 2)[1]) : $(maximum(d.vertices, dims = 2)[1])]") + println(io, " y ϵ [ $(minimum(d.vertices, dims = 2)[2]) : $(maximum(d.vertices, dims = 2)[2])]") + println(io, " z ϵ [ $(minimum(d.vertices, dims = 2)[3]) : $(maximum(d.vertices, dims = 2)[3])]") + println(io, " fields : $(keys(d.fields))") + return println(io, " cellfields : $(keys(d.cellfields))") end -extrema(d::FEData) = extrema(d.vertices, dims=2) -size(d::FEData) = size(d.connectivity,2) +extrema(d::FEData) = extrema(d.vertices, dims = 2) +size(d::FEData) = size(d.connectivity, 2) """ X,Y,Z = coordinate_grids(Data::Q1Data; cell=false) Returns 3D coordinate arrays """ -function coordinate_grids(Data::Q1Data; cell=false) - X,Y,Z = NumValue(Data.x), NumValue(Data.y), NumValue(Data.z) +function coordinate_grids(Data::Q1Data; cell = false) + X, Y, Z = NumValue(Data.x), NumValue(Data.y), NumValue(Data.z) if cell - X,Y,Z = average_q1(X),average_q1(Y), average_q1(Z) + X, Y, Z = average_q1(X), average_q1(Y), average_q1(Z) end - return X,Y,Z + return X, Y, Z end @@ -1494,42 +1503,44 @@ Creates a Q1 FEM mesh from the `Q1Data` data which holds the vertex coordinates """ function convert2FEData(data::Q1Data) - X,Y,Z = coordinate_grids(data); - + X, Y, Z = coordinate_grids(data) + # Unique number of all vertices - el_num = zeros(Int64,size(X)) - num = 1; + el_num = zeros(Int64, size(X)) + num = 1 for I in eachindex(el_num) - el_num[I] = num; - num += 1; + el_num[I] = num + num += 1 end - + # 
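As a quick illustration of the `FEData` layout defined above (vertices stored as a `dim × nverts` matrix, connectivity as a `points_per_cell × ncells` matrix), here is a minimal construction sketch for a single tetrahedron; it assumes `FEData` is accessible from the package as used in these hunks.

```julia
using GeophysicalModelGenerator

# One tetrahedron: 4 vertices (3 × nverts) and one cell (4 × ncells)
vertices     = [0.0  1.0  0.0  0.0;
                0.0  0.0  1.0  0.0;
                0.0  0.0  0.0  1.0]
connectivity = reshape([1, 2, 3, 4], 4, 1)

# Vertex field `T` (one value per vertex) and cell field `regions` (one value per cell)
fe = FEData(vertices, connectivity, (T = [0.0, 1.0, 2.0, 3.0],), (regions = [1],))
# Note: for such a tiny mesh the constructor prints its ordering sanity check,
# since points_per_cell (4) exceeds the number of cells (1).
```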
Coordinates of all vertices vertices = [X[:]'; Y[:]'; Z[:]'] - + # Connectivity of all cells - nelx,nely,nelz = size(X) .- 1 - connectivity = zeros(Int64, 8, nelx*nely*nelz) - n = 1; - for k=1:nelz - for j=1:nely - for i=1:nelx - connectivity[:,n] = [el_num[i,j,k ], el_num[i+1,j,k ], el_num[i,j+1,k ], el_num[i+1,j+1,k ], - el_num[i,j,k+1], el_num[i+1,j,k+1], el_num[i,j+1,k+1], el_num[i+1,j+1,k+1]] - n += 1 + nelx, nely, nelz = size(X) .- 1 + connectivity = zeros(Int64, 8, nelx * nely * nelz) + n = 1 + for k in 1:nelz + for j in 1:nely + for i in 1:nelx + connectivity[:, n] = [ + el_num[i, j, k], el_num[i + 1, j, k], el_num[i, j + 1, k], el_num[i + 1, j + 1, k], + el_num[i, j, k + 1], el_num[i + 1, j, k + 1], el_num[i, j + 1, k + 1], el_num[i + 1, j + 1, k + 1], + ] + n += 1 end end end - data_fields=() + data_fields = () for f in data.fields data_fields = (data_fields..., f[:]) end - data_cellfields=() + data_cellfields = () for f in data.cellfields data_cellfields = (data_cellfields..., f[:]) end - return FEData(vertices,connectivity, NamedTuple{keys(data.fields)}(data_fields), NamedTuple{keys(data.cellfields)}(data_cellfields)) + return FEData(vertices, connectivity, NamedTuple{keys(data.fields)}(data_fields), NamedTuple{keys(data.cellfields)}(data_cellfields)) end diff --git a/src/event_counts.jl b/src/event_counts.jl index 8405e018..cb3e8fe1 100644 --- a/src/event_counts.jl +++ b/src/event_counts.jl @@ -13,15 +13,15 @@ The search radius is `R=radius_factor*(Δx² + Δy² + Δz²)^(1/3)` `Grid_counts` is `Grid` but with an additional field `Count` that has the number of hits """ -function point_to_nearest_grid(Point::CartData, Grid::CartData; radius_factor=1) +function point_to_nearest_grid(Point::CartData, Grid::CartData; radius_factor = 1) @assert length(size(Point.x)) == 1 # call routine - Count = point_to_nearest_grid(NumValue(Point.x),NumValue(Point.y), NumValue(Point.z), NumValue(Grid.x),NumValue(Grid.y),NumValue(Grid.z); radius_factor=radius_factor) + Count = point_to_nearest_grid(NumValue(Point.x), NumValue(Point.y), NumValue(Point.z), NumValue(Grid.x), NumValue(Grid.y), NumValue(Grid.z); radius_factor = radius_factor) # return CartGrid with added field - return addfield(Grid,"Count",Count); + return addfield(Grid, "Count", Count) end @@ -33,13 +33,13 @@ vicinity of 3D `CartGrid` specified by `Grid`. 
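The `convert2FEData` loop above assembles the 8-node connectivity of each Q1 hexahedron from a column-major vertex numbering (the `i` index runs fastest). A small standalone check of that ordering for a 2×2×2 vertex grid, i.e. a single cell:

```julia
# Column-major vertex numbering for a 2×2×2 vertex grid (one hexahedral Q1 cell)
el_num = reshape(1:8, 2, 2, 2)

i, j, k = 1, 1, 1
cell = [el_num[i, j, k],   el_num[i+1, j, k],   el_num[i, j+1, k],   el_num[i+1, j+1, k],
        el_num[i, j, k+1], el_num[i+1, j, k+1], el_num[i, j+1, k+1], el_num[i+1, j+1, k+1]]
# cell == [1, 2, 3, 4, 5, 6, 7, 8]: the four bottom-face vertices first, then the top face
```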
The search radius is `R=radius_fa `Grid_counts` is `Grid` but with an additional field `Count` that has the number of hits """ -function point_to_nearest_grid(pt_x,pt_y,pt_z, Grid::CartData; radius_factor=1) +function point_to_nearest_grid(pt_x, pt_y, pt_z, Grid::CartData; radius_factor = 1) # call routine - Count = point_to_nearest_grid(pt_x,pt_y,pt_z, NumValue(Grid.x),NumValue(Grid.y),NumValue(Grid.z); radius_factor=radius_factor) + Count = point_to_nearest_grid(pt_x, pt_y, pt_z, NumValue(Grid.x), NumValue(Grid.y), NumValue(Grid.z); radius_factor = radius_factor) # return CartGrid with added field - return addfield(Grid,"Count",Count); + return addfield(Grid, "Count", Count) end @@ -53,15 +53,15 @@ The search radius is `R=radius_factor*(Δx² + Δy² + Δz²)^(1/3)` `Grid_counts` is `Grid` but with an additional field `Count` that has the number of hits """ -function point_to_nearest_grid(Point::GeoData, Grid::GeoData; radius_factor=1) +function point_to_nearest_grid(Point::GeoData, Grid::GeoData; radius_factor = 1) @assert length(size(Point.lon)) == 1 # call routine - Count = point_to_nearest_grid(NumValue(Point.lon),NumValue(Point.lat), NumValue(Point.depth), NumValue(Grid.lon),NumValue(Grid.lat),NumValue(Grid.depth); radius_factor=radius_factor) + Count = point_to_nearest_grid(NumValue(Point.lon), NumValue(Point.lat), NumValue(Point.depth), NumValue(Grid.lon), NumValue(Grid.lat), NumValue(Grid.depth); radius_factor = radius_factor) # return CartGrid with added field - return addfield(Grid,"Count",Count); + return addfield(Grid, "Count", Count) end @@ -73,13 +73,13 @@ vicinity of 3D `GeoData` specified by `Grid`. The search radius is `R=radius_fac `Grid_counts` is `Grid` but with an additional field `Count` that has the number of hits """ -function point_to_nearest_grid(pt_x,pt_y,pt_z, Grid::GeoData; radius_factor=1) +function point_to_nearest_grid(pt_x, pt_y, pt_z, Grid::GeoData; radius_factor = 1) # call routine - Count = point_to_nearest_grid(pt_x,pt_y,pt_z, NumValue(Grid.lon),NumValue(Grid.lat),NumValue(Grid.depth); radius_factor=radius_factor) + Count = point_to_nearest_grid(pt_x, pt_y, pt_z, NumValue(Grid.lon), NumValue(Grid.lat), NumValue(Grid.depth); radius_factor = radius_factor) # return CartGrid with added field - return addfield(Grid,"Count",Count); + return addfield(Grid, "Count", Count) end """ @@ -90,23 +90,23 @@ vicinity of 3D grid point specified by `X`,`Y`,`Z` 3D coordinate arrays, with re The search radius is `R=radius_factor*(Δx² + Δy² + Δz²)^(1/3)` """ -function point_to_nearest_grid(pt_x,pt_y,pt_z, X,Y,Z; radius_factor=1) - - data = zeros(3,length(pt_x)); - data[1,:],data[2,:],data[3,:] = pt_x[:], pt_y[:], pt_z[:] +function point_to_nearest_grid(pt_x, pt_y, pt_z, X, Y, Z; radius_factor = 1) + + data = zeros(3, length(pt_x)) + data[1, :], data[2, :], data[3, :] = pt_x[:], pt_y[:], pt_z[:] tree = BallTree(data) # Generate tree with EQ data - + # Grid spacing - Δx,Δy,Δz = X[2,1,1]-X[1], Y[1,2,1]-Y[1], Z[1,1,2]-Z[1] + Δx, Δy, Δz = X[2, 1, 1] - X[1], Y[1, 2, 1] - Y[1], Z[1, 1, 2] - Z[1] + + points = zeros(3, length(X)) + points[1, :], points[2, :], points[3, :] = X[:], Y[:], Z[:] - points = zeros(3,length(X)); - points[1,:],points[2,:],points[3,:] = X[:], Y[:], Z[:] - - radius = radius_factor*(Δx^2 + Δy^2 + Δz^2)^(1/3) # search radius - idxs = inrange(tree, points, radius) # find points (NearestNeighbors package) - le = length.(idxs) # number of points + radius = radius_factor * (Δx^2 + Δy^2 + Δz^2)^(1 / 3) # search radius + idxs = inrange(tree, points, radius) # find points 
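A short usage sketch for the `point_to_nearest_grid` methods above, counting scattered points (for example hypocenters) around the nodes of a regular Cartesian grid. Building the grid from ranges via `xyz_grid` is an assumption here; the `CartData` call matches the constructors used elsewhere in this diff.

```julia
using GeophysicalModelGenerator

# Regular Cartesian grid and 50 random points inside it
X, Y, Z = xyz_grid(0.0:1.0:10.0, 0.0:1.0:10.0, -10.0:1.0:0.0)
Grid    = CartData(X, Y, Z, (Z = Z,))

pt_x, pt_y, pt_z = 10 .* rand(50), 10 .* rand(50), -10 .* rand(50)

Grid_counts = point_to_nearest_grid(pt_x, pt_y, pt_z, Grid)   # returns Grid with an added `Count` field
```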
(NearestNeighbors package) + le = length.(idxs) # number of points - count = zeros(Int64,size(X)); + count = zeros(Int64, size(X)) for i in eachindex(X) count[i] = le[i] end @@ -143,33 +143,33 @@ GeoData ```julia """ -function countmap(DataSet::GeoData,field::String,stepslon::Int64,stepslat::Int64) +function countmap(DataSet::GeoData, field::String, stepslon::Int64, stepslat::Int64) - lon = unique(DataSet.lon.val) - lat = unique(DataSet.lat.val) + lon = unique(DataSet.lon.val) + lat = unique(DataSet.lat.val) # create new lon/lat arrays which hold the boundaries of the control areas - lonstep = LinRange(lon[1],lon[end],stepslon) - latstep = LinRange(lat[1],lat[end],stepslat) - dlon = abs(lonstep[2]-lonstep[1]) - dlat = abs(latstep[2]-latstep[1]) - loncen = lonstep[1]+dlon/2:dlon:lonstep[end]-dlon/2 - latcen = latstep[1]+dlat/2:dlat:latstep[end]-dlat/2 - countmap = zeros(length(loncen),length(latcen)) - - expr = Meta.parse(field) - if !haskey(DataSet.fields,expr[1]) + lonstep = LinRange(lon[1], lon[end], stepslon) + latstep = LinRange(lat[1], lat[end], stepslat) + dlon = abs(lonstep[2] - lonstep[1]) + dlat = abs(latstep[2] - latstep[1]) + loncen = (lonstep[1] + dlon / 2):dlon:(lonstep[end] - dlon / 2) + latcen = (latstep[1] + dlat / 2):dlat:(latstep[end] - dlat / 2) + countmap = zeros(length(loncen), length(latcen)) + + expr = Meta.parse(field) + if !haskey(DataSet.fields, expr[1]) error("The GeoData set does not have the field: $(expr[1])") end # count the ones in every control area for i in eachindex(loncen) for j in eachindex(latcen) - indi = findall((lon .>= lonstep[i]) .& (lon .<= lonstep[i+1])) - indj = findall((lat .>= latstep[j]) .& (lat .<= latstep[j+1])) - dataint = DataSet.fields[expr[1]][indi,indj,1] - count = sum(dataint) - countmap[i,j] = count + indi = findall((lon .>= lonstep[i]) .& (lon .<= lonstep[i + 1])) + indj = findall((lat .>= latstep[j]) .& (lat .<= latstep[j + 1])) + dataint = DataSet.fields[expr[1]][indi, indj, 1] + count = sum(dataint) + countmap[i, j] = count end end @@ -178,9 +178,9 @@ function countmap(DataSet::GeoData,field::String,stepslon::Int64,stepslat::Int64 countmap = countmap ./ maxcount # create new GeoData - Lon3D,Lat3D, Data = lonlatdepth_grid(loncen,latcen,0); - Data[:,:,1] .= countmap - DatasetcountMap = GeoData(Lon3D,Lat3D,Data,(countmap=Data,)) + Lon3D, Lat3D, Data = lonlatdepth_grid(loncen, latcen, 0) + Data[:, :, 1] .= countmap + DatasetcountMap = GeoData(Lon3D, Lat3D, Data, (countmap = Data,)) return DatasetcountMap end diff --git a/src/movies_from_pics.jl b/src/movies_from_pics.jl index 96ca42df..ccf72161 100644 --- a/src/movies_from_pics.jl +++ b/src/movies_from_pics.jl @@ -1,7 +1,7 @@ # This routines enables creating movies from a series of images -# That is perhaps not structly a GMG routine, but still part of the workflow +# That is perhaps not structly a GMG routine, but still part of the workflow # as images, generated by Paraview, often need to be merged into a movie. -# that can be done with FFMPEG, but determining the optimal parameters can be oftentimes tricky +# that can be done with FFMPEG, but determining the optimal parameters can be oftentimes tricky # (while ensuring that the movie works well on mac/windows etc) using FFMPEG @@ -27,33 +27,33 @@ Optional options - `:mov_hires`: Higher-resolution quicktime movie (larger filesize & not compatible with windows) - `collect`: suppresses output of `FFMPEG` if `true` (default). 
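The counting kernel of `point_to_nearest_grid` above relies on `BallTree` and `inrange` from NearestNeighbors.jl; stripped of the GMG wrappers, the idea reduces to the following self-contained sketch:

```julia
using NearestNeighbors

# Scattered points (columns) and query locations (grid nodes, also as columns)
points  = rand(3, 1000)
queries = rand(3, 100)

tree   = BallTree(points)              # spatial search structure over the points
idxs   = inrange(tree, queries, 0.1)   # for every query: indices of points within radius 0.1
counts = length.(idxs)                 # number of hits per query location
```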
""" -function movie_from_images(; dir=pwd(), file=nothing, outfile=nothing, framerate=10, copy_to_current_dir=true, type=:mp4_default) - curdir = pwd(); +function movie_from_images(; dir = pwd(), file = nothing, outfile = nothing, framerate = 10, copy_to_current_dir = true, type = :mp4_default) + curdir = pwd() cd(dir) - files = split.(readdir(),".") + files = split.(readdir(), ".") files = files[length.(files) .== 3] # names should be filename.0001.png or something like that filenames = [f[1] for f in files] - fileext = files[1][3] - le = length(files[1][2]) + fileext = files[1][3] + le = length(files[1][2]) # try to determine the filename automatically (works if we have only one name in the director) - if length(unique(filenames))>1 && isnothing(file) + if length(unique(filenames)) > 1 && isnothing(file) error("you have more than one image series in the directory $dir; please specify the filename `file`.") elseif isnothing(file) file = unique(filenames)[1] end if isnothing(outfile) - outfile = file; #use same name as images + outfile = file #use same name as images end - if type==:mp4_default + if type == :mp4_default # this produces an *.mp4 movie that looks good on an ipad - outfile_ext = outfile*".mp4" + outfile_ext = outfile * ".mp4" cmd = `-y -framerate $framerate -f image2 -i $file.%0$(le)d.$fileext -vf pad="""width=ceil(iw/2)*2:height=ceil(ih/2)*2""" -f mp4 -vcodec libx264 -pix_fmt yuv420p $outfile_ext` - elseif type==:mov_hires - outfile_ext = outfile*".mov" + elseif type == :mov_hires + outfile_ext = outfile * ".mov" cmd = `-y -f image2 -framerate $framerate -i $file.%0$(le)d.$fileext -c:v prores_ks -profile:v 1 $outfile_ext` else error("unknown movie type $type") @@ -62,11 +62,11 @@ function movie_from_images(; dir=pwd(), file=nothing, outfile=nothing, framerate # run FFMPEG.exe(cmd, collect = true) - result = joinpath(pwd(),outfile_ext) + result = joinpath(pwd(), outfile_ext) if copy_to_current_dir # copy result - result = joinpath(curdir,outfile_ext) - cp(outfile_ext, result, force=true) + result = joinpath(curdir, outfile_ext) + cp(outfile_ext, result, force = true) end println("created movie: $result") diff --git a/src/nearest_points.jl b/src/nearest_points.jl index e0790d59..4936920c 100644 --- a/src/nearest_points.jl +++ b/src/nearest_points.jl @@ -14,14 +14,14 @@ Returns the index of the nearest point in (`X_pt`) to (`X`) and returns the inde function nearest_point_indices(X::Array, X_pt::Vector) # use nearest neighbour to interpolate data - coord = [X_pt';] - kdtree = KDTree(coord; leafsize = 10); - points = [vec(X)';]; - idx,_ = nn(kdtree, points); - + coord = [X_pt';] + kdtree = KDTree(coord; leafsize = 10) + points = [vec(X)';] + idx, _ = nn(kdtree, points) + # transform to correct shape - ind = zeros(Int64,size(X)) - ind[:] = idx + ind = zeros(Int64, size(X)) + ind[:] = idx return ind end @@ -31,17 +31,17 @@ end Returns the index of the nearest point in (`X_pt`,`Y_pt`) to (`X`,`Y`) and returns the index """ -function nearest_point_indices(X::Array,Y::Array, X_pt::Vector, Y_pt::Vector) +function nearest_point_indices(X::Array, Y::Array, X_pt::Vector, Y_pt::Vector) # use nearest neighbour to interpolate data - coord = [X_pt'; Y_pt']; - kdtree = KDTree(coord; leafsize = 10); - points = [vec(X)';vec(Y)']; - idx,_ = nn(kdtree, points); - + coord = [X_pt'; Y_pt'] + kdtree = KDTree(coord; leafsize = 10) + points = [vec(X)';vec(Y)'] + idx, _ = nn(kdtree, points) + # transform to correct shape - ind = zeros(Int64,size(X)) - ind[:] = idx + ind = zeros(Int64, size(X)) + ind[:] 
= idx return ind end @@ -52,17 +52,17 @@ end Returns the index of the nearest point in (`X_pt`,`Y_pt`,`Z_pt`) to (`X`,`Y`,`Z`) and returns the index """ -function nearest_point_indices(X::Array,Y::Array,Z::Array, X_pt::Vector,Y_pt::Vector,Z_pt::Vector) +function nearest_point_indices(X::Array, Y::Array, Z::Array, X_pt::Vector, Y_pt::Vector, Z_pt::Vector) # use nearest neighbour to interpolate data - coord = [X_pt'; Y_pt'; Z_pt']; - kdtree = KDTree(coord; leafsize = 10); - points = [vec(X)';vec(Y)'; vec(Z)']; - idx,_ = nn(kdtree, points); - + coord = [X_pt'; Y_pt'; Z_pt'] + kdtree = KDTree(coord; leafsize = 10) + points = [vec(X)';vec(Y)'; vec(Z)'] + idx, _ = nn(kdtree, points) + # transform to correct shape - ind = zeros(Int64,size(X)) - ind[:] = idx + ind = zeros(Int64, size(X)) + ind[:] = idx return ind end diff --git a/src/pTatin_IO.jl b/src/pTatin_IO.jl index f74536d6..13b00ce9 100644 --- a/src/pTatin_IO.jl +++ b/src/pTatin_IO.jl @@ -11,25 +11,25 @@ export write_pTatin_mesh, swap_yz_dims Returns a list with integers that are the tags for each of the cells """ function cell_tags_from_gmsh(mesh) - cell_entities = mesh.face_labeling.d_to_dface_to_entity[4] + cell_entities = mesh.face_labeling.d_to_dface_to_entity[4] cell_entities_unique = unique(cell_entities) - tag_unique = zeros(Int64,size(cell_entities_unique)) - - for i=1:length(cell_entities_unique) - for (n,tag) in enumerate(mesh.face_labeling.tag_to_entities) - if any(tag .== cell_entities_unique[i]) - tag_unique[i] = n - end + tag_unique = zeros(Int64, size(cell_entities_unique)) + + for i in 1:length(cell_entities_unique) + for (n, tag) in enumerate(mesh.face_labeling.tag_to_entities) + if any(tag .== cell_entities_unique[i]) + tag_unique[i] = n + end end end # create tags for cells - tags = zeros(Int64,length(cell_entities)) - for (i,entity) in enumerate(cell_entities_unique) - id = findall(cell_entities.==entity) + tags = zeros(Int64, length(cell_entities)) + for (i, entity) in enumerate(cell_entities_unique) + id = findall(cell_entities .== entity) tags[id] .= tag_unique[i] end - + return tags end @@ -39,13 +39,13 @@ end Write a binary file with the mesh information for pTatin """ -function write_pTatin_mesh(fe_mesh::FEData; out_file="md.bin", connectivity_zero_based=true) - +function write_pTatin_mesh(fe_mesh::FEData; out_file = "md.bin", connectivity_zero_based = true) + # Write mesh - write_FEmesh_mesh(fe_mesh; out_file=out_file, connectivity_zero_based=connectivity_zero_based); - + write_FEmesh_mesh(fe_mesh; out_file = out_file, connectivity_zero_based = connectivity_zero_based) + # Write cell and vertex fields - write_FEmesh_fields(fe_mesh); + write_FEmesh_fields(fe_mesh) return nothing end @@ -55,7 +55,7 @@ end Write a binary file with the mesh information for pTatin """ -write_pTatin_mesh(q1_mesh::Q1Data; out_file="md.bin", connectivity_zero_based=true) = write_pTatin_mesh(convert2FEData(q1_mesh), out_file=out_file, connectivity_zero_based=connectivity_zero_based) +write_pTatin_mesh(q1_mesh::Q1Data; out_file = "md.bin", connectivity_zero_based = true) = write_pTatin_mesh(convert2FEData(q1_mesh), out_file = out_file, connectivity_zero_based = connectivity_zero_based) """ @@ -63,38 +63,38 @@ write_pTatin_mesh(q1_mesh::Q1Data; out_file="md.bin", connectivity_zero_based=tr Writes a binary file with the mesh information for pTatin """ -function write_FEmesh_mesh(data::FEData; out_file="md.bin", connectivity_zero_based=true) - dims = size(data.vertices,1) - nverts = size(data.vertices,2) - arange = 0:(nverts-1) - @assert 
nverts>dims - - nvertices_cell = size(data.connectivity,1) - ncells = size(data.connectivity,2) - @assert ncells>nvertices_cell - +function write_FEmesh_mesh(data::FEData; out_file = "md.bin", connectivity_zero_based = true) + dims = size(data.vertices, 1) + nverts = size(data.vertices, 2) + arange = 0:(nverts - 1) + @assert nverts > dims + + nvertices_cell = size(data.connectivity, 1) + ncells = size(data.connectivity, 2) + @assert ncells > nvertices_cell + shift_connectivity = 0 if connectivity_zero_based shift_connectivity = 1 end - - npart = 1 # don't partition currenrtly - f = open(out_file,"w") - write(f,Int32(nverts)) # nverts - write(f,Int32(dims)) # coordinate_dimension - write(f,data.vertices[:]) # vertices + npart = 1 # don't partition currenrtly + + f = open(out_file, "w") + write(f, Int32(nverts)) # nverts + write(f, Int32(dims)) # coordinate_dimension + write(f, data.vertices[:]) # vertices - write(f,Int32(ncells)) # ncells - write(f,Int32(nvertices_cell)) # points-per-cell - write(f,Int32.(data.connectivity[:] .- shift_connectivity)) # cells + write(f, Int32(ncells)) # ncells + write(f, Int32(nvertices_cell)) # points-per-cell + write(f, Int32.(data.connectivity[:] .- shift_connectivity)) # cells - write(f,Int32(npart)) # npartitions - write(f,minimum(data.vertices,dims=2)) # bbmin - write(f,maximum(data.vertices,dims=2)) # bbmax + write(f, Int32(npart)) # npartitions + write(f, minimum(data.vertices, dims = 2)) # bbmin + write(f, maximum(data.vertices, dims = 2)) # bbmax - write(f,Int32(ncells)) # ncells - write(f,Int32.(arange)) # arange + write(f, Int32(ncells)) # ncells + write(f, Int32.(arange)) # arange close(f) println("Wrote pTatin mesh : $(out_file)") @@ -103,38 +103,37 @@ function write_FEmesh_mesh(data::FEData; out_file="md.bin", connectivity_zero_ba end - """ write_FEmesh_fields(data::FEData) Writes cell and vertex fields to disk to be read by pTatin """ -function write_FEmesh_fields(data::FEData) - - ncells = size(data.connectivity,2) - nverts = size(data.vertices,2) +function write_FEmesh_fields(data::FEData) + + ncells = size(data.connectivity, 2) + nverts = size(data.vertices, 2) # Mapping used internally dtype_map = Dict() - dtype_map[:Int16] = 10; - dtype_map[:Int32] = 11; - dtype_map[:Int64] = 12; - dtype_map[:Float32] = 20; - dtype_map[:Float64] = 21; + dtype_map[:Int16] = 10 + dtype_map[:Int32] = 11 + dtype_map[:Int64] = 12 + dtype_map[:Float32] = 20 + dtype_map[:Float64] = 21 # Write the cell fields if !isnothing(data.cellfields) names = keys(data.cellfields) - for i = 1: length(data.cellfields) - _write_field_file(String(names[i])*"_cell.bin", data.cellfields[i], ncells, dtype_map) + for i in 1:length(data.cellfields) + _write_field_file(String(names[i]) * "_cell.bin", data.cellfields[i], ncells, dtype_map) end end # Write the vertex fields if !isnothing(data.fields) names = keys(data.fields) - for i = 1: length(data.fields) - _write_field_file(String(names[i])*"_vertex.bin", data.fields[i], nverts, dtype_map) + for i in 1:length(data.fields) + _write_field_file(String(names[i]) * "_vertex.bin", data.fields[i], nverts, dtype_map) end end @@ -145,10 +144,10 @@ end function _write_field_file(fname::String, field, len, dtype_map) @assert length(field) == len - f = open(fname,"w") - write(f,Int32(len)) # length - write(f,Int32(dtype_map[Symbol(eltype(field))])) # field type - write(f,field[:]) # field + f = open(fname, "w") + write(f, Int32(len)) # length + write(f, Int32(dtype_map[Symbol(eltype(field))])) # field type + write(f, field[:]) # field close(f) 
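For reference, the per-field binary layout written by `_write_field_file` above is simply an `Int32` length, an `Int32` dtype code (following `dtype_map`), and the raw data. A sketch of a matching reader; the helper name `read_field_file` is hypothetical and not part of the package:

```julia
# Inverse of the dtype_map used above
const code_to_type = Dict(10 => Int16, 11 => Int32, 12 => Int64, 20 => Float32, 21 => Float64)

function read_field_file(fname::String)
    return open(fname, "r") do f
        len = Int(read(f, Int32))                 # number of entries
        T   = code_to_type[Int(read(f, Int32))]   # element type from the dtype code
        read!(f, Vector{T}(undef, len))           # raw field data
    end
end
```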
println("Wrote pTatin field : $fname") @@ -162,6 +161,5 @@ This swaps the `y` and `z` dimensions of the FEData object, which is useful for """ function swap_yz_dims(fe_data::FEData) vertices = copy(fe_data.vertices) - return FEData(vertices[[1,3,2],:], fe_data.connectivity, fe_data.fields, fe_data.cellfields) + return FEData(vertices[[1, 3, 2], :], fe_data.connectivity, fe_data.fields, fe_data.cellfields) end - diff --git a/src/sea_level.jl b/src/sea_level.jl index 5daef227..f4901156 100644 --- a/src/sea_level.jl +++ b/src/sea_level.jl @@ -20,11 +20,11 @@ struct SeaLevel{T} function SeaLevel(name::Symbol; flip_elevation = false, flip_age = false) age, elevation = load_sea_level( - name; + name; flip_age = flip_age, flip_elevation = flip_elevation ) - new{eltype(age)}(age, elevation, name) + return new{eltype(age)}(age, elevation, name) end end @@ -45,4 +45,4 @@ function load_sea_level(name::Symbol; flip_elevation = false, flip_age = false) flip_elevation && reverse!(h) flip_age && reverse!(age) return h, age -end \ No newline at end of file +end diff --git a/src/sea_lvl.jl b/src/sea_lvl.jl index b55dc705..8ea34173 100644 --- a/src/sea_lvl.jl +++ b/src/sea_lvl.jl @@ -2,7 +2,7 @@ export sea_level_files, SeaLevel, load_sea_level, curve_name pkg_dir = pkgdir(GeophysicalModelGenerator) -const sea_level_path = joinpath(pkg_dir, joinpath("src","sea_level_data")) +const sea_level_path = joinpath(pkg_dir, joinpath("src", "sea_level_data")) const sea_level_files = Dict( :Spratt_800ka => "Spratt2016-800ka.txt", @@ -22,11 +22,11 @@ struct SeaLevel{T} function SeaLevel(name::Symbol; flip_elevation = false, flip_age = false) age, elevation = load_sea_level( - name; + name; flip_age = flip_age, flip_elevation = flip_elevation ) - new{eltype(age)}(age, elevation, name) + return new{eltype(age)}(age, elevation, name) end end @@ -40,10 +40,10 @@ Base.length(x::SeaLevel) = length(x.elevation) curve_name(x::SeaLevel) = x.name function load_sea_level(name::Symbol; flip_elevation = false, flip_age = false) - fname = sea_level_files[name] - data = readdlm(joinpath(sea_level_path, fname)) - age, h = data[:, 1], data[:, 2] + fname = sea_level_files[name] + data = readdlm(joinpath(sea_level_path, fname)) + age, h = data[:, 1], data[:, 2] flip_elevation && reverse!(h) flip_age && reverse!(age) return h, age -end \ No newline at end of file +end diff --git a/src/stl.jl b/src/stl.jl index fc79ce31..6762e60d 100644 --- a/src/stl.jl +++ b/src/stl.jl @@ -172,39 +172,39 @@ https://github.com/marmakoide/inside-3d-mesh This again is described in the following [paper](https://igl.ethz.ch/projects/winding-number/) by Alec Jacobson, Ladislav Kavan and Olga Sorkine-Hornung. """ -function isinside_closed_STL(mesh::Mesh, Pt::Vector, eps=1e-3) - - # Compute triangle vertices and their norms relative to X - M_vec = [mesh.position[i]-Pt[:] for i in eachindex(mesh.position)]; - M = zeros(length(M_vec),3); - for i=1:length(M_vec) - M[i,:] = Float64.(M_vec[i][1:3]); - end - M_norm = sqrt.(sum(M.^2,dims=2)) - - # Accumulate generalized winding number per triangle - winding_number = 0. 
- for iT=1:length(mesh) - t = mesh[iT].points; - M = zeros(3,3) - for i=1:3 - M[i,:] = t[i]-Pt[:]; - end - M_norm = sqrt.(sum(M.^2,dims=1)) +function isinside_closed_STL(mesh::Mesh, Pt::Vector, eps = 1.0e-3) - A,B,C = M[1,:], M[2,:], M[3,:] - a,b,c = M_norm[1], M_norm[2], M_norm[3] + # Compute triangle vertices and their norms relative to X + M_vec = [mesh.position[i] - Pt[:] for i in eachindex(mesh.position)] + M = zeros(length(M_vec), 3) + for i in 1:length(M_vec) + M[i, :] = Float64.(M_vec[i][1:3]) + end + M_norm = sqrt.(sum(M .^ 2, dims = 2)) + + # Accumulate generalized winding number per triangle + winding_number = 0.0 + for iT in 1:length(mesh) + t = mesh[iT].points + M = zeros(3, 3) + for i in 1:3 + M[i, :] = t[i] - Pt[:] + end + M_norm = sqrt.(sum(M .^ 2, dims = 1)) - winding_number += atan(det(M), (a * b * c) + c * dot(A, B) + a * dot(B, C) + b * dot(C, A)) - end + A, B, C = M[1, :], M[2, :], M[3, :] + a, b, c = M_norm[1], M_norm[2], M_norm[3] - # Job done - if winding_number >= 2pi - eps - isinside = true; - else + winding_number += atan(det(M), (a * b * c) + c * dot(A, B) + a * dot(B, C) + b * dot(C, A)) + end + + # Job done + if winding_number >= 2pi - eps + isinside = true + else isinside = false - end + end - return isinside + return isinside end diff --git a/src/surface_functions.jl b/src/surface_functions.jl index f69fc452..a8095476 100644 --- a/src/surface_functions.jl +++ b/src/surface_functions.jl @@ -1,7 +1,7 @@ # This contains a number of routines that deal with surfaces export remove_NaN_surface!, drape_on_topo, is_surface, fit_surface_to_points export above_surface, below_surface, interpolate_data_surface -import Base: +,- +import Base: +, - """ issurf = is_surface(surf::AbstractGeneralGrid) @@ -17,44 +17,44 @@ function is_surface(surf::AbstractGeneralGrid) return issurf end -function +(a::_T, b::_T) where _T<:AbstractGeneralGrid +function +(a::_T, b::_T) where {_T <: AbstractGeneralGrid} @assert size(a) == size(b) - return _addSurfaces(a,b) + return _addSurfaces(a, b) end -function -(a::_T, b::_T) where _T<:AbstractGeneralGrid +function -(a::_T, b::_T) where {_T <: AbstractGeneralGrid} @assert size(a) == size(b) - return _subtractSurfaces(a,b) + return _subtractSurfaces(a, b) end # Internal routines -_addSurfaces(a::_T, b::_T) where _T<:GeoData = GeoData(a.lon.val, a.lat.val, a.depth.val + b.depth.val, merge(a.fields,b.fields)) -_addSurfaces(a::_T, b::_T) where _T<:UTMData = UTMData(a.EW.val, a.NS.val, a.depth.val + b.depth.val, merge(a.fields,b.fields)) -_addSurfaces(a::_T, b::_T) where _T<:CartData = CartData(a.x.val, a.y.val, a.z.val + b.z.val, merge(a.fields,b.fields)) -_addSurfaces(a::_T, b::_T) where _T<:ParaviewData = ParaviewData(a.x.val, a.y.val, a.z.val + b.z.val, merge(a.fields,b.fields)) +_addSurfaces(a::_T, b::_T) where {_T <: GeoData} = GeoData(a.lon.val, a.lat.val, a.depth.val + b.depth.val, merge(a.fields, b.fields)) +_addSurfaces(a::_T, b::_T) where {_T <: UTMData} = UTMData(a.EW.val, a.NS.val, a.depth.val + b.depth.val, merge(a.fields, b.fields)) +_addSurfaces(a::_T, b::_T) where {_T <: CartData} = CartData(a.x.val, a.y.val, a.z.val + b.z.val, merge(a.fields, b.fields)) +_addSurfaces(a::_T, b::_T) where {_T <: ParaviewData} = ParaviewData(a.x.val, a.y.val, a.z.val + b.z.val, merge(a.fields, b.fields)) -_subtractSurfaces(a::_T, b::_T) where _T<:GeoData = GeoData(a.lon.val, a.lat.val, a.depth.val - b.depth.val, merge(a.fields,b.fields)) -_subtractSurfaces(a::_T, b::_T) where _T<:UTMData = UTMData(a.EW.val, a.NS.val, a.depth.val - b.depth.val, 
merge(a.fields,b.fields)) -_subtractSurfaces(a::_T, b::_T) where _T<:CartData = CartData(a.x.val, a.y.val, a.z.val - b.z.val, merge(a.fields,b.fields)) -_subtractSurfaces(a::_T, b::_T) where _T<:ParaviewData = ParaviewData(a.x.val, a.y.val, a.z.val - b.z.val, merge(a.fields,b.fields)) +_subtractSurfaces(a::_T, b::_T) where {_T <: GeoData} = GeoData(a.lon.val, a.lat.val, a.depth.val - b.depth.val, merge(a.fields, b.fields)) +_subtractSurfaces(a::_T, b::_T) where {_T <: UTMData} = UTMData(a.EW.val, a.NS.val, a.depth.val - b.depth.val, merge(a.fields, b.fields)) +_subtractSurfaces(a::_T, b::_T) where {_T <: CartData} = CartData(a.x.val, a.y.val, a.z.val - b.z.val, merge(a.fields, b.fields)) +_subtractSurfaces(a::_T, b::_T) where {_T <: ParaviewData} = ParaviewData(a.x.val, a.y.val, a.z.val - b.z.val, merge(a.fields, b.fields)) """ remove_NaN_surface!(Z::Array,X::Array,Y::Array) Removes NaN's from a grid `Z` by taking the closest points as specified by `X` and `Y`. """ -function remove_NaN_surface!(Z,X,Y) +function remove_NaN_surface!(Z, X, Y) @assert size(Z) == size(X) == size(Y) # use nearest neighbour to interpolate data - id = findall(isnan.(Z) .== false) - id_NaN = findall(isnan.(Z)) + id = findall(isnan.(Z) .== false) + id_NaN = findall(isnan.(Z)) - coord = [X[id]'; Y[id]']; - kdtree = KDTree(coord; leafsize = 10); + coord = [X[id]'; Y[id]'] + kdtree = KDTree(coord; leafsize = 10) - points = [X[id_NaN]'; Y[id_NaN]']; - idx,dist = nn(kdtree, points); + points = [X[id_NaN]'; Y[id_NaN]'] + idx, dist = nn(kdtree, points) Z[id_NaN] = Z[id[idx]] @@ -72,58 +72,60 @@ function drape_on_topo(Topo::GeoData, Data::GeoData) @assert is_surface(Topo) @assert is_surface(Data) - Lon,Lat,_ = lonlatdepth_grid( Topo.lon.val[:,1,1], Topo.lat.val[1,:,1],Topo.depth.val[1,1,:]); + Lon, Lat, _ = lonlatdepth_grid(Topo.lon.val[:, 1, 1], Topo.lat.val[1, :, 1], Topo.depth.val[1, 1, :]) # use nearest neighbour to interpolate data - idx = nearest_point_indices(Lon,Lat, vec(Data.lon.val), vec(Data.lat.val) ); + idx = nearest_point_indices(Lon, Lat, vec(Data.lon.val), vec(Data.lat.val)) - idx_out = findall( (Lon .< minimum(Data.lon.val)) .| (Lon .> maximum(Data.lon.val)) .| - (Lat .< minimum(Data.lat.val)) .| (Lat .> maximum(Data.lat.val)) ) + idx_out = findall( + (Lon .< minimum(Data.lon.val)) .| (Lon .> maximum(Data.lon.val)) .| + (Lat .< minimum(Data.lat.val)) .| (Lat .> maximum(Data.lat.val)) + ) - fields_new = Topo.fields; - field_names = keys(Data.fields); + fields_new = Topo.fields + field_names = keys(Data.fields) - for i = 1:length(Data.fields) + for i in 1:length(Data.fields) if typeof(Data.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = Data.fields[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(typeof(data_tuple[1][1]),size(Topo.lon.val,1),size(Topo.lon.val,2),size(Topo.lon.val,3),length(Data.fields[i])); # create a 3D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array = zeros(typeof(data_tuple[1][1]), size(Topo.lon.val, 1), size(Topo.lon.val, 2), size(Topo.lon.val, 3), length(Data.fields[i])) # create a 3D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) - data_field = data_tuple[j]; - tmp = data_array[:,:,:,1]; - tmp = data_field[idx] - data_array[:,:,:,j] = tmp + for j in 1:length(data_tuple) + data_field = data_tuple[j] + tmp = data_array[:, :, :, 1] + tmp = data_field[idx] + data_array[:, :, :, j] = tmp end - data_new = 
tuple([data_array[:,:,:,c] for c in 1:size(data_array,4)]...) # transform 4D matrix to tuple + data_new = tuple([data_array[:, :, :, c] for c in 1:size(data_array, 4)]...) # transform 4D matrix to tuple # remove points outside domain - for j=1:length(data_tuple) - tmp = data_new[j]; + for j in 1:length(data_tuple) + tmp = data_new[j] tmp[idx_out] .= NaN - data_array[:,:,:,j] = tmp + data_array[:, :, :, j] = tmp end - data_new = tuple([data_array[:,:,:,c] for c in 1:size(data_array,4)]...) # transform 4D matrix to tuple + data_new = tuple([data_array[:, :, :, c] for c in 1:size(data_array, 4)]...) # transform 4D matrix to tuple else # scalar field - data_new = zeros(typeof(Data.fields[i][1]), size(Topo.lon.val,1),size(Topo.lon.val,2),size(Topo.lon.val,3)); - data_new = Data.fields[i][idx] # interpolate data field + data_new = zeros(typeof(Data.fields[i][1]), size(Topo.lon.val, 1), size(Topo.lon.val, 2), size(Topo.lon.val, 3)) + data_new = Data.fields[i][idx] # interpolate data field end # replace the one - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name + fields_new = merge(fields_new, new_field) # replace the field in fields_new end - Topo_new = GeoData(Topo.lon.val,Topo.lat.val,Topo.depth.val, fields_new) + Topo_new = GeoData(Topo.lon.val, Topo.lat.val, Topo.depth.val, fields_new) return Topo_new end @@ -138,8 +140,8 @@ function drape_on_topo(Topo::CartData, Data::CartData) @assert is_surface(Topo) @assert is_surface(Data) - Topo_lonlat = GeoData(ustrip.(Topo.x.val),ustrip.(Topo.y.val), ustrip.(Topo.z.val), Topo.fields ) - Data_lonlat = GeoData(ustrip.(Data.x.val),ustrip.(Data.y.val), ustrip.(Data.z.val), Data.fields ) + Topo_lonlat = GeoData(ustrip.(Topo.x.val), ustrip.(Topo.y.val), ustrip.(Topo.z.val), Topo.fields) + Data_lonlat = GeoData(ustrip.(Data.x.val), ustrip.(Data.y.val), ustrip.(Data.z.val), Data.fields) Topo_new_lonlat = drape_on_topo(Topo_lonlat, Data_lonlat) @@ -158,9 +160,9 @@ This fits the `depth` values of the surface `surf` to the `depth` value of the c function fit_surface_to_points(surf::GeoData, lon_pt::Vector, lat_pt::Vector, depth_pt::Vector) @assert is_surface(surf) - idx = nearest_point_indices(NumValue(surf.lon),NumValue(surf.lat), lon_pt, lat_pt); + idx = nearest_point_indices(NumValue(surf.lon), NumValue(surf.lat), lon_pt, lat_pt) depth = NumValue(surf.depth) - depth[idx] .= depth_pt[idx]; + depth[idx] .= depth_pt[idx] surf_new = surf surf_new.depth .= depth @@ -177,7 +179,7 @@ This fits the `depth` values of the surface `surf` to the `depth` value of the c function fit_surface_to_points(surf::CartData, X_pt::Vector, Y_pt::Vector, Z_pt::Vector) @assert is_surface(surf) - idx = nearest_point_indices(NumValue(surf.x),NumValue(surf.y), X_pt[:], Y_pt[:]); + idx = nearest_point_indices(NumValue(surf.x), NumValue(surf.y), X_pt[:], Y_pt[:]) depth = NumValue(surf.z) depth = Z_pt[idx] @@ -187,7 +189,6 @@ function fit_surface_to_points(surf::CartData, X_pt::Vector, Y_pt::Vector, Z_pt: end - """ above_surface(Data::GeoData, DataSurface::GeoData; above=true) @@ -223,24 +224,24 @@ julia> Above = above_surface(Data_set3D, Data_Moho); Now, `Above` is a boolean array that is true for points above the surface and false for points below and at the surface. 
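The `+`/`-` operators defined for surfaces earlier in this file add or subtract the depth (or `z`) values of two equally sized surfaces and merge their fields; a short sketch with two flat `GeoData` surfaces, assuming `lonlatdepth_grid` accepts ranges and a scalar depth as used in this diff:

```julia
using GeophysicalModelGenerator

# Two flat surfaces on the same lon/lat grid, at -2 km and -1 km depth
Lon, Lat, D1 = lonlatdepth_grid(10:0.5:12, 40:0.5:42, -2.0)
_,   _,   D2 = lonlatdepth_grid(10:0.5:12, 40:0.5:42, -1.0)

surf1 = GeoData(Lon, Lat, D1, (Z = D1,))
surf2 = GeoData(Lon, Lat, D2, (Z = D2,))

surf_sum  = surf1 + surf2   # depth values add element-wise; fields are merged
surf_diff = surf1 - surf2   # depth values subtract element-wise
```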
""" -function above_surface(Data::GeoData, DataSurface::GeoData; above=true) +function above_surface(Data::GeoData, DataSurface::GeoData; above = true) - if size(DataSurface.lon)[3]!=1 + if size(DataSurface.lon)[3] != 1 error("It seems that DataSurface is not a surface") end # Create interpolation object for surface - Lon_vec = DataSurface.lon.val[:,1,1]; - Lat_vec = DataSurface.lat.val[1,:,1]; - interpol = linear_interpolation((Lon_vec, Lat_vec), ustrip.(DataSurface.depth.val[:,:,1])); # create interpolation object + Lon_vec = DataSurface.lon.val[:, 1, 1] + Lat_vec = DataSurface.lat.val[1, :, 1] + interpol = linear_interpolation((Lon_vec, Lat_vec), ustrip.(DataSurface.depth.val[:, :, 1])) # create interpolation object - DepthSurface = interpol.(Data.lon.val,Data.lat.val); - DepthSurface = DepthSurface*unit(DataSurface.depth.val[1]) + DepthSurface = interpol.(Data.lon.val, Data.lat.val) + DepthSurface = DepthSurface * unit(DataSurface.depth.val[1]) if above - Above = Data.depth.val .> DepthSurface; + Above = Data.depth.val .> DepthSurface else - Above = Data.depth.val .< DepthSurface; + Above = Data.depth.val .< DepthSurface end return Above @@ -252,7 +253,7 @@ end Determines if points within the 3D `Data` structure are below the GeoData surface `DataSurface` """ function below_surface(Data::GeoData, DataSurface::GeoData) - return above_surface(Data::GeoData, DataSurface::GeoData; above=false) + return above_surface(Data::GeoData, DataSurface::GeoData; above = false) end """ @@ -260,12 +261,12 @@ end Determines if points within the 3D `Data_Cart` structure are above the Cartesian surface `DataSurface_Cart` """ -function above_surface(Data_Cart::ParaviewData, DataSurface_Cart::ParaviewData; above=true) +function above_surface(Data_Cart::ParaviewData, DataSurface_Cart::ParaviewData; above = true) - Data = GeoData(ustrip.(Data_Cart.x.val), ustrip.(Data_Cart.y.val), ustrip.(Data_Cart.z.val), Data_Cart.fields) - DataSurface = GeoData(ustrip.(DataSurface_Cart.x.val),ustrip.(DataSurface_Cart.y.val), ustrip.(DataSurface_Cart.z.val), DataSurface_Cart.fields ) + Data = GeoData(ustrip.(Data_Cart.x.val), ustrip.(Data_Cart.y.val), ustrip.(Data_Cart.z.val), Data_Cart.fields) + DataSurface = GeoData(ustrip.(DataSurface_Cart.x.val), ustrip.(DataSurface_Cart.y.val), ustrip.(DataSurface_Cart.z.val), DataSurface_Cart.fields) - return Above = above_surface(Data, DataSurface; above=above) + return Above = above_surface(Data, DataSurface; above = above) end """ @@ -273,17 +274,17 @@ end Determines if points within the 3D `Data_Cart` structure are above the Cartesian surface `DataSurface_Cart` """ -function above_surface(Data_Cart::Union{Q1Data,CartData}, DataSurface_Cart::CartData; above=true, cell=false) +function above_surface(Data_Cart::Union{Q1Data, CartData}, DataSurface_Cart::CartData; above = true, cell = false) - X,Y,Z = coordinate_grids(Data_Cart, cell=cell) + X, Y, Z = coordinate_grids(Data_Cart, cell = cell) if cell - Data = GeoData(ustrip.(X), ustrip.(Y), ustrip.(Z), Data_Cart.cellfields) + Data = GeoData(ustrip.(X), ustrip.(Y), ustrip.(Z), Data_Cart.cellfields) else - Data = GeoData(ustrip.(X), ustrip.(Y), ustrip.(Z), Data_Cart.fields) + Data = GeoData(ustrip.(X), ustrip.(Y), ustrip.(Z), Data_Cart.fields) end - DataSurface = GeoData(ustrip.(DataSurface_Cart.x.val),ustrip.(DataSurface_Cart.y.val), ustrip.(DataSurface_Cart.z.val), DataSurface_Cart.fields ) + DataSurface = GeoData(ustrip.(DataSurface_Cart.x.val), ustrip.(DataSurface_Cart.y.val), ustrip.(DataSurface_Cart.z.val), 
DataSurface_Cart.fields) - return Above = above_surface(Data, DataSurface; above=above) + return Above = above_surface(Data, DataSurface; above = above) end """ @@ -291,16 +292,16 @@ end Determines if points described by the `Grid` CartGrid structure are above the Cartesian surface `DataSurface_Cart` """ -function above_surface(Grid::CartGrid, DataSurface_Cart::CartData; above=true, cell=false) +function above_surface(Grid::CartGrid, DataSurface_Cart::CartData; above = true, cell = false) if cell - X,Y,Z = xyz_grid(Grid.coord1D_cen...) + X, Y, Z = xyz_grid(Grid.coord1D_cen...) else - X,Y,Z = xyz_grid(Grid.coord1D...) + X, Y, Z = xyz_grid(Grid.coord1D...) end - Data = CartData(X,Y,Z,(Z=Z,)) + Data = CartData(X, Y, Z, (Z = Z,)) - return above_surface(Data, DataSurface_Cart; above=above) + return above_surface(Data, DataSurface_Cart; above = above) end @@ -310,7 +311,7 @@ end Determines if points described by the `Grid` CartGrid structure are above the Cartesian surface `DataSurface_Cart` """ function below_surface(Grid::CartGrid, DataSurface_Cart::CartData) - return above_surface(Grid, DataSurface_Cart; above=false) + return above_surface(Grid, DataSurface_Cart; above = false) end @@ -320,7 +321,7 @@ end Determines if points within the 3D Data_Cart structure are below the Cartesian surface DataSurface_Cart """ function below_surface(Data_Cart::ParaviewData, DataSurface_Cart::ParaviewData) - return above_surface(Data_Cart::ParaviewData, DataSurface_Cart::ParaviewData; above=false) + return above_surface(Data_Cart::ParaviewData, DataSurface_Cart::ParaviewData; above = false) end """ @@ -328,8 +329,8 @@ end Determines if points within the 3D `Data_Cart` structure are below the Cartesian surface `DataSurface_Cart` """ -function below_surface(Data_Cart::Union{CartData,Q1Data}, DataSurface_Cart::CartData, cell=false) - return above_surface(Data_Cart, DataSurface_Cart; above=false, cell=cell) +function below_surface(Data_Cart::Union{CartData, Q1Data}, DataSurface_Cart::CartData, cell = false) + return above_surface(Data_Cart, DataSurface_Cart; above = false, cell = cell) end """ @@ -364,14 +365,14 @@ julia> Surf_interp = interpolate_data_surface(Data, surf) function interpolate_data_surface(V::ParaviewData, Surf::ParaviewData) # Create GeoData structure: - V_geo = GeoData(V.x.val, V.y.val, V.z.val, V.fields) - V_geo.depth.val = ustrip(V_geo.depth.val); + V_geo = GeoData(V.x.val, V.y.val, V.z.val, V.fields) + V_geo.depth.val = ustrip(V_geo.depth.val) - Surf_geo = GeoData(Surf.x.val, Surf.y.val, Surf.z.val, Surf.fields) - Surf_geo.depth.val = ustrip(Surf_geo.depth.val); + Surf_geo = GeoData(Surf.x.val, Surf.y.val, Surf.z.val, Surf.fields) + Surf_geo.depth.val = ustrip(Surf_geo.depth.val) - Surf_interp_geo = interpolate_data_surface(V_geo, Surf_geo) - Surf_interp = ParaviewData(Surf_interp_geo.lon.val, Surf_interp_geo.lat.val, ustrip.(Surf_interp_geo.depth.val), Surf_interp_geo.fields) + Surf_interp_geo = interpolate_data_surface(V_geo, Surf_geo) + Surf_interp = ParaviewData(Surf_interp_geo.lon.val, Surf_interp_geo.lat.val, ustrip.(Surf_interp_geo.depth.val), Surf_interp_geo.fields) return Surf_interp @@ -380,14 +381,14 @@ end function interpolate_data_surface(V::CartData, Surf::CartData) # Create GeoData structure: - V_geo = GeoData(V.x.val, V.y.val, V.z.val, V.fields) - V_geo.depth.val = ustrip(V_geo.depth.val); + V_geo = GeoData(V.x.val, V.y.val, V.z.val, V.fields) + V_geo.depth.val = ustrip(V_geo.depth.val) - Surf_geo = GeoData(Surf.x.val, Surf.y.val, Surf.z.val, Surf.fields) - Surf_geo.depth.val 
= ustrip(Surf_geo.depth.val); + Surf_geo = GeoData(Surf.x.val, Surf.y.val, Surf.z.val, Surf.fields) + Surf_geo.depth.val = ustrip(Surf_geo.depth.val) - Surf_interp_geo = interpolate_data_surface(V_geo, Surf_geo) - Surf_interp = CartData(Surf_interp_geo.lon.val, Surf_interp_geo.lat.val, ustrip.(Surf_interp_geo.depth.val), Surf_interp_geo.fields) + Surf_interp_geo = interpolate_data_surface(V_geo, Surf_geo) + Surf_interp = CartData(Surf_interp_geo.lon.val, Surf_interp_geo.lat.val, ustrip.(Surf_interp_geo.depth.val), Surf_interp_geo.fields) return Surf_interp diff --git a/src/transformation.jl b/src/transformation.jl index 898ead2c..7a837045 100644 --- a/src/transformation.jl +++ b/src/transformation.jl @@ -5,7 +5,6 @@ using StaticArrays export project_CartData, project_FEData_CartData - """ d_cart = project_CartData(d_cart::CartData, d::GeoData, p::ProjectionPoint) @@ -17,30 +16,30 @@ Projects all datafields from the GeoData struct `d` to the CartData struct `d_ca """ function project_CartData(d_cart::CartData, d::GeoData, p::ProjectionPoint) - Data_UTM = convert2UTMzone(d_cart, p) - Data_lonlat = convert(GeoData,Data_UTM) - + Data_UTM = convert2UTMzone(d_cart, p) + Data_lonlat = convert(GeoData, Data_UTM) + # Check whether the data sets have the same sign. If not, we may have to shift one by 360 degrees min_lon_cart, max_lon_cart = minimum(Data_lonlat.lon.val), maximum(Data_lonlat.lon.val) min_lon, max_lon = minimum(d.lon.val), maximum(d.lon.val) - - if (sign(min_lon)!=sign(min_lon_cart)) && (sign(max_lon)!=sign(max_lon_cart)) - # the longitude data has a different sign. This can happen if one of them is "West" (and has negative values), whereas the other has - if (min_lon_cart<0) + + if (sign(min_lon) != sign(min_lon_cart)) && (sign(max_lon) != sign(max_lon_cart)) + # the longitude data has a different sign. 
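The sign check in `project_CartData` above guards against mixing longitude conventions (−180°…180° versus 0°…360°) before interpolating; the detection and the +360° shift amount to the following sketch:

```julia
# Detect mismatched longitude conventions and shift the negative one by +360°,
# mirroring the check in `project_CartData` above
lon_a = [-74.0, -73.0]        # −180°…180° convention
lon_b = [286.0, 287.0]        # 0°…360° convention

if sign(minimum(lon_a)) != sign(minimum(lon_b)) && sign(maximum(lon_a)) != sign(maximum(lon_b))
    lon_a = lon_a .+ 360      # now both are in 0°…360°: [286.0, 287.0]
end
```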
This can happen if one of them is "West" (and has negative values), whereas the other has + if (min_lon_cart < 0) Data_lonlat = GeoData(Data_lonlat.lon.val .+ 360, Data_lonlat.lat.val, ustrip.(Data_lonlat.depth.val), Data_lonlat.fields) end end - - if size(Data_lonlat.lon.val,3)==1 - z_new, fields_new = interpolate_datafields_2D(d,Data_lonlat.lon.val, Data_lonlat.lat.val) - - # Create new struct - d_cart = CartData(d_cart.x.val,d_cart.y.val,z_new,fields_new) - + + if size(Data_lonlat.lon.val, 3) == 1 + z_new, fields_new = interpolate_datafields_2D(d, Data_lonlat.lon.val, Data_lonlat.lat.val) + + # Create new struct + d_cart = CartData(d_cart.x.val, d_cart.y.val, z_new, fields_new) + else - d_data = interpolate_datafields(d, Data_lonlat.lon.val, Data_lonlat.lat.val, Data_lonlat.depth.val) - d_cart = CartData(d_cart.x.val,d_cart.y.val,d_cart.z.val,d_data.fields) - + d_data = interpolate_datafields(d, Data_lonlat.lon.val, Data_lonlat.lat.val, Data_lonlat.depth.val) + d_cart = CartData(d_cart.x.val, d_cart.y.val, d_cart.z.val, d_data.fields) + end return d_cart @@ -57,17 +56,17 @@ Projects all datafields from the GeoData struct `d` to the CartData struct `d_ca """ function project_CartData(d_cart::CartData, d_cart_data0::CartData) - - if size(d_cart_data0.x.val,3)==1 - z_new, fields_new = interpolate_datafields_2D(d,d_cart_data0.x.val, d_cart_data0.y.val) - - # Create new struct - d_cart = CartData(d_cart.x.val,d_cart.y.val,z_new,fields_new) - + + if size(d_cart_data0.x.val, 3) == 1 + z_new, fields_new = interpolate_datafields_2D(d, d_cart_data0.x.val, d_cart_data0.y.val) + + # Create new struct + d_cart = CartData(d_cart.x.val, d_cart.y.val, z_new, fields_new) + else - d_data = interpolate_datafields(d, d_cart_data0.x.val, d_cart_data0.y.val, d_cart_data0.z.val) - d_cart = CartData(d_cart.x.val,d_cart.y.val,d_cart.z.val,d_data.fields) - + d_data = interpolate_datafields(d, d_cart_data0.x.val, d_cart_data0.y.val, d_cart_data0.z.val) + d_cart = CartData(d_cart.x.val, d_cart.y.val, d_cart.z.val, d_data.fields) + end return d_cart @@ -86,63 +85,62 @@ Projects all datafields from the UTMData struct `d` to the CartData struct `d_ca """ function project_CartData(d_cart::CartData, d::UTMData, p::ProjectionPoint) - Data_UTM = convert2UTMzone(d_cart, p) - - if size(Data_UTM.EW.val,3)==1 - z_new, fields_new = interpolate_datafields_2D(d,Data_UTM.EW.val, Data_UTM.NS.val) - - # Create new struct - d_cart = CartData(d_cart.x.val,d_cart.y.val,z_new,fields_new) - + Data_UTM = convert2UTMzone(d_cart, p) + + if size(Data_UTM.EW.val, 3) == 1 + z_new, fields_new = interpolate_datafields_2D(d, Data_UTM.EW.val, Data_UTM.NS.val) + + # Create new struct + d_cart = CartData(d_cart.x.val, d_cart.y.val, z_new, fields_new) + else - d_data = interpolate_datafields(d, Data_UTM.EW.val, Data_UTM.NS.val, Data_UTM.depth.val) - d_cart = CartData(d_cart.x.val,d_cart.y.val,d_cart.z.val,d_data.fields) - + d_data = interpolate_datafields(d, Data_UTM.EW.val, Data_UTM.NS.val, Data_UTM.depth.val) + d_cart = CartData(d_cart.x.val, d_cart.y.val, d_cart.z.val, d_data.fields) + end return d_cart end - """ inside = point_in_tetrahedron(p::_T, a::_T, b::_T, c::_T, d::_T, tol=1e-10) Determines if a point `p` is inside a tetrahedron specified by `a`,`b`,`c`,`d` or not """ -function point_in_tetrahedron(p::_T, a::_T, b::_T, c::_T, d::_T, tol=1e-10) where _T<:Vector{Float64} +function point_in_tetrahedron(p::_T, a::_T, b::_T, c::_T, d::_T, tol = 1.0e-10) where {_T <: Vector{Float64}} # check bounding box - xmin = min(a[1],b[1],c[1],d[1]) - 
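Once defined, `point_in_tetrahedron` can be sanity-checked on the unit tetrahedron; a brief sketch, assuming the function is in scope (it is internal to `transformation.jl` and not exported):

```julia
a = [0.0, 0.0, 0.0]; b = [1.0, 0.0, 0.0]
c = [0.0, 1.0, 0.0]; d = [0.0, 0.0, 1.0]

point_in_tetrahedron([0.1, 0.1, 0.1], a, b, c, d)   # true:  inside the unit tetrahedron
point_in_tetrahedron([1.0, 1.0, 1.0], a, b, c, d)   # false: passes the bounding box, fails the barycentric test
```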
xmax = max(a[1],b[1],c[1],d[1]) - ymin = min(a[2],b[2],c[2],d[2]) - ymax = max(a[2],b[2],c[2],d[2]) - zmin = min(a[3],b[3],c[3],d[3]) - zmax = max(a[3],b[3],c[3],d[3]) - + xmin = min(a[1], b[1], c[1], d[1]) + xmax = max(a[1], b[1], c[1], d[1]) + ymin = min(a[2], b[2], c[2], d[2]) + ymax = max(a[2], b[2], c[2], d[2]) + zmin = min(a[3], b[3], c[3], d[3]) + zmax = max(a[3], b[3], c[3], d[3]) + inside = true - if p[1] < xmin || p[1] > xmax + if p[1] < xmin || p[1] > xmax inside = false end - if (p[2] < ymin || p[2] > ymax) && inside + if (p[2] < ymin || p[2] > ymax) && inside inside = false end - if (p[3] < zmin || p[3] > zmax) && inside + if (p[3] < zmin || p[3] > zmax) && inside inside = false end - + if inside v0 = @SVector [d[i] - a[i] for i in 1:3] v1 = @SVector [b[i] - a[i] for i in 1:3] v2 = @SVector [c[i] - a[i] for i in 1:3] v3 = @SVector [p[i] - a[i] for i in 1:3] - + denom = dot(v0, cross(v1, v2)) - + u = dot(v3, cross(v1, v2)) / denom v = dot(v0, cross(v3, v2)) / denom w = dot(v0, cross(v1, v3)) / denom - - inside = (u >= -tol) && (v >= -tol) && (w >= -tol) && (u + v + w <= 1 + tol) + + inside = (u >= -tol) && (v >= -tol) && (w >= -tol) && (u + v + w <= 1 + tol) end return inside @@ -157,40 +155,39 @@ function project_FEData_CartData(data_cart::CartData, data_fe::FEData) cellfields_regions = data_fe.cellfields.regions regions = zeros(Int64, size(data_cart.x.val)) - - for i = 1:size(data_fe.connectivity,2) # loop over tetrahedrons - tetra = data_fe.connectivity[:,i] - - a = data_fe.vertices[:,tetra[1]] - b = data_fe.vertices[:,tetra[2]] - c = data_fe.vertices[:,tetra[3]] - d = data_fe.vertices[:,tetra[4]] - - xmin = min(a[1],b[1],c[1],d[1]) - xmax = max(a[1],b[1],c[1],d[1]) - ymin = min(a[2],b[2],c[2],d[2]) - ymax = max(a[2],b[2],c[2],d[2]) - zmin = min(a[3],b[3],c[3],d[3]) - zmax = max(a[3],b[3],c[3],d[3]) - - ind = findall( data_cart.x.val .>= xmin .&& data_cart.x.val .<= xmax .&& - data_cart.y.val .>= ymin .&& data_cart.y.val .<= ymax .&& - data_cart.z.val .>= zmin .&& data_cart.z.val .<= zmax); + + for i in 1:size(data_fe.connectivity, 2) # loop over tetrahedrons + tetra = data_fe.connectivity[:, i] + + a = data_fe.vertices[:, tetra[1]] + b = data_fe.vertices[:, tetra[2]] + c = data_fe.vertices[:, tetra[3]] + d = data_fe.vertices[:, tetra[4]] + + xmin = min(a[1], b[1], c[1], d[1]) + xmax = max(a[1], b[1], c[1], d[1]) + ymin = min(a[2], b[2], c[2], d[2]) + ymax = max(a[2], b[2], c[2], d[2]) + zmin = min(a[3], b[3], c[3], d[3]) + zmax = max(a[3], b[3], c[3], d[3]) + + ind = findall( + data_cart.x.val .>= xmin .&& data_cart.x.val .<= xmax .&& + data_cart.y.val .>= ymin .&& data_cart.y.val .<= ymax .&& + data_cart.z.val .>= zmin .&& data_cart.z.val .<= zmax + ) for I in ind x = data_cart.x.val[I] y = data_cart.y.val[I] z = data_cart.z.val[I] - p = [x,y,z] - if point_in_tetrahedron(p,a,b,c,d) + p = [x, y, z] + if point_in_tetrahedron(p, a, b, c, d) regions[I] = cellfields_regions[i] end end end - return addfield(data_cart, (regions=regions,)) + return addfield(data_cart, (regions = regions,)) end - - - diff --git a/src/utils.jl b/src/utils.jl index 3684071f..96529b56 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -17,8 +17,10 @@ using NearestNeighbors Computes an (x,y,z)-grid from the vectors (vx,vy,vz). For more information, see the MATLAB documentation. 
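The shape convention of `meshgrid` above follows MATLAB: the first output varies along the second array dimension. A tiny example of the resulting sizes, assuming `meshgrid` as defined here is in scope:

```julia
X, Y, Z = meshgrid(1:2, 1:3, 1:4)

size(X)        # (3, 2, 4): y varies along dim 1, x along dim 2, z along dim 3
X[1, :, 1]     # [1, 2]
Y[:, 1, 1]     # [1, 2, 3]
Z[1, 1, :]     # [1, 2, 3, 4]
```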
""" -function meshgrid(vx::AbstractVector{T}, vy::AbstractVector{T}, - vz::AbstractVector{T}) where {T} +function meshgrid( + vx::AbstractVector{T}, vy::AbstractVector{T}, + vz::AbstractVector{T} + ) where {T} m, n, o = length(vy), length(vx), length(vz) vx = reshape(vx, 1, n, 1) vy = reshape(vy, m, 1, 1) @@ -26,7 +28,7 @@ function meshgrid(vx::AbstractVector{T}, vy::AbstractVector{T}, om = ones(Int, m) on = ones(Int, n) oo = ones(Int, o) - (vx[om, :, oo], vy[:, on, oo], vz[om, on, :]) + return (vx[om, :, oo], vy[:, on, oo], vz[om, on, :]) end """ @@ -35,14 +37,14 @@ end Add Fields Data to GeoData or CartData """ -function addfield(V::AbstractGeneralGrid,field_name::String,data::Any) - fields_new = V.fields; new_field = NamedTuple{(Symbol(field_name),)}((data,)); - fields_new = merge(fields_new, new_field); # replace the field in fields_new +function addfield(V::AbstractGeneralGrid, field_name::String, data::Any) + fields_new = V.fields; new_field = NamedTuple{(Symbol(field_name),)}((data,)) + fields_new = merge(fields_new, new_field) # replace the field in fields_new - if isa(V,GeoData) - V = GeoData(V.lon.val,V.lat.val,V.depth.val,fields_new) - elseif isa(V,CartData) - V = CartData(V.x.val,V.y.val,V.z.val,fields_new) + if isa(V, GeoData) + V = GeoData(V.lon.val, V.lat.val, V.depth.val, fields_new) + elseif isa(V, CartData) + V = CartData(V.x.val, V.y.val, V.z.val, fields_new) else error("addfield is only implemented for GeoData and CartData structures") end @@ -55,14 +57,14 @@ end Add `new_fields` fields to a `CartData` dataset """ -addfield(V::CartData,new_fields::NamedTuple) = CartData(V.x.val, V.y.val, V.z.val, merge(V.fields, new_fields)) +addfield(V::CartData, new_fields::NamedTuple) = CartData(V.x.val, V.y.val, V.z.val, merge(V.fields, new_fields)) """ V = addfield(V::GeoData,new_fields::NamedTuple) Add `new_fields` fields to a `GeoData` dataset """ -addfield(V::GeoData,new_fields::NamedTuple) = GeoData(V.lon.val, V.lat.val, V.depth.val, merge(V.fields, new_fields)) +addfield(V::GeoData, new_fields::NamedTuple) = GeoData(V.lon.val, V.lat.val, V.depth.val, merge(V.fields, new_fields)) """ @@ -70,7 +72,7 @@ addfield(V::GeoData,new_fields::NamedTuple) = GeoData(V.lon.val, V.lat.val, V.de Add `new_fields` fields to a `Q1Data` dataset; set `cellfield` to `true` if the field is a cell field; otherwise it is a vertex field """ -function addfield(V::Q1Data,new_fields::NamedTuple; cellfield=false) +function addfield(V::Q1Data, new_fields::NamedTuple; cellfield = false) if cellfield return Q1Data(V.x.val, V.y.val, V.z.val, V.fields, merge(V.cellfields, new_fields)) else @@ -84,7 +86,7 @@ end Add `new_fields` fields to a `FEData` dataset; set `cellfield` to `true` if the field is a cell field; otherwise it is a vertex field """ -function addfield(V::FEData,new_fields::NamedTuple; cellfield=false) +function addfield(V::FEData, new_fields::NamedTuple; cellfield = false) if cellfield return FEData(V.vertices, V.connectivity, V.fields, merge(V.cellfields, new_fields)) else @@ -96,7 +98,7 @@ end # this function is taken from @JeffreySarnoff function dropnames(namedtuple::NamedTuple, names::Tuple{Vararg{Symbol}}) keepnames = Base.diff_names(Base._nt_names(namedtuple), names) - return NamedTuple{keepnames}(namedtuple) + return NamedTuple{keepnames}(namedtuple) end """ @@ -105,14 +107,14 @@ end Removes the field with name `field_name` from the GeoData or CartData dataset """ -function removefield(V::AbstractGeneralGrid,field_name::Symbol) - fields_new = V.fields; - fields_new = dropnames(fields_new, 
(field_name,)) +function removefield(V::AbstractGeneralGrid, field_name::Symbol) + fields_new = V.fields + fields_new = dropnames(fields_new, (field_name,)) - if isa(V,GeoData) - V = GeoData(V.lon.val,V.lat.val,V.depth.val,fields_new) - elseif isa(V,CartData) - V = CartData(V.x.val,V.y.val,V.z.val,fields_new) + if isa(V, GeoData) + V = GeoData(V.lon.val, V.lat.val, V.depth.val, fields_new) + elseif isa(V, CartData) + V = CartData(V.x.val, V.y.val, V.z.val, fields_new) else error("removefield is only implemented for GeoData and CartData structures") end @@ -126,8 +128,8 @@ end Removes the field with name `field_name` from the GeoData or CartData dataset """ -function removefield(V::AbstractGeneralGrid,field_name::String) - return removefield(V,Symbol(field_name)) +function removefield(V::AbstractGeneralGrid, field_name::String) + return removefield(V, Symbol(field_name)) end """ @@ -136,10 +138,10 @@ end Removes the fields in the tuple `field_name` from the GeoData or CartData dataset """ -function removefield(V::AbstractGeneralGrid,field_name::NTuple{N,Symbol}) where N - - for ifield=1:N - V = removefield(V,field_name[ifield]) +function removefield(V::AbstractGeneralGrid, field_name::NTuple{N, Symbol}) where {N} + + for ifield in 1:N + V = removefield(V, field_name[ifield]) end return V @@ -175,56 +177,62 @@ GeoData """ -function cross_section_volume(V::AbstractGeneralGrid; dims=(100,100), Interpolate=false, Depth_level=nothing, Lat_level=nothing, Lon_level=nothing, Start=nothing, End=nothing, Depth_extent=nothing ) +function cross_section_volume(V::AbstractGeneralGrid; dims = (100, 100), Interpolate = false, Depth_level = nothing, Lat_level = nothing, Lon_level = nothing, Start = nothing, End = nothing, Depth_extent = nothing) - DataSetType = check_data_set(V); + DataSetType = check_data_set(V) if DataSetType != 3 error("cross_section_volume: the input data set has to be a volume!") end # extract the coordinates - X,Y,Z = coordinate_grids(V) + X, Y, Z = coordinate_grids(V) if !isnothing(Depth_level) # Horizontal slice CheckBounds(Z, Depth_level) if Interpolate - Lon,Lat,Depth = lonlatdepth_grid( LinRange(minimum(X), maximum(X), dims[1]), - LinRange(minimum(Y), maximum(Y), dims[2]), - Depth_level) + Lon, Lat, Depth = lonlatdepth_grid( + LinRange(minimum(X), maximum(X), dims[1]), + LinRange(minimum(Y), maximum(Y), dims[2]), + Depth_level + ) else - ind_z = argmin(abs.(NumValue(Z[1,1,:]) .- ustrip(Depth_level))) - iDepth = ind_z:ind_z; - iLon = 1:size(NumValue(X),1); - iLat = 1:size(NumValue(Y),2); + ind_z = argmin(abs.(NumValue(Z[1, 1, :]) .- ustrip(Depth_level))) + iDepth = ind_z:ind_z + iLon = 1:size(NumValue(X), 1) + iLat = 1:size(NumValue(Y), 2) end end if !isnothing(Lat_level) # vertical slice @ given latitude CheckBounds(Y, Lat_level) if Interpolate - Lon,Lat,Depth = lonlatdepth_grid( LinRange(minimum(X), maximum(X), dims[1]), - Lat_level, - LinRange(minimum(Z), maximum(Z), dims[2])) + Lon, Lat, Depth = lonlatdepth_grid( + LinRange(minimum(X), maximum(X), dims[1]), + Lat_level, + LinRange(minimum(Z), maximum(Z), dims[2]) + ) else - ind_l = argmin(abs.(Y[1,:,1] .- Lat_level)) - iDepth = 1:size(Z,3) - iLon = 1:size(X,1); - iLat = ind_l:ind_l + ind_l = argmin(abs.(Y[1, :, 1] .- Lat_level)) + iDepth = 1:size(Z, 3) + iLon = 1:size(X, 1) + iLat = ind_l:ind_l end end if !isnothing(Lon_level) # vertical slice @ given longitude CheckBounds(X, Lon_level) if Interpolate - Lon,Lat,Depth = lonlatdepth_grid( Lon_level, - LinRange(minimum(Y), maximum(Y), dims[1]), - LinRange(minimum(Z), maximum(Z), 
dims[2])) + Lon, Lat, Depth = lonlatdepth_grid( + Lon_level, + LinRange(minimum(Y), maximum(Y), dims[1]), + LinRange(minimum(Z), maximum(Z), dims[2]) + ) else - ind_l = argmin(abs.(X[:,1,1] .- Lon_level)) - iDepth = 1:size(Z,3) - iLat = 1:size(Y,2); - iLon = ind_l:ind_l + ind_l = argmin(abs.(X[:, 1, 1] .- Lon_level)) + iDepth = 1:size(Z, 3) + iLat = 1:size(Y, 2) + iLon = ind_l:ind_l end end @@ -233,7 +241,7 @@ function cross_section_volume(V::AbstractGeneralGrid; dims=(100,100), Interpolat if isnothing(End) error("Also define End coordinates if you indicate starting lon/lat value") end - Interpolate = true; # we must interpolate in this case + Interpolate = true # we must interpolate in this case # if the depth extent is given, modify the Z values to take this into account if !isnothing(Depth_extent) @@ -242,34 +250,38 @@ function cross_section_volume(V::AbstractGeneralGrid; dims=(100,100), Interpolat end end if !isnothing(Depth_extent) - Z = [Depth_extent[1] Depth_extent[2]]; + Z = [Depth_extent[1] Depth_extent[2]] end - Lon_dum,Lat_p,Depth_p = lonlatdepth_grid( Start[1], - LinRange(Start[2], End[2], dims[1]), - LinRange(minimum(Z), maximum(Z), dims[2])) + Lon_dum, Lat_p, Depth_p = lonlatdepth_grid( + Start[1], + LinRange(Start[2], End[2], dims[1]), + LinRange(minimum(Z), maximum(Z), dims[2]) + ) - Lon_p,Lat_dum,Depth = lonlatdepth_grid( LinRange(Start[1], End[1], dims[1]), - Start[2], - LinRange(minimum(Z), maximum(Z), dims[2])) + Lon_p, Lat_dum, Depth = lonlatdepth_grid( + LinRange(Start[1], End[1], dims[1]), + Start[2], + LinRange(minimum(Z), maximum(Z), dims[2]) + ) - Lon = zeros(dims[1],dims[2],1) - Lat = zeros(dims[1],dims[2],1) - Depth = zeros(dims[1],dims[2],1)*Depth_p[1] + Lon = zeros(dims[1], dims[2], 1) + Lat = zeros(dims[1], dims[2], 1) + Depth = zeros(dims[1], dims[2], 1) * Depth_p[1] # We need 3D matrixes for the paraview writing routine to know we are in 3D - Lon[:,:,1] = Lon_p[:,1,:] - Lat[:,:,1] = Lat_p[1,:,:] - Depth[:,:,1] = Depth_p[1,:,:] + Lon[:, :, 1] = Lon_p[:, 1, :] + Lat[:, :, 1] = Lat_p[1, :, :] + Depth[:, :, 1] = Depth_p[1, :, :] end if Interpolate # Interpolate data on profile - DataProfile = interpolate_datafields(V, Lon, Lat, NumValue(Depth)); + DataProfile = interpolate_datafields(V, Lon, Lat, NumValue(Depth)) else # extract data (no interpolation) - DataProfile = ExtractDataSets(V, iLon, iLat, iDepth); + DataProfile = ExtractDataSets(V, iLon, iLat, iDepth) end return DataProfile @@ -304,17 +316,17 @@ GeoData ``` """ -function cross_section_surface(S::AbstractGeneralGrid; dims=(100,), Interpolate=true, Depth_level=nothing, Lat_level=nothing, Lon_level=nothing, Start=nothing, End=nothing ) +function cross_section_surface(S::AbstractGeneralGrid; dims = (100,), Interpolate = true, Depth_level = nothing, Lat_level = nothing, Lon_level = nothing, Start = nothing, End = nothing) - DataSetType = check_data_set(S); + DataSetType = check_data_set(S) if DataSetType != 2 error("cross_section_surface: the input data set has to be a surface!") end - X,Y,Z = coordinate_grids(S) + X, Y, Z = coordinate_grids(S) - Lon_vec = X[:,1,1] - Lat_vec = Y[1,:,1] + Lon_vec = X[:, 1, 1] + Lat_vec = Y[1, :, 1] if !isnothing(Depth_level) # not working yet, as this requires the intersection of two interfaces error(" horizontal cross sections not working yet with surface data!") @@ -323,13 +335,13 @@ function cross_section_surface(S::AbstractGeneralGrid; dims=(100,), Interpolate= if !isnothing(Lat_level) # vertical slice @ given latitude # create a vector that spans the entire dataset @ a 
given latitutde Lon = LinRange(minimum(Lon_vec), maximum(Lon_vec), dims[1]) - Lat = ones(size(Lon))*Lat_level; + Lat = ones(size(Lon)) * Lat_level end if !isnothing(Lon_level) # vertical slice @ given longitude # create a vector that spans the entire dataset @ a given longitude Lat = LinRange(minimum(Lat_vec), maximum(Lat_vec), dims[1]) - Lon = ones(size(Lat))*Lon_level + Lon = ones(size(Lat)) * Lon_level end # diagonal profile defined by start and end lon/lat points @@ -339,47 +351,47 @@ function cross_section_surface(S::AbstractGeneralGrid; dims=(100,), Interpolate= end Lon = LinRange(Start[1], End[1], dims[1]) - Lat = LinRange(Start[2], End[2], dims[1]); + Lat = LinRange(Start[2], End[2], dims[1]) end # now interpolate the depth information of the surface to the profile in question - interpol = linear_interpolation((Lon_vec, Lat_vec), Z[:,:,1],extrapolation_bc=NaN); # create interpolation object, fill with NaNs if outside - depth_intp = interpol.(Lon, Lat)*km + interpol = linear_interpolation((Lon_vec, Lat_vec), Z[:, :, 1], extrapolation_bc = NaN) # create interpolation object, fill with NaNs if outside + depth_intp = interpol.(Lon, Lat) * km # also interpolate any other data that is stored in the GeoData structure on the profile - fields_new = S.fields; - field_names = keys(fields_new); - for i = 1:length(S.fields) + fields_new = S.fields + field_names = keys(fields_new) + for i in 1:length(S.fields) if typeof(S.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = fields_new[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(size(Lon,1),size(Lon,2),length(data_tuple)); # create a 2D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array = zeros(size(Lon, 1), size(Lon, 2), length(data_tuple)) # create a 2D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) - interpol = linear_interpolation((Lon_vec, Lat_vec), dropdims(ustrip.(data_tuple[j]),dims=3),extrapolation_bc = NaN); # create interpolation object - data_array[:,:,j] = interpol.(Lon, Lat); + for j in 1:length(data_tuple) + interpol = linear_interpolation((Lon_vec, Lat_vec), dropdims(ustrip.(data_tuple[j]), dims = 3), extrapolation_bc = NaN) # create interpolation object + data_array[:, :, j] = interpol.(Lon, Lat) end - data_new = tuple([data_array[:,:,c] for c in 1:size(data_array,3)]...) # transform 3D matrix to tuple, do not add unit, as this creates an error in GMG (Issue), to add the unit: *unit(S.fields[i][1][1]) + data_new = tuple([data_array[:, :, c] for c in 1:size(data_array, 3)]...) 
# transform 3D matrix to tuple, do not add unit, as this creates an error in GMG (Issue), to add the unit: *unit(S.fields[i][1][1]) else # scalar field - interpol = linear_interpolation((Lon_vec, Lat_vec), dropdims(ustrip.(S.fields[i]),dims=3), extrapolation_bc = NaN); - data_new = interpol.(Lon, Lat)*unit(S.fields[i][1]); # interpolate data field + interpol = linear_interpolation((Lon_vec, Lat_vec), dropdims(ustrip.(S.fields[i]), dims = 3), extrapolation_bc = NaN) + data_new = interpol.(Lon, Lat) * unit(S.fields[i][1]) # interpolate data field end # replace the field - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name and unit - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name and unit + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # create GeoData/CartData structure with the interpolated points - if isa(S,GeoData) - Data_profile = GeoData(Lon, Lat, depth_intp, fields_new); - elseif isa(S,CartData) - Data_profile = CartData(Lon, Lat, depth_intp, fields_new); + if isa(S, GeoData) + Data_profile = GeoData(Lon, Lat, depth_intp, fields_new) + elseif isa(S, CartData) + Data_profile = CartData(Lon, Lat, depth_intp, fields_new) else error("still to be implemented") end @@ -393,56 +405,56 @@ end Creates a projection of separate points (saved as a GeoData object) onto a chosen plane. Only points with a maximum distance of section_width are taken into account """ -function cross_section_points(P::GeoData; Depth_level=nothing, Lat_level=nothing, Lon_level=nothing, Start=nothing, End=nothing, section_width = 10km) +function cross_section_points(P::GeoData; Depth_level = nothing, Lat_level = nothing, Lon_level = nothing, Start = nothing, End = nothing, section_width = 10km) - DataSetType = check_data_set(P); + DataSetType = check_data_set(P) if DataSetType != 1 error("cross_section_points: the input data set has to be a pointwise data set!") end if !isnothing(Depth_level) - ind = findall(-0.5*ustrip(section_width) .< (NumValue(P.depth) .- ustrip(Depth_level)) .< 0.5*ustrip(section_width)) # find all points around the desired depth level, both units should be in km, so no unit transformation required + ind = findall(-0.5 * ustrip(section_width) .< (NumValue(P.depth) .- ustrip(Depth_level)) .< 0.5 * ustrip(section_width)) # find all points around the desired depth level, both units should be in km, so no unit transformation required # create temporary variables - lon_tmp = NumValue(P.lon.val[ind]) - lat_tmp = NumValue(P.lat.val[ind]) - depth_tmp = NumValue(P.depth.val[ind]) - depth_proj = ones(size(depth_tmp))*Depth_level + lon_tmp = NumValue(P.lon.val[ind]) + lat_tmp = NumValue(P.lat.val[ind]) + depth_tmp = NumValue(P.depth.val[ind]) + depth_proj = ones(size(depth_tmp)) * Depth_level # create fields that will be stored additionally on the GeoData structure - field_tmp = (depth_proj=depth_proj,lat_proj=lat_tmp,lon_proj=lon_tmp) # these are the projected points + field_tmp = (depth_proj = depth_proj, lat_proj = lat_tmp, lon_proj = lon_tmp) # these are the projected points end if !isnothing(Lat_level) # vertical slice @ given latitude - p_Point = ProjectionPoint(Lat=Lat_level,Lon=sum(P.lon.val)/length(P.lon.val)) # define the projection point (lat/lon) as the latitude and the mean of the longitudes of the data - P_UTM = convert2UTMzone(P, p_Point) # convert to UTM - ind = 
findall(-0.5*ustrip(uconvert(u"m",section_width)) .< (P_UTM.NS.val .- p_Point.NS) .< 0.5*ustrip(uconvert(u"m",section_width))) # find all points around the desired latitude level, UTM is in m, so we have to convert the section width + p_Point = ProjectionPoint(Lat = Lat_level, Lon = sum(P.lon.val) / length(P.lon.val)) # define the projection point (lat/lon) as the latitude and the mean of the longitudes of the data + P_UTM = convert2UTMzone(P, p_Point) # convert to UTM + ind = findall(-0.5 * ustrip(uconvert(u"m", section_width)) .< (P_UTM.NS.val .- p_Point.NS) .< 0.5 * ustrip(uconvert(u"m", section_width))) # find all points around the desired latitude level, UTM is in m, so we have to convert the section width # create temporary variables - lon_tmp = NumValue(P.lon.val[ind]) - lat_tmp = NumValue(P.lat.val[ind]) - depth_tmp = NumValue(P.depth.val[ind]) - lat_proj = ones(size(depth_tmp))*Lat_level + lon_tmp = NumValue(P.lon.val[ind]) + lat_tmp = NumValue(P.lat.val[ind]) + depth_tmp = NumValue(P.depth.val[ind]) + lat_proj = ones(size(depth_tmp)) * Lat_level # data to be stored on the new GeoData structure - field_tmp = (depth_proj=depth_tmp,lat_proj=lat_proj,lon_proj=lon_tmp) # these are the projected points + field_tmp = (depth_proj = depth_tmp, lat_proj = lat_proj, lon_proj = lon_tmp) # these are the projected points end if !isnothing(Lon_level) # vertical slice @ given longitude - p_Point = ProjectionPoint(Lat=sum(P.lat.val)/length(P.lat.val),Lon=Lon_level) # define the projection point (lat/lon) as the latitude and the mean of the longitudes of the data - P_UTM = convert2UTMzone(P,p_Point) # convert to UTM - ind = findall(-0.5*ustrip(uconvert(u"m",section_width)) .< (P_UTM.EW.val .- p_Point.EW) .< 0.5*ustrip(uconvert(u"m",section_width))) # find all points around the desired longitude level, UTM is in m, so we have to convert the section width + p_Point = ProjectionPoint(Lat = sum(P.lat.val) / length(P.lat.val), Lon = Lon_level) # define the projection point (lat/lon) as the latitude and the mean of the longitudes of the data + P_UTM = convert2UTMzone(P, p_Point) # convert to UTM + ind = findall(-0.5 * ustrip(uconvert(u"m", section_width)) .< (P_UTM.EW.val .- p_Point.EW) .< 0.5 * ustrip(uconvert(u"m", section_width))) # find all points around the desired longitude level, UTM is in m, so we have to convert the section width # create temporary variables - lon_tmp = NumValue(P.lon.val[ind]) - lat_tmp = NumValue(P.lat.val[ind]) - depth_tmp = NumValue(P.depth.val[ind]) - lon_proj = ones(size(depth_tmp))*Lon_level + lon_tmp = NumValue(P.lon.val[ind]) + lat_tmp = NumValue(P.lat.val[ind]) + depth_tmp = NumValue(P.depth.val[ind]) + lon_proj = ones(size(depth_tmp)) * Lon_level # create fields that will be stored on the GeoData structure - field_tmp = (depth_proj=depth_tmp,lat_proj=lat_tmp,lon_proj=lon_proj) # these are the projected points + field_tmp = (depth_proj = depth_tmp, lat_proj = lat_tmp, lon_proj = lon_proj) # these are the projected points end @@ -455,102 +467,101 @@ function cross_section_points(P::GeoData; Depth_level=nothing, Lat_level=nothing end # choose projection point based on Start and End coordinates of the profile - p_Point = ProjectionPoint(Lat=0.5*(Start[2]+End[2]),Lon=0.5*(Start[1]+End[1])) + p_Point = ProjectionPoint(Lat = 0.5 * (Start[2] + End[2]), Lon = 0.5 * (Start[1] + End[1])) # convert P to UTM Data P_UTM = convert2UTMzone(P, p_Point) # convert to UTM # create a GeoData set containing the points that create the profile plane (we need three points to uniquely define 
that plane) # here, we define the points in a way that the angle between P1-P2 and P1-P3 vectors is 90° --> useful for the cross product - Profile = GeoData([Start[1] Start[1] End[1]], [Start[2] Start[2] End[2]], [0 -200 0]*km, (depth = [0 -200 0]*km,)) - Profile_UTM = convert2UTMzone(Profile,p_Point) # convert to UTM + Profile = GeoData([Start[1] Start[1] End[1]], [Start[2] Start[2] End[2]], [0 -200 0] * km, (depth = [0 -200 0] * km,)) + Profile_UTM = convert2UTMzone(Profile, p_Point) # convert to UTM # compute the unit normal of the profile plane using the cross product # ATTENTION: UTM COORDINATES ARE IN M, WHILE DEPTH IS IN KM !!! - a1 = Profile_UTM.EW.val[2]-Profile_UTM.EW.val[1] - a2 = Profile_UTM.NS.val[2]-Profile_UTM.NS.val[1] - a3 = (Profile_UTM.depth.val[2]- Profile_UTM.depth.val[1]) * 1e3 + a1 = Profile_UTM.EW.val[2] - Profile_UTM.EW.val[1] + a2 = Profile_UTM.NS.val[2] - Profile_UTM.NS.val[1] + a3 = (Profile_UTM.depth.val[2] - Profile_UTM.depth.val[1]) * 1.0e3 - b1 = Profile_UTM.EW.val[3]- Profile_UTM.EW.val[1] - b2 = Profile_UTM.NS.val[3]- Profile_UTM.NS.val[1] - b3 = (Profile_UTM.depth.val[3]- Profile_UTM.depth.val[1]) * 1e3 + b1 = Profile_UTM.EW.val[3] - Profile_UTM.EW.val[1] + b2 = Profile_UTM.NS.val[3] - Profile_UTM.NS.val[1] + b3 = (Profile_UTM.depth.val[3] - Profile_UTM.depth.val[1]) * 1.0e3 - nx = a2*b3 - a3*b2 - ny = a3*b1 - a1*b3 - nz = a1*b2 - a2*b1 + nx = a2 * b3 - a3 * b2 + ny = a3 * b1 - a1 * b3 + nz = a1 * b2 - a2 * b1 - t = (nx*Profile_UTM.EW.val[1] .- nx*P_UTM.EW.val .+ ny*Profile_UTM.NS.val[1] .- ny*P_UTM.NS.val .+ nz*Profile_UTM.depth.val[1]*1e3 .- nz*P_UTM.depth.val*1e3)/(nx*nx+ny*ny+nz*nz) + t = (nx * Profile_UTM.EW.val[1] .- nx * P_UTM.EW.val .+ ny * Profile_UTM.NS.val[1] .- ny * P_UTM.NS.val .+ nz * Profile_UTM.depth.val[1] * 1.0e3 .- nz * P_UTM.depth.val * 1.0e3) / (nx * nx + ny * ny + nz * nz) # compute the distance to the plane - dist = sqrt.((t.*nx).^2 + (t.*ny).^2 + (t.*nz).^2) + dist = sqrt.((t .* nx) .^ 2 + (t .* ny) .^ 2 + (t .* nz) .^ 2) # find the points that are within the required window around the profile - ind = findall(-0.5*ustrip(uconvert(u"m",section_width)) .< dist .< 0.5*ustrip(uconvert(u"m",section_width))) # find all points around the profile (distance is treated in m) + ind = findall(-0.5 * ustrip(uconvert(u"m", section_width)) .< dist .< 0.5 * ustrip(uconvert(u"m", section_width))) # find all points around the profile (distance is treated in m) # project the points on the plane (only the relevant ones) - px = P_UTM.EW.val[ind] + t[ind].*nx - py = P_UTM.NS.val[ind] + t[ind].*ny - pz = P_UTM.depth.val[ind]*1e3 + t[ind].*nz # convert depth to m + px = P_UTM.EW.val[ind] + t[ind] .* nx + py = P_UTM.NS.val[ind] + t[ind] .* ny + pz = P_UTM.depth.val[ind] * 1.0e3 + t[ind] .* nz # convert depth to m # the projected points are given in UTM coordinates and not in lon/lat/depth # therefore we have to recompute the lat/lon/depth values of the projected points # then we will return a GeoData structure with all information included trans = LLAfromUTM(p_Point.zone, p_Point.isnorth, wgs84) # set up transformation - plon = zeros(size(ind)); - plat = zeros(size(ind)); - pdepth = zeros(size(ind)); + plon = zeros(size(ind)) + plat = zeros(size(ind)) + pdepth = zeros(size(ind)) for i in eachindex(ind) - utmi = UTM(px[i],py[i],pz[i]) - llai = trans(utmi) + utmi = UTM(px[i], py[i], pz[i]) + llai = trans(utmi) - plon[i] = llai.lon - plat[i] = llai.lat - pdepth[i] = llai.alt + plon[i] = llai.lon + plat[i] = llai.lat + pdepth[i] = llai.alt end # data to be 
stored in the GeoData structure - field_tmp = (depth_proj=pdepth/1e3,lat_proj=plat,lon_proj=plon) # these are the projected points + field_tmp = (depth_proj = pdepth / 1.0e3, lat_proj = plat, lon_proj = plon) # these are the projected points end # also transfer any other data that is stored in the GeoData structure - fields_new = P.fields; - field_names = keys(fields_new); - for i = 1:length(P.fields) + fields_new = P.fields + field_names = keys(fields_new) + for i in 1:length(P.fields) if typeof(P.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = fields_new[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(size(ind,1),length(data_tuple)); # create a 2D array that holds the chosen values + data_array = zeros(size(ind, 1), length(data_tuple)) # create a 2D array that holds the chosen values - for j=1:length(data_tuple) - data_array[:,j] = ustrip.(data_tuple[i][ind]) + for j in 1:length(data_tuple) + data_array[:, j] = ustrip.(data_tuple[i][ind]) end - data_new = tuple([data_array[:,:,c] for c in 1:size(data_array,3)]...) # transform 2D matrix to tuple, do not consider the unit as it creates an error in GMG (Issue), to add the unit: *unit.(P.fields[i][1][1] + data_new = tuple([data_array[:, :, c] for c in 1:size(data_array, 3)]...) # transform 2D matrix to tuple, do not consider the unit as it creates an error in GMG (Issue), to add the unit: *unit.(P.fields[i][1][1] else # scalar field - data_new = fields_new[i][ind]; # interpolate data field + data_new = fields_new[i][ind] # interpolate data field end # replace the field - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name and unit - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name and unit + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # merge old and new fields - fields_new = merge(fields_new,field_tmp); + fields_new = merge(fields_new, field_tmp) # create a GeoData structure to return - if length(ind)>0 - Data_profile = GeoData(P.lon.val[ind],P.lat.val[ind],P.depth.val[ind],(fields_new)) + if length(ind) > 0 + Data_profile = GeoData(P.lon.val[ind], P.lat.val[ind], P.depth.val[ind], (fields_new)) else Data_profile = nothing end - return Data_profile end @@ -585,19 +596,19 @@ GeoData ``` """ -function cross_section(DataSet::AbstractGeneralGrid; dims=(100,100), Interpolate=false, Depth_level=nothing, Lat_level=nothing, Lon_level=nothing, Start=nothing, End=nothing, Depth_extent=nothing, section_width=50km) +function cross_section(DataSet::AbstractGeneralGrid; dims = (100, 100), Interpolate = false, Depth_level = nothing, Lat_level = nothing, Lon_level = nothing, Start = nothing, End = nothing, Depth_extent = nothing, section_width = 50km) - DataSetType = check_data_set(DataSet); # check which kind of data set we are dealing with + DataSetType = check_data_set(DataSet) # check which kind of data set we are dealing with - if DataSetType==1 # points + if DataSetType == 1 # points DataProfile = cross_section_points(DataSet; Depth_level, Lat_level, Lon_level, Start, End, section_width) - elseif DataSetType==2 # surface + elseif DataSetType == 2 # surface DataProfile = cross_section_surface(DataSet; dims, Depth_level, Lat_level, Lon_level, Start, End) - elseif DataSetType==3 # volume + elseif DataSetType == 3 # volume DataProfile = cross_section_volume(DataSet; dims, Interpolate, 
Depth_level, Lat_level, Lon_level, Start, End, Depth_extent) # add field that has coordinates along the profile - DataProfile = addfield(DataProfile,"FlatCrossSection", flatten_cross_section(DataProfile)) + DataProfile = addfield(DataProfile, "FlatCrossSection", flatten_cross_section(DataProfile)) end return DataProfile @@ -631,12 +642,12 @@ CartData """ function flatten_cross_section(V::CartData) - x_new = sqrt.((V.x.val.-V.x.val[1,1,1]).^2 .+ (V.y.val.-V.y.val[1,1,1]).^2) # NOTE: the result is in km, as V.x and V.y are stored in km + x_new = sqrt.((V.x.val .- V.x.val[1, 1, 1]) .^ 2 .+ (V.y.val .- V.y.val[1, 1, 1]) .^ 2) # NOTE: the result is in km, as V.x and V.y are stored in km # Data_Cross_2D = CartData(x_new,V.y.val.*0.0, V.z.val, V.fields) - return x_new + return x_new end @@ -654,17 +665,17 @@ end ``` """ -function flatten_cross_section(V::GeoData; Start=nothing) +function flatten_cross_section(V::GeoData; Start = nothing) if isnothing(Start) - lla_start = LLA(V.lat.val[1][1][1],V.lon.val[1][1][1],0.0) # start point, at the surface + lla_start = LLA(V.lat.val[1][1][1], V.lon.val[1][1][1], 0.0) # start point, at the surface else - lla_start = LLA(Start[2],Start[1],0.0); + lla_start = LLA(Start[2], Start[1], 0.0) end - x_new = zeros(size(V.lon)); + x_new = zeros(size(V.lon)) for i in eachindex(x_new) - x_new[i] = euclidean_distance(LLA(V.lat.val[i],V.lon.val[i],0.0), lla_start) /1e3 # compute distance as if points were at the surface, CONVERTED TO KM !!! + x_new[i] = euclidean_distance(LLA(V.lat.val[i], V.lon.val[i], 0.0), lla_start) / 1.0e3 # compute distance as if points were at the surface, CONVERTED TO KM !!! end return x_new @@ -716,7 +727,7 @@ GeoData ``` """ -function extract_subvolume(V::GeoData; Interpolate=false, Lon_level=nothing, Lat_level=nothing, Depth_level=nothing, dims=(50,50,50)) +function extract_subvolume(V::GeoData; Interpolate = false, Lon_level = nothing, Lat_level = nothing, Depth_level = nothing, dims = (50, 50, 50)) if isnothing(Lon_level) Lon_level = (minimum(V.lon.val), maximum(V.lon.val)) @@ -728,26 +739,28 @@ function extract_subvolume(V::GeoData; Interpolate=false, Lon_level=nothing, Lat Depth_level = (minimum(V.depth.val), maximum(V.depth.val)) end if Interpolate - Lon,Lat,Depth = lonlatdepth_grid( LinRange(Lon_level[1], Lon_level[2], dims[1]), - LinRange(Lat_level[1], Lat_level[2], dims[2]), - LinRange(Depth_level[1], Depth_level[2], dims[3]) ); - Data_extract = interpolate_datafields(V, Lon, Lat, Depth) + Lon, Lat, Depth = lonlatdepth_grid( + LinRange(Lon_level[1], Lon_level[2], dims[1]), + LinRange(Lat_level[1], Lat_level[2], dims[2]), + LinRange(Depth_level[1], Depth_level[2], dims[3]) + ) + Data_extract = interpolate_datafields(V, Lon, Lat, Depth) else # Don't interpolate - i_s, i_e = argmin(abs.(V.lon.val[:,1,1] .- Lon_level[1])), argmin(abs.(V.lon.val[:,1,1] .- Lon_level[2])) - iLon = i_s:i_e; + i_s, i_e = argmin(abs.(V.lon.val[:, 1, 1] .- Lon_level[1])), argmin(abs.(V.lon.val[:, 1, 1] .- Lon_level[2])) + iLon = i_s:i_e - i_s, i_e = argmin(abs.(V.lat.val[1,:,1] .- Lat_level[1])), argmin(abs.(V.lat.val[1,:,1] .- Lat_level[2])) - iLat = i_s:i_e; + i_s, i_e = argmin(abs.(V.lat.val[1, :, 1] .- Lat_level[1])), argmin(abs.(V.lat.val[1, :, 1] .- Lat_level[2])) + iLat = i_s:i_e - i_s, i_e = argmin(abs.(V.depth.val[1,1,:] .- ustrip(Depth_level[1]))), argmin(abs.(V.depth.val[1,1,:] .- ustrip(Depth_level[2]))) - step = 1; - if i_e typeof(Data_extracted.fields.Data_Int) ``` """ -function extract_subvolume(V::CartData; - Interpolate=true, - X_level=nothing, 
- X_cross=nothing, - Y_level=nothing, - Z_level=nothing, - dims=(50,50,50)) +function extract_subvolume( + V::CartData; + Interpolate = true, + X_level = nothing, + X_cross = nothing, + Y_level = nothing, + Z_level = nothing, + dims = (50, 50, 50) + ) if isnothing(X_level) X_level = (minimum(V.x.val), maximum(V.x.val)) @@ -835,60 +850,62 @@ function extract_subvolume(V::CartData; Z_level = (minimum(V.z.val), maximum(V.z.val)) end - if Interpolate==true && size(V.x.val)[3]>1 - X,Y,Z = lonlatdepth_grid( LinRange(X_level[1], X_level[2], dims[1]), - LinRange(Y_level[1], Y_level[2], dims[2]), - LinRange(Z_level[1], Z_level[2], dims[3]) ); + if Interpolate == true && size(V.x.val)[3] > 1 + X, Y, Z = lonlatdepth_grid( + LinRange(X_level[1], X_level[2], dims[1]), + LinRange(Y_level[1], Y_level[2], dims[2]), + LinRange(Z_level[1], Z_level[2], dims[3]) + ) - Data_extract = interpolate_datafields(V, X, Y, Z) + Data_extract = interpolate_datafields(V, X, Y, Z) - elseif size(V.x.val)[3]==1 + elseif size(V.x.val)[3] == 1 # we are dealing with a vertical cross-section through a 3D dataset computed with cross_section(V,Start=.., End=...) - Xcross=V.fields.FlatCrossSection; + Xcross = V.fields.FlatCrossSection if isnothing(X_level) X_level = extrema(Xcross) end - dims_cross=(dims[1],dims[2],1); + dims_cross = (dims[1], dims[2], 1) # we need to interpolate the data onto a new grid given by X_level and Z_level - X_level_cross = X_level; + X_level_cross = X_level - interpol_x = linear_interpolation(Xcross[:,1,1], V.x.val[:,1,1],extrapolation_bc = NaN); # create interpolation object - interpol_y = linear_interpolation(Xcross[:,1,1], V.y.val[:,1,1],extrapolation_bc = NaN); # create interpolation object + interpol_x = linear_interpolation(Xcross[:, 1, 1], V.x.val[:, 1, 1], extrapolation_bc = NaN) # create interpolation object + interpol_y = linear_interpolation(Xcross[:, 1, 1], V.y.val[:, 1, 1], extrapolation_bc = NaN) # create interpolation object - X_level = interpol_x.(X_level_cross) - Y_level = interpol_y.(X_level_cross) + X_level = interpol_x.(X_level_cross) + Y_level = interpol_y.(X_level_cross) x = LinRange(X_level_cross[1], X_level_cross[2], dims_cross[1]) z = LinRange(Z_level[1], Z_level[2], dims_cross[2]) - X,Y,Z = zeros(dims[1],dims[2],1), zeros(dims[1],dims[2],1), zeros(dims[1],dims[2],1) + X, Y, Z = zeros(dims[1], dims[2], 1), zeros(dims[1], dims[2], 1), zeros(dims[1], dims[2], 1) X_cross = zero(X) - for (i,x_val) in enumerate(x), (j,z_val) in enumerate(z) - X[i,j,1] = interpol_x(x_val) - Y[i,j,1] = interpol_y(x_val) - Z[i,j,1] = z_val - X_cross[i,j,1] = x_val + for (i, x_val) in enumerate(x), (j, z_val) in enumerate(z) + X[i, j, 1] = interpol_x(x_val) + Y[i, j, 1] = interpol_y(x_val) + Z[i, j, 1] = z_val + X_cross[i, j, 1] = x_val end - Data_extract = interpolate_data_fields_cross_section(V, X, Y, Z, X_cross); + Data_extract = interpolate_data_fields_cross_section(V, X, Y, Z, X_cross) else # Don't interpolate - i_s, i_e = argmin(abs.(V.x.val[:,1,1] .- X_level[1])), argmin(abs.(V.x.val[:,1,1] .- X_level[2])) - iLon = i_s:i_e; + i_s, i_e = argmin(abs.(V.x.val[:, 1, 1] .- X_level[1])), argmin(abs.(V.x.val[:, 1, 1] .- X_level[2])) + iLon = i_s:i_e - i_s, i_e = argmin(abs.(V.y.val[1,:,1] .- Y_level[1])), argmin(abs.(V.y.val[1,:,1] .- Y_level[2])) - iLat = i_s:i_e; + i_s, i_e = argmin(abs.(V.y.val[1, :, 1] .- Y_level[1])), argmin(abs.(V.y.val[1, :, 1] .- Y_level[2])) + iLat = i_s:i_e - i_s, i_e = argmin(abs.(V.z.val[1,1,:] .- ustrip(Z_level[1]))), argmin(abs.(V.z.val[1,1,:] .- ustrip(Z_level[2]))) - step 
= 1; - if i_emax_Data + min_Data, max_Data = NumValue(minimum(Data.val)), NumValue(maximum(Data.val)) + return if ustrip(Data_Cross) < min_Data || ustrip(Data_Cross) > max_Data error("Outside bounds [$min_Data : $max_Data]; $Data_Cross") end end function CheckBounds(Data::AbstractArray, Data_Cross) - min_Data, max_Data = NumValue(minimum(Data)), NumValue(maximum(Data)); - if ustrip(Data_Cross) < min_Data || ustrip(Data_Cross)>max_Data + min_Data, max_Data = NumValue(minimum(Data)), NumValue(maximum(Data)) + return if ustrip(Data_Cross) < min_Data || ustrip(Data_Cross) > max_Data error("Outside bounds [$min_Data : $max_Data]; $Data_Cross") end end @@ -957,7 +974,7 @@ function check_data_set(DataSet::GeoData) if length(size(DataSet.lon)) == 1 # scattered points return 1 else - if any(size(DataSet.lon).==1) # surface data + if any(size(DataSet.lon) .== 1) # surface data return 2 else # volume data return 3 @@ -969,7 +986,7 @@ function check_data_set(DataSet::CartData) if length(size(DataSet.x)) == 1 # scattered points return 1 else - if any(size(DataSet.x).==1) # surface data + if any(size(DataSet.x) .== 1) # surface data return 2 else # volume data return 3 @@ -1006,63 +1023,63 @@ julia> Data_set2= interpolate_datafields(Data_set1, X,Y,Z) """ function interpolate_datafields(V::AbstractGeneralGrid, Lon, Lat, Depth) - X,Y,Z = coordinate_grids(V) + X, Y, Z = coordinate_grids(V) - Lon_vec = NumValue(X[:,1,1]); - Lat_vec = NumValue(Y[1,:,1]); - Depth_vec = Z[1,1,:]; - if Depth_vec[1]>Depth_vec[end] + Lon_vec = NumValue(X[:, 1, 1]) + Lat_vec = NumValue(Y[1, :, 1]) + Depth_vec = Z[1, 1, :] + if Depth_vec[1] > Depth_vec[end] ReverseData = true else ReverseData = false end - fields_new = V.fields; - field_names = keys(fields_new); - for i = 1:length(V.fields) + fields_new = V.fields + field_names = keys(fields_new) + for i in 1:length(V.fields) if typeof(V.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = fields_new[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(size(Lon,1),size(Lon,2),size(Lon,3),length(data_tuple)); # create a 3D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array = zeros(size(Lon, 1), size(Lon, 2), size(Lon, 3), length(data_tuple)) # create a 3D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) + for j in 1:length(data_tuple) if ReverseData - ndim = length(size(data_tuple[j])) - interpol = linear_interpolation((Lon_vec, Lat_vec, reverse(Depth_vec)), reverse(ustrip.(data_tuple[j]), dims=ndim) ,extrapolation_bc = NaN); # create interpolation object + ndim = length(size(data_tuple[j])) + interpol = linear_interpolation((Lon_vec, Lat_vec, reverse(Depth_vec)), reverse(ustrip.(data_tuple[j]), dims = ndim), extrapolation_bc = NaN) # create interpolation object else - interpol = linear_interpolation((Lon_vec, Lat_vec, Depth_vec), ustrip.(data_tuple[j]),extrapolation_bc = NaN); # create interpolation object + interpol = linear_interpolation((Lon_vec, Lat_vec, Depth_vec), ustrip.(data_tuple[j]), extrapolation_bc = NaN) # create interpolation object end - data_array[:,:,:,j] = interpol.(Lon, Lat, ustrip.(Depth)); + data_array[:, :, :, j] = interpol.(Lon, Lat, ustrip.(Depth)) end - data_new = tuple([data_array[:,:,:,c] for c in 1:size(data_array,4)]...) # transform 3D matrix to tuple + data_new = tuple([data_array[:, :, :, c] for c in 1:size(data_array, 4)]...) 
# transform 3D matrix to tuple else # scalar field if ReverseData - ndim = length(size(V.fields[i])) - interpol = linear_interpolation((Lon_vec, Lat_vec, reverse(Depth_vec)), reverse(V.fields[i], dims=ndim), extrapolation_bc = NaN,); # create interpolation object + ndim = length(size(V.fields[i])) + interpol = linear_interpolation((Lon_vec, Lat_vec, reverse(Depth_vec)), reverse(V.fields[i], dims = ndim), extrapolation_bc = NaN) # create interpolation object else - interpol = linear_interpolation((Lon_vec, Lat_vec, Depth_vec), V.fields[i], extrapolation_bc = NaN); # create interpolation object + interpol = linear_interpolation((Lon_vec, Lat_vec, Depth_vec), V.fields[i], extrapolation_bc = NaN) # create interpolation object end - data_new = interpol.(Lon, Lat, ustrip.(Depth)); # interpolate data field - if isa(V.fields[i][1],Int64) - data_new = round.(Int64,data_new) + data_new = interpol.(Lon, Lat, ustrip.(Depth)) # interpolate data field + if isa(V.fields[i][1], Int64) + data_new = round.(Int64, data_new) end end # replace the one - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # Create a GeoData struct with the newly interpolated fields - if isa(V,GeoData) - Data_profile = GeoData(Lon, Lat, Depth, fields_new); - elseif isa(V,CartData) - Data_profile = CartData(Lon, Lat, Depth, fields_new); + if isa(V, GeoData) + Data_profile = GeoData(Lon, Lat, Depth, fields_new) + elseif isa(V, CartData) + Data_profile = CartData(Lon, Lat, Depth, fields_new) else error("still to be implemented") end @@ -1077,55 +1094,55 @@ Interpolates a data field `V` on a grid defined by `UTM,Depth` """ function interpolate_datafields(V::UTMData, EW, NS, Depth) - EW_vec = V.EW.val[:,1,1]; - NS_vec = V.NS.val[1,:,1]; - Depth_vec = V.depth.val[1,1,:]; - if Depth_vec[1]>Depth_vec[end] + EW_vec = V.EW.val[:, 1, 1] + NS_vec = V.NS.val[1, :, 1] + Depth_vec = V.depth.val[1, 1, :] + if Depth_vec[1] > Depth_vec[end] ReverseData = true else ReverseData = false end - fields_new = V.fields; - field_names = keys(fields_new); - for i = 1:length(V.fields) + fields_new = V.fields + field_names = keys(fields_new) + for i in 1:length(V.fields) if typeof(V.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = fields_new[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(size(EW,1),size(EW,2),size(EW,3),length(data_tuple)); # create a 3D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array = zeros(size(EW, 1), size(EW, 2), size(EW, 3), length(data_tuple)) # create a 3D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) + for j in 1:length(data_tuple) if ReverseData - ndim = length(size(data_tuple[j])) - interpol = linear_interpolation((EW_vec, NS_vec, reverse(Depth_vec)), reverse(ustrip.(data_tuple[j]), dims=ndim) ,extrapolation_bc = NaN); # create interpolation object + ndim = length(size(data_tuple[j])) + interpol = linear_interpolation((EW_vec, NS_vec, reverse(Depth_vec)), reverse(ustrip.(data_tuple[j]), dims = ndim), extrapolation_bc = NaN) # create interpolation object else - interpol = linear_interpolation((EW_vec, NS_vec, Depth_vec), ustrip.(data_tuple[j]),extrapolation_bc = 
NaN); # create interpolation object + interpol = linear_interpolation((EW_vec, NS_vec, Depth_vec), ustrip.(data_tuple[j]), extrapolation_bc = NaN) # create interpolation object end - data_array[:,:,:,j] = interpol.(EW, NS, Depth); + data_array[:, :, :, j] = interpol.(EW, NS, Depth) end - data_new = tuple([data_array[:,:,:,c] for c in 1:size(data_array,4)]...) # transform 3D matrix to tuple + data_new = tuple([data_array[:, :, :, c] for c in 1:size(data_array, 4)]...) # transform 3D matrix to tuple else # scalar field if ReverseData - ndim = length(size(V.fields[i])) - interpol = linear_interpolation((EW_vec, NS_vec, reverse(Depth_vec)), reverse(V.fields[i], dims=ndim), extrapolation_bc = NaN,); # create interpolation object + ndim = length(size(V.fields[i])) + interpol = linear_interpolation((EW_vec, NS_vec, reverse(Depth_vec)), reverse(V.fields[i], dims = ndim), extrapolation_bc = NaN) # create interpolation object else - interpol = linear_interpolation((EW_vec, NS_vec, Depth_vec), V.fields[i], extrapolation_bc = NaN); # create interpolation object + interpol = linear_interpolation((EW_vec, NS_vec, Depth_vec), V.fields[i], extrapolation_bc = NaN) # create interpolation object end - data_new = interpol.(EW, NS, Depth); # interpolate data field + data_new = interpol.(EW, NS, Depth) # interpolate data field end # replace the one - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # Create a GeoData struct with the newly interpolated fields - Data_profile = UTMData(EW, NS, Depth, fields_new); + Data_profile = UTMData(EW, NS, Depth, fields_new) return Data_profile end @@ -1137,55 +1154,55 @@ Interpolates a data field `V` on a 2D grid defined by `Lon,Lat`. 
Typically used """ function interpolate_datafields_2D(V::GeoData, Lon, Lat) - Lon_vec = V.lon.val[:,1,1]; - Lat_vec = V.lat.val[1,:,1]; + Lon_vec = V.lon.val[:, 1, 1] + Lat_vec = V.lat.val[1, :, 1] - fields_new = V.fields; - field_names = keys(fields_new); - for i = 1:length(V.fields) + fields_new = V.fields + field_names = keys(fields_new) + for i in 1:length(V.fields) if typeof(V.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = fields_new[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(size(Lon,1),size(Lon,2),size(Lon,3),length(data_tuple)); # create a 3D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array = zeros(size(Lon, 1), size(Lon, 2), size(Lon, 3), length(data_tuple)) # create a 3D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) - if length(size(data_tuple[j]))==3 - interpol = linear_interpolation((Lon_vec, Lat_vec), ustrip.(data_tuple[j][:,:,1]),extrapolation_bc = Flat()); # create interpolation object + for j in 1:length(data_tuple) + if length(size(data_tuple[j])) == 3 + interpol = linear_interpolation((Lon_vec, Lat_vec), ustrip.(data_tuple[j][:, :, 1]), extrapolation_bc = Flat()) # create interpolation object else - interpol = linear_interpolation((Lon_vec, Lat_vec), ustrip.(data_tuple[j]),extrapolation_bc = Flat()); # create interpolation object + interpol = linear_interpolation((Lon_vec, Lat_vec), ustrip.(data_tuple[j]), extrapolation_bc = Flat()) # create interpolation object end - data_array[:,:,1,j] = interpol.(Lon, Lat); + data_array[:, :, 1, j] = interpol.(Lon, Lat) end - if length(size(data_tuple[1]))==3 - data_new = tuple([data_array[:,:,:,c] for c in 1:size(data_array,4)]...) # transform 3D matrix to tuple + if length(size(data_tuple[1])) == 3 + data_new = tuple([data_array[:, :, :, c] for c in 1:size(data_array, 4)]...) # transform 3D matrix to tuple else - data_new = tuple([data_array[:,:,1,c] for c in 1:size(data_array,4)]...) # transform 3D matrix to tuple + data_new = tuple([data_array[:, :, 1, c] for c in 1:size(data_array, 4)]...) 
# transform 3D matrix to tuple end else # scalar field - if length(size(V.fields[i]))==3 - interpol = linear_interpolation((Lon_vec, Lat_vec), V.fields[i][:,:,1], extrapolation_bc = Flat()); # create interpolation object + if length(size(V.fields[i])) == 3 + interpol = linear_interpolation((Lon_vec, Lat_vec), V.fields[i][:, :, 1], extrapolation_bc = Flat()) # create interpolation object else - interpol = linear_interpolation((Lon_vec, Lat_vec), V.fields[i], extrapolation_bc = Flat()); # create interpolation object + interpol = linear_interpolation((Lon_vec, Lat_vec), V.fields[i], extrapolation_bc = Flat()) # create interpolation object end - data_new = interpol.(Lon, Lat); # interpolate data field + data_new = interpol.(Lon, Lat) # interpolate data field end # replace the one - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # Interpolate z-coordinate as well - if length(size(V.lon))==3 - interpol = linear_interpolation((Lon_vec, Lat_vec), V.depth.val[:,:,1], extrapolation_bc = Flat()); # create interpolation object + if length(size(V.lon)) == 3 + interpol = linear_interpolation((Lon_vec, Lat_vec), V.depth.val[:, :, 1], extrapolation_bc = Flat()) # create interpolation object else - interpol = linear_interpolation((Lon_vec, Lat_vec), V.depth.val, extrapolation_bc = Flat()); # create interpolation object + interpol = linear_interpolation((Lon_vec, Lat_vec), V.depth.val, extrapolation_bc = Flat()) # create interpolation object end - depth_new = interpol.(Lon, Lat); + depth_new = interpol.(Lon, Lat) # Create a GeoData struct with the newly interpolated fields @@ -1201,8 +1218,8 @@ end Interpolates a data field `V` on a 2D grid defined by `UTM`. Typically used for horizontal surfaces """ function interpolate_datafields_2D(V::UTMData, EW, NS) - EW_vec = V.EW.val[:,1,1]; - NS_vec = V.NS.val[1,:,1]; + EW_vec = V.EW.val[:, 1, 1] + NS_vec = V.NS.val[1, :, 1] return InterpolateDataFields2D_vecs(EW_vec, NS_vec, V.depth, V.fields, EW, NS) end @@ -1212,8 +1229,8 @@ end Interpolates a data field `V` on a 2D CartData grid defined by `X`,`Y`. Typically used for horizontal surfaces """ function interpolate_datafields_2D(V::CartData, X, Y) - X_vec = V.x.val[:,1,1]; - Y_vec = V.y.val[1,:,1]; + X_vec = V.x.val[:, 1, 1] + Y_vec = V.y.val[1, :, 1] return InterpolateDataFields2D_vecs(X_vec, Y_vec, V.z, V.fields, X, Y) end @@ -1226,23 +1243,23 @@ Typically used for horizontal surfaces. Note: `Original` should have orthogonal coordinates. 
If it has not, e.g., because it was rotated, you'll have to specify the angle `Rotate` that it was rotated by """ -function interpolate_datafields_2D(Original::CartData, New::CartData; Rotate=0.0, Translate=(0.0,0.0,0.0), Scale=(1.0,1.0,1.0)) - if (Rotate!=0.0) || any(Translate .!= (0,0,0)) || any(Scale .!= (1.0,1.0,1.0)) - Original_r = rotate_translate_scale(Original, Rotate = -1.0*Rotate, Translate = -1.0.*Translate, Scale=Scale); - New_r = rotate_translate_scale(New, Rotate = -1.0*Rotate, Translate = -1.0.*Translate, Scale=Scale); +function interpolate_datafields_2D(Original::CartData, New::CartData; Rotate = 0.0, Translate = (0.0, 0.0, 0.0), Scale = (1.0, 1.0, 1.0)) + if (Rotate != 0.0) || any(Translate .!= (0, 0, 0)) || any(Scale .!= (1.0, 1.0, 1.0)) + Original_r = rotate_translate_scale(Original, Rotate = -1.0 * Rotate, Translate = -1.0 .* Translate, Scale = Scale) + New_r = rotate_translate_scale(New, Rotate = -1.0 * Rotate, Translate = -1.0 .* Translate, Scale = Scale) else - Original_r = Original; - New_r = New; + Original_r = Original + New_r = New end - X_vec = Original_r.x.val[:,1,1]; - Y_vec = Original_r.y.val[1,:,1]; + X_vec = Original_r.x.val[:, 1, 1] + Y_vec = Original_r.y.val[1, :, 1] Xnew = New_r.x.val Ynew = New_r.y.val Znew, fields_new = GeophysicalModelGenerator.InterpolateDataFields2D_vecs(X_vec, Y_vec, Original_r.z, Original_r.fields, Xnew, Ynew) - return CartData(New.x.val,New.y.val,Znew, fields_new) + return CartData(New.x.val, New.y.val, Znew, fields_new) end """ @@ -1254,23 +1271,23 @@ Typically used for horizontal surfaces. Note: `Original` should have orthogonal coordinates. If it has not, e.g., because it was rotated, you'll have to specify the angle `Rotate` that it was rotated by """ -function interpolate_datafields_2D(Original::GeoData, New::GeoData; Rotate=0.0, Translate=(0.0,0.0,0.0), Scale=(1.0,1.0,1.0)) - if (Rotate!=0.0) || any(Translate .!= (0,0,0)) || any(Scale .!= (1.0,1.0,1.0)) - Original_r = rotate_translate_scale(Original, Rotate = -1.0*Rotate, Translate = -1.0.*Translate, Scale=Scale); - New_r = rotate_translate_scale(New, Rotate = -1.0*Rotate, Translate = -1.0.*Translate, Scale=Scale); +function interpolate_datafields_2D(Original::GeoData, New::GeoData; Rotate = 0.0, Translate = (0.0, 0.0, 0.0), Scale = (1.0, 1.0, 1.0)) + if (Rotate != 0.0) || any(Translate .!= (0, 0, 0)) || any(Scale .!= (1.0, 1.0, 1.0)) + Original_r = rotate_translate_scale(Original, Rotate = -1.0 * Rotate, Translate = -1.0 .* Translate, Scale = Scale) + New_r = rotate_translate_scale(New, Rotate = -1.0 * Rotate, Translate = -1.0 .* Translate, Scale = Scale) else - Original_r = Original; - New_r = New; + Original_r = Original + New_r = New end - Lon_vec = Original_r.lon.val[:,1,1]; - Lat_vec = Original_r.lat.val[1,:,1]; + Lon_vec = Original_r.lon.val[:, 1, 1] + Lat_vec = Original_r.lat.val[1, :, 1] Lon_new = New_r.lon.val Lat_new = New_r.lat.val Znew, fields_new = GeophysicalModelGenerator.InterpolateDataFields2D_vecs(Lon_vec, Lat_vec, Original_r.depth, Original_r.fields, Lon_new, Lat_new) - return GeoData(New.lon.val,New.lat.val,Znew, fields_new) + return GeoData(New.lon.val, New.lat.val, Znew, fields_new) end """ @@ -1279,7 +1296,7 @@ end Interpolates a 3D data set `V` with a projection point `proj=(Lat, Lon)` on a plane defined by `x` and `y`, where `x` and `y` are uniformly spaced. Returns the 2D array `Surf_interp`. 
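A minimal usage sketch (names and numbers are illustrative only; `Data_set` stands for any 3D `GeoData` volume, and the ranges simply define the uniformly spaced target plane):
```julia
julia> x = -50.0:2.0:50.0;  y = -40.0:2.0:40.0;   # uniformly spaced plane coordinates
julia> Surf_interp = interpolate_datafields_2D(Data_set, x, y; Lat = 49.99, Lon = 8.25)
```
The keyword arguments set the projection point; if they are omitted, the defaults given in the signature below are used.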
""" -function interpolate_datafields_2D(V::GeoData, x::AbstractRange, y::AbstractRange; Lat=49.9929, Lon=8.2473) +function interpolate_datafields_2D(V::GeoData, x::AbstractRange, y::AbstractRange; Lat = 49.9929, Lon = 8.2473) # Default: Lat=49.9929, Lon=8.2473 => Mainz (center of universe) proj = ProjectionPoint(; Lat = Lat, Lon = Lon) return interpolate_datafields_2D(V::GeoData, proj, x, y) @@ -1298,70 +1315,70 @@ Interpolates a data field `V` on a 2D grid defined by `UTM`. Typically used for """ function InterpolateDataFields2D_vecs(EW_vec, NS_vec, depth, fields_new, EW, NS) - # fields_new = V.fields; - field_names = keys(fields_new); - for i = 1:length(fields_new) + # fields_new = V.fields; + field_names = keys(fields_new) + for i in 1:length(fields_new) data_tuple = fields_new[i] - if (typeof(data_tuple) <: Tuple) & (!contains(String(field_names[i]),"colors")) + if (typeof(data_tuple) <: Tuple) & (!contains(String(field_names[i]), "colors")) # vector or anything that contains more than 1 field - data_array = zeros(size(EW,1),size(EW,2),size(EW,3),length(data_tuple)); # create a 3D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array = zeros(size(EW, 1), size(EW, 2), size(EW, 3), length(data_tuple)) # create a 3D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) - interpol = linear_interpolation((EW_vec, NS_vec), ustrip.(data_tuple[j]),extrapolation_bc = Flat()); # create interpolation object - data_array[:,:,1,j] = interpol.(EW, NS); + for j in 1:length(data_tuple) + interpol = linear_interpolation((EW_vec, NS_vec), ustrip.(data_tuple[j]), extrapolation_bc = Flat()) # create interpolation object + data_array[:, :, 1, j] = interpol.(EW, NS) end - data_new = tuple([data_array[:,:,1,c] for c in 1:size(data_array,4)]...) # transform 3D matrix to tuple + data_new = tuple([data_array[:, :, 1, c] for c in 1:size(data_array, 4)]...) # transform 3D matrix to tuple - elseif contains(String(field_names[i]),"colors") + elseif contains(String(field_names[i]), "colors") # This is a 3D matrix. We need to interpolate each color channel separately, while using nearest neighbour # as you don't want to average colors # use nearest neighbour to interpolate data - X,Y,_ = xyz_grid(EW_vec, NS_vec, depth.val[1]); - - coord = [vec(X)'; vec(Y)']; - kdtree = KDTree(coord; leafsize = 10); - points = [vec(EW)';vec(NS)']; - idx,dist = nn(kdtree, points); - I = CartesianIndices(axes(X)) - I_idx = I[idx] - I_loc = CartesianIndices(axes(EW)) - - data_array = zeros(size(EW,1),size(EW,2),size(EW,3),length(data_tuple)); # create a 3D array that holds the 2D interpolated values - for (n,i) in enumerate(I_loc) - ix,iy = i[1],i[2] - for j=1:length(data_tuple) - data_array[ix,iy,1,j] = data_tuple[j][I_idx[n]] + X, Y, _ = xyz_grid(EW_vec, NS_vec, depth.val[1]) + + coord = [vec(X)'; vec(Y)'] + kdtree = KDTree(coord; leafsize = 10) + points = [vec(EW)';vec(NS)'] + idx, dist = nn(kdtree, points) + I = CartesianIndices(axes(X)) + I_idx = I[idx] + I_loc = CartesianIndices(axes(EW)) + + data_array = zeros(size(EW, 1), size(EW, 2), size(EW, 3), length(data_tuple)) # create a 3D array that holds the 2D interpolated values + for (n, i) in enumerate(I_loc) + ix, iy = i[1], i[2] + for j in 1:length(data_tuple) + data_array[ix, iy, 1, j] = data_tuple[j][I_idx[n]] end end - data_new = tuple([data_array[:,:,1,c] for c in 1:size(data_array,4)]...) 
# transform 3D matrix to tuple + data_new = tuple([data_array[:, :, 1, c] for c in 1:size(data_array, 4)]...) # transform 3D matrix to tuple else # scalar field - if length(size(data_tuple))==3 - interpol = linear_interpolation((EW_vec, NS_vec), data_tuple[:,:,1], extrapolation_bc = Flat()); # create interpolation object + if length(size(data_tuple)) == 3 + interpol = linear_interpolation((EW_vec, NS_vec), data_tuple[:, :, 1], extrapolation_bc = Flat()) # create interpolation object else - interpol = linear_interpolation((EW_vec, NS_vec), data_tuple, extrapolation_bc = Flat()); # create interpolation object + interpol = linear_interpolation((EW_vec, NS_vec), data_tuple, extrapolation_bc = Flat()) # create interpolation object end - data_new = interpol.(EW, NS); # interpolate data field + data_new = interpol.(EW, NS) # interpolate data field end # replace the one - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # Interpolate z-coordinate as well - if length(size(depth))==3 - interpol = linear_interpolation((EW_vec, NS_vec), depth.val[:,:,1], extrapolation_bc = Flat()); # create interpolation object + if length(size(depth)) == 3 + interpol = linear_interpolation((EW_vec, NS_vec), depth.val[:, :, 1], extrapolation_bc = Flat()) # create interpolation object else - interpol = linear_interpolation((EW_vec, NS_vec), depth.val, extrapolation_bc = Flat()); # create interpolation object + interpol = linear_interpolation((EW_vec, NS_vec), depth.val, extrapolation_bc = Flat()) # create interpolation object end - depth_new = interpol.(EW, NS); + depth_new = interpol.(EW, NS) # Create a UTMData struct with the newly interpolated fields @@ -1373,53 +1390,53 @@ end # Extracts a sub-data set using indices function ExtractDataSets(V::AbstractGeneralGrid, iLon, iLat, iDepth) - X,Y,Z = coordinate_grids(V) + X, Y, Z = coordinate_grids(V) - Lon = zeros(typeof(X[1]), length(iLon),length(iLat),length(iDepth)); - Lat = zeros(typeof(Y[1]), length(iLon),length(iLat),length(iDepth)); - Depth = zeros(typeof(Z[1]), length(iLon),length(iLat),length(iDepth)); + Lon = zeros(typeof(X[1]), length(iLon), length(iLat), length(iDepth)) + Lat = zeros(typeof(Y[1]), length(iLon), length(iLat), length(iDepth)) + Depth = zeros(typeof(Z[1]), length(iLon), length(iLat), length(iDepth)) - iLo = 1:length(iLon); - iLa = 1:length(iLat); - iDe = 1:length(iDepth) - Lon[iLo,iLa,iDe] = X[iLon, iLat, iDepth]; - Lat[iLo,iLa,iDe] = Y[iLon, iLat, iDepth]; - Depth[iLo,iLa,iDe] = Z[iLon, iLat, iDepth]; + iLo = 1:length(iLon) + iLa = 1:length(iLat) + iDe = 1:length(iDepth) + Lon[iLo, iLa, iDe] = X[iLon, iLat, iDepth] + Lat[iLo, iLa, iDe] = Y[iLon, iLat, iDepth] + Depth[iLo, iLa, iDe] = Z[iLon, iLat, iDepth] - fields_new = V.fields; - field_names = keys(fields_new); - for i = 1:length(V.fields) + fields_new = V.fields + field_names = keys(fields_new) + for i in 1:length(V.fields) if typeof(V.fields[i]) <: Tuple # vector or anything that contains more than 1 field data_tuple = fields_new[i] # we have a tuple (likely a vector field), so we have to loop - data_array = zeros(typeof(data_tuple[1][1]),length(iLon),length(iLat),length(iDepth),length(data_tuple)); # create a 3D array that holds the 2D interpolated values - unit_array = zeros(size(data_array)); + data_array 
= zeros(typeof(data_tuple[1][1]), length(iLon), length(iLat), length(iDepth), length(data_tuple)) # create a 3D array that holds the 2D interpolated values + unit_array = zeros(size(data_array)) - for j=1:length(data_tuple) - data_field = data_tuple[j]; - data_array[:,:,:,j] = data_field[iLon, iLat, iDepth]; + for j in 1:length(data_tuple) + data_field = data_tuple[j] + data_array[:, :, :, j] = data_field[iLon, iLat, iDepth] end - data_new = tuple([data_array[:,:,:,c] for c in 1:size(data_array,4)]...) # transform 4D matrix to tuple + data_new = tuple([data_array[:, :, :, c] for c in 1:size(data_array, 4)]...) # transform 4D matrix to tuple else # scalar field - data_new = zeros(typeof(V.fields[i][1]), length(iLon),length(iLat),length(iDepth)); - data_new[iLo,iLa,iDe] = V.fields[i][iLon, iLat, iDepth] # interpolate data field + data_new = zeros(typeof(V.fields[i][1]), length(iLon), length(iLat), length(iDepth)) + data_new[iLo, iLa, iDe] = V.fields[i][iLon, iLat, iDepth] # interpolate data field end # replace the one - new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name - fields_new = merge(fields_new, new_field); # replace the field in fields_new + new_field = NamedTuple{(field_names[i],)}((data_new,)) # Create a tuple with same name + fields_new = merge(fields_new, new_field) # replace the field in fields_new end # Create a GeoData struct with the newly interpolated fields - if isa(V,GeoData) - Data_profile = GeoData(Lon, Lat, Depth, fields_new); - elseif isa(V,CartData) - Data_profile = CartData(Lon, Lat, Depth, fields_new); + return if isa(V, GeoData) + Data_profile = GeoData(Lon, Lat, Depth, fields_new) + elseif isa(V, CartData) + Data_profile = CartData(Lon, Lat, Depth, fields_new) else error("Not yet implemented") end @@ -1434,26 +1451,26 @@ Subtracts the horizontal average of the 3D data array V. If `Percentage=true`, the result is given as percentage; otherwise absolute values are returned """ -function subtract_horizontalmean(V::AbstractArray{T, 3}; Percentage=false) where T +function subtract_horizontalmean(V::AbstractArray{T, 3}; Percentage = false) where {T} - nx = size(V,1); - ny = size(V,2); - NumLayers = size(V,3); # get the number of depth levels + nx = size(V, 1) + ny = size(V, 2) + NumLayers = size(V, 3) # get the number of depth levels if Percentage - V_sub = zeros(size(V)); # no units + V_sub = zeros(size(V)) # no units else - V_sub = zeros(typeof(V[1]), size(V)); + V_sub = zeros(typeof(V[1]), size(V)) end - for iLayer = 1:NumLayers - average = mean(filter(!isnan, vec(V[:,:,iLayer]))); + for iLayer in 1:NumLayers + average = mean(filter(!isnan, vec(V[:, :, iLayer]))) if Percentage - V_sub[:,:,iLayer] = ustrip(V[:,:,iLayer]) .- ustrip(average); - V_sub[:,:,iLayer] = V_sub[:,:,iLayer]./ustrip(average)*100.0; # the result is normalized + V_sub[:, :, iLayer] = ustrip(V[:, :, iLayer]) .- ustrip(average) + V_sub[:, :, iLayer] = V_sub[:, :, iLayer] ./ ustrip(average) * 100.0 # the result is normalized else - V_sub[:,:,iLayer] = V[:,:,iLayer] .- average; + V_sub[:, :, iLayer] = V[:, :, iLayer] .- average end end @@ -1468,25 +1485,25 @@ Subtracts the horizontal average of the 2D data array V. 
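Here the second dimension is treated as the depth direction, so the NaN-free mean of each column `V[:, iLayer]` is removed. A small sketch with made-up numbers (the first column has mean 2.0, the second 20.0):
```julia
julia> V = [1.0 10.0; 3.0 30.0]              # 2 lateral points × 2 depth levels
julia> V_sub = subtract_horizontalmean(V)    # expected: [-1.0 -10.0; 1.0 10.0]
```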
If `Percentage=true`, the result is given as percentage; otherwise absolute values are returned """ -function subtract_horizontalmean(V::AbstractArray{T, 2}; Percentage=false) where T +function subtract_horizontalmean(V::AbstractArray{T, 2}; Percentage = false) where {T} - nx = size(V,1); - NumLayers = size(V,2); # get the number of depth levels + nx = size(V, 1) + NumLayers = size(V, 2) # get the number of depth levels if Percentage - V_sub = zeros(size(V)); # no units + V_sub = zeros(size(V)) # no units else - V_sub = zeros(typeof(V[1]), size(V)); + V_sub = zeros(typeof(V[1]), size(V)) end - for iLayer = 1:NumLayers - average = mean(filter(!isnan, vec(V[:,iLayer]))); + for iLayer in 1:NumLayers + average = mean(filter(!isnan, vec(V[:, iLayer]))) if Percentage - V_sub[:,iLayer] = ustrip(V[:,iLayer]) .- ustrip(average); - V_sub[:,iLayer] = V_sub[:,iLayer]./ustrip(average)*100.0; # the result is normalized + V_sub[:, iLayer] = ustrip(V[:, iLayer]) .- ustrip(average) + V_sub[:, iLayer] = V_sub[:, iLayer] ./ ustrip(average) * 100.0 # the result is normalized else - V_sub[:,iLayer] = V[:,iLayer] .- average; + V_sub[:, iLayer] = V[:, iLayer] .- average end end @@ -1510,18 +1527,19 @@ julia> data = parse_columns_CSV(data_file, 4) """ function parse_columns_CSV(data_file, num_columns) - data = zeros(size(data_file,1), num_columns); - for (row_num,row) in enumerate(data_file) - num = 0; - for i=1:length(row) - if typeof(row[i])==Float64 - num += 1; - data[row_num,num] = row[i] + data = zeros(size(data_file, 1), num_columns) + for (row_num, row) in enumerate(data_file) + num = 0 + for i in 1:length(row) + if typeof(row[i]) == Float64 + num += 1 + data[row_num, num] = row[i] else - try parse(Float64,row[i]) - num += 1; - data[row_num,num] = parse(Float64,row[i]) + try + parse(Float64, row[i]) + num += 1 + data[row_num, num] = parse(Float64, row[i]) catch end end @@ -1585,7 +1603,7 @@ GeoData ``` """ -function votemap(DataSets::Vector{GeoData}, criteria::Vector{String}; dims=(50,50,50)) +function votemap(DataSets::Vector{GeoData}, criteria::Vector{String}; dims = (50, 50, 50)) numDataSets = length(DataSets) @@ -1594,64 +1612,64 @@ function votemap(DataSets::Vector{GeoData}, criteria::Vector{String}; dims=(50,5 end # Determine the overlapping lon/lat/depth regions of all datasets - lon_limits = [minimum(DataSets[1].lon.val); maximum(DataSets[1].lon.val)]; - lat_limits = [minimum(DataSets[1].lat.val); maximum(DataSets[1].lat.val)]; - z_limits = [minimum(DataSets[1].depth.val); maximum(DataSets[1].depth.val)]; - for i=1:numDataSets - lon_limits[1] = maximum([lon_limits[1] minimum(DataSets[i].lon.val)]); - lon_limits[2] = minimum([lon_limits[2] maximum(DataSets[i].lon.val)]); + lon_limits = [minimum(DataSets[1].lon.val); maximum(DataSets[1].lon.val)] + lat_limits = [minimum(DataSets[1].lat.val); maximum(DataSets[1].lat.val)] + z_limits = [minimum(DataSets[1].depth.val); maximum(DataSets[1].depth.val)] + for i in 1:numDataSets + lon_limits[1] = maximum([lon_limits[1] minimum(DataSets[i].lon.val)]) + lon_limits[2] = minimum([lon_limits[2] maximum(DataSets[i].lon.val)]) - lat_limits[1] = maximum([lat_limits[1] minimum(DataSets[i].lat.val)]); - lat_limits[2] = minimum([lat_limits[2] maximum(DataSets[i].lat.val)]); + lat_limits[1] = maximum([lat_limits[1] minimum(DataSets[i].lat.val)]) + lat_limits[2] = minimum([lat_limits[2] maximum(DataSets[i].lat.val)]) - z_limits[1] = maximum([z_limits[1] minimum(DataSets[i].depth.val)]); - z_limits[2] = minimum([z_limits[2] maximum(DataSets[i].depth.val)]); + z_limits[1] 
= maximum([z_limits[1] minimum(DataSets[i].depth.val)]) + z_limits[2] = minimum([z_limits[2] maximum(DataSets[i].depth.val)]) end # Loop over all datasets, and interpolate the data set to the new (usually smaller) domain - votemap = zeros(Int64,dims) - for i=1:numDataSets - VoteMap_Local = zeros(Int64,dims) + votemap = zeros(Int64, dims) + for i in 1:numDataSets + VoteMap_Local = zeros(Int64, dims) # Interpolate data set to smaller domain - DataSet = extract_subvolume(DataSets[i]; Interpolate=true, Lon_level=lon_limits, Lat_level=lat_limits, Depth_level=z_limits, dims=dims); + DataSet = extract_subvolume(DataSets[i]; Interpolate = true, Lon_level = lon_limits, Lat_level = lat_limits, Depth_level = z_limits, dims = dims) # Extract the criteria to evaluate - expr = Meta.parse(criteria[i]); # the expression, such as Vs>1.0 + expr = Meta.parse(criteria[i]) # the expression, such as Vs>1.0 # Extract data field - if !haskey(DataSet.fields,expr.args[2]) + if !haskey(DataSet.fields, expr.args[2]) error("The GeoData set does not have the field: $(expr.args[2])") end - Array3D = ustrip.(DataSet.fields[expr.args[2]]); # strip units, just in case + Array3D = ustrip.(DataSet.fields[expr.args[2]]) # strip units, just in case # Modify the value, to be Array3D - expr_mod = Expr(:call, expr.args[1], :($Array3D), expr.args[3]); # modify the original expression to use Array3D as variable name + expr_mod = Expr(:call, expr.args[1], :($Array3D), expr.args[3]) # modify the original expression to use Array3D as variable name # The expression should have a ".", such as Array .> 1.0. If not, it will not apply this in a pointwise manner # Here, we add this dot if it is not there yet - if cmp(String(expr_mod.args[1])[1],Char('.'))==1 - expr_mod.args[1] = Symbol(".",expr_mod.args[1]); + if cmp(String(expr_mod.args[1])[1], Char('.')) == 1 + expr_mod.args[1] = Symbol(".", expr_mod.args[1]) end - ind = eval(expr_mod); # evaluate the modified expression - VoteMap_Local[ind] .= 1; # assign vote-map + ind = eval(expr_mod) # evaluate the modified expression + VoteMap_Local[ind] .= 1 # assign vote-map - votemap = votemap + VoteMap_Local; # Sum + votemap = votemap + VoteMap_Local # Sum end - DataSet = extract_subvolume(DataSets[1], Interpolate=true, Lon_level=lon_limits, Lat_level=lat_limits, Depth_level=z_limits, dims=dims); + DataSet = extract_subvolume(DataSets[1], Interpolate = true, Lon_level = lon_limits, Lat_level = lat_limits, Depth_level = z_limits, dims = dims) # Construct GeoData set that holds the votemap (makes it easier to write paraview files) - VoteData = GeoData(DataSet.lon.val,DataSet.lat.val,DataSet.depth.val, (votemap=votemap,)); + VoteData = GeoData(DataSet.lon.val, DataSet.lat.val, DataSet.depth.val, (votemap = votemap,)) return VoteData end # Make this work for single data sets as well -function votemap(DataSets::GeoData, criteria::String; dims=(50,50,50)) - votemap([DataSets], [criteria]; dims=dims) +function votemap(DataSets::GeoData, criteria::String; dims = (50, 50, 50)) + return votemap([DataSets], [criteria]; dims = dims) end """ @@ -1686,40 +1704,40 @@ ParaviewData fields: (:Depth,) ``` """ -function rotate_translate_scale(Data::Union{ParaviewData, CartData}; Rotate::Number=0.0, Translate=(0,0,0), Scale=(1.0,1.0,1.0), Xc=(0.0,0.0)) +function rotate_translate_scale(Data::Union{ParaviewData, CartData}; Rotate::Number = 0.0, Translate = (0, 0, 0), Scale = (1.0, 1.0, 1.0), Xc = (0.0, 0.0)) - X,Y,Z = copy(Data.x.val), copy(Data.y.val), copy(Data.z.val); # Extract coordinates - Xr,Yr,Zr = X,Y,Z; # 
Rotated coordinates + X, Y, Z = copy(Data.x.val), copy(Data.y.val), copy(Data.z.val) # Extract coordinates + Xr, Yr, Zr = X, Y, Z # Rotated coordinates # 1) Scaling - if length(Scale)==1 - Scale = [Scale, Scale, Scale]; + if length(Scale) == 1 + Scale = [Scale, Scale, Scale] end - Xr .*= Scale[1]; - Yr .*= Scale[2]; - Zr .*= Scale[3]; + Xr .*= Scale[1] + Yr .*= Scale[2] + Zr .*= Scale[3] # 2) 2D rotation around X/Y axis, around center of box - Xm,Ym = 0.0, 0.0; - R = [cosd(Rotate) -sind(Rotate); sind(Rotate) cosd(Rotate)]; # 2D rotation matrix + Xm, Ym = 0.0, 0.0 + R = [cosd(Rotate) -sind(Rotate); sind(Rotate) cosd(Rotate)] # 2D rotation matrix for i in eachindex(X) - Rot_XY = R*[X[i]-Xc[1]; Y[i]-Xc[2]]; - Xr[i] = Rot_XY[1] + Xc[1]; - Yr[i] = Rot_XY[2] + Xc[2]; + Rot_XY = R * [X[i] - Xc[1]; Y[i] - Xc[2]] + Xr[i] = Rot_XY[1] + Xc[1] + Yr[i] = Rot_XY[2] + Xc[2] end # 3) Add translation - Xr .+= Translate[1]; - Yr .+= Translate[2]; - Zr .+= Translate[3]; + Xr .+= Translate[1] + Yr .+= Translate[2] + Zr .+= Translate[3] # Modify original structure if isa(Data, ParaviewData) - Data_r = ParaviewData(Xr,Yr,Zr, Data.fields) + Data_r = ParaviewData(Xr, Yr, Zr, Data.fields) else - Data_r = CartData(Xr,Yr,Zr, Data.fields) + Data_r = CartData(Xr, Yr, Zr, Data.fields) end return Data_r @@ -1732,13 +1750,13 @@ end Computes lithostatic pressure from a 3D density array, assuming constant soacing `dz` in vertical direction. Optionally, the gravitational acceleration `g` can be specified. """ -function lithostatic_pressure!(Plithos::Array{T,N}, Density::Array{T,N}, dz::Number; g=9.81) where {T,N} +function lithostatic_pressure!(Plithos::Array{T, N}, Density::Array{T, N}, dz::Number; g = 9.81) where {T, N} - Plithos[:] = Density*dz*g; + Plithos[:] = Density * dz * g - selectdim(Plithos,N,size(Plithos)[N]) .= 0 # set upper row to zero + selectdim(Plithos, N, size(Plithos)[N]) .= 0 # set upper row to zero - Plithos[:] = reverse!(cumsum(reverse!(Plithos),dims=N)) + Plithos[:] = reverse!(cumsum(reverse!(Plithos), dims = N)) return nothing end @@ -1749,17 +1767,17 @@ end Checks if points given by matrices `X` and `Y` are in or on (both cases return true) a polygon given by `PolyX` and `PolyY`. Boolean `fast` will trigger faster version that may miss points that are exactly on the edge of the polygon. Speedup is a factor of 3. """ -function inpolygon!(INSIDE::Matrix{Bool}, PolyX::Vector{T}, PolyY::Vector{T}, X::Matrix{T}, Y::Matrix{T}; fast=false) where T <: Real - if fast - for j = 1 : size(X, 2) - for i = 1 : size(X, 1) - INSIDE[i,j] = inpoly_fast(PolyX, PolyY, X[i,j], Y[i,j]) +function inpolygon!(INSIDE::Matrix{Bool}, PolyX::Vector{T}, PolyY::Vector{T}, X::Matrix{T}, Y::Matrix{T}; fast = false) where {T <: Real} + return if fast + for j in 1:size(X, 2) + for i in 1:size(X, 1) + INSIDE[i, j] = inpoly_fast(PolyX, PolyY, X[i, j], Y[i, j]) end end else - for j = 1 : size(X, 2) - for i = 1 : size(X, 1) - INSIDE[i,j] = (inpoly(PolyX, PolyY, X[i,j], Y[i,j]) || inpoly(PolyY, PolyX, Y[i,j], X[i,j])) + for j in 1:size(X, 2) + for i in 1:size(X, 1) + INSIDE[i, j] = (inpoly(PolyX, PolyY, X[i, j], Y[i, j]) || inpoly(PolyY, PolyX, Y[i, j], X[i, j])) end end end @@ -1771,13 +1789,13 @@ end Same as above but `inside`, `X` and `Y` and are vectors. 
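For illustration, a minimal sketch (the unit-square polygon and the two query points below are made up for this example):

```julia
PolyX = [0.0, 1.0, 1.0, 0.0]     # x-coordinates of the polygon corners
PolyY = [0.0, 0.0, 1.0, 1.0]     # y-coordinates of the polygon corners
x, y  = [0.5, 2.0], [0.5, 0.5]   # one query point inside the square, one outside
inside = fill(false, length(x))
inpolygon!(inside, PolyX, PolyY, x, y)
# inside == [true, false]
```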
""" -function inpolygon!(inside::Vector{Bool}, PolyX::AbstractVector{T}, PolyY::AbstractVector{T}, x::Vector{T}, y::Vector{T}; fast=false) where T <: Real - if fast - for i = eachindex(x) +function inpolygon!(inside::Vector{Bool}, PolyX::AbstractVector{T}, PolyY::AbstractVector{T}, x::Vector{T}, y::Vector{T}; fast = false) where {T <: Real} + return if fast + for i in eachindex(x) inside[i] = inpoly_fast(PolyX, PolyY, x[i], y[i]) end else - for i = eachindex(x) + for i in eachindex(x) inside[i] = (inpoly(PolyX, PolyY, x[i], y[i]) || inpoly(PolyY, PolyX, y[i], x[i])) end end @@ -1789,12 +1807,12 @@ end Checks if a point given by x and y is in or on (both cases return true) a polygon given by PolyX and PolyY, iSteps and jSteps provide the connectivity between the polygon edges. This function should be used through inpolygon!(). """ -function inpoly(PolyX::AbstractVector{T}, PolyY::AbstractVector{T}, x::T, y::T) where T <: Real +function inpoly(PolyX::AbstractVector{T}, PolyY::AbstractVector{T}, x::T, y::T) where {T <: Real} inside1, inside2, inside3, inside4 = false, false, false, false n = length(PolyX) for i in eachindex(PolyX) - j = i-1 - j += n * (j<1) + j = i - 1 + j += n * (j < 1) xi = PolyX[i] xi = PolyX[i] yi = PolyY[i] @@ -1828,12 +1846,12 @@ end Faster version of inpoly() but will miss some points that are on the edge of the polygon. """ -function inpoly_fast(PolyX::Vector{T}, PolyY::Vector{T}, x::T, y::T) where T <: Real +function inpoly_fast(PolyX::Vector{T}, PolyY::Vector{T}, x::T, y::T) where {T <: Real} inside = false n = length(PolyX) for i in eachindex(PolyX) - j = i-1 - j += n * (j<1) + j = i - 1 + j += n * (j < 1) xi = PolyX[i] yi = PolyY[i] xj = PolyX[j] @@ -1845,6 +1863,3 @@ function inpoly_fast(PolyX::Vector{T}, PolyY::Vector{T}, x::T, y::T) where T <: end return inside end - - - diff --git a/src/voxel_gravity.jl b/src/voxel_gravity.jl index 3e128366..8682c07c 100644 --- a/src/voxel_gravity.jl +++ b/src/voxel_gravity.jl @@ -30,57 +30,59 @@ Optional arguments: - `outName`: name of the paraview output (do not include file type) - `printing`: activate printing of additional information [`true` or `false`] """ -function voxel_grav(X::Array{Float64, 3}, Y::Array{Float64, 3}, Z::Array{Float64, 3}, RHO::Array{Float64, 3}; - refMod="AVG", lengthUnit="m", rhoTol=1e-9, Topo=[], outName="Bouguer", printing=true) +function voxel_grav( + X::Array{Float64, 3}, Y::Array{Float64, 3}, Z::Array{Float64, 3}, RHO::Array{Float64, 3}; + refMod = "AVG", lengthUnit = "m", rhoTol = 1.0e-9, Topo = [], outName = "Bouguer", printing = true + ) ## check input X, Y, Z, RHO, RefMod, rhoTol, Topo, orient = checkInput(X, Y, Z, RHO, refMod, lengthUnit, rhoTol, Topo, outName, printing) ################ precompute things ################ # define constants - G = 6.67408e-11 + G = 6.67408e-11 # get coordinate vectors - x_vec = X[:,1,1] - y_vec = Y[1,:,1] - z_vec = Z[1,1,:] + x_vec = X[:, 1, 1] + y_vec = Y[1, :, 1] + z_vec = Z[1, 1, :] # cut everything above sea level - ind = findall(x->x<=0, z_vec) - X = X[:,:,ind] - Y = Y[:,:,ind] - Z = Z[:,:,ind] - RHO = RHO[:,:,ind] + ind = findall(x -> x <= 0, z_vec) + X = X[:, :, ind] + Y = Y[:, :, ind] + Z = Z[:, :, ind] + RHO = RHO[:, :, ind] RefMod = RefMod[ind] # check dimensions - nx = size(X,1); - ny = size(X,2); - nz = size(X,3); + nx = size(X, 1) + ny = size(X, 2) + nz = size(X, 3) # subtract reference model - for i = 1 : nz - RHO[:,:,i] .= RHO[:,:,i] .- RefMod[i] + for i in 1:nz + RHO[:, :, i] .= RHO[:, :, i] .- RefMod[i] end # interpolate density grid to 
cell centers - DRHO = RHO[1:end-1,1:end-1,1:end-1] .+ RHO[2:end,1:end-1,1:end-1] .+ RHO[2:end,2:end,1:end-1] .+ RHO[1:end-1,2:end,1:end-1] + - RHO[1:end-1,1:end-1,2:end] .+ RHO[2:end,1:end-1,2:end] .+ RHO[2:end,2:end,2:end] .+ RHO[1:end-1,2:end,2:end] + DRHO = RHO[1:(end - 1), 1:(end - 1), 1:(end - 1)] .+ RHO[2:end, 1:(end - 1), 1:(end - 1)] .+ RHO[2:end, 2:end, 1:(end - 1)] .+ RHO[1:(end - 1), 2:end, 1:(end - 1)] + + RHO[1:(end - 1), 1:(end - 1), 2:end] .+ RHO[2:end, 1:(end - 1), 2:end] .+ RHO[2:end, 2:end, 2:end] .+ RHO[1:(end - 1), 2:end, 2:end] DRHO = DRHO ./ 8 # voxel volume - dx = X[2,1,1] - X[1,1,1] - dy = Y[1,2,1] - Y[1,1,1] - dz = Z[1,1,2] - Z[1,1,1] - dV = abs(dx*dy*dz) + dx = X[2, 1, 1] - X[1, 1, 1] + dy = Y[1, 2, 1] - Y[1, 1, 1] + dz = Z[1, 1, 2] - Z[1, 1, 1] + dV = abs(dx * dy * dz) # dV * G is a constants VG_vox = dV * G # coordinate vector of cell center grid - xCells = x_vec[1:end-1] .+ dx/2 - yCells = y_vec[1:end-1] .+ dy/2 - zCells = z_vec[1:end-1] .+ dz/2 + xCells = x_vec[1:(end - 1)] .+ dx / 2 + yCells = y_vec[1:(end - 1)] .+ dy / 2 + zCells = z_vec[1:(end - 1)] .+ dz / 2 # precompute distances if printing @@ -95,40 +97,40 @@ function voxel_grav(X::Array{Float64, 3}, Y::Array{Float64, 3}, Z::Array{Float64 ############# compute bouguer anomaly ############# if printing @printf "Computing Bouguer anomaly:" - @time dg = computeBoug(nx,ny,nz,DRHO,d_cube,VG_vox,zCells,rhoTol) + @time dg = computeBoug(nx, ny, nz, DRHO, d_cube, VG_vox, zCells, rhoTol) @printf "\n" else - dg = computeBoug(nx,ny,nz,DRHO,d_cube,VG_vox,zCells,rhoTol) + dg = computeBoug(nx, ny, nz, DRHO, d_cube, VG_vox, zCells, rhoTol) end ################################################### ############ compute bouguer gradients ############ if printing @printf "Computing Bouguer gradients:" - @time gradX, gradY = computeBougGrads(nx,ny,dg) + @time gradX, gradY = computeBougGrads(nx, ny, dg) @printf "\n" else - gradX, gradY = computeBougGrads(nx,ny,dg) + gradX, gradY = computeBougGrads(nx, ny, dg) end ################################################### ############## write info and output ############## - numRel = length(findall(x->abs(x)>rhoTol, DRHO)) - frac = numRel/length(DRHO) - coords = cat(X[:,:,1],Y[:,:,1],Topo,dims=4) - coords = permutedims(coords,[4,1,2,3]) - vtkfile = vtk_grid(outName,coords) + numRel = length(findall(x -> abs(x) > rhoTol, DRHO)) + frac = numRel / length(DRHO) + coords = cat(X[:, :, 1], Y[:, :, 1], Topo, dims = 4) + coords = permutedims(coords, [4, 1, 2, 3]) + vtkfile = vtk_grid(outName, coords) if printing - @printf "%.3f %% of the domain contained anomalous densities. If this is more than expected, adjust rhoTol for faster computation.\n" 100*frac + @printf "%.3f %% of the domain contained anomalous densities. 
If this is more than expected, adjust rhoTol for faster computation.\n" 100 * frac @printf "Writing output...\n" - vtkfile["Bouguer Anomaly [mGal]"] = dg + vtkfile["Bouguer Anomaly [mGal]"] = dg vtkfile["Boug_Gradient_X [mGal/m]"] = gradX vtkfile["Boug_Gradient_Y [mGal/m]"] = gradY outfiles = vtk_save(vtkfile) @printf "Wrote output to: %s\n\n" outfiles else - vtkfile["Bouguer Anomaly [mGal]"] = dg + vtkfile["Bouguer Anomaly [mGal]"] = dg vtkfile["Boug_Gradient_X [mGal/m]"] = gradX vtkfile["Boug_Gradient_Y [mGal/m]"] = gradY outfiles = vtk_save(vtkfile) @@ -138,24 +140,21 @@ function voxel_grav(X::Array{Float64, 3}, Y::Array{Float64, 3}, Z::Array{Float64 if orient == 1 return dg, gradX, gradY else - return permutedims(dg, [2,1]), permutedims(gradX, [2,1]), permutedims(gradY, [2,1]) + return permutedims(dg, [2, 1]), permutedims(gradX, [2, 1]), permutedims(gradY, [2, 1]) end end - - - function checkInput(X, Y, Z, RHO, refMod, lengthUnit, rhoTol, Topo, outName, printing) # orientation - if X[1,1,1] ≠ X[2,1,1] && X[1,1,1] == X[1,2,1] && X[1,1,1] == X[1,1,2] + if X[1, 1, 1] ≠ X[2, 1, 1] && X[1, 1, 1] == X[1, 2, 1] && X[1, 1, 1] == X[1, 1, 2] orientation = 1 - elseif X[1,1,1] == X[2,1,1] && X[1,1,1] ≠ X[1,2,1] && X[1,1,1] == X[1,1,2] + elseif X[1, 1, 1] == X[2, 1, 1] && X[1, 1, 1] ≠ X[1, 2, 1] && X[1, 1, 1] == X[1, 1, 2] orientation = 2 - X = permutedims(X, [2,1,3]) - Y = permutedims(Y, [2,1,3]) - Z = permutedims(Z, [2,1,3]) - RHO = permutedims(RHO, [2,1,3]) + X = permutedims(X, [2, 1, 3]) + Y = permutedims(Y, [2, 1, 3]) + Z = permutedims(Z, [2, 1, 3]) + RHO = permutedims(RHO, [2, 1, 3]) else error("Coordinate orientation looks wrong!") end @@ -164,27 +163,27 @@ function checkInput(X, Y, Z, RHO, refMod, lengthUnit, rhoTol, Topo, outName, pri if !(size(X) == size(Y) && size(X) == size(Z) && size(X) == size(RHO)) error("X, Y, Z, RHO must be 3D matrices of the same size.") end - nz = size(X,3) + nz = size(X, 3) # check if grid is regular - dx = diff(X,dims=1)[:] - dy = diff(Y,dims=2)[:] - dz = diff(Z,dims=3)[:] - tol = 1e-12 - if !(all(a->a dx[1]-tol,dx) && all(a->a dy[1]-tol,dy) && all(a->a dz[1]-tol,dz)) + dx = diff(X, dims = 1)[:] + dy = diff(Y, dims = 2)[:] + dz = diff(Z, dims = 3)[:] + tol = 1.0e-12 + if !(all(a -> a < dx[1] + tol && a > dx[1] - tol, dx) && all(a -> a < dy[1] + tol && a > dy[1] - tol, dy) && all(a -> a < dz[1] + tol && a > dz[1] - tol, dz)) error("Non-regular grids are not supported yet") end # Topo if !isempty(Topo) - if !(size(Topo,1) == size(X,1) && size(Topo,2) == size(X,2)) + if !(size(Topo, 1) == size(X, 1) && size(Topo, 2) == size(X, 2)) if printing @printf "Topo input dimensions do not fit X, Y, Z, RHO. 
Using a flat topography.\n" end - Topo = zeros(size(X,1),size(X,2)) + Topo = zeros(size(X, 1), size(X, 2)) end else - Topo = zeros(size(X,1),size(X,2)) + Topo = zeros(size(X, 1), size(X, 2)) end # tolerance @@ -201,10 +200,10 @@ function checkInput(X, Y, Z, RHO, refMod, lengthUnit, rhoTol, Topo, outName, pri if printing @printf "Converting coordinates to meters.\n\n" end - X = X .* 1000; - Y = Y .* 1000; - Z = Z .* 1000; - Topo = Topo .* 1000; + X = X .* 1000 + Y = Y .* 1000 + Z = Z .* 1000 + Topo = Topo .* 1000 else error("lengthUnit should be \"m\" or \"km\".") end @@ -218,15 +217,15 @@ function checkInput(X, Y, Z, RHO, refMod, lengthUnit, rhoTol, Topo, outName, pri end else if refMod == "NE" - RefMod = RHO[end,end,:] + RefMod = RHO[end, end, :] elseif refMod == "SE" - RefMod = RHO[end,1,:] + RefMod = RHO[end, 1, :] elseif refMod == "SW" - RefMod = RHO[1,1,:] + RefMod = RHO[1, 1, :] elseif refMod == "NW" - RefMod = RHO[1,end,:] + RefMod = RHO[1, end, :] elseif refMod == "AVG" - RefMod = !mean([1.,1.,1.],RHO) + RefMod = !mean([1.0, 1.0, 1.0], RHO) else error("RefMod should be NE, SE, SW, NW, AVG or a vector with one value for each depth.") end @@ -242,38 +241,46 @@ function checkInput(X, Y, Z, RHO, refMod, lengthUnit, rhoTol, Topo, outName, pri end function precompDist(x_vec, y_vec, xCells, yCells, zCells, nx, ny, nz) - d_square = zeros(nx-1,ny-1,nz-1) + d_square = zeros(nx - 1, ny - 1, nz - 1) - for iX = 1 : nx - 1 - for iY = 1 : ny - 1 - for iZ = 1 : nz - 1 - d_square[iX,iY,iZ] = (xCells[iX]-x_vec[1])^2 + (yCells[iY]-y_vec[1])^2 + zCells[iZ]^2 + for iX in 1:(nx - 1) + for iY in 1:(ny - 1) + for iZ in 1:(nz - 1) + d_square[iX, iY, iZ] = (xCells[iX] - x_vec[1])^2 + (yCells[iY] - y_vec[1])^2 + zCells[iZ]^2 end end end - d_cube = d_square .^1.5 + d_cube = d_square .^ 1.5 return d_cube end -function computeBoug(nx,ny,nz,DRHO,d_cube,VG_vox,zCells,rhoTol) - mGal = 1e5 - - dg = zeros(nx,ny) - - for jX = 1 : nx-1 - for jY = 1 : ny-1 - for jZ = 1 : nz-1 - if (DRHO[jX,jY,jZ] > rhoTol || DRHO[jX,jY,jZ] < -rhoTol) - for iX = 1 : nx - for iY = 1 : ny - d_indX = jX-iX - if d_indX < 0; d_indX = abs(d_indX); else d_indX = d_indX + 1; end - d_indY = jY-iY; - if d_indY < 0; d_indY = abs(d_indY); else d_indY = d_indY + 1; end - d_c = d_cube[d_indX,d_indY,jZ]; - dg[iX,iY] = dg[iX,iY] + VG_vox * DRHO[jX,jY,jZ] * -zCells[jZ]/d_c; +function computeBoug(nx, ny, nz, DRHO, d_cube, VG_vox, zCells, rhoTol) + mGal = 1.0e5 + + dg = zeros(nx, ny) + + for jX in 1:(nx - 1) + for jY in 1:(ny - 1) + for jZ in 1:(nz - 1) + if (DRHO[jX, jY, jZ] > rhoTol || DRHO[jX, jY, jZ] < -rhoTol) + for iX in 1:nx + for iY in 1:ny + d_indX = jX - iX + if d_indX < 0 + d_indX = abs(d_indX) + else + d_indX = d_indX + 1 + end + d_indY = jY - iY + if d_indY < 0 + d_indY = abs(d_indY) + else + d_indY = d_indY + 1 + end + d_c = d_cube[d_indX, d_indY, jZ] + dg[iX, iY] = dg[iX, iY] + VG_vox * DRHO[jX, jY, jZ] * -zCells[jZ] / d_c end end end @@ -286,23 +293,23 @@ function computeBoug(nx,ny,nz,DRHO,d_cube,VG_vox,zCells,rhoTol) return dg end -function computeBougGrads(nx,ny,dg) - gradX = zeros(nx,ny) - gradY = zeros(nx,ny) +function computeBougGrads(nx, ny, dg) + gradX = zeros(nx, ny) + gradY = zeros(nx, ny) - for iY = 1 : ny - itp = Interpolations.interpolate(dg[:,iY], BSpline(Quadratic(Reflect(OnCell())))) - for iX = 1 : nx - grad = Interpolations.gradient(itp,iX) - gradX[iX,iY] = grad[1] + for iY in 1:ny + itp = Interpolations.interpolate(dg[:, iY], BSpline(Quadratic(Reflect(OnCell())))) + for iX in 1:nx + grad = Interpolations.gradient(itp, iX) 
+ gradX[iX, iY] = grad[1] end end - for iX = 1 : nx - itp = Interpolations.interpolate(dg[iX,:], BSpline(Quadratic(Reflect(OnCell())))) - for iY = 1 : ny - grad = Interpolations.gradient(itp,iY) - gradY[iX,iY] = grad[1] + for iX in 1:nx + itp = Interpolations.interpolate(dg[iX, :], BSpline(Quadratic(Reflect(OnCell())))) + for iY in 1:ny + grad = Interpolations.gradient(itp, iY) + gradY[iX, iY] = grad[1] end end diff --git a/test/runtests.jl b/test/runtests.jl index bcf3ef42..b8a4b186 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -2,7 +2,7 @@ using GeophysicalModelGenerator using Test @testset verbose = true "GeophysicalModelGenerator" begin - + @testset "Data import.jl" begin include("test_data_import.jl") end @@ -81,7 +81,7 @@ using Test @testset "ASAGI_IO" begin include("test_ASAGI_IO.jl") end - + @testset "Chmy" begin include("test_Chmy.jl") end @@ -93,4 +93,4 @@ include("test_tutorials.jl") # Cleanup foreach(rm, filter(endswith(".vts"), readdir())) foreach(rm, filter(endswith(".vtu"), readdir())) -rm("./markers/",recursive=true) +rm("./markers/", recursive = true) diff --git a/test/test_ASAGI_IO.jl b/test/test_ASAGI_IO.jl index 5ffe7fff..d4d0d8d2 100644 --- a/test/test_ASAGI_IO.jl +++ b/test/test_ASAGI_IO.jl @@ -1,13 +1,13 @@ using GeophysicalModelGenerator, Test, Statistics -XYZ = xyz_grid(1.0:1:10.0, 11.0:1:21.0, -23:1:-10); -Dat = zeros(size(XYZ[1])); -Rho = ones(Float64, size(Dat))*3000; -Phases = zeros(Int32, size(Dat)); -Sxx = XYZ[3]*10; -Stress = (Sxx,Sxx,Sxx,Sxx,Sxx,Sxx,Sxx,Sxx,Sxx) -Data = CartData(XYZ...,(Rho=Rho,Sxx=Sxx)) -Data_tuple = CartData(XYZ...,(Rho=Rho,Sxx=Sxx, Stress=Stress)) +XYZ = xyz_grid(1.0:1:10.0, 11.0:1:21.0, -23:1:-10); +Dat = zeros(size(XYZ[1])); +Rho = ones(Float64, size(Dat)) * 3000; +Phases = zeros(Int32, size(Dat)); +Sxx = XYZ[3] * 10; +Stress = (Sxx, Sxx, Sxx, Sxx, Sxx, Sxx, Sxx, Sxx, Sxx) +Data = CartData(XYZ..., (Rho = Rho, Sxx = Sxx)) +Data_tuple = CartData(XYZ..., (Rho = Rho, Sxx = Sxx, Stress = Stress)) fname_asagi = write_ASAGI("test", Data) @test fname_asagi == "test_ASAGI.nc" @@ -23,12 +23,12 @@ Data_SeisSol = read_ASAGI("test_files/tpv34_rhomulambda-inner.nc") @test eltype(Data_SeisSol.fields.rho[10]) == Float32 # test that specifying specific field works -fname_asagi = write_ASAGI("test", Data, fields=(:Sxx,)) +fname_asagi = write_ASAGI("test", Data, fields = (:Sxx,)) Data_ASAGI2 = read_ASAGI(fname_asagi) @test sum(Data_ASAGI2.fields.Sxx - Data.fields.Sxx) == 0 # test that converting to meters works -fname_asagi = write_ASAGI("test3", Data, fields=(:Sxx,), km_to_m=true) +fname_asagi = write_ASAGI("test3", Data, fields = (:Sxx,), km_to_m = true) Data_ASAGI3 = read_ASAGI(fname_asagi) @test Data_ASAGI3.x.val[1] ≈ 1000.0 diff --git a/test/test_Chmy.jl b/test/test_Chmy.jl index d48687c8..e5ea1393 100644 --- a/test/test_Chmy.jl +++ b/test/test_Chmy.jl @@ -7,70 +7,72 @@ backend = CPU() arch = Arch(backend) # 3D test -lx, ly, lz = 10.0, 11.0, 12.0 -nx, ny, nz = 10,11,12 -grid = UniformGrid(arch; - origin=(-lx/2, -ly/2, -lz/2), - extent=(lx, ly, lz), - dims=(nx, ny, nz)) +lx, ly, lz = 10.0, 11.0, 12.0 +nx, ny, nz = 10, 11, 12 +grid = UniformGrid( + arch; + origin = (-lx / 2, -ly / 2, -lz / 2), + extent = (lx, ly, lz), + dims = (nx, ny, nz) +) # create field -Temp_C = Field(backend, grid, Center(), Float64; halo=1) -Phases_C = Field(backend, grid, Center(), Int32; halo=1) -Temp_V = Field(backend, grid, Vertex(), Float64; halo=1) -Phases_V = Field(backend, grid, Vertex(), Int32; halo=1) +Temp_C = Field(backend, grid, Center(), Float64; halo = 
1) +Phases_C = Field(backend, grid, Center(), Int32; halo = 1) +Temp_V = Field(backend, grid, Vertex(), Float64; halo = 1) +Phases_V = Field(backend, grid, Vertex(), Int32; halo = 1) # grid: -CartGrid = create_CartGrid(grid) +CartGrid = create_CartGrid(grid) @test sum.(CartGrid.coord1D) == (0.0, 0.0, 0.0) -# test add_box! directly. Note that this requires you to specify a "cell" keyword for Center() locations -add_box!(Phases_C,Temp_C,CartGrid, xlim=(0.0,1.0), zlim=(-2,0), phase=ConstantPhase(3), cell=true) -@test extrema(Phases_C) == (0,3) +# test add_box! directly. Note that this requires you to specify a "cell" keyword for Center() locations +add_box!(Phases_C, Temp_C, CartGrid, xlim = (0.0, 1.0), zlim = (-2, 0), phase = ConstantPhase(3), cell = true) +@test extrema(Phases_C) == (0, 3) -add_box!(Phases_V,Temp_V,CartGrid, xlim=(0,1.0), zlim=(-2,0), phase=ConstantPhase(3)) -@test extrema(Phases_V) == (0,3) +add_box!(Phases_V, Temp_V, CartGrid, xlim = (0, 1.0), zlim = (-2, 0), phase = ConstantPhase(3)) +@test extrema(Phases_V) == (0, 3) # multiple dispatch functions -add_box!(Phases_C,Temp_C,grid, xlim=(0,1.0), zlim=(-2,0), phase=ConstantPhase(2)) -@test extrema(Phases_C) == (0,2) +add_box!(Phases_C, Temp_C, grid, xlim = (0, 1.0), zlim = (-2, 0), phase = ConstantPhase(2)) +@test extrema(Phases_C) == (0, 2) -add_box!(Phases_V,Temp_V,grid, xlim=(0,1.0), zlim=(-2,0), phase=ConstantPhase(2)) -@test extrema(Phases_V) == (0,2) +add_box!(Phases_V, Temp_V, grid, xlim = (0, 1.0), zlim = (-2, 0), phase = ConstantPhase(2)) +@test extrema(Phases_V) == (0, 2) -add_sphere!(Phases_V,Temp_V,grid, cen=(0,0,-1), radius=2.5, phase=ConstantPhase(3), T=ConstantTemp(800)) -@test extrema(Phases_V) == (0,3) -@test extrema(Temp_V) == (0.0,800.0) +add_sphere!(Phases_V, Temp_V, grid, cen = (0, 0, -1), radius = 2.5, phase = ConstantPhase(3), T = ConstantTemp(800)) +@test extrema(Phases_V) == (0, 3) +@test extrema(Temp_V) == (0.0, 800.0) # test above/below surface intersection -Topo_cart = CartData(xyz_grid(-6:.2:6,-12:.2:13,0)); -ind = above_surface(grid, Phases_V, Topo_cart); +Topo_cart = CartData(xyz_grid(-6:0.2:6, -12:0.2:13, 0)); +ind = above_surface(grid, Phases_V, Topo_cart); Phases_V[ind] .= 4; -@test extrema(Phases_V) == (0,4) +@test extrema(Phases_V) == (0, 4) -ind = above_surface(grid, Phases_C, Topo_cart); +ind = above_surface(grid, Phases_C, Topo_cart); Phases_C[ind] .= 4; -@test extrema(Phases_V) == (0,4) - - +@test extrema(Phases_V) == (0, 4) # 2D test -lx, lz = 10.0, 12.0 -nx, nz = 10, 12 -grid = UniformGrid(arch; - origin=(-lx/2, -lz/2), - extent=(lx, lz), - dims=(nx, nz)) +lx, lz = 10.0, 12.0 +nx, nz = 10, 12 +grid = UniformGrid( + arch; + origin = (-lx / 2, -lz / 2), + extent = (lx, lz), + dims = (nx, nz) +) # create field -Temp2D_C = Field(backend, grid, Center()) -Phases2D_C = Field(backend, grid, Center(), Int32) +Temp2D_C = Field(backend, grid, Center()) +Phases2D_C = Field(backend, grid, Center(), Int32) # check -add_box!(Phases2D_C,Temp2D_C,grid, xlim=(0,1.0), zlim=(-2,0), phase=ConstantPhase(2),T=ConstantTemp(800)) -@test extrema(Phases2D_C) == (0,2) +add_box!(Phases2D_C, Temp2D_C, grid, xlim = (0, 1.0), zlim = (-2, 0), phase = ConstantPhase(2), T = ConstantTemp(800)) +@test extrema(Phases2D_C) == (0, 2) -add_sphere!(Phases2D_C,Temp2D_C,grid, cen=(0,0,-1), radius=2.5, phase=ConstantPhase(3), T=ConstantTemp(800)) -@test extrema(Phases2D_C) == (0,3) +add_sphere!(Phases2D_C, Temp2D_C, grid, cen = (0, 0, -1), radius = 2.5, phase = ConstantPhase(3), T = ConstantTemp(800)) +@test 
extrema(Phases2D_C) == (0, 3) diff --git a/test/test_GMT.jl b/test/test_GMT.jl index 9aa536fe..ac1a889a 100644 --- a/test/test_GMT.jl +++ b/test/test_GMT.jl @@ -1,14 +1,14 @@ using Test using GeophysicalModelGenerator, GMT -Topo = import_topo(lat=[30,31], lon=[50, 51] ) +Topo = import_topo(lat = [30, 31], lon = [50, 51]) @test sum(Topo.depth.val) ≈ 2777.5705 -Topo = import_topo([50,51, 30,31]); +Topo = import_topo([50, 51, 30, 31]); @test sum(Topo.depth.val) ≈ 2777.5705 -test_fwd = import_GeoTIFF("test_files/length_fwd.tif", fieldname=:forward) +test_fwd = import_GeoTIFF("test_files/length_fwd.tif", fieldname = :forward) @test maximum(test_fwd.fields.forward) ≈ 33.17775km -test2 = import_GeoTIFF("test_files/UTM2GTIF.TIF") -@test test2.fields.layer1[20,20] == 105.0 +test2 = import_GeoTIFF("test_files/UTM2GTIF.TIF") +@test test2.fields.layer1[20, 20] == 105.0 diff --git a/test/test_Gmsh.jl b/test/test_Gmsh.jl index 0abe9092..cd3e7424 100644 --- a/test/test_Gmsh.jl +++ b/test/test_Gmsh.jl @@ -3,7 +3,7 @@ using GeophysicalModelGenerator, GridapGmsh # Read a Gmsh file -fname="test_files/subduction_ptatin.msh" +fname = "test_files/subduction_ptatin.msh" fe_data, tag_names = import_Gmsh(fname) @test sum(fe_data.cellfields.regions) == 830 @@ -12,8 +12,8 @@ data_fe = swap_yz_dims(fe_data) # Define a CartData set with the same dimensions as the Gmsh file bbox = extrema(data_fe); -nx,ny,nz = 100,50,80 -data_cart = CartData( xyz_grid(range(bbox[1]...,length=nx),range(bbox[2]...,length=ny),range(bbox[3]...,length=nz) )) +nx, ny, nz = 100, 50, 80 +data_cart = CartData(xyz_grid(range(bbox[1]..., length = nx), range(bbox[2]..., length = ny), range(bbox[3]..., length = nz))) data_cart1 = project_FEData_CartData(data_cart, data_fe) @test extrema(data_cart1.fields.regions) == (2, 11) diff --git a/test/test_IO.jl b/test/test_IO.jl index 5ce7dfe0..c5e4cd01 100644 --- a/test/test_IO.jl +++ b/test/test_IO.jl @@ -3,23 +3,22 @@ using Test pkg_dir = pkgdir(GeophysicalModelGenerator) # test saving to file -Lon3D,Lat3D,Depth3D = lonlatdepth_grid(1.0:3:10.0, 11.0:4:20.0, (-20:5:-10)*km); -Data_set = GeophysicalModelGenerator.GeoData(Lon3D,Lat3D,Depth3D,(DataFieldName=Depth3D,)) -@test save_GMG(joinpath(pkg_dir,"test"),Data_set) == nothing +Lon3D, Lat3D, Depth3D = lonlatdepth_grid(1.0:3:10.0, 11.0:4:20.0, (-20:5:-10) * km); +Data_set = GeophysicalModelGenerator.GeoData(Lon3D, Lat3D, Depth3D, (DataFieldName = Depth3D,)) +@test save_GMG(joinpath(pkg_dir, "test"), Data_set) == nothing # loading from local file -data_local = load_GMG(joinpath(pkg_dir,"test")) -@test data_local.depth.val[20]==-15.0 +data_local = load_GMG(joinpath(pkg_dir, "test")) +@test data_local.depth.val[20] == -15.0 # loading from local file -url = "https://seafile.rlp.net/f/10f867e410bb4d95b3fe/?dl=1" +url = "https://seafile.rlp.net/f/10f867e410bb4d95b3fe/?dl=1" data_remote = load_GMG(url) @test data_remote.fields.MohoDepth[20] ≈ -17.99km -# loading remote data -url = "https://seafile.rlp.net/f/10f867e410bb4d95b3fe/?dl=1" +# loading remote data +url = "https://seafile.rlp.net/f/10f867e410bb4d95b3fe/?dl=1" data_remote = download_data(url, "temp1.dat") -@test data_remote[end-8:end] == "temp1.dat" - +@test data_remote[(end - 8):end] == "temp1.dat" diff --git a/test/test_ProfileProcessing.jl b/test/test_ProfileProcessing.jl index 596b2592..605d887f 100644 --- a/test/test_ProfileProcessing.jl +++ b/test/test_ProfileProcessing.jl @@ -2,43 +2,43 @@ using Test using GeophysicalModelGenerator pkg_dir = pkgdir(GeophysicalModelGenerator) 
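+# change into the package's test/ directory so that the relative "test_files/..." paths used below resolve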
-cd(joinpath(pkg_dir,"test")) +cd(joinpath(pkg_dir, "test")) # Test profile processing dataset routines -data_Surf = GMG_Dataset("Mrozek_Moho_Grid_EU","Surface","https://seafile.rlp.net/f/483d9c7c808a4087ba9e/?dl=1", true) +data_Surf = GMG_Dataset("Mrozek_Moho_Grid_EU", "Surface", "https://seafile.rlp.net/f/483d9c7c808a4087ba9e/?dl=1", true) @test data_Surf.DirName == "https://seafile.rlp.net/f/483d9c7c808a4087ba9e/?dl=1" @test data_Surf.Type == "Surface" @test data_Surf.active == true @test data_Surf.Name == "Mrozek_Moho_Grid_EU" # Specify a few more profiles -data_EQ = GMG_Dataset("AlpArraySeis","Point","https://seafile.rlp.net/f/87d565882eda40689666/?dl=1", true) -data_SS = GMG_Dataset("Handy_etal_SE_Profile1","Screenshot","https://seafile.rlp.net/f/5ffe580e765e4bd1bafe/?dl=1", true) +data_EQ = GMG_Dataset("AlpArraySeis", "Point", "https://seafile.rlp.net/f/87d565882eda40689666/?dl=1", true) +data_SS = GMG_Dataset("Handy_etal_SE_Profile1", "Screenshot", "https://seafile.rlp.net/f/5ffe580e765e4bd1bafe/?dl=1", true) # Note: the volumetric datasets are chosen as they are smaller in size (less download) -data_Vol1 = GMG_Dataset("Hua2017","Volume","https://seafile.rlp.net/f/1fb68b74e5d742d39e62/?dl=1", true) -data_Vol2 = GMG_Dataset("Plomerova2022","Volume","https://seafile.rlp.net/f/abccb8d3302b4ef5af17/?dl=1", true) +data_Vol1 = GMG_Dataset("Hua2017", "Volume", "https://seafile.rlp.net/f/1fb68b74e5d742d39e62/?dl=1", true) +data_Vol2 = GMG_Dataset("Plomerova2022", "Volume", "https://seafile.rlp.net/f/abccb8d3302b4ef5af17/?dl=1", true) #data_Vol1 = GMG_Dataset("Paffrath2021","Volume","https://seafile.rlp.net/f/5c8c851af6764b5db20d/?dl=1", true) #data_Vol2 = GMG_Dataset("Zhao2016","Volume","https://seafile.rlp.net/f/e81a6d075f6746609973/?dl=1", true) # Now load these datasets into NamedTuples -SurfData = load_GMG(data_Surf) -PointData = load_GMG(data_EQ) -ScreenshotData = load_GMG(data_SS) -VolData = load_GMG(data_Vol1) -VolData = merge(VolData, load_GMG(data_Vol2)) +SurfData = load_GMG(data_Surf) +PointData = load_GMG(data_EQ) +ScreenshotData = load_GMG(data_SS) +VolData = load_GMG(data_Vol1) +VolData = merge(VolData, load_GMG(data_Vol2)) # Combine all Datasets into one file -Datasets = [data_Vol1,data_Vol2, data_Surf, data_EQ, data_SS] +Datasets = [data_Vol1, data_Vol2, data_Surf, data_EQ, data_SS] # Some tests with the loaded datasets -@test SurfData.Mrozek_Moho_Grid_EU.fields.MohoDepth[100,100] ≈ -58.6889km +@test SurfData.Mrozek_Moho_Grid_EU.fields.MohoDepth[100, 100] ≈ -58.6889km @test keys(VolData) == (:Hua2017, :Plomerova2022) # read datasets from file Datasets_temp = load_dataset_file("test_files/AlpineData.txt") -@test Datasets_temp[2].DirName == GMG_Dataset("INGV","Point","./Seismicity/CLASS/class_seis_alps.jld2", true).DirName +@test Datasets_temp[2].DirName == GMG_Dataset("INGV", "Point", "./Seismicity/CLASS/class_seis_alps.jld2", true).DirName # Load data of all Datasets & split them in type of data Data = load_GMG(Datasets) @@ -48,35 +48,35 @@ Data = load_GMG(Datasets) VolData_combined1 = combine_vol_data(Data.Volume) @test keys(VolData_combined1.fields) == (:Hua2017_Vp, :Hua2017_dVp_perc, :Plomerova2022_Vp, :Plomerova2022_dVp) -VolData_combined2 = combine_vol_data(Data.Volume, dims=(50,51,52)) +VolData_combined2 = combine_vol_data(Data.Volume, dims = (50, 51, 52)) @test VolData_combined2.fields.Hua2017_Vp[1000] ≈ 10.6904 -VolData_combined3 = combine_vol_data(Data.Volume, lon=(1,22), lat=(40,52), dims=(50,51,52)) +VolData_combined3 = combine_vol_data(Data.Volume, lon = (1, 22), 
lat = (40, 52), dims = (50, 51, 52)) @test isnan(VolData_combined3.fields.Hua2017_Vp[1000]) # Define horizontal & vertical profiles -prof1 = ProfileData(start_lonlat=(5,45), end_lonlat=(15,49)) +prof1 = ProfileData(start_lonlat = (5, 45), end_lonlat = (15, 49)) prof2 = ProfileData(depth = -100) -prof3 = ProfileData(start_lonlat=(5,45), end_lonlat=(5,49)) +prof3 = ProfileData(start_lonlat = (5, 45), end_lonlat = (5, 49)) prof4 = ProfileData(depth = -20) # test internal routines to intersect profile with volumetric data: GeophysicalModelGenerator.create_profile_volume!(prof1, VolData_combined1) -@test prof1.VolData.fields.Hua2017_Vp[30,40] ≈ 9.141520976523731 +@test prof1.VolData.fields.Hua2017_Vp[30, 40] ≈ 9.141520976523731 GeophysicalModelGenerator.create_profile_volume!(prof2, VolData_combined1) -@test prof2.VolData.fields.Hua2017_Vp[30,40] ≈ 8.177263544536272 +@test prof2.VolData.fields.Hua2017_Vp[30, 40] ≈ 8.177263544536272 -GeophysicalModelGenerator.create_profile_volume!(prof1, VolData_combined1, Depth_extent=(-300, -100)) +GeophysicalModelGenerator.create_profile_volume!(prof1, VolData_combined1, Depth_extent = (-300, -100)) @test extrema(prof1.VolData.depth.val) == (-300.0, -100.0) # Intersect surface data: -GeophysicalModelGenerator.create_profile_surface!(prof1,Data.Surface) +GeophysicalModelGenerator.create_profile_surface!(prof1, Data.Surface) @test prof1.SurfData[1].fields.MohoDepth[80] ≈ -37.58791461075397km # ditto with EQ data: -GeophysicalModelGenerator.create_profile_point!(prof1,Data.Point, section_width=5km) -GeophysicalModelGenerator.create_profile_point!(prof4,Data.Point, section_width=10km) +GeophysicalModelGenerator.create_profile_point!(prof1, Data.Point, section_width = 5km) +GeophysicalModelGenerator.create_profile_point!(prof4, Data.Point, section_width = 10km) @test length(prof1.PointData[1].lon) == 13 @test length(prof4.PointData[1].lon) == 445 @@ -112,15 +112,15 @@ extract_ProfileData!(prof4, nothing, NamedTuple(), Data.Point) # Read profiles from file profile_list = read_picked_profiles("test_files/PickedProfiles.txt") -@test profile_list[5].start_lonlat == ProfileData(start_lonlat=(9.40627872242647, 45.5128223429144), end_lonlat=(7.85480813419117, 47.8635353553922)).start_lonlat +@test profile_list[5].start_lonlat == ProfileData(start_lonlat = (9.40627872242647, 45.5128223429144), end_lonlat = (7.85480813419117, 47.8635353553922)).start_lonlat # Try the convenience function -DepthVol=nothing -DimsVolCross=(100,100) -Depth_extent=nothing -DimsSurfCross=(100,) -section_width=50km +DepthVol = nothing +DimsVolCross = (100, 100) +Depth_extent = nothing +DimsSurfCross = (100,) +section_width = 50km -profile_backwards_compat = extract_ProfileData("test_files/PickedProfiles.txt",1,"test_files/AlpineData_remote.txt",DimsVolCross=DimsVolCross,DepthVol=Depth_extent,DimsSurfCross=DimsSurfCross,WidthPointProfile=section_width) +profile_backwards_compat = extract_ProfileData("test_files/PickedProfiles.txt", 1, "test_files/AlpineData_remote.txt", DimsVolCross = DimsVolCross, DepthVol = Depth_extent, DimsSurfCross = DimsSurfCross, WidthPointProfile = section_width) @test length(profile_backwards_compat.PointData[1].lon) == 440 diff --git a/test/test_WaterFlow.jl b/test/test_WaterFlow.jl index eed5703d..97d570c2 100644 --- a/test/test_WaterFlow.jl +++ b/test/test_WaterFlow.jl @@ -2,10 +2,10 @@ using Test, GMT # Download some topographic data -Topo = import_topo([6.5,7.3,50.2,50.6], file="@earth_relief_03s"); +Topo = import_topo([6.5, 7.3, 50.2, 50.6], file = 
"@earth_relief_03s"); # Flow the water through the area: -Topo_water, sinks, pits, bnds = waterflows(Topo) +Topo_water, sinks, pits, bnds = waterflows(Topo) @test maximum(Topo_water.fields.area) ≈ 9.309204547276944e8 @test sum(Topo_water.fields.c) == 834501044 @@ -13,8 +13,7 @@ Topo_water, sinks, pits, bnds = waterflows(Topo) @test sum(Topo_water.fields.dir) == 2412566 # With rain in m3/s per cell -rainfall = ones(size(Topo.lon.val[:,:,1]))*1e-3 # 2D array with rainfall per cell area -Topo_water1, sinks, pits, bnds = waterflows(Topo, rainfall=rainfall) +rainfall = ones(size(Topo.lon.val[:, :, 1])) * 1.0e-3 # 2D array with rainfall per cell area +Topo_water1, sinks, pits, bnds = waterflows(Topo, rainfall = rainfall) @test maximum(Topo_water1.fields.area) ≈ 169.79800000000208 - diff --git a/test/test_create_movie.jl b/test/test_create_movie.jl index 3dc6e275..d13ef197 100644 --- a/test/test_create_movie.jl +++ b/test/test_create_movie.jl @@ -1,21 +1,19 @@ using Test dir = "test_files" -outname1 = movie_from_images(dir=dir) -@test outname1=="test_animation.mp4" +outname1 = movie_from_images(dir = dir) +@test outname1 == "test_animation.mp4" -outname2 = movie_from_images(dir=dir, copy_to_current_dir=false) -@test outname2=="test_animation.mp4" +outname2 = movie_from_images(dir = dir, copy_to_current_dir = false) +@test outname2 == "test_animation.mp4" -outname3 = movie_from_images(dir=dir, copy_to_current_dir=false, framerate=20, outfile="test_anim2") -@test outname3=="test_anim2.mp4" +outname3 = movie_from_images(dir = dir, copy_to_current_dir = false, framerate = 20, outfile = "test_anim2") +@test outname3 == "test_anim2.mp4" -outname4 = movie_from_images(dir=dir, copy_to_current_dir=false, framerate=20, outfile="test_anim3", type=:mov_hires) -@test outname4=="test_anim3.mov" +outname4 = movie_from_images(dir = dir, copy_to_current_dir = false, framerate = 20, outfile = "test_anim3", type = :mov_hires) +@test outname4 == "test_anim3.mov" rm(outname1) -rm(joinpath(dir,outname2)) -rm(joinpath(dir,outname3)) -rm(joinpath(dir,outname4)) - - +rm(joinpath(dir, outname2)) +rm(joinpath(dir, outname3)) +rm(joinpath(dir, outname4)) diff --git a/test/test_data_import.jl b/test/test_data_import.jl index 60871224..07f27f76 100644 --- a/test/test_data_import.jl +++ b/test/test_data_import.jl @@ -29,52 +29,52 @@ using GeophysicalModelGenerator # test loading images (profiles & mapviews) # Extract & save profile in GeoData format -filename = "test.png"; # fake png -Corner_LowerLeft = (18.0, 51.0, -590.0) -Corner_UpperRight = (9.0, 42.0, 0.0) -data_Image = screenshot_to_GeoData(filename,Corner_LowerLeft, Corner_UpperRight) +filename = "test.png"; # fake png +Corner_LowerLeft = (18.0, 51.0, -590.0) +Corner_UpperRight = (9.0, 42.0, 0.0) +data_Image = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight) @test data_Image.lon[1000] ≈ 17.592964824120603 @test data_Image.lat[1000] ≈ 50.59296482412061 -@test Value(data_Image.depth[1000])==-590km -@test write_paraview(data_Image, "Profile_1")==nothing +@test Value(data_Image.depth[1000]) == -590km +@test write_paraview(data_Image, "Profile_1") == nothing # test if we use a different name for the color dataset -data_Image_newfieldname = screenshot_to_GeoData(filename,Corner_LowerLeft, Corner_UpperRight, fieldname=:fake) +data_Image_newfieldname = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight, fieldname = :fake) @test keys(data_Image_newfieldname.fields)[1] == :fake # Test in CartData -data_Image = 
screenshot_to_GeoData(filename,Corner_LowerLeft, Corner_UpperRight, Cartesian=true) +data_Image = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight, Cartesian = true) @test Value(data_Image.x[22]) == 18.0km @test Value(data_Image.y[22]) == 51.0km @test Value(data_Image.z[22]) ≈ -125.15151515151516km # Test in UTM zone [note that depth should be in m] -data_Image = screenshot_to_GeoData(filename,Corner_LowerLeft, Corner_UpperRight, UTM=true, UTMzone=33, isnorth=true) +data_Image = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight, UTM = true, UTMzone = 33, isnorth = true) @test data_Image.EW.val[22] == 18.0 @test data_Image.NS.val[22] == 51.0 @test Value(data_Image.depth[22]) ≈ -125.15151515151516m # Mapview (distorted) in GeoData format -filename = "test.png"; # fake png -Corner_LowerLeft = (2.0, 40.0, -15.0) -Corner_UpperRight = (22.0, 51.0, -15.0) -Corner_LowerRight = (20.0, 40.0, -15.0) -Corner_UpperLeft = (0.0, 51.0, -15.0) -data_Image = screenshot_to_GeoData(filename,Corner_LowerLeft, Corner_UpperRight, Corner_LowerRight=Corner_LowerRight, Corner_UpperLeft=Corner_UpperLeft) -@test data_Image.lon[1000] ≈ 2.814070351758794 -@test data_Image.lat[1000] ≈ 40.00000000000001 -@test Value(data_Image.depth[1000])==-15km +filename = "test.png"; # fake png +Corner_LowerLeft = (2.0, 40.0, -15.0) +Corner_UpperRight = (22.0, 51.0, -15.0) +Corner_LowerRight = (20.0, 40.0, -15.0) +Corner_UpperLeft = (0.0, 51.0, -15.0) +data_Image = screenshot_to_GeoData(filename, Corner_LowerLeft, Corner_UpperRight, Corner_LowerRight = Corner_LowerRight, Corner_UpperLeft = Corner_UpperLeft) +@test data_Image.lon[1000] ≈ 2.814070351758794 +@test data_Image.lat[1000] ≈ 40.00000000000001 +@test Value(data_Image.depth[1000]) == -15km @test write_paraview(data_Image, "MapView_1") == nothing # MapView in CartData -data_Image = screenshot_to_CartData(filename,Corner_LowerLeft, Corner_UpperRight, Corner_LowerRight=Corner_LowerRight, Corner_UpperLeft=Corner_UpperLeft) +data_Image = screenshot_to_CartData(filename, Corner_LowerLeft, Corner_UpperRight, Corner_LowerRight = Corner_LowerRight, Corner_UpperLeft = Corner_UpperLeft) @test Value(data_Image.x[22]) ≈ 0.42424242424242425km @test Value(data_Image.y[22]) ≈ 48.666666666666664km @test Value(data_Image.z[22]) ≈ -15km # MapView in UTMData -data_Image = screenshot_to_UTMData(filename,Corner_LowerLeft, Corner_UpperRight, Corner_LowerRight=Corner_LowerRight, Corner_UpperLeft=Corner_UpperLeft, UTMzone=33, isnorth=true) +data_Image = screenshot_to_UTMData(filename, Corner_LowerLeft, Corner_UpperRight, Corner_LowerRight = Corner_LowerRight, Corner_UpperLeft = Corner_UpperLeft, UTMzone = 33, isnorth = true) @test data_Image.EW.val[22] ≈ 0.42424242424242425 @test data_Image.NS.val[22] ≈ 48.666666666666664 @test Value(data_Image.depth[22]) ≈ -15.0m @@ -85,5 +85,5 @@ data_Image = screenshot_to_UTMData(filename,Corner_LowerLeft, Corner_ # for completeness, the download link is kept here # download_data("http://www.isc.ac.uk/cgi-bin/web-db-run?request=COLLECTED&req_agcy=ISC-EHB&out_format=QuakeML&ctr_lat=&ctr_lon=&radius=&max_dist_units=deg&searchshape=RECT&top_lat=49&bot_lat=37&left_lon=4&right_lon=20&srn=&grn=&start_year=2000&start_month=1&start_day=01&start_time=00%3A00%3A00&end_year=2005&end_month=12&end_day=31&end_time=00%3A00%3A00&min_dep=&max_dep=&min_mag=5.8&max_mag=&req_mag_type=Any&req_mag_agcy=Any&min_def=&max_def=&include_magnitudes=on&include_links=on&include_headers=on&include_comments=on&table_owner=iscehb","ISCTest.xml") Data_ISC = 
getlonlatdepthmag_QuakeML("test_files/ISCTest.xml"); -@test Value(Data_ISC.depth[1])==-13.0km -@test Data_ISC.fields.Magnitude[1]==5.8 \ No newline at end of file +@test Value(Data_ISC.depth[1]) == -13.0km +@test Data_ISC.fields.Magnitude[1] == 5.8 diff --git a/test/test_data_types.jl b/test/test_data_types.jl index 5d5a446b..95dfbaf2 100644 --- a/test/test_data_types.jl +++ b/test/test_data_types.jl @@ -2,158 +2,158 @@ using Test using GeophysicalModelGenerator # Create 1D dataset with lat/lon/depth -Lat = 1.0:10.0; -Lon = 11.0:20.0; -Depth = (-20:-11)*km; -Data = zeros(size(Lon)); -Data_set = GeoData(Lat,Lon,Depth,(FakeData=Data,Data2=Data.+1.)) # create GeoData without attributes -@test Value(Data_set.depth[2])==-19km - -Depth1 = (-20.:-11.)*m; # depth has units of m -Data_set1 = GeoData(Lat,Lon,Depth1,(FakeData=Data,Data2=Data.+1.)) -@test Value(Data_set1.depth[2])==-0.019km -@test Data_set.atts["note"]=="No attributes were given to this dataset" # check whether the default attribute assignment works - -Depth2 = -20.:-11.; # no units -Data_set2 = GeoData(Lat,Lon,Depth2,(FakeData=Data,Data2=Data.+1.)) -@test Value(Data_set2.depth[2])==-19km +Lat = 1.0:10.0; +Lon = 11.0:20.0; +Depth = (-20:-11) * km; +Data = zeros(size(Lon)); +Data_set = GeoData(Lat, Lon, Depth, (FakeData = Data, Data2 = Data .+ 1.0)) # create GeoData without attributes +@test Value(Data_set.depth[2]) == -19km + +Depth1 = (-20.0:-11.0) * m; # depth has units of m +Data_set1 = GeoData(Lat, Lon, Depth1, (FakeData = Data, Data2 = Data .+ 1.0)) +@test Value(Data_set1.depth[2]) == -0.019km +@test Data_set.atts["note"] == "No attributes were given to this dataset" # check whether the default attribute assignment works + +Depth2 = -20.0:-11.0; # no units +Data_set2 = GeoData(Lat, Lon, Depth2, (FakeData = Data, Data2 = Data .+ 1.0)) +@test Value(Data_set2.depth[2]) == -19km # test that it works if we give a Data array, rather than a NamedTuple, as input (we add a default name) -Data_set3 = GeoData(Lat,Lon,Depth,Data) +Data_set3 = GeoData(Lat, Lon, Depth, Data) @test keys(Data_set3.fields)[1] == :DataSet1 # test that it works if we give a Tuple, rather than a NamedTuple, as input (we add a default name) -Data_set4 = GeoData(Lat,Lon,Depth,(Data,)) +Data_set4 = GeoData(Lat, Lon, Depth, (Data,)) @test keys(Data_set4.fields)[1] == :DataSet1 # Throw an error if we supply a Tuple with 2 fields (the user should really supply names in that case) -@test_throws ErrorException GeoData(Lat,Lon,Depth,(Data,Data)) +@test_throws ErrorException GeoData(Lat, Lon, Depth, (Data, Data)) # test assignment of attributes -att_dict = Dict("author"=>"Marcel", "year"=>2021) -Data_set4 = GeoData(Lat,Lon,Depth,(Data,),att_dict) -@test Data_set4.atts["author"]=="Marcel" -@test Data_set4.atts["year"]==2021 +att_dict = Dict("author" => "Marcel", "year" => 2021) +Data_set4 = GeoData(Lat, Lon, Depth, (Data,), att_dict) +@test Data_set4.atts["author"] == "Marcel" +@test Data_set4.atts["year"] == 2021 # check that an error is thrown if a different size input is given for depth -Depth2 = (-100:10:1.0) # no units -@test_throws ErrorException GeoData(Lat,Lon,Depth2,(FakeData=Data,Data2=Data.+1.)) +Depth2 = (-100:10:1.0) # no units +@test_throws ErrorException GeoData(Lat, Lon, Depth2, (FakeData = Data, Data2 = Data .+ 1.0)) # Convert 1D vector to cartesian structure -Data_cart = convert(ParaviewData,Data_set) +Data_cart = convert(ParaviewData, Data_set) @test Data_cart.x[3] ≈ 6189.685604255086 @test Data_cart.y[3] ≈ 324.3876769792181 -@test Data_cart.z[3] ≈ 
1421.35608984477 +@test Data_cart.z[3] ≈ 1421.35608984477 # Create Lon/Lat/Depth and X/Y/Z grids from given numbers or 1D vectors -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(-10:-1)km); -X,Y,Z = xyz_grid(10:20,30:40,(-10:-1)km); -@test size(Lon)==(11, 11, 10) -@test Lat[2,2,2]==31.0 -@test size(X)==(11, 11, 10) -@test Y[2,2,2]==31.0 - -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,-50km); -X,Y,Z = xyz_grid(10:20,30:40,-50km); -@test Lon[2,2] == 11.0 -@test X[2,2] == 11.0 - -Lon,Lat,Depth = lonlatdepth_grid(10,30,(-10:-1)km); # 1D line @ given lon/lat -X,Y,Z = xyz_grid(10,30,(-10:-1)km); -@test size(Lon)==(1,1,10) -@test Lat[2]==30.0 -@test size(X)==(1,1,10) -@test Y[2]==30.0 +Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (-10:-1)km); +X, Y, Z = xyz_grid(10:20, 30:40, (-10:-1)km); +@test size(Lon) == (11, 11, 10) +@test Lat[2, 2, 2] == 31.0 +@test size(X) == (11, 11, 10) +@test Y[2, 2, 2] == 31.0 + +Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, -50km); +X, Y, Z = xyz_grid(10:20, 30:40, -50km); +@test Lon[2, 2] == 11.0 +@test X[2, 2] == 11.0 + +Lon, Lat, Depth = lonlatdepth_grid(10, 30, (-10:-1)km); # 1D line @ given lon/lat +X, Y, Z = xyz_grid(10, 30, (-10:-1)km); +@test size(Lon) == (1, 1, 10) +@test Lat[2] == 30.0 +@test size(X) == (1, 1, 10) +@test Y[2] == 30.0 # throw an error if a 2D array is passed as input -@test_throws ErrorException lonlatdepth_grid(10:20,30:40,[20 30; 40 50]); -@test_throws ErrorException xyz_grid(10:20,30:40,[20 30; 40 50]); +@test_throws ErrorException lonlatdepth_grid(10:20, 30:40, [20 30; 40 50]); +@test_throws ErrorException xyz_grid(10:20, 30:40, [20 30; 40 50]); -# Create 3D arrays & convert them -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(-10:-1)km); # 3D grid -Data = ustrip(Depth); +# Create 3D arrays & convert them +Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (-10:-1)km); # 3D grid +Data = ustrip(Depth); -Data_set1 = GeoData(Lon,Lat,Depth,(FakeData=Data,Data2=Data.+1.)) -@test size(Data_set1.depth)==(11, 11, 10) -@test Value(Data_set1.depth[1,2,3])==-8.0km +Data_set1 = GeoData(Lon, Lat, Depth, (FakeData = Data, Data2 = Data .+ 1.0)) +@test size(Data_set1.depth) == (11, 11, 10) +@test Value(Data_set1.depth[1, 2, 3]) == -8.0km # double-check that giving 3D arrays in the wrong ordering triggers a warning message -Data_set2 = GeoData(Lat,Lon,Depth,(FakeData=Data,Data2=Data.+1.)) +Data_set2 = GeoData(Lat, Lon, Depth, (FakeData = Data, Data2 = Data .+ 1.0)) #@test (test_logger.logs[1].level==Warn && test_logger.logs[1].message=="It appears that the lon array has a wrong ordering") -# Create 2D arrays & convert them -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,-50km); -Data = ustrip(Depth); -Data_set2 = GeoData(Lon,Lat,Depth,(FakeData=Data,Data2=Data.+1.)) -@test Value(Data_set2.depth[2,2])== -50.0km +# Create 2D arrays & convert them +Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, -50km); +Data = ustrip(Depth); +Data_set2 = GeoData(Lon, Lat, Depth, (FakeData = Data, Data2 = Data .+ 1.0)) +@test Value(Data_set2.depth[2, 2]) == -50.0km # Convert the 2D and 3D arrays to their cartesian counterparts -Data_cart1 = convert(ParaviewData,Data_set1) -@test size(Data_cart1.z)==(11, 11, 10) -@test Value(Data_cart1.z[2,2,2]) ≈ 3261.2581739797533 +Data_cart1 = convert(ParaviewData, Data_set1) +@test size(Data_cart1.z) == (11, 11, 10) +@test Value(Data_cart1.z[2, 2, 2]) ≈ 3261.2581739797533 -Data_cart2 = convert(ParaviewData,Data_set2) -@test size(Data_cart2.z)==(11, 11, 1) -@test Data_cart2.z[2,2] ≈ 3240.141612908441 +Data_cart2 = convert(ParaviewData, 
Data_set2) +@test size(Data_cart2.z) == (11, 11, 1) +@test Data_cart2.z[2, 2] ≈ 3240.141612908441 # Test projection points (used for map projections) p1 = ProjectionPoint(); -@test p1.Lat==49.9929 -@test p1.Lon==8.2473 +@test p1.Lat == 49.9929 +@test p1.Lon == 8.2473 @test p1.EW ≈ 446048.5158750616 @test p1.NS ≈ 5.53811274482716e6 -p2 = ProjectionPoint(p1.EW,p1.NS,p1.zone,p1.isnorth) -@test p1.EW-p2.EW + p1.NS-p2.NS + p1.zone-p2.zone == 0.0 +p2 = ProjectionPoint(p1.EW, p1.NS, p1.zone, p1.isnorth) +@test p1.EW - p2.EW + p1.NS - p2.NS + p1.zone - p2.zone == 0.0 # Create UTM Data structure -ew = 422123.0:100:433623.0 -ns = 4.514137e6:100:4.523637e6 -depth = -5400:250:600 -EW,NS,Depth = xyz_grid(ew, ns, depth); -Data = ustrip.(Depth); -Data_set = UTMData(EW,NS,Depth,33, true, (FakeData=Data,Data2=Data.+1.)) - -@test Data_set.EW[3,4,2] ≈ 422323.0 -@test Data_set.NS[3,4,2] ≈ 4.514437e6 -@test Value(Data_set.depth[3,4,2])==-5150m +ew = 422123.0:100:433623.0 +ns = 4.514137e6:100:4.523637e6 +depth = -5400:250:600 +EW, NS, Depth = xyz_grid(ew, ns, depth); +Data = ustrip.(Depth); +Data_set = UTMData(EW, NS, Depth, 33, true, (FakeData = Data, Data2 = Data .+ 1.0)) + +@test Data_set.EW[3, 4, 2] ≈ 422323.0 +@test Data_set.NS[3, 4, 2] ≈ 4.514437e6 +@test Value(Data_set.depth[3, 4, 2]) == -5150m @test Data_set.northern[1] == true @test Data_set.zone[1] == 33 -@test Data_set.atts["note"]=="No attributes were given to this dataset" # check whether the default attribute assignment works +@test Data_set.atts["note"] == "No attributes were given to this dataset" # check whether the default attribute assignment works # convert from UTMData -> GeoData Data_set1 = convert(GeoData, Data_set) -@test Data_set1.lon[20] ≈ 14.099668158564413 -@test Data_set1.lat[20] ≈ 40.77470011887963 +@test Data_set1.lon[20] ≈ 14.099668158564413 +@test Data_set1.lat[20] ≈ 40.77470011887963 @test Value(Data_set1.depth[20]) == -5400m -# Convert from GeoData -> UTMData +# Convert from GeoData -> UTMData Data_set2 = convert(UTMData, Data_set1) -@test sum(abs.(Data_set2.EW.val-Data_set.EW.val)) < 1e-5 +@test sum(abs.(Data_set2.EW.val - Data_set.EW.val)) < 1.0e-5 # Test Projection point for negative values -proj = ProjectionPoint(Lat= -2.8, Lon=36) +proj = ProjectionPoint(Lat = -2.8, Lon = 36) @test proj.zone == 37 @test proj.isnorth == false # Convert from GeoData -> UTMData, but for a fixed zone (used for map projection) -proj = ProjectionPoint(Lat= 40.77470011887963, Lon=14.099668158564413) +proj = ProjectionPoint(Lat = 40.77470011887963, Lon = 14.099668158564413) Data_set3 = convert2UTMzone(Data_set1, proj) -@test Data_set3.EW.val[100] ≈ 432022.99999999994 +@test Data_set3.EW.val[100] ≈ 432022.99999999994 # Create CartData structure -x = 0:2:10 -y = -5:5 -z = -10:2:2 -X,Y,Z = xyz_grid(x, y, z); -Data = Z -Data_setC = CartData(X,Y,Z, (FakeData=Data,Data2=Data.+1.)) +x = 0:2:10 +y = -5:5 +z = -10:2:2 +X, Y, Z = xyz_grid(x, y, z); +Data = Z +Data_setC = CartData(X, Y, Z, (FakeData = Data, Data2 = Data .+ 1.0)) @test sum(abs.(Value(Data_setC.x))) ≈ 2310.0km -CharDim=GEO_units() +CharDim = GEO_units() # Convert from CartData -> UTMData @@ -168,14 +168,14 @@ Data_set5 = convert2CartData(Data_set, proj) # Convert result back to UTM (convert LaMEM results back to UTM & afterwards to GeoData) Data_set6 = convert2UTMzone(Data_set5, proj) -@test sum(Data_set.EW.val-Data_set6.EW.val) ≈ 0.0 -@test sum(Data_set.NS.val-Data_set6.NS.val) ≈ 0.0 +@test sum(Data_set.EW.val - Data_set6.EW.val) ≈ 0.0 +@test sum(Data_set.NS.val - Data_set6.NS.val) ≈ 0.0 # 
Create Q1 data set -q1_data = Q1Data(xyz_grid(1:10,1:10,1:8)) -@test size(q1_data.x)==(10,10,8) -@test size(q1_data)==(9,9,7) +q1_data = Q1Data(xyz_grid(1:10, 1:10, 1:8)) +@test size(q1_data.x) == (10, 10, 8) +@test size(q1_data) == (9, 9, 7) # Create FE object from it: fe_data = convert2FEData(q1_data) diff --git a/test/test_event_counts.jl b/test/test_event_counts.jl index 4d3ff0a6..fde87bf4 100644 --- a/test/test_event_counts.jl +++ b/test/test_event_counts.jl @@ -1,54 +1,54 @@ using Test # Create a CartGrid -Grid_cart = CartData(xyz_grid(-20:20,-20:.1:20,-30:30)) +Grid_cart = CartData(xyz_grid(-20:20, -20:0.1:20, -30:30)) -Lon,Lat,Depth = lonlatdepth_grid(-20:20,-20:.1:20,-30:30); -Grid_geo = GeoData(Lon,Lat,Depth,(;Depth)) +Lon, Lat, Depth = lonlatdepth_grid(-20:20, -20:0.1:20, -30:30); +Grid_geo = GeoData(Lon, Lat, Depth, (; Depth)) # create 2D GeoData struct -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,0); -CM = zeros(size(Depth)); CM[1:5,1:5] .= 1.0 -Data_set2D = GeoData(Lon,Lat,Depth,(Count=CM,)) +Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, 0); +CM = zeros(size(Depth)); CM[1:5, 1:5] .= 1.0 +Data_set2D = GeoData(Lon, Lat, Depth, (Count = CM,)) using StableRNGs rng = StableRNG(123) # Create some random point data -pt = rand(rng, 10_000,3) .- 0.5 -pt[:,1] .*= 20 -pt[:,2] .*= 20 -pt[:,3] .*= 30 +pt = rand(rng, 10_000, 3) .- 0.5 +pt[:, 1] .*= 20 +pt[:, 2] .*= 20 +pt[:, 3] .*= 30 -EQ_cart = CartData(pt[:,1],pt[:,2], pt[:,3], (z=pt[:,3],)) -EQ_geo = GeoData(pt[:,1],pt[:,2], pt[:,3], (z=pt[:,3],)) +EQ_cart = CartData(pt[:, 1], pt[:, 2], pt[:, 3], (z = pt[:, 3],)) +EQ_geo = GeoData(pt[:, 1], pt[:, 2], pt[:, 3], (z = pt[:, 3],)) -R = (sum(pt.^2,dims=2)).^(1/3) -ind = findall(R.< 5); +R = (sum(pt .^ 2, dims = 2)) .^ (1 / 3) +ind = findall(R .< 5); # test the basic routine -counts = point_to_nearest_grid(pt[:,1],pt[:,2],pt[:,3], NumValue(Grid_cart.x),NumValue(Grid_cart.y), NumValue(Grid_cart.z); radius_factor=2) +counts = point_to_nearest_grid(pt[:, 1], pt[:, 2], pt[:, 3], NumValue(Grid_cart.x), NumValue(Grid_cart.y), NumValue(Grid_cart.z); radius_factor = 2) @test extrema(counts) == (0, 85) # Test if the grid is on a CartData grid -Grid_Count = point_to_nearest_grid(pt[:,1],pt[:,2],pt[:,3], Grid_cart; radius_factor=2) +Grid_Count = point_to_nearest_grid(pt[:, 1], pt[:, 2], pt[:, 3], Grid_cart; radius_factor = 2) @test extrema(Grid_Count.fields.Count) == (0, 85) # Test in case the EQ data is also specified as CartData -Grid_Count = point_to_nearest_grid(EQ_cart, Grid_cart; radius_factor=2) +Grid_Count = point_to_nearest_grid(EQ_cart, Grid_cart; radius_factor = 2) @test extrema(Grid_Count.fields.Count) == (0, 85) # Test if the grid is on a GeoData grid -Grid_Count = point_to_nearest_grid(pt[:,1],pt[:,2],pt[:,3], Grid_geo; radius_factor=2) +Grid_Count = point_to_nearest_grid(pt[:, 1], pt[:, 2], pt[:, 3], Grid_geo; radius_factor = 2) @test extrema(Grid_Count.fields.Count) == (0, 85) # Test in case the EQ data is also specified as GeoData -Grid_Count = point_to_nearest_grid(EQ_geo, Grid_geo; radius_factor=2) +Grid_Count = point_to_nearest_grid(EQ_geo, Grid_geo; radius_factor = 2) @test extrema(Grid_Count.fields.Count) == (0, 85) # Test countmap -Data_countMap = countmap(Data_set2D,"Count",5,5) -@test Data_countMap.fields.countmap[1,1] == 1.0 -@test Data_countMap.fields.countmap[2,2] == 0.4444444444444444 -@test Data_countMap.fields.countmap[4,4] == 0.0 +Data_countMap = countmap(Data_set2D, "Count", 5, 5) +@test Data_countMap.fields.countmap[1, 1] == 1.0 +@test 
Data_countMap.fields.countmap[2, 2] == 0.4444444444444444 +@test Data_countMap.fields.countmap[4, 4] == 0.0 diff --git a/test/test_lamem.jl b/test/test_lamem.jl index e12e8466..eb5fd94b 100644 --- a/test/test_lamem.jl +++ b/test/test_lamem.jl @@ -2,13 +2,13 @@ using Test, GeophysicalModelGenerator # Load LaMEM input file with grid refinement: -Grid = read_LaMEM_inputfile("test_files/non-uniform_grid.dat") -@test Grid.X[2358] ≈ 1.833333333333335 -@test Grid.Y[7741] ≈ -0.450000000000000 +Grid = read_LaMEM_inputfile("test_files/non-uniform_grid.dat") +@test Grid.X[2358] ≈ 1.833333333333335 +@test Grid.Y[7741] ≈ -0.45 @test Grid.Z[5195] ≈ -8.897114711471147 # Non-uniform grid in z-direction (taken from LaMEM test suite) -Grid = read_LaMEM_inputfile("test_files/Subduction_VEP.dat") +Grid = read_LaMEM_inputfile("test_files/Subduction_VEP.dat") @test maximum(diff(Grid.z_vec)) ≈ 2.626262626262701 @test minimum(diff(Grid.z_vec)) ≈ 1.3333333333333321 @test maximum(diff(Grid.x_vec)) ≈ 10.4166666666667 @@ -16,100 +16,100 @@ Grid = read_LaMEM_inputfile("test_files/Subduction_VEP.dat") # Add command-line arguments args = "-coord_z -660,-300,5,25 -nel_z 36,90,5 -nel_x 32" -coord_z = GeophysicalModelGenerator.ParseValue_LaMEM_InputFile("test_files/Subduction_VEP.dat","coord_z",Float64, args=args) -nel_z = GeophysicalModelGenerator.ParseValue_LaMEM_InputFile("test_files/Subduction_VEP.dat","nel_z",Int64, args=args) -@test coord_z ≈ [-660.0, -300.0, 5.0, 25.0] -@test nel_z == [36, 90, 5] +coord_z = GeophysicalModelGenerator.ParseValue_LaMEM_InputFile("test_files/Subduction_VEP.dat", "coord_z", Float64, args = args) +nel_z = GeophysicalModelGenerator.ParseValue_LaMEM_InputFile("test_files/Subduction_VEP.dat", "nel_z", Int64, args = args) +@test coord_z ≈ [-660.0, -300.0, 5.0, 25.0] +@test nel_z == [36, 90, 5] -Grid = read_LaMEM_inputfile("test_files/Subduction_VEP.dat", args=args) +Grid = read_LaMEM_inputfile("test_files/Subduction_VEP.dat", args = args) @test Grid.nel_z == 131 # Load LaMEM input file: -Grid = read_LaMEM_inputfile("test_files/SaltModels.dat") +Grid = read_LaMEM_inputfile("test_files/SaltModels.dat") @test Grid.X[10] ≈ -2.40625 # Transfer into ParaviewData struct: -Phases = zeros(Int32, size(Grid.X)); -Temp = zeros(Float64, size(Grid.X)); -Model3D = CartData(Grid, (Phases=Grid.Z,)); -@test Value(Model3D.y[100])==-1.9375km +Phases = zeros(Int32, size(Grid.X)); +Temp = zeros(Float64, size(Grid.X)); +Model3D = CartData(Grid, (Phases = Grid.Z,)); +@test Value(Model3D.y[100]) == -1.9375km -# Read Partitioning file: +# Read Partitioning file: PartitioningFile = "test_files/ProcessorPartitioning_4cpu_1.2.2.bin" -Nprocx,Nprocy,Nprocz, xc,yc,zc, nNodeX,nNodeY,nNodeZ = get_processor_partitioning(PartitioningFile) -@test Nprocz==2 -@test yc[2]==0.0 +Nprocx, Nprocy, Nprocz, xc, yc, zc, nNodeX, nNodeY, nNodeZ = get_processor_partitioning(PartitioningFile) +@test Nprocz == 2 +@test yc[2] == 0.0 # Save serial output -save_LaMEM_markers_parallel(Model3D, verbose=false) +save_LaMEM_markers_parallel(Model3D, verbose = false) # Save parallel output -save_LaMEM_markers_parallel(Model3D, PartitioningFile=PartitioningFile, verbose=false) +save_LaMEM_markers_parallel(Model3D, PartitioningFile = PartitioningFile, verbose = false) # Test creating model setups -Grid = read_LaMEM_inputfile("test_files/Subduction2D_FreeSlip_Particles_Linear_DirectSolver.dat") -Phases = zeros(Int32, size(Grid.X)); +Grid = read_LaMEM_inputfile("test_files/Subduction2D_FreeSlip_Particles_Linear_DirectSolver.dat") +Phases = zeros(Int32, 
size(Grid.X)); # constant T -Temp = ones(Float64, size(Grid.X))*1350; -add_box!(Phases,Temp,Grid, xlim=(0,500), zlim=(-50,0), phase=ConstantPhase(3), DipAngle=10, T=ConstantTemp(1000)) +Temp = ones(Float64, size(Grid.X)) * 1350; +add_box!(Phases, Temp, Grid, xlim = (0, 500), zlim = (-50, 0), phase = ConstantPhase(3), DipAngle = 10, T = ConstantTemp(1000)) @test sum(Temp) == 1.1905107e9 # Add a layer above the slab with a different phase but no thermal structure -add_box!(Phases,Temp,Grid, xlim=(0,500), zlim=(0,20), phase=ConstantPhase(1), DipAngle=10, Origin=(0,0,0)) +add_box!(Phases, Temp, Grid, xlim = (0, 500), zlim = (0, 20), phase = ConstantPhase(1), DipAngle = 10, Origin = (0, 0, 0)) @test sum(Temp) == 1.1905107e9 # Linear T -Temp = ones(Float64, size(Grid.X))*1350; -add_box!(Phases,Temp,Grid, xlim=(0,500), zlim=(-50,0), phase=ConstantPhase(3), DipAngle=10, T=LinearTemp(Tbot=1350, Ttop=200)) +Temp = ones(Float64, size(Grid.X)) * 1350; +add_box!(Phases, Temp, Grid, xlim = (0, 500), zlim = (-50, 0), phase = ConstantPhase(3), DipAngle = 10, T = LinearTemp(Tbot = 1350, Ttop = 200)) @test sum(Temp) == 1.1881296265169694e9 # Halfspace cooling T structure -Phases = zeros(Int32, size(Grid.X)); -Temp = ones(Float64, size(Grid.X))*1350; -add_box!(Phases,Temp,Grid, xlim=(0,500), zlim=(-500,0), phase=LithosphericPhases(Layers=[15 15 250], Phases=[1 2 3 0], Tlab=1250), DipAngle=10, T=HalfspaceCoolingTemp(Age=20, Adiabat=0.3)) +Phases = zeros(Int32, size(Grid.X)); +Temp = ones(Float64, size(Grid.X)) * 1350; +add_box!(Phases, Temp, Grid, xlim = (0, 500), zlim = (-500, 0), phase = LithosphericPhases(Layers = [15 15 250], Phases = [1 2 3 0], Tlab = 1250), DipAngle = 10, T = HalfspaceCoolingTemp(Age = 20, Adiabat = 0.3)) @test sum(Temp) == 1.1942982365477426e9 # Mid-oceanic ridge cooling temperature structure -Phases = zeros(Int32, size(Grid.X)); -Temp = ones(Float64, size(Grid.X))*1350; -add_box!(Phases,Temp,Grid, xlim=(0,500), zlim=(-500,-20), phase=LithosphericPhases(Layers=[15 15 250], Phases=[1 2 3 0],Tlab=1250), DipAngle=10, T=SpreadingRateTemp(MORside="right", SpreadingVel=3)) +Phases = zeros(Int32, size(Grid.X)); +Temp = ones(Float64, size(Grid.X)) * 1350; +add_box!(Phases, Temp, Grid, xlim = (0, 500), zlim = (-500, -20), phase = LithosphericPhases(Layers = [15 15 250], Phases = [1 2 3 0], Tlab = 1250), DipAngle = 10, T = SpreadingRateTemp(MORside = "right", SpreadingVel = 3)) @test sum(Temp) == 1.189394358568891e9 -Model3D = CartData(Grid, (Phases=Phases,Temp=Temp)); -write_paraview(Model3D,"LaMEM_ModelSetup") # Save model to paraview +Model3D = CartData(Grid, (Phases = Phases, Temp = Temp)); +write_paraview(Model3D, "LaMEM_ModelSetup") # Save model to paraview # Test writing a LaMEM topography file -X,Y,Z = xyz_grid(-20:20,-10:10,0); -Z = cos.(2*pi.*X./5).*cos.(2*pi.*Y./10) +X, Y, Z = xyz_grid(-20:20, -10:10, 0); +Z = cos.(2 * pi .* X ./ 5) .* cos.(2 * pi .* Y ./ 10) -Topo = CartData(X,Y,Z,(Topography=Z,)) -@test save_LaMEM_topography(Topo, "test_topo.dat")==nothing +Topo = CartData(X, Y, Z, (Topography = Z,)) +@test save_LaMEM_topography(Topo, "test_topo.dat") == nothing rm("test_topo.dat") # Test adding geometric primitives -Grid = read_LaMEM_inputfile("test_files/GeometricPrimitives.dat") -Phases = zeros(Int32,size(Grid.X)); -Temp = zeros(Float64,size(Grid.X)); -add_sphere!(Phases,Temp,Grid, cen=(0,0,-6), radius=2.0, phase=ConstantPhase(1), T=ConstantTemp(800)) -@test Phases[55,55,55] == 1 -@test Phases[56,56,56] == 0 -@test Temp[44,52,21] == 800.0 -@test Temp[44,52,20] == 0.0 - 
-add_ellipsoid!(Phases,Temp,Grid, cen=(-2,-1,-7), axes=(1,2,3), StrikeAngle=90, DipAngle=45, phase=ConstantPhase(2), T=ConstantTemp(600)) -@test Phases[11,37,28] == 2 -@test Phases[10,37,28] == 0 -@test Temp[31,58,18] == 600.0 -@test Temp[31,59,18] == 0.0 - -add_cylinder!(Phases,Temp,Grid, base=(0,0,-5), cap=(3,3,-2), radius=1.5, phase=ConstantPhase(3), T=ConstantTemp(400)) -@test Phases[55,65,75] == 3 -@test Phases[54,65,75] == 0 -@test Temp[55,46,45] == 400.0 -@test Temp[55,45,45] == 800.0 +Grid = read_LaMEM_inputfile("test_files/GeometricPrimitives.dat") +Phases = zeros(Int32, size(Grid.X)); +Temp = zeros(Float64, size(Grid.X)); +add_sphere!(Phases, Temp, Grid, cen = (0, 0, -6), radius = 2.0, phase = ConstantPhase(1), T = ConstantTemp(800)) +@test Phases[55, 55, 55] == 1 +@test Phases[56, 56, 56] == 0 +@test Temp[44, 52, 21] == 800.0 +@test Temp[44, 52, 20] == 0.0 + +add_ellipsoid!(Phases, Temp, Grid, cen = (-2, -1, -7), axes = (1, 2, 3), StrikeAngle = 90, DipAngle = 45, phase = ConstantPhase(2), T = ConstantTemp(600)) +@test Phases[11, 37, 28] == 2 +@test Phases[10, 37, 28] == 0 +@test Temp[31, 58, 18] == 600.0 +@test Temp[31, 59, 18] == 0.0 + +add_cylinder!(Phases, Temp, Grid, base = (0, 0, -5), cap = (3, 3, -2), radius = 1.5, phase = ConstantPhase(3), T = ConstantTemp(400)) +@test Phases[55, 65, 75] == 3 +@test Phases[54, 65, 75] == 0 +@test Temp[55, 46, 45] == 400.0 +@test Temp[55, 45, 45] == 800.0 # for debugging: #data = CartData(Grid, (;Phases, Temp)); @@ -117,8 +117,8 @@ add_cylinder!(Phases,Temp,Grid, base=(0,0,-5), cap=(3,3,-2), radius=1.5, phase=C # test adding generic volcano topography Grid = read_LaMEM_inputfile("test_files/SaltModels.dat"); -Topo = make_volc_topo(Grid, center=[0.0,0.0], height=0.4, radius=1.5, crater=0.5, base=0.1); -@test Topo.fields.Topography[13,13] ≈ 0.279583654km -Topo = make_volc_topo(Grid, center=[0.0,0.0], height=0.8, radius=0.5, crater=0.0, base=0.4, background=Topo.fields.Topography); -@test Topo.fields.Topography[13,13] ≈ 0.279583654km -@test Topo.fields.Topography[16,18] ≈ 0.619722436km +Topo = make_volc_topo(Grid, center = [0.0, 0.0], height = 0.4, radius = 1.5, crater = 0.5, base = 0.1); +@test Topo.fields.Topography[13, 13] ≈ 0.279583654km +Topo = make_volc_topo(Grid, center = [0.0, 0.0], height = 0.8, radius = 0.5, crater = 0.0, base = 0.4, background = Topo.fields.Topography); +@test Topo.fields.Topography[13, 13] ≈ 0.279583654km +@test Topo.fields.Topography[16, 18] ≈ 0.619722436km diff --git a/test/test_nearest_points.jl b/test/test_nearest_points.jl index cdfae58e..38ec9cc2 100644 --- a/test/test_nearest_points.jl +++ b/test/test_nearest_points.jl @@ -1,21 +1,21 @@ using Test, GeophysicalModelGenerator # 3D arrays -c_1D = CartData(xyz_grid(1:4,0,0)) -c_2D = CartData(xyz_grid(1:4,1:5,2)) -c_3D = CartData(xyz_grid(1:4,1:5,2:5)) +c_1D = CartData(xyz_grid(1:4, 0, 0)) +c_2D = CartData(xyz_grid(1:4, 1:5, 2)) +c_3D = CartData(xyz_grid(1:4, 1:5, 2:5)) # points -X_pt, Y_pt, Z_pt = xyz_grid(1:.05:5,0:.07:8,1:.4:5) +X_pt, Y_pt, Z_pt = xyz_grid(1:0.05:5, 0:0.07:8, 1:0.4:5) # 1D test -id_1D = nearest_point_indices(NumValue(c_1D.x),X_pt[:]) +id_1D = nearest_point_indices(NumValue(c_1D.x), X_pt[:]) @test sum(id_1D) == 166336 # 2D test -id_2D = nearest_point_indices(NumValue(c_2D.x),NumValue(c_2D.y), X_pt[:], Y_pt[:]) +id_2D = nearest_point_indices(NumValue(c_2D.x), NumValue(c_2D.y), X_pt[:], Y_pt[:]) @test sum(id_2D) == 992141 # 3D test -id_3D = nearest_point_indices(NumValue(c_2D.x),NumValue(c_2D.y),NumValue(c_2D.z), X_pt[:],Y_pt[:],Z_pt[:]) -@test 
sum(id_3D) == 442556 \ No newline at end of file +id_3D = nearest_point_indices(NumValue(c_2D.x), NumValue(c_2D.y), NumValue(c_2D.z), X_pt[:], Y_pt[:], Z_pt[:]) +@test sum(id_3D) == 442556 diff --git a/test/test_pTatin_IO.jl b/test/test_pTatin_IO.jl index c593b178..0712a945 100644 --- a/test/test_pTatin_IO.jl +++ b/test/test_pTatin_IO.jl @@ -1,9 +1,9 @@ using Test # Q1 data set -q1_data = Q1Data(xyz_grid(1:10,1:10,1:8)) -q1_data = addfield(q1_data, (T=ones(Float64,size(q1_data) .+1 ),)) -q1_data = addfield(q1_data, (region=zeros(Int64,size(q1_data)),), cellfield=true) +q1_data = Q1Data(xyz_grid(1:10, 1:10, 1:8)) +q1_data = addfield(q1_data, (T = ones(Float64, size(q1_data) .+ 1),)) +q1_data = addfield(q1_data, (region = zeros(Int64, size(q1_data)),), cellfield = true) # convert to FEData fe_data = convert2FEData(q1_data) diff --git a/test/test_paraview.jl b/test/test_paraview.jl index 8c771a63..c06083c2 100644 --- a/test/test_paraview.jl +++ b/test/test_paraview.jl @@ -4,59 +4,59 @@ using GeophysicalModelGenerator @testset "Paraview" begin -# Generate a 3D grid -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(-300:25:0)km); -Data = Depth*2; # some data -Data_set = GeoData(Lon,Lat,Depth,(Depthdata=Data,LonData=Lon)) -@test write_paraview(Data_set, "test_depth3D") == nothing - -# Horizontal profile @ 10 km height -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,10km); -Depth[2:4,2:4,1] .= 25km # add some fake topography - -Data_set2 = GeoData(Lon,Lat,Depth,(Topography=Depth,)) -@test write_paraview(Data_set2, "test2") == nothing - -# Cross sections -Lon,Lat,Depth = lonlatdepth_grid(10:20,35,(-300:25:0)km); -Data_set3 = GeoData(Lon,Lat,Depth,(DataSet=Depth,)) -@test write_paraview(Data_set3, "test3") == nothing - - -Lon,Lat,Depth = lonlatdepth_grid(15,30:40,(-300:25:0)km); -Data_set4 = GeoData(Lon,Lat,Depth,(DataSet=Depth,)) -@test write_paraview(Data_set4, "test4") == nothing - -Lon,Lat,Depth = lonlatdepth_grid(15,35,(-300:25:0)km); -Data_set5 = GeoData(Lon,Lat,Depth,(DataSet=Depth,)) -@test write_paraview(Data_set5, "test5") == nothing - - -# Test saving vectors -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,50km); -Ve = zeros(size(Depth)) .+ 1.0; -Vn = zeros(size(Depth)); -Vz = zeros(size(Depth)); -Velocity = (copy(Ve),copy(Vn),copy(Vz)) # tuple with 3 values, which -Data_set_vel = GeoData(Lon,Lat,Depth,(Velocity=Velocity, Veast=Velocity[1]*cm/yr, Vnorth=Velocity[2]*cm/yr, Vup=Velocity[3]*cm/yr)) -@test write_paraview(Data_set_vel, "test_Vel") == nothing - -# Test saving colors -red = zeros(size(Lon)); -green = zeros(size(Lon)); -blue = zeros(size(Lon)); -Data_set_color = GeoData(Lon, Lat, Depth, (Velocity=Velocity,colors=(red,green,blue),color2=(red,green,blue))) -@test write_paraview(Data_set_color, "test_Color") == nothing - -# Manually test the in-place conversion from spherical -> cartesian (done automatically when converting GeoData->ParaviewData ) -Vel_Cart = (copy(Ve),copy(Vn),copy(Vz)) -velocity_spherical_to_cartesian!(Data_set_vel, Vel_Cart); -@test Vel_Cart[2][15] ≈ 0.9743700647852352 -@test Vel_Cart[1][15] ≈ -0.224951054343865 -@test Vel_Cart[3][15] ≈ 0.0 - -# Test saving unstructured point data (EQ, or GPS points) -Data_set_VelPoints = GeoData(Lon[:],Lat[:],ustrip.(Depth[:]),(Velocity=(copy(Ve[:]),copy(Vn[:]),copy(Vz[:])), Veast=Ve[:]*mm/yr, Vnorth=Vn[:]*cm/yr, Vup=Vz[:]*cm/yr)) -@test write_paraview(Data_set_VelPoints, "test_Vel_points", PointsData=true) == nothing - -end \ No newline at end of file + # Generate a 3D grid + Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, 
(-300:25:0)km) + Data = Depth * 2 # some data + Data_set = GeoData(Lon, Lat, Depth, (Depthdata = Data, LonData = Lon)) + @test write_paraview(Data_set, "test_depth3D") == nothing + + # Horizontal profile @ 10 km height + Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, 10km) + Depth[2:4, 2:4, 1] .= 25km # add some fake topography + + Data_set2 = GeoData(Lon, Lat, Depth, (Topography = Depth,)) + @test write_paraview(Data_set2, "test2") == nothing + + # Cross sections + Lon, Lat, Depth = lonlatdepth_grid(10:20, 35, (-300:25:0)km) + Data_set3 = GeoData(Lon, Lat, Depth, (DataSet = Depth,)) + @test write_paraview(Data_set3, "test3") == nothing + + + Lon, Lat, Depth = lonlatdepth_grid(15, 30:40, (-300:25:0)km) + Data_set4 = GeoData(Lon, Lat, Depth, (DataSet = Depth,)) + @test write_paraview(Data_set4, "test4") == nothing + + Lon, Lat, Depth = lonlatdepth_grid(15, 35, (-300:25:0)km) + Data_set5 = GeoData(Lon, Lat, Depth, (DataSet = Depth,)) + @test write_paraview(Data_set5, "test5") == nothing + + + # Test saving vectors + Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, 50km) + Ve = zeros(size(Depth)) .+ 1.0 + Vn = zeros(size(Depth)) + Vz = zeros(size(Depth)) + Velocity = (copy(Ve), copy(Vn), copy(Vz)) # tuple with 3 values, which + Data_set_vel = GeoData(Lon, Lat, Depth, (Velocity = Velocity, Veast = Velocity[1] * cm / yr, Vnorth = Velocity[2] * cm / yr, Vup = Velocity[3] * cm / yr)) + @test write_paraview(Data_set_vel, "test_Vel") == nothing + + # Test saving colors + red = zeros(size(Lon)) + green = zeros(size(Lon)) + blue = zeros(size(Lon)) + Data_set_color = GeoData(Lon, Lat, Depth, (Velocity = Velocity, colors = (red, green, blue), color2 = (red, green, blue))) + @test write_paraview(Data_set_color, "test_Color") == nothing + + # Manually test the in-place conversion from spherical -> cartesian (done automatically when converting GeoData->ParaviewData ) + Vel_Cart = (copy(Ve), copy(Vn), copy(Vz)) + velocity_spherical_to_cartesian!(Data_set_vel, Vel_Cart) + @test Vel_Cart[2][15] ≈ 0.9743700647852352 + @test Vel_Cart[1][15] ≈ -0.224951054343865 + @test Vel_Cart[3][15] ≈ 0.0 + + # Test saving unstructured point data (EQ, or GPS points) + Data_set_VelPoints = GeoData(Lon[:], Lat[:], ustrip.(Depth[:]), (Velocity = (copy(Ve[:]), copy(Vn[:]), copy(Vz[:])), Veast = Ve[:] * mm / yr, Vnorth = Vn[:] * cm / yr, Vup = Vz[:] * cm / yr)) + @test write_paraview(Data_set_VelPoints, "test_Vel_points", PointsData = true) == nothing + +end diff --git a/test/test_paraview_collection.jl b/test/test_paraview_collection.jl index 7b7c2096..645dd28a 100644 --- a/test/test_paraview_collection.jl +++ b/test/test_paraview_collection.jl @@ -2,51 +2,51 @@ using Test using GeophysicalModelGenerator, WriteVTK @testset "Paraview collection" begin -x, y, z = 0:10, 1:6, 2:0.1:3 -times = range(0, 1; step = 1) - -#generate `*.vti` files -for (n, time) ∈ enumerate(times) - vtk_grid("./test_files/test_vti_$n", x, y, z) do vtk - vtk["Pressure"] = rand(length(x), length(y), length(z)) + x, y, z = 0:10, 1:6, 2:0.1:3 + times = range(0, 1; step = 1) + + #generate `*.vti` files + for (n, time) in enumerate(times) + vtk_grid("./test_files/test_vti_$n", x, y, z) do vtk + vtk["Pressure"] = rand(length(x), length(y), length(z)) + end end -end -# Generate a 3D grid -Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(-300:25:0)km); -Data = Depth*2; # some data -Data_set = GeoData(Lon,Lat,Depth,(Depthdata=Data,LonData=Lon)) -write_paraview(Data_set, "./test_files/test_depth3D") - -make_paraview_collection(;dir = "./test_files", pvd_name="test", 
file_extension=".vti") -@test isfile("test.pvd") -@test filesize("test.pvd") == 317 - -make_paraview_collection(;dir = "./test_files", file_extension=".vti") -@test isfile("full_simulation.pvd") -@test filesize("full_simulation.pvd") == 317 - -make_paraview_collection(;dir = "./test_files") -@test isfile("full_simulation.pvd") -#@test filesize("full_simulation.pvd") == 251 - - -files = ["test_files/test_vti_1.vti", "test_files/test_vti_2.vti"] -time = ["1.0", "2.0"] -make_paraview_collection("test2", files, time) -@test isfile("test2.pvd") -@test filesize("test2.pvd") == 317 - -make_paraview_collection(; pvd_name="test3", files=files, time=time) -@test isfile("test3.pvd") -@test filesize("test3.pvd") == 317 - -rm("test.pvd") -rm("full_simulation.pvd") -rm("test_files/test_depth3D.vts") -rm("test_files/test_vti_1.vti") -rm("test_files/test_vti_2.vti") -rm("test2.pvd") -rm("test3.pvd") + # Generate a 3D grid + Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (-300:25:0)km) + Data = Depth * 2 # some data + Data_set = GeoData(Lon, Lat, Depth, (Depthdata = Data, LonData = Lon)) + write_paraview(Data_set, "./test_files/test_depth3D") + + make_paraview_collection(; dir = "./test_files", pvd_name = "test", file_extension = ".vti") + @test isfile("test.pvd") + @test filesize("test.pvd") == 317 + + make_paraview_collection(; dir = "./test_files", file_extension = ".vti") + @test isfile("full_simulation.pvd") + @test filesize("full_simulation.pvd") == 317 + + make_paraview_collection(; dir = "./test_files") + @test isfile("full_simulation.pvd") + #@test filesize("full_simulation.pvd") == 251 + + + files = ["test_files/test_vti_1.vti", "test_files/test_vti_2.vti"] + time = ["1.0", "2.0"] + make_paraview_collection("test2", files, time) + @test isfile("test2.pvd") + @test filesize("test2.pvd") == 317 + + make_paraview_collection(; pvd_name = "test3", files = files, time = time) + @test isfile("test3.pvd") + @test filesize("test3.pvd") == 317 + + rm("test.pvd") + rm("full_simulation.pvd") + rm("test_files/test_depth3D.vts") + rm("test_files/test_vti_1.vti") + rm("test_files/test_vti_2.vti") + rm("test2.pvd") + rm("test3.pvd") end diff --git a/test/test_sea_level.jl b/test/test_sea_level.jl index 4071f707..abc45d9c 100644 --- a/test/test_sea_level.jl +++ b/test/test_sea_level.jl @@ -2,17 +2,17 @@ using Test x = SeaLevel(:Spratt_800ka) -@test length(x) === length(x.elevation) -@test size(x) === size(x.elevation) -@test eachindex(x) === eachindex(x.elevation) -@test axes(x) === axes(x.elevation) +@test length(x) === length(x.elevation) +@test size(x) === size(x.elevation) +@test eachindex(x) === eachindex(x.elevation) +@test axes(x) === axes(x.elevation) @test curve_name(x) === :Spratt_800ka -@test x[1] == x.elevation[1] -@test x[1,1] == (x.elevation[1], x.age[1]) -@test x[1,799] == (x.elevation[1], x.age[799]) -@test x[(1,)] == (x.elevation[1], x.age[1]) -@test x[(799,)] == (x.elevation[799], x.age[799]) +@test x[1] == x.elevation[1] +@test x[1, 1] == (x.elevation[1], x.age[1]) +@test x[1, 799] == (x.elevation[1], x.age[799]) +@test x[(1,)] == (x.elevation[1], x.age[1]) +@test x[(799,)] == (x.elevation[799], x.age[799]) x_rev = SeaLevel(:Spratt_800ka; flip_elevation = true, flip_age = true) @test x.elevation == reverse(x_rev.elevation) -@test x.age == reverse(x_rev.age) \ No newline at end of file +@test x.age == reverse(x_rev.age) diff --git a/test/test_setup_geometry.jl b/test/test_setup_geometry.jl index bd6d267b..c38fec79 100644 --- a/test/test_setup_geometry.jl +++ b/test/test_setup_geometry.jl @@ 
-2,185 +2,194 @@ using Test, GeophysicalModelGenerator, GeoParams # GeoData -Lon3D,Lat3D,Depth3D = lonlatdepth_grid(1.0:1:10.0, 11.0:1:20.0, (-20:1:-10)*km); -Data = zeros(size(Lon3D)); -Temp = ones(Float64, size(Data))*1350; -Phases = zeros(Int32, size(Data)); -Grid = GeoData(Lon3D,Lat3D,Depth3D,(DataFieldName=Data,)) +Lon3D, Lat3D, Depth3D = lonlatdepth_grid(1.0:1:10.0, 11.0:1:20.0, (-20:1:-10) * km); +Data = zeros(size(Lon3D)); +Temp = ones(Float64, size(Data)) * 1350; +Phases = zeros(Int32, size(Data)); +Grid = GeoData(Lon3D, Lat3D, Depth3D, (DataFieldName = Data,)) -add_box!(Phases,Temp,Grid, xlim=(2,4), zlim=(-15,-10), phase=ConstantPhase(3), DipAngle=10, T=LinearTemp(Tbot=1350, Ttop=200)) -@test sum(Temp[1,1,:]) ≈ 14850.0 - -add_ellipsoid!(Phases,Temp,Grid, cen=(4,15,-17), axes=(1,2,3), StrikeAngle=90, DipAngle=45, phase=ConstantPhase(2), T=ConstantTemp(600)) -@test sum(Temp[1,1,:]) ≈ 14850.0 +add_box!(Phases, Temp, Grid, xlim = (2, 4), zlim = (-15, -10), phase = ConstantPhase(3), DipAngle = 10, T = LinearTemp(Tbot = 1350, Ttop = 200)) +@test sum(Temp[1, 1, :]) ≈ 14850.0 +add_ellipsoid!(Phases, Temp, Grid, cen = (4, 15, -17), axes = (1, 2, 3), StrikeAngle = 90, DipAngle = 45, phase = ConstantPhase(2), T = ConstantTemp(600)) +@test sum(Temp[1, 1, :]) ≈ 14850.0 # CartData -X,Y,Z = xyz_grid(1.0:1:10.0, 11.0:1:20.0, -20:1:-10); -Data = zeros(size(X)); -Temp = ones(Float64, size(Data))*1350; -Phases = zeros(Int32, size(Data)); -Grid = CartData(X,Y,Z,(DataFieldName=Data,)) +X, Y, Z = xyz_grid(1.0:1:10.0, 11.0:1:20.0, -20:1:-10); +Data = zeros(size(X)); +Temp = ones(Float64, size(Data)) * 1350; +Phases = zeros(Int32, size(Data)); +Grid = CartData(X, Y, Z, (DataFieldName = Data,)) -add_box!(Phases,Temp,Grid, xlim=(2,4), zlim=(-15,-10), phase=ConstantPhase(3), DipAngle=10, T=LinearTemp(Tbot=1350, Ttop=200)) -@test sum(Temp[1,1,:]) ≈ 14850.0 +add_box!(Phases, Temp, Grid, xlim = (2, 4), zlim = (-15, -10), phase = ConstantPhase(3), DipAngle = 10, T = LinearTemp(Tbot = 1350, Ttop = 200)) +@test sum(Temp[1, 1, :]) ≈ 14850.0 -add_ellipsoid!(Phases,Temp,Grid, cen=(4,15,-17), axes=(1,2,3), StrikeAngle=90, DipAngle=45, phase=ConstantPhase(2), T=ConstantTemp(600)) -@test sum(Temp[1,1,:]) ≈ 14850.0 +add_ellipsoid!(Phases, Temp, Grid, cen = (4, 15, -17), axes = (1, 2, 3), StrikeAngle = 90, DipAngle = 45, phase = ConstantPhase(2), T = ConstantTemp(600)) +@test sum(Temp[1, 1, :]) ≈ 14850.0 # CartGrid -Grid = create_CartGrid(size=(10,20,30),x=(0.,10), y=(0.,10), z=(2.,10)) -Temp = ones(Float64, Grid.N...)*1350; -Phases = zeros(Int32, Grid.N...); +Grid = create_CartGrid(size = (10, 20, 30), x = (0.0, 10), y = (0.0, 10), z = (2.0, 10)) +Temp = ones(Float64, Grid.N...) 
* 1350; +Phases = zeros(Int32, Grid.N...); -add_box!(Phases,Temp,Grid, xlim=(2,4), zlim=(4,8), phase=ConstantPhase(3), DipAngle=10, T=LinearTemp(Tbot=1350, Ttop=200)) +add_box!(Phases, Temp, Grid, xlim = (2, 4), zlim = (4, 8), phase = ConstantPhase(3), DipAngle = 10, T = LinearTemp(Tbot = 1350, Ttop = 200)) @test maximum(Phases) == 3 -add_stripes!(Phases, Grid,stripAxes=(1,1,1),stripeWidth=0.2,stripeSpacing=1,Origin=nothing, StrikeAngle=0, DipAngle=10,phase = ConstantPhase(3),stripePhase = ConstantPhase(4)) +add_stripes!(Phases, Grid, stripAxes = (1, 1, 1), stripeWidth = 0.2, stripeSpacing = 1, Origin = nothing, StrikeAngle = 0, DipAngle = 10, phase = ConstantPhase(3), stripePhase = ConstantPhase(4)) @test maximum(Phases) == 4 # Create a CartData structure from it -Data = CartData(Grid, (T=Temp, Phases=Phases)) +Data = CartData(Grid, (T = Temp, Phases = Phases)) -@test NumValue(Data.x[3,3,2]) ≈ 2.2222222222222223 +@test NumValue(Data.x[3, 3, 2]) ≈ 2.2222222222222223 # Doing the same for vertical cross-sections -Grid2D = create_CartGrid(size=(10,30),x=(0.,10), z=(2.,10)) -Temp2D = ones(Float64, Grid2D.N...)*1350; -Phases2D = zeros(Int32, Grid2D.N...); +Grid2D = create_CartGrid(size = (10, 30), x = (0.0, 10), z = (2.0, 10)) +Temp2D = ones(Float64, Grid2D.N...) * 1350; +Phases2D = zeros(Int32, Grid2D.N...); -Data2D = CartData(Grid2D, (T=Temp2D, Phases=Phases2D)) +Data2D = CartData(Grid2D, (T = Temp2D, Phases = Phases2D)) -@test NumValue(Data.x[3,1,2]) ≈ 2.2222222222222223 +@test NumValue(Data.x[3, 1, 2]) ≈ 2.2222222222222223 # LithosphericPhases -LP = LithosphericPhases(Layers=[5 10 6], Phases=[0 1 2 3], Tlab=nothing); -X,Y,Z = xyz_grid(-5:1:5,-5:1:5,-20:1:5); -Phases = zeros(Int32, size(X)); -Temp = zeros(Int32, size(X)); -Phases = compute_phase(Phases, Temp, X, Y, Z, LP); +LP = LithosphericPhases(Layers = [5 10 6], Phases = [0 1 2 3], Tlab = nothing); +X, Y, Z = xyz_grid(-5:1:5, -5:1:5, -20:1:5); +Phases = zeros(Int32, size(X)); +Temp = zeros(Int32, size(X)); +Phases = compute_phase(Phases, Temp, X, Y, Z, LP); -@test Phases[1,1,end] == 3 -@test Phases[1,1,7] == 1 +@test Phases[1, 1, end] == 3 +@test Phases[1, 1, 7] == 1 -Phases = compute_phase(Phases, Temp, X, Y, Z, LP, Ztop=5); +Phases = compute_phase(Phases, Temp, X, Y, Z, LP, Ztop = 5); -@test Phases[1,1,end-4] == 0 -@test Phases[1,1,5] == 2 +@test Phases[1, 1, end - 4] == 0 +@test Phases[1, 1, 5] == 2 -LP = LithosphericPhases(Layers=[0.5 1.0 1.0], Phases=[0 1 2], Tlab=nothing); -Grid = read_LaMEM_inputfile("test_files/SaltModels.dat"); -Phases = zeros(Int32, size(Grid.X)); -Temp = zeros(Int32, size(Grid.X)); -Phases = compute_phase(Phases, Temp, Grid, LP); +LP = LithosphericPhases(Layers = [0.5 1.0 1.0], Phases = [0 1 2], Tlab = nothing); +Grid = read_LaMEM_inputfile("test_files/SaltModels.dat"); +Phases = zeros(Int32, size(Grid.X)); +Temp = zeros(Int32, size(Grid.X)); +Phases = compute_phase(Phases, Temp, Grid, LP); -@test Phases[1,1,25] == 1 -@test Phases[1,1,73] == 0 +@test Phases[1, 1, 25] == 1 +@test Phases[1, 1, 73] == 0 # Create Grid & nondimensionalize it -CharDim = GEO_units(); -Grid = create_CartGrid(size=(10,20,30),x=(0.0km,10km), y=(0.0km, 10km), z=(-10.0km, 2.0km), CharDim=CharDim) +CharDim = GEO_units(); +Grid = create_CartGrid(size = (10, 20, 30), x = (0.0km, 10km), y = (0.0km, 10km), z = (-10.0km, 2.0km), CharDim = CharDim) @test Grid.Δ[2] ≈ 0.0005263157894736842 # test 1D-explicit thermal solver for AddBox ----------- -nel = 96 -Grid = create_CartGrid(size=(nel,nel,nel),x=(-200.,200.), y=(-200.,200.), z=(-200.,0)) -Temp 
= zeros(Float64, Grid.N...); -Phases = zeros(Int64, Grid.N...); +nel = 96 +Grid = create_CartGrid(size = (nel, nel, nel), x = (-200.0, 200.0), y = (-200.0, 200.0), z = (-200.0, 0)) +Temp = zeros(Float64, Grid.N...); +Phases = zeros(Int64, Grid.N...); # 1) horizontally layer lithosphere; UpperCrust,LowerCrust,Mantle -add_box!(Phases,Temp,Grid, xlim=(-100,100), zlim=(-100,0), Origin=(0.0,0.0,0.0), - phase=LithosphericPhases(Layers=[20 15 65], Phases = [1 2 3], Tlab=nothing), - DipAngle=0.0, T=LithosphericTemp(nz=201)) +add_box!( + Phases, Temp, Grid, xlim = (-100, 100), zlim = (-100, 0), Origin = (0.0, 0.0, 0.0), + phase = LithosphericPhases(Layers = [20 15 65], Phases = [1 2 3], Tlab = nothing), + DipAngle = 0.0, T = LithosphericTemp(nz = 201) +) -@test sum(Temp[Int64(nel/2),Int64(nel/2),:]) ≈ 36131.638045729735 +@test sum(Temp[Int64(nel / 2), Int64(nel / 2), :]) ≈ 36131.638045729735 # 2) inclined lithosphere; UpperCrust,LowerCrust,Mantle -Temp = zeros(Float64, Grid.N...); -Phases = zeros(Int64, Grid.N...); +Temp = zeros(Float64, Grid.N...); +Phases = zeros(Int64, Grid.N...); -add_box!(Phases,Temp,Grid, xlim=(-100,100), zlim=(-100,0), Origin=(0.0,0.0,0.0), - phase=LithosphericPhases(Layers=[20 15 65], Phases = [1 2 3], Tlab=nothing), - DipAngle=30.0, T=LithosphericTemp(nz=201)) +add_box!( + Phases, Temp, Grid, xlim = (-100, 100), zlim = (-100, 0), Origin = (0.0, 0.0, 0.0), + phase = LithosphericPhases(Layers = [20 15 65], Phases = [1 2 3], Tlab = nothing), + DipAngle = 30.0, T = LithosphericTemp(nz = 201) +) -@test sum(Temp[Int64(nel/2),Int64(nel/2),:]) ≈ 41912.18172533137 +@test sum(Temp[Int64(nel / 2), Int64(nel / 2), :]) ≈ 41912.18172533137 # 3) inclined lithosphere with respect to the default origin; UpperCrust,LowerCrust,Mantle -Temp = zeros(Float64, Grid.N...); -Phases = zeros(Int64, Grid.N...); +Temp = zeros(Float64, Grid.N...); +Phases = zeros(Int64, Grid.N...); -add_box!(Phases,Temp,Grid, xlim=(-100,100), zlim=(-100,0), - phase=LithosphericPhases(Layers=[20 15 65], Phases = [1 2 3], Tlab=nothing), - DipAngle=30.0, T=LithosphericTemp(nz=201)) +add_box!( + Phases, Temp, Grid, xlim = (-100, 100), zlim = (-100, 0), + phase = LithosphericPhases(Layers = [20 15 65], Phases = [1 2 3], Tlab = nothing), + DipAngle = 30.0, T = LithosphericTemp(nz = 201) +) -@test sum(Temp[Int64(nel/2),Int64(nel/2),:]) ≈ 41316.11499878003 +@test sum(Temp[Int64(nel / 2), Int64(nel / 2), :]) ≈ 41316.11499878003 # 4) inclined lithosphere with only two layers -Temp = zeros(Float64, Grid.N...); -Phases = zeros(Int64, Grid.N...); - -ρM=3.0e3 # Density [ kg/m^3 ] -CpM=1.0e3 # Specific heat capacity [ J/kg/K ] -kM=2.3 # Thermal conductivity [ W/m/K ] -HM=0.0 # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] -ρUC=2.7e3 # Density [ kg/m^3 ] -CpUC=1.0e3 # Specific heat capacity [ J/kg/K ] -kUC=3.0 # Thermal conductivity [ W/m/K ] -HUC=617.0e-12 # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] +Temp = zeros(Float64, Grid.N...); +Phases = zeros(Int64, Grid.N...); + +ρM = 3.0e3 # Density [ kg/m^3 ] +CpM = 1.0e3 # Specific heat capacity [ J/kg/K ] +kM = 2.3 # Thermal conductivity [ W/m/K ] +HM = 0.0 # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] +ρUC = 2.7e3 # Density [ kg/m^3 ] +CpUC = 1.0e3 # Specific heat capacity [ J/kg/K ] +kUC = 3.0 # Thermal conductivity [ W/m/K ] +HUC = 617.0e-12 # Radiogenic heat source per mass [H] = W/kg; [H] = [Q/rho] rheology = ( - # Name = "UpperCrust", - SetMaterialParams(; - Phase = 1, - Density = ConstantDensity(; ρ=ρUC), - HeatCapacity = 
ConstantHeatCapacity(; Cp=CpUC), - Conductivity = ConstantConductivity(; k=kUC), - RadioactiveHeat = ConstantRadioactiveHeat(; H_r=HUC*ρUC), # [H] = W/m^3 - ), - # Name = "LithosphericMantle", - SetMaterialParams(; - Phase = 2, - Density = ConstantDensity(; ρ=ρM), - HeatCapacity = ConstantHeatCapacity(; Cp=CpM), - Conductivity = ConstantConductivity(; k=kM), - RadioactiveHeat = ConstantRadioactiveHeat(; H_r=HM*ρM), # [H] = W/m^3 - ), - ); - -add_box!(Phases,Temp,Grid, xlim=(-100,100), zlim=(-100,0), - phase=LithosphericPhases(Layers=[20 80], Phases = [1 2], Tlab=nothing), - DipAngle=30.0, T=LithosphericTemp(rheology=rheology,nz=201)) - -@test sum(Temp[Int64(nel/2),Int64(nel/2),:]) ≈ 40513.969831615716 + # Name = "UpperCrust", + SetMaterialParams(; + Phase = 1, + Density = ConstantDensity(; ρ = ρUC), + HeatCapacity = ConstantHeatCapacity(; Cp = CpUC), + Conductivity = ConstantConductivity(; k = kUC), + RadioactiveHeat = ConstantRadioactiveHeat(; H_r = HUC * ρUC), # [H] = W/m^3 + ), + # Name = "LithosphericMantle", + SetMaterialParams(; + Phase = 2, + Density = ConstantDensity(; ρ = ρM), + HeatCapacity = ConstantHeatCapacity(; Cp = CpM), + Conductivity = ConstantConductivity(; k = kM), + RadioactiveHeat = ConstantRadioactiveHeat(; H_r = HM * ρM), # [H] = W/m^3 + ), +); + +add_box!( + Phases, Temp, Grid, xlim = (-100, 100), zlim = (-100, 0), + phase = LithosphericPhases(Layers = [20 80], Phases = [1 2], Tlab = nothing), + DipAngle = 30.0, T = LithosphericTemp(rheology = rheology, nz = 201) +) + +@test sum(Temp[Int64(nel / 2), Int64(nel / 2), :]) ≈ 40513.969831615716 # 5) using flux lower boundary conditions -Temp = zeros(Float64, Grid.N...); -Phases = zeros(Int64, Grid.N...); +Temp = zeros(Float64, Grid.N...); +Phases = zeros(Int64, Grid.N...); -add_box!(Phases,Temp,Grid, xlim=(-100,100), zlim=(-100,0), - phase=LithosphericPhases(Layers=[20 15 65], Phases = [1 2 3], Tlab=nothing), - DipAngle=30.0, T=LithosphericTemp(lbound="flux",nz=201)) +add_box!( + Phases, Temp, Grid, xlim = (-100, 100), zlim = (-100, 0), + phase = LithosphericPhases(Layers = [20 15 65], Phases = [1 2 3], Tlab = nothing), + DipAngle = 30.0, T = LithosphericTemp(lbound = "flux", nz = 201) +) -@test sum(Temp[Int64(nel/2),Int64(nel/2),:]) ≈ 37359.648604722104 +@test sum(Temp[Int64(nel / 2), Int64(nel / 2), :]) ≈ 37359.648604722104 # Test the McKenzie thermal structure # Create CartGrid struct -x = LinRange(0.0,1200.0,64); -y = LinRange(0.0,1200.0,64); -z = LinRange(-660,50,64); -Cart = CartData(xyz_grid(x, y, z)); +x = LinRange(0.0, 1200.0, 64); +y = LinRange(0.0, 1200.0, 64); +z = LinRange(-660, 50, 64); +Cart = CartData(xyz_grid(x, y, z)); # initialize phase and temperature matrix -Phase = ones(Int32,size(Cart)); -Temp = ones(Float64,size(Cart))*1350; +Phase = ones(Int32, size(Cart)); +Temp = ones(Float64, size(Cart)) * 1350; # Create thermal structures -TsHC = HalfspaceCoolingTemp(Tsurface=20.0, Tmantle=1350, Age=120, Adiabat=0.4) +TsHC = HalfspaceCoolingTemp(Tsurface = 20.0, Tmantle = 1350, Age = 120, Adiabat = 0.4) TsMK = McKenzie_subducting_slab(Tsurface = 20.0, Tmantle = 1350.0, v_cm_yr = 4.0, Adiabat = 0.0) @test TsMK.v_cm_yr == 4.0 @@ -189,48 +198,47 @@ TsMK = McKenzie_subducting_slab(Tsurface = 20.0, Tmantle = 1350.0, v_cm_yr = 4.0 # Add a box with a McKenzie thermal structure # horizontal -Temp = ones(Float64,size(Cart))*1350; -add_box!(Phase, Temp, Cart; xlim=(0.0,600.0),ylim=(0.0,600.0), zlim=(-80.0, 0.0), phase = ConstantPhase(5), T=TsMK); -@test sum(Temp) ≈ 3.518172093383281e8 +Temp = ones(Float64, size(Cart)) 
* 1350;
+add_box!(Phase, Temp, Cart; xlim = (0.0, 600.0), ylim = (0.0, 600.0), zlim = (-80.0, 0.0), phase = ConstantPhase(5), T = TsMK);
+@test sum(Temp) ≈ 3.518172093383281e8
# inclined slab
-Temp = ones(Float64,size(Cart))*1350;
-add_box!(Phase, Temp, Cart; xlim=(0.0,600.0),ylim=(0.0,600.0), zlim=(-80.0,0.0),StrikeAngle=0, DipAngle=45, phase = ConstantPhase(5), T=TsMK);
-@test sum(Temp) ≈ 3.5125017626287365e8
-
+Temp = ones(Float64, size(Cart)) * 1350;
+add_box!(Phase, Temp, Cart; xlim = (0.0, 600.0), ylim = (0.0, 600.0), zlim = (-80.0, 0.0), StrikeAngle = 0, DipAngle = 45, phase = ConstantPhase(5), T = TsMK);
+@test sum(Temp) ≈ 3.5125017626287365e8
# horizontal slab, constant T
-T_slab = LinearWeightedTemperature(0,1,600.0,:X,ConstantTemp(1000), ConstantTemp(2000));
-Temp = ones(Float64,size(Cart))*1350;
-add_box!(Phase, Temp, Cart; xlim=(0.0,600.0),ylim=(0.0,600.0), zlim=(-80.0, 0.0), phase = ConstantPhase(5), T=T_slab);
-@test sum(Temp) ≈ 3.549127111111111e8
+T_slab = LinearWeightedTemperature(0, 1, 600.0, :X, ConstantTemp(1000), ConstantTemp(2000));
+Temp = ones(Float64, size(Cart)) * 1350;
+add_box!(Phase, Temp, Cart; xlim = (0.0, 600.0), ylim = (0.0, 600.0), zlim = (-80.0, 0.0), phase = ConstantPhase(5), T = T_slab);
+@test sum(Temp) ≈ 3.549127111111111e8
# horizontal slab, halfspace and McKenzie
-T_slab = LinearWeightedTemperature(crit_dist=600, F1=TsHC, F2=TsMK);
-Temp = ones(Float64,size(Cart))*1350;
-add_box!(Phase, Temp, Cart; xlim=(0.0,600.0),ylim=(0.0,600.0), zlim=(-80.0, 0.0), phase = ConstantPhase(5), T=T_slab);
-@test sum(Temp) ≈ 3.499457641038468e8
+T_slab = LinearWeightedTemperature(crit_dist = 600, F1 = TsHC, F2 = TsMK);
+Temp = ones(Float64, size(Cart)) * 1350;
+add_box!(Phase, Temp, Cart; xlim = (0.0, 600.0), ylim = (0.0, 600.0), zlim = (-80.0, 0.0), phase = ConstantPhase(5), T = T_slab);
+@test sum(Temp) ≈ 3.499457641038468e8
-Data_Final = addfield(Cart,"Temp",Temp)
+Data_Final = addfield(Cart, "Temp", Temp)
# test polygon structure
-x = LinRange(0.0,1200.0,64);
-y = LinRange(0.0,1200.0,64);
-z = LinRange(-660,50,64);
-Cart = CartData(xyz_grid(x, y, z));
+x = LinRange(0.0, 1200.0, 64);
+y = LinRange(0.0, 1200.0, 64);
+z = LinRange(-660, 50, 64);
+Cart = CartData(xyz_grid(x, y, z));
# initialize phase and temperature matrix
-Phase = ones(Int32,(length(x),length(y),length(z)));
-Temp = ones(Float64,(length(x),length(y),length(z)))*1350;
+Phase = ones(Int32, (length(x), length(y), length(z)));
+Temp = ones(Float64, (length(x), length(y), length(z))) * 1350;
-add_box!(Phase, Temp, Cart; xlim=(0.0,600.0),ylim=(0.0,600.0), zlim=(-80.0, 0.0), phase = ConstantPhase(5), T=T=ConstantTemp(120.0));
# add accretionary prism
-add_polygon!(Phase, Temp, Cart; xlim=(500.0, 200.0, 500.0),ylim=(100.0,400.0), zlim=(0.0,0.0,-60.0), phase = ConstantPhase(8), T=LinearTemp(Ttop=20, Tbot=30))
+add_box!(Phase, Temp, Cart; xlim = (0.0, 600.0), ylim = (0.0, 600.0), zlim = (-80.0, 0.0), phase = ConstantPhase(5), T = ConstantTemp(120.0));
# add accretionary prism
+add_polygon!(Phase, Temp, Cart; xlim = (500.0, 200.0, 500.0), ylim = (100.0, 400.0), zlim = (0.0, 0.0, -60.0), phase = ConstantPhase(8), T = LinearTemp(Ttop = 20, Tbot = 30))
@test maximum(Phase) == 8
@test minimum(Temp) == 21.40845070422536
@@ -239,73 +247,73 @@ add_polygon!(Phase, Temp, Cart; xlim=(500.0, 200.0, 500.0),ylim=(100.0,400.0), z
# Test the Bending slab geometry
# Create CartGrid struct
-x = 
LinRange(0.0, 1200.0, 128); +y = LinRange(0.0, 1200.0, 128); +z = LinRange(-660, 50, 128); +Cart = CartData(xyz_grid(x, y, z)); +X, Y, Z = xyz_grid(x, y, z); # initialize phase and temperature matrix -Phase = ones(Int32,size(Cart)); -Temp = fill(1350.0,size(Cart)); +Phase = ones(Int32, size(Cart)); +Temp = fill(1350.0, size(Cart)); -t1 = Trench(Start = (400.0,400.0), End = (800.0,800.0),θ_max = 45, direction = 1.0, n_seg = 50, Length = 600.0, Thickness = 80.0, Lb = 500.0,d_decoupling = 100.0, type_bending =:Ribe) +t1 = Trench(Start = (400.0, 400.0), End = (800.0, 800.0), θ_max = 45, direction = 1.0, n_seg = 50, Length = 600.0, Thickness = 80.0, Lb = 500.0, d_decoupling = 100.0, type_bending = :Ribe) @test t1.θ_max == 45.0 @test t1.Thickness == 80.0 @test t1.Length == 600.0 @test t1.Lb == 500.0 -phase = LithosphericPhases(Layers=[5 7 88], Phases = [2 3 4], Tlab=nothing) -TsHC = HalfspaceCoolingTemp(Tsurface=20.0, Tmantle=1350, Age=30, Adiabat=0.4) +phase = LithosphericPhases(Layers = [5 7 88], Phases = [2 3 4], Tlab = nothing) +TsHC = HalfspaceCoolingTemp(Tsurface = 20.0, Tmantle = 1350, Age = 30, Adiabat = 0.4) temp = TsHC; -add_slab!(Phase,Temp,Cart, t1, phase=phase, T = TsHC) -@test Temp[84,84,110] ≈ 1045.1322688510577 +add_slab!(Phase, Temp, Cart, t1, phase = phase, T = TsHC) +@test Temp[84, 84, 110] ≈ 1045.1322688510577 @test extrema(Phase) == (1, 4) # with weak zone -t1 = Trench(Start = (400.0,400.0), End = (800.0,800.0),θ_max = 45, direction = 1.0, n_seg = 50, Length = 600.0, Thickness = 80.0, Lb = 500.0,d_decoupling = 100.0, WeakzoneThickness=10, WeakzonePhase=9) -Phase = ones(Int32,size(Cart)); -Temp = fill(1350.0,size(Cart)); -add_slab!(Phase,Temp,Cart, t1, phase=phase, T = TsHC) +t1 = Trench(Start = (400.0, 400.0), End = (800.0, 800.0), θ_max = 45, direction = 1.0, n_seg = 50, Length = 600.0, Thickness = 80.0, Lb = 500.0, d_decoupling = 100.0, WeakzoneThickness = 10, WeakzonePhase = 9) +Phase = ones(Int32, size(Cart)); +Temp = fill(1350.0, size(Cart)); +add_slab!(Phase, Temp, Cart, t1, phase = phase, T = TsHC) @test extrema(Phase) == (1, 9) #Data_Final = CartData(X,Y,Z,(Phase=Phase,Temp=Temp)) #write_paraview(Data_Final, "Data_Final"); -Phase = ones(Int32,size(Cart)); -Temp = fill(1350.0,size(Cart)); +Phase = ones(Int32, size(Cart)); +Temp = fill(1350.0, size(Cart)); TsMK = McKenzie_subducting_slab(Tsurface = 20.0, Tmantle = 1350.0, v_cm_yr = 4.0, Adiabat = 0.0) temp = TsMK -Phase = ones(Int32,size(Cart)); -Temp = fill(1350.0,size(Cart)); -TsHC = HalfspaceCoolingTemp(Tsurface=20.0, Tmantle=1350, Age=120, Adiabat=0.4) +Phase = ones(Int32, size(Cart)); +Temp = fill(1350.0, size(Cart)); +TsHC = HalfspaceCoolingTemp(Tsurface = 20.0, Tmantle = 1350, Age = 120, Adiabat = 0.4) TsMK = McKenzie_subducting_slab(Tsurface = 20.0, Tmantle = 1350.0, v_cm_yr = 4.0, Adiabat = 0.0) -T_slab = LinearWeightedTemperature(crit_dist=600, F1=TsHC, F2=TsMK); -phase = LithosphericPhases(Layers=[5 7 88], Phases = [2 3 4], Tlab=nothing) +T_slab = LinearWeightedTemperature(crit_dist = 600, F1 = TsHC, F2 = TsMK); +phase = LithosphericPhases(Layers = [5 7 88], Phases = [2 3 4], Tlab = nothing) -t1 = Trench(Start = (400.0,400.0), End = (800.0,800.0),θ_max = 90.0, direction = 1.0, n_seg = 50, Length = 600.0, Thickness = 80.0, Lb = 500.0,d_decoupling = 100.0, type_bending =:Ribe, WeakzoneThickness=10, WeakzonePhase=9) +t1 = Trench(Start = (400.0, 400.0), End = (800.0, 800.0), θ_max = 90.0, direction = 1.0, n_seg = 50, Length = 600.0, Thickness = 80.0, Lb = 500.0, d_decoupling = 100.0, type_bending = :Ribe, 
WeakzoneThickness = 10, WeakzonePhase = 9) -add_slab!(Phase,Temp,Cart, t1, phase=phase, T = T_slab) -@test Temp[84,84,110] ≈ 624.6682008876219 +add_slab!(Phase, Temp, Cart, t1, phase = phase, T = T_slab) +@test Temp[84, 84, 110] ≈ 624.6682008876219 -Data_Final = CartData(X,Y,Z,(Phase=Phase,Temp=Temp)) +Data_Final = CartData(X, Y, Z, (Phase = Phase, Temp = Temp)) # 2D slab: -nx,nz = 512,128 -x = range(-1000,1000, nx); -z = range(-660,0, nz); -Grid2D = CartData(xyz_grid(x,0,z)) +nx, nz = 512, 128 +x = range(-1000, 1000, nx); +z = range(-660, 0, nz); +Grid2D = CartData(xyz_grid(x, 0, z)) Phases = zeros(Int64, nx, 1, nz); Temp = fill(1350.0, nx, 1, nz); -add_box!(Phases, Temp, Grid2D; xlim=(-800.0,0.0), zlim=(-80.0, 0.0), phase = ConstantPhase(1), T=HalfspaceCoolingTemp(Age=40)); +add_box!(Phases, Temp, Grid2D; xlim = (-800.0, 0.0), zlim = (-80.0, 0.0), phase = ConstantPhase(1), T = HalfspaceCoolingTemp(Age = 40)); -trench = Trench(Start=(0.0,-100.0), End=(0.0,100.0), Thickness=80.0, θ_max=30.0, Length=300, Lb=150); -add_slab!(Phases, Temp, Grid2D, trench, phase = ConstantPhase(2), T=HalfspaceCoolingTemp(Age=40)); +trench = Trench(Start = (0.0, -100.0), End = (0.0, 100.0), Thickness = 80.0, θ_max = 30.0, Length = 300, Lb = 150); +add_slab!(Phases, Temp, Grid2D, trench, phase = ConstantPhase(2), T = HalfspaceCoolingTemp(Age = 40)); -T_slab = LinearWeightedTemperature( F1=HalfspaceCoolingTemp(Age=40), F2=McKenzie_subducting_slab(Tsurface=0,v_cm_yr=4, Adiabat = 0.0), crit_dist=600) -add_slab!(Phases, Temp, Grid2D, trench, phase = ConstantPhase(2), T=T_slab); +T_slab = LinearWeightedTemperature(F1 = HalfspaceCoolingTemp(Age = 40), F2 = McKenzie_subducting_slab(Tsurface = 0, v_cm_yr = 4, Adiabat = 0.0), crit_dist = 600) +add_slab!(Phases, Temp, Grid2D, trench, phase = ConstantPhase(2), T = T_slab); @test sum(Temp) ≈ 8.571402268095453e7 @test extrema(Phases) == (0, 2) @@ -316,36 +324,38 @@ add_slab!(Phases, Temp, Grid2D, trench, phase = ConstantPhase(2), T=T_slab); # More sophisticated 2D example with overriding plate -nx,nz = 512,128 -x = range(-1000,1000, nx); -z = range(-660,0, nz); -Grid2D = CartData(xyz_grid(x,0,z)) +nx, nz = 512, 128 +x = range(-1000, 1000, nx); +z = range(-660, 0, nz); +Grid2D = CartData(xyz_grid(x, 0, z)) Phases = zeros(Int64, nx, 1, nz); Temp = fill(1350.0, nx, 1, nz); -lith = LithosphericPhases(Layers=[15 20 55], Phases=[3 4 5], Tlab=1250) +lith = LithosphericPhases(Layers = [15 20 55], Phases = [3 4 5], Tlab = 1250) # Lets add the overriding plate. 
Note that we add this twice with a different thickness to properly represent the transition around the trench -add_box!(Phases, Temp, Grid2D; xlim=(200.0,1000.0), zlim=(-150.0, 0.0), phase = lith, T=HalfspaceCoolingTemp(Age=80)); -add_box!(Phases, Temp, Grid2D; xlim=(0.0,200.0), zlim=(-60.0, 0.0), phase = lith, T=HalfspaceCoolingTemp(Age=80)); +add_box!(Phases, Temp, Grid2D; xlim = (200.0, 1000.0), zlim = (-150.0, 0.0), phase = lith, T = HalfspaceCoolingTemp(Age = 80)); +add_box!(Phases, Temp, Grid2D; xlim = (0.0, 200.0), zlim = (-60.0, 0.0), phase = lith, T = HalfspaceCoolingTemp(Age = 80)); # The horizontal part of the oceanic plate is as before v_spread_cm_yr = 3 #spreading velocity -lith = LithosphericPhases(Layers=[15 55], Phases=[1 2], Tlab=1250) -add_box!(Phases, Temp, Grid2D; xlim=(-800.0,0.0), zlim=(-150.0, 0.0), phase = lith, T=SpreadingRateTemp(SpreadingVel=v_spread_cm_yr)); +lith = LithosphericPhases(Layers = [15 55], Phases = [1 2], Tlab = 1250) +add_box!(Phases, Temp, Grid2D; xlim = (-800.0, 0.0), zlim = (-150.0, 0.0), phase = lith, T = SpreadingRateTemp(SpreadingVel = v_spread_cm_yr)); # Yet, now we add a trench as well. -AgeTrench_Myrs = 800e3/(v_spread_cm_yr/1e2)/1e6 #plate age @ trench +AgeTrench_Myrs = 800.0e3 / (v_spread_cm_yr / 1.0e2) / 1.0e6 #plate age @ trench # We want to add a smooth transition from a halfspace cooling thermal profile to a slab that is heated by the surrounding mantle below a decoupling depth `d_decoupling`. -T_slab = LinearWeightedTemperature( F1=HalfspaceCoolingTemp(Age=AgeTrench_Myrs), F2=McKenzie_subducting_slab(Tsurface=0,v_cm_yr=v_spread_cm_yr, Adiabat = 0.0), crit_dist=600) +T_slab = LinearWeightedTemperature(F1 = HalfspaceCoolingTemp(Age = AgeTrench_Myrs), F2 = McKenzie_subducting_slab(Tsurface = 0, v_cm_yr = v_spread_cm_yr, Adiabat = 0.0), crit_dist = 600) # # in this case, we have a more reasonable slab thickness: -trench = Trench(Start=(0.0,-100.0), End=(0.0,100.0), Thickness=90.0, θ_max=30.0, Length=600, Lb=200, - WeakzoneThickness=15, WeakzonePhase=6, d_decoupling=125); -add_slab!(Phases, Temp, Grid2D, trench, phase = lith, T=T_slab); +trench = Trench( + Start = (0.0, -100.0), End = (0.0, 100.0), Thickness = 90.0, θ_max = 30.0, Length = 600, Lb = 200, + WeakzoneThickness = 15, WeakzonePhase = 6, d_decoupling = 125 +); +add_slab!(Phases, Temp, Grid2D, trench, phase = lith, T = T_slab); # Lithosphere-asthenosphere boundary: -ind = findall(Temp .> 1250 .&& (Phases.==2 .|| Phases.==5)); +ind = findall(Temp .> 1250 .&& (Phases .== 2 .|| Phases .== 5)); Phases[ind] .= 0; @test sum(Temp) ≈ 8.292000736425713e7 @@ -354,98 +364,101 @@ Phases[ind] .= 0; #write_paraview(Grid2D,"Grid2D_SubductionCurvedOverriding"); # 2D volcano -nx,nz = 512,128 -x = range(-100e0,100e0, nx); -z = range(-60e0,5e0, nz); -Grid2D = CartData(xyz_grid(x,0,z)) +nx, nz = 512, 128 +x = range(-100.0e0, 100.0e0, nx); +z = range(-60.0e0, 5.0e0, nz); +Grid2D = CartData(xyz_grid(x, 0, z)) Phases = zeros(Int64, nx, 1, nz); Temp = fill(1350.0, nx, 1, nz); -lith = LithosphericPhases(Layers=[15 20 55], Phases=[3 4 5], Tlab=1250) - -add_box!(Phases, Temp, Grid2D; xlim=(-100.0,100.0), zlim=(-60e0, 0.0), phase = lith, T=HalfspaceCoolingTemp(Age=80)); - -add_volcano!(Phases, Temp, Grid2D; -volcanic_phase = 1, -center = (mean(Grid2D.x.val), 0.0), -height = 3, -radius = 5, -base = 0.0, -background = nothing, -T = HalfspaceCoolingTemp(Age=20) +lith = LithosphericPhases(Layers = [15 20 55], Phases = [3 4 5], Tlab = 1250) + +add_box!(Phases, Temp, Grid2D; xlim = (-100.0, 100.0), zlim = 
(-60.0e0, 0.0), phase = lith, T = HalfspaceCoolingTemp(Age = 80)); + +add_volcano!( + Phases, Temp, Grid2D; + volcanic_phase = 1, + center = (mean(Grid2D.x.val), 0.0), + height = 3, + radius = 5, + base = 0.0, + background = nothing, + T = HalfspaceCoolingTemp(Age = 20) ) -@test any(Phases[256,1,:] .== 1) == true +@test any(Phases[256, 1, :] .== 1) == true # 3D volcano # Create CartGrid struct -x = LinRange(0.0,100.0,64); -y = LinRange(0.0,100.0,64); -z = LinRange(-60,5e0,64); -Cart = CartData(xyz_grid(x, y, z)); +x = LinRange(0.0, 100.0, 64); +y = LinRange(0.0, 100.0, 64); +z = LinRange(-60, 5.0e0, 64); +Cart = CartData(xyz_grid(x, y, z)); # initialize phase and temperature matrix -Phase = zeros(Int32,size(Cart)); -Temp = fill(1350.0,size(Cart)); -lith = LithosphericPhases(Layers=[15 20 55], Phases=[3 4 5], Tlab=1250) - -add_box!(Phase, Temp, Cart; xlim=(0.0,100.0),ylim=(0.0,100.0), zlim=(-60.0, 0.0), phase = lith, T=HalfspaceCoolingTemp(Age=80)); - -add_volcano!(Phase, Temp, Cart; - volcanic_phase = 1, - center = (mean(Cart.x.val), mean(Cart.y.val), 0.0), - height = 3, - radius = 5, - base = 0.0, - background = nothing, - T = HalfspaceCoolingTemp(Age=20) +Phase = zeros(Int32, size(Cart)); +Temp = fill(1350.0, size(Cart)); +lith = LithosphericPhases(Layers = [15 20 55], Phases = [3 4 5], Tlab = 1250) + +add_box!(Phase, Temp, Cart; xlim = (0.0, 100.0), ylim = (0.0, 100.0), zlim = (-60.0, 0.0), phase = lith, T = HalfspaceCoolingTemp(Age = 80)); + +add_volcano!( + Phase, Temp, Cart; + volcanic_phase = 1, + center = (mean(Cart.x.val), mean(Cart.y.val), 0.0), + height = 3, + radius = 5, + base = 0.0, + background = nothing, + T = HalfspaceCoolingTemp(Age = 20) ) -@test any(Phase[32,32,:] .== 1) == true +@test any(Phase[32, 32, :] .== 1) == true #3D fault # Create CartGrid struct -x = LinRange(0.0,100.0,64); -y = LinRange(0.0,100.0,64); -z = LinRange(-60,5e0,64); -Cart = CartData(xyz_grid(x, y, z)); +x = LinRange(0.0, 100.0, 64); +y = LinRange(0.0, 100.0, 64); +z = LinRange(-60, 5.0e0, 64); +Cart = CartData(xyz_grid(x, y, z)); # initialize phase and temperature matrix -Phase = zeros(Int32,size(Cart)); -Temp = fill(1350.0,size(Cart)); -lith = LithosphericPhases(Layers=[15 20 55], Phases=[3 4 5], Tlab=1250) - -add_box!(Phase, Temp, Cart; xlim=(0.0,100.0),ylim=(0.0,100.0), zlim=(-60.0, 0.0), phase = lith, T=HalfspaceCoolingTemp(Age=80)); - -add_fault!(Phase, Temp, Cart; - Start=(0.0,0.0), End=(100,100), - Fault_thickness=1.0, - Depth_extent=(-30.0, 0.0), - DipAngle=-10e0, - phase=ConstantPhase(1), - T=ConstantTemp(1200), +Phase = zeros(Int32, size(Cart)); +Temp = fill(1350.0, size(Cart)); +lith = LithosphericPhases(Layers = [15 20 55], Phases = [3 4 5], Tlab = 1250) + +add_box!(Phase, Temp, Cart; xlim = (0.0, 100.0), ylim = (0.0, 100.0), zlim = (-60.0, 0.0), phase = lith, T = HalfspaceCoolingTemp(Age = 80)); + +add_fault!( + Phase, Temp, Cart; + Start = (0.0, 0.0), End = (100, 100), + Fault_thickness = 1.0, + Depth_extent = (-30.0, 0.0), + DipAngle = -10.0e0, + phase = ConstantPhase(1), + T = ConstantTemp(1200), ) -@test any(Phase[32,32,:] .== 1) == true -@test any(Temp[32,32,:] .== 1200) == true +@test any(Phase[32, 32, :] .== 1) == true +@test any(Temp[32, 32, :] .== 1200) == true # Q1Data -Grid = Q1Data(xyz_grid(1.0:1:10.0, 11.0:1:20.0, -20:1:-10)) -PhasesC = zeros(Int64,size(Grid)); # at cell -TempC = ones(Float64, size(Grid))*1350; -PhasesV = zeros(Int64,size(Grid.x)); # at vertex -TempV = ones(Float64, size(Grid.x))*1350; +Grid = Q1Data(xyz_grid(1.0:1:10.0, 11.0:1:20.0, -20:1:-10)) +PhasesC 
= zeros(Int64, size(Grid)); # at cell
+TempC = ones(Float64, size(Grid)) * 1350;
+PhasesV = zeros(Int64, size(Grid.x)); # at vertex
+TempV = ones(Float64, size(Grid.x)) * 1350;
 # Add data to vertex fields:
-add_box!(PhasesV,TempV,Grid, xlim=(2,4), zlim=(-15,-10), phase=ConstantPhase(3), DipAngle=10, T=LinearTemp(Tbot=1350, Ttop=200))
-@test sum(TempV[1,1,:]) ≈ 14850.0
+add_box!(PhasesV, TempV, Grid, xlim = (2, 4), zlim = (-15, -10), phase = ConstantPhase(3), DipAngle = 10, T = LinearTemp(Tbot = 1350, Ttop = 200))
+@test sum(TempV[1, 1, :]) ≈ 14850.0
-add_ellipsoid!(PhasesV,TempV,Grid, cen=(4,15,-17), axes=(1,2,3), StrikeAngle=90, DipAngle=45, phase=ConstantPhase(2), T=ConstantTemp(600))
-@test sum(TempV[1,1,:]) ≈ 14850.0
+add_ellipsoid!(PhasesV, TempV, Grid, cen = (4, 15, -17), axes = (1, 2, 3), StrikeAngle = 90, DipAngle = 45, phase = ConstantPhase(2), T = ConstantTemp(600))
+@test sum(TempV[1, 1, :]) ≈ 14850.0
 # Add data to cell fields:
-add_box!(PhasesC,TempC,Grid, xlim=(2,4), zlim=(-15,-10), phase=ConstantPhase(3), DipAngle=10, T=LinearTemp(Tbot=1350, Ttop=200), cell=true)
-@test sum(TempC[1,1,:]) ≈ 13360.239732164195
+add_box!(PhasesC, TempC, Grid, xlim = (2, 4), zlim = (-15, -10), phase = ConstantPhase(3), DipAngle = 10, T = LinearTemp(Tbot = 1350, Ttop = 200), cell = true)
+@test sum(TempC[1, 1, :]) ≈ 13360.239732164195
-add_ellipsoid!(PhasesC,TempC,Grid, cen=(4,15,-17), axes=(1,2,3), StrikeAngle=90, DipAngle=45, phase=ConstantPhase(2), T=ConstantTemp(1600), cell=true)
+add_ellipsoid!(PhasesC, TempC, Grid, cen = (4, 15, -17), axes = (1, 2, 3), StrikeAngle = 90, DipAngle = 45, phase = ConstantPhase(2), T = ConstantTemp(1600), cell = true)
 @test all(extrema(TempC) .≈ (262.2231770957809, 1600.0))
diff --git a/test/test_stl.jl b/test/test_stl.jl
index 090741cd..d65f2403 100644
--- a/test/test_stl.jl
+++ b/test/test_stl.jl
@@ -2,20 +2,20 @@ using Test, GeophysicalModelGenerator
 # Load cat MESH
-mesh = load("./test_files/cat.stl")
-X,Y,Z = xyz_grid(150:180, -15:2:15, 10:5:60) # Create mesh
+mesh = load("./test_files/cat.stl")
+X, Y, Z = xyz_grid(150:180, -15:2:15, 10:5:60) # Create mesh
 # Test isinside_closed_STL routine for individual points (note: bit slow)
 Phase = zeros(size(X));
 for i in eachindex(X)
- inside = isinside_closed_STL(mesh, [X[i], Y[i], Z[i]])
- if inside
- Phase[i] = 1;
+ inside = isinside_closed_STL(mesh, [X[i], Y[i], Z[i]])
+ if inside
+ Phase[i] = 1
 end
 end
-@test Phase[14,6,2] == 1.0
+@test Phase[14, 6, 2] == 1.0
 #Data_Cat = ParaviewData(X,Y,Z, (Phase=Phase,))
 #write_paraview(Data_Cat,"Data_Cat")
diff --git a/test/test_surfaces.jl b/test/test_surfaces.jl
index 1ab5acaf..6cf6f765 100644
--- a/test/test_surfaces.jl
+++ b/test/test_surfaces.jl
@@ -2,18 +2,18 @@ using Test
 # test various surface routines
 # Create surfaces
-cartdata1 = CartData(xyz_grid(1:4,1:5,0))
-cartdata2 = CartData(xyz_grid(1:4,1:5,2))
-cartdata3 = CartData(xyz_grid(1:4,1:5,2:5))
-cartdata2 = addfield(cartdata2,"Z2",cartdata2.x.val)
+cartdata1 = CartData(xyz_grid(1:4, 1:5, 0))
+cartdata2 = CartData(xyz_grid(1:4, 1:5, 2))
+cartdata3 = CartData(xyz_grid(1:4, 1:5, 2:5))
+cartdata2 = addfield(cartdata2, "Z2", cartdata2.x.val)
 @test is_surface(cartdata1)
 @test is_surface(cartdata2)
 @test is_surface(cartdata3) == false
-geodata1 = GeoData(lonlatdepth_grid(1:4,1:5,0))
-geodata2 = GeoData(lonlatdepth_grid(1:4,1:5,2))
-geodata3 = GeoData(lonlatdepth_grid(1:4,1:5,2:5))
+geodata1 = GeoData(lonlatdepth_grid(1:4, 1:5, 0))
+geodata2 = GeoData(lonlatdepth_grid(1:4, 1:5, 2))
+geodata3 = GeoData(lonlatdepth_grid(1:4, 1:5, 2:5))
 @test is_surface(geodata1)
 @test is_surface(geodata2)
@@ -21,32 +21,32 @@ geodata3 = GeoData(lonlatdepth_grid(1:4,1:5,2:5))
 # Test add & subtraction of surfaces
 cartdata4 = cartdata1 + cartdata2
-@test length(cartdata4.fields)==2
-@test cartdata4.z.val[2]==2.0
+@test length(cartdata4.fields) == 2
+@test cartdata4.z.val[2] == 2.0
 cartdata5 = cartdata1 - cartdata2
-@test length(cartdata5.fields)==2
-@test cartdata5.z.val[2]==-2.0
+@test length(cartdata5.fields) == 2
+@test cartdata5.z.val[2] == -2.0
 geodata4 = geodata1 + geodata2
-@test length(geodata4.fields)==1
-@test geodata4.depth.val[2]==2.0
+@test length(geodata4.fields) == 1
+@test geodata4.depth.val[2] == 2.0
 geodata5 = geodata1 - geodata2
-@test length(geodata5.fields)==1
-@test geodata5.depth.val[2]==-2.0
+@test length(geodata5.fields) == 1
+@test geodata5.depth.val[2] == -2.0
 # Test removing NaN;
 Z = NumValue(cartdata5.z)
-Z[2,2] = NaN;
-remove_NaN_surface!(Z,NumValue(cartdata5.x), NumValue(cartdata5.y))
-@test any(isnan.(Z))==false
+Z[2, 2] = NaN;
+remove_NaN_surface!(Z, NumValue(cartdata5.x), NumValue(cartdata5.y))
+@test any(isnan.(Z)) == false
 # Test draping values on topography
-X,Y,Z = xyz_grid(1:.14:4,1:.02:5,0);
-v = X.^2 .+ Y.^2;
-values1 = CartData(X,Y,Z, (; v))
-values2 = CartData(X,Y,Z, (; colors=(v,v,v) ))
+X, Y, Z = xyz_grid(1:0.14:4, 1:0.02:5, 0);
+v = X .^ 2 .+ Y .^ 2;
+values1 = CartData(X, Y, Z, (; v))
+values2 = CartData(X, Y, Z, (; colors = (v, v, v)))
 cart_drape1 = drape_on_topo(cartdata2, values1)
 @test sum(cart_drape1.fields.v) ≈ 366.02799999999996
@@ -54,8 +54,8 @@ cart_drape1 = drape_on_topo(cartdata2, values1)
 cart_drape2 = drape_on_topo(cartdata2, values2)
 @test cart_drape2.fields.colors[1][10] ≈ 12.9204
-values1 = GeoData(X,Y,Z, (; v))
-values2 = GeoData(X,Y,Z, (; colors=(v,v,v) ))
+values1 = GeoData(X, Y, Z, (; v))
+values2 = GeoData(X, Y, Z, (; colors = (v, v, v)))
 geo_drape1 = drape_on_topo(geodata2, values1)
 @test sum(geo_drape1.fields.v) ≈ 366.02799999999996
@@ -70,25 +70,25 @@ cartdata2b = fit_surface_to_points(cartdata2, X[:], Y[:], v[:])
 #-------------
 # test above_surface with the Grid object
-Grid = create_CartGrid(size=(10,20,30),x=(0.,10), y=(0.,10), z=(-10.,2.))
+Grid = create_CartGrid(size = (10, 20, 30), x = (0.0, 10), y = (0.0, 10), z = (-10.0, 2.0))
 @test Grid.Δ[2] ≈ 0.5263157894736842
-Temp = ones(Float64, Grid.N...)*1350;
-Phases = zeros(Int32, Grid.N...);
+Temp = ones(Float64, Grid.N...) * 1350;
+Phases = zeros(Int32, Grid.N...);
-Topo_cart = CartData(xyz_grid(-1:.2:20,-12:.2:13,0));
-ind = above_surface(Grid, Topo_cart);
-@test sum(ind[1,1,:]) == 5
+Topo_cart = CartData(xyz_grid(-1:0.2:20, -12:0.2:13, 0));
+ind = above_surface(Grid, Topo_cart);
+@test sum(ind[1, 1, :]) == 5
-ind = below_surface(Grid, Topo_cart);
-@test sum(ind[1,1,:]) == 25
+ind = below_surface(Grid, Topo_cart);
+@test sum(ind[1, 1, :]) == 25
 #-------------
 # test above_surface with the Q1Data object
-q1data = Q1Data(xyz_grid(1:4,1:5,-5:5))
-ind = above_surface(q1data, cartdata2);
+q1data = Q1Data(xyz_grid(1:4, 1:5, -5:5))
+ind = above_surface(q1data, cartdata2);
 @test sum(ind) == 60
-ind = below_surface(q1data, cartdata2);
+ind = below_surface(q1data, cartdata2);
 @test sum(ind) == 140
diff --git a/test/test_transformation.jl b/test/test_transformation.jl
index 7c1d9c9c..dee0c143 100644
--- a/test/test_transformation.jl
+++ b/test/test_transformation.jl
@@ -3,47 +3,47 @@ using Test
 using GeophysicalModelGenerator
 # Create 3D volume with some fake data
-Lon,Lat,Depth = lonlatdepth_grid(5:25,20:50,(-1300:100:0)km);
-Data_set3D = GeoData(Lon,Lat,Depth,(Depthdata=Depth*2 + Lon*km,LonData=Lon))
+Lon, Lat, Depth = lonlatdepth_grid(5:25, 20:50, (-1300:100:0)km);
+Data_set3D = GeoData(Lon, Lat, Depth, (Depthdata = Depth * 2 + Lon * km, LonData = Lon))
-proj = ProjectionPoint(Lon=20,Lat=35)
+proj = ProjectionPoint(Lon = 20, Lat = 35)
 # Convert this 3D dataset to a Cartesian dataset (the grid will not be orthogonal)
-Data_set3D_Cart = convert2CartData(Data_set3D, proj)
+Data_set3D_Cart = convert2CartData(Data_set3D, proj)
 @test sum(abs.(Value(Data_set3D_Cart.x))) ≈ 5.293469089428514e6km
 # Create Cartesian grid
-X,Y,Z = xyz_grid(-400:100:400,-500:200:500,(-1300:100:0)km);
-Data_Cart = CartData(X,Y,Z,(Z=Z,))
+X, Y, Z = xyz_grid(-400:100:400, -500:200:500, (-1300:100:0)km);
+Data_Cart = CartData(X, Y, Z, (Z = Z,))
 # Project values of Data_set3D to the cartesian data
-Data_Cart = project_CartData(Data_Cart, Data_set3D, proj)
+Data_Cart = project_CartData(Data_Cart, Data_set3D, proj)
 @test sum(Data_Cart.fields.Depthdata) ≈ -967680.9136292854km
 #@test sum(Data_Cart.fields.Depthdata) ≈ -1.416834287168597e6km
 @test sum(Data_Cart.fields.LonData) ≈ 15119.086370714615
 # Next, 3D surface (like topography)
-Lon,Lat,Depth = lonlatdepth_grid(5:25,20:50,0);
-Depth = cos.(Lon/5).*sin.(Lat)*10;
-Data_surf = GeoData(Lon,Lat,Depth,(Z=Depth,));
-Data_surf_Cart = convert2CartData(Data_surf, proj);
+Lon, Lat, Depth = lonlatdepth_grid(5:25, 20:50, 0);
+Depth = cos.(Lon / 5) .* sin.(Lat) * 10;
+Data_surf = GeoData(Lon, Lat, Depth, (Z = Depth,));
+Data_surf_Cart = convert2CartData(Data_surf, proj);
 # Cartesian surface
-X,Y,Z = xyz_grid(-500:10:500,-900:20:900,0);
-Data_Cart = CartData(X,Y,Z,(Z=Z,))
+X, Y, Z = xyz_grid(-500:10:500, -900:20:900, 0);
+Data_Cart = CartData(X, Y, Z, (Z = Z,))
-Data_Cart = project_CartData(Data_Cart, Data_surf, proj)
+Data_Cart = project_CartData(Data_Cart, Data_surf, proj)
 @test sum(Value(Data_Cart.z)) ≈ 1858.2487019158766km
 @test sum(Data_Cart.fields.Z) ≈ 1858.2487019158766
 # Cartesian surface when UTM data is used
-WE,SN,depth = xyz_grid(420000:1000:430000, 4510000:1000:4520000, 0);
+WE, SN, depth = xyz_grid(420000:1000:430000, 4510000:1000:4520000, 0);
-Data_surfUTM = UTMData(WE, SN, depth, 33, true, (Depth = WE,));
-Data_Cart = CartData(X,Y,Z,(Z=Z,))
-Data_Cart = project_CartData(Data_Cart, Data_surfUTM, proj)
+Data_surfUTM = UTMData(WE, SN, depth, 33, true, (Depth = WE,));
+Data_Cart = CartData(X, Y, Z, (Z = Z,))
+Data_Cart = project_CartData(Data_Cart, Data_surfUTM, proj)
 @test sum(Value(Data_Cart.z)) ≈ 0.0km
 @test sum(Data_Cart.fields.Depth) ≈ 3.9046959539921126e9
diff --git a/test/test_tutorials.jl b/test/test_tutorials.jl
index 99c16ba4..6ab18499 100644
--- a/test/test_tutorials.jl
+++ b/test/test_tutorials.jl
@@ -23,4 +23,4 @@ end
 #
 #@testset "3D Numerical Model tutorial" begin
 # include("../tutorials/Tutorial_NumericalModel_3D.jl")
-#end
\ No newline at end of file
+#end
diff --git a/test/test_utils.jl b/test/test_utils.jl
index c8ebbd18..5674f944 100644
--- a/test/test_utils.jl
+++ b/test/test_utils.jl
@@ -4,106 +4,106 @@ using Test
 using GeophysicalModelGenerator
 # should throw an error with a 2D dataset
-Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,-50km);
-Data1 = Depth*2; # some data
-Vx1,Vy1,Vz1 = Data1*3,Data1*4,Data1*5
-Data_set2D = GeoData(Lon,Lat,Depth,(Depthdata=Data1,LonData1=Lon, Velocity=(Vx1,Vy1,Vz1)))
-Data_set2D0 = GeoData(Lon,Lat,Depth,(Depthdata=Data1,LonData1=Lon))
-@test_throws ErrorException cross_section(Data_set2D, Depth_level=-10)
+Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, -50km);
+Data1 = Depth * 2; # some data
+Vx1, Vy1, Vz1 = Data1 * 3, Data1 * 4, Data1 * 5
+Data_set2D = GeoData(Lon, Lat, Depth, (Depthdata = Data1, LonData1 = Lon, Velocity = (Vx1, Vy1, Vz1)))
+Data_set2D0 = GeoData(Lon, Lat, Depth, (Depthdata = Data1, LonData1 = Lon))
+@test_throws ErrorException cross_section(Data_set2D, Depth_level = -10)
 # Test interpolation of depth to a given cartesian XY-plane
 x = 11:19
 y = 31:39
 plane1 = interpolate_datafields_2D(Data_set2D, x, y)
-proj = ProjectionPoint()
+proj = ProjectionPoint()
 plane2 = interpolate_datafields_2D(Data_set2D, proj, x, y)
-Lon1,Lat1,Depth1 = lonlatdepth_grid(12:18,33:39,-50km);
-Data2 = Depth1*2; # some data
-Vx1,Vy1,Vz1 = Data2*3,Data2*4,Data2*5
-Data_set2D_1 = GeoData(Lon1,Lat1,Depth1,(Depthdata1=Data2,LonData2=Lon1))
+Lon1, Lat1, Depth1 = lonlatdepth_grid(12:18, 33:39, -50km);
+Data2 = Depth1 * 2; # some data
+Vx1, Vy1, Vz1 = Data2 * 3, Data2 * 4, Data2 * 5
+Data_set2D_1 = GeoData(Lon1, Lat1, Depth1, (Depthdata1 = Data2, LonData2 = Lon1))
 plane3 = interpolate_datafields_2D(Data_set2D0, Data_set2D_1)
 @test sum(plane3.fields.Depthdata) ≈ -4900.0km
 @test plane1 == plane2
-@test all(==(-50e0), plane1)
+@test all(==(-50.0e0), plane1)
 # Create 3D volume with some fake data
-Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(-300:25:0)km);
-Data = Depth*2; # some data
-Vx,Vy,Vz = ustrip(Data*3)*km/s,ustrip(Data*4)*km/s,ustrip(Data*5)*km/s;
-Data_set3D = GeoData(Lon,Lat,Depth,(Depthdata=Data,LonData=Lon, Velocity=(Vx,Vy,Vz)))
+Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (-300:25:0)km);
+Data = Depth * 2; # some data
+Vx, Vy, Vz = ustrip(Data * 3) * km / s, ustrip(Data * 4) * km / s, ustrip(Data * 5) * km / s;
+Data_set3D = GeoData(Lon, Lat, Depth, (Depthdata = Data, LonData = Lon, Velocity = (Vx, Vy, Vz)))
 # Test addfield
-Data_set3D = addfield(Data_set3D,"Lat", Lat)
+Data_set3D = addfield(Data_set3D, "Lat", Lat)
 @test keys(Data_set3D.fields) == (:Depthdata, :LonData, :Velocity, :Lat)
-Data_set3D = addfield(Data_set3D,(;Lat, Lon))
+Data_set3D = addfield(Data_set3D, (; Lat, Lon))
 @test keys(Data_set3D.fields) == (:Depthdata, :LonData, :Velocity, :Lat, :Lon)
 # test removefield
-Data_set3D_1 = removefield(Data_set3D,"Lon")
+Data_set3D_1 = removefield(Data_set3D, "Lon")
 @test keys(Data_set3D_1.fields) == (:Depthdata, :LonData, :Velocity, :Lat)
-Data_set3D_2 = removefield(Data_set3D,:Lon)
+Data_set3D_2 = removefield(Data_set3D, :Lon)
 @test keys(Data_set3D_2.fields) == (:Depthdata, :LonData, :Velocity, :Lat)
-Data_set3D_3 = removefield(Data_set3D,(:Lon,:Lat))
+Data_set3D_3 = removefield(Data_set3D, (:Lon, :Lat))
 @test keys(Data_set3D_3.fields) == (:Depthdata, :LonData, :Velocity)
 # Create 3D cartesian dataset
-Data_setCart3D = CartData(Lon,Lat,Depth,(Depthdata=Data,LonData=Lon, Velocity=(Vx,Vy,Vz)))
+Data_setCart3D = CartData(Lon, Lat, Depth, (Depthdata = Data, LonData = Lon, Velocity = (Vx, Vy, Vz)))
 # Create 3D volume with some fake data
-Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(0:-25:-300)km);
-Data = Depth*2; # some data
-Vx,Vy,Vz = ustrip(Data*3)*km/s,ustrip(Data*4)*km/s,ustrip(Data*5)*km/s;
-Data_set3D_reverse = GeoData(Lon,Lat,Depth,(Depthdata=Data,LonData=Lon, Velocity=(Vx,Vy,Vz)))
+Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (0:-25:-300)km);
+Data = Depth * 2; # some data
+Vx, Vy, Vz = ustrip(Data * 3) * km / s, ustrip(Data * 4) * km / s, ustrip(Data * 5) * km / s;
+Data_set3D_reverse = GeoData(Lon, Lat, Depth, (Depthdata = Data, LonData = Lon, Velocity = (Vx, Vy, Vz)))
 # Create cross-sections in various directions (no interpolation which is default)
-test_cross = cross_section(Data_set3D, Depth_level=-100km)
-@test test_cross.fields[1][41]==-200km
-@test test_cross.fields[2][31]==18
-@test test_cross.fields[3][1][30]==-600km/s
-@test test_cross.fields[3][2][30]==-800km/s
-@test test_cross.fields[3][3][30]==-1000km/s
+test_cross = cross_section(Data_set3D, Depth_level = -100km)
+@test test_cross.fields[1][41] == -200km
+@test test_cross.fields[2][31] == 18
+@test test_cross.fields[3][1][30] == -600km / s
+@test test_cross.fields[3][2][30] == -800km / s
+@test test_cross.fields[3][3][30] == -1000km / s
 # throw error if outside bounds
-@test_throws ErrorException cross_section(Data_set3D, Depth_level=100km)
-
-test_cross = cross_section(Data_set3D, Lon_level=15)
-@test test_cross.fields[1][41]==-450km
-@test test_cross.fields[2][31]==15
-@test test_cross.fields[3][1][30]==-1500km/s
-@test test_cross.fields[3][2][30]==-2000km/s
-@test test_cross.fields[3][3][30]==-2500km/s
-
-test_cross = cross_section(Data_set3D, Lat_level=35)
-@test test_cross.fields[1][41]==-450km
-@test test_cross.fields[2][31]==18
-@test test_cross.fields[3][1][30]==-1500km/s
-@test test_cross.fields[3][2][30]==-2000km/s
-@test test_cross.fields[3][3][30]==-2500km/s
+@test_throws ErrorException cross_section(Data_set3D, Depth_level = 100km)
+
+test_cross = cross_section(Data_set3D, Lon_level = 15)
+@test test_cross.fields[1][41] == -450km
+@test test_cross.fields[2][31] == 15
+@test test_cross.fields[3][1][30] == -1500km / s
+@test test_cross.fields[3][2][30] == -2000km / s
+@test test_cross.fields[3][3][30] == -2500km / s
+
+test_cross = cross_section(Data_set3D, Lat_level = 35)
+@test test_cross.fields[1][41] == -450km
+@test test_cross.fields[2][31] == 18
+@test test_cross.fields[3][1][30] == -1500km / s
+@test test_cross.fields[3][2][30] == -2000km / s
+@test test_cross.fields[3][3][30] == -2500km / s
 # Create cross-sections with interpolation in various directions
-test_cross = cross_section(Data_set3D, Depth_level=-100km, dims=(50,100), Interpolate=true)
-@test size(test_cross.fields[1]) == (50,100,1)
-@test size(test_cross.fields[3][2]) == (50,100,1)
+test_cross = cross_section(Data_set3D, Depth_level = -100km, dims = (50, 100), Interpolate = true)
+@test size(test_cross.fields[1]) == (50, 100, 1)
+@test size(test_cross.fields[3][2]) == (50, 100, 1)
-test_cross = cross_section(Data_set3D, Lon_level=15, dims=(50,100), Interpolate=true)
-@test size(test_cross.fields[3][2])==(1,50,100)
-@test write_paraview(test_cross, "profile_test")==nothing
+test_cross = cross_section(Data_set3D, Lon_level = 15, dims = (50, 100), Interpolate = true)
+@test size(test_cross.fields[3][2]) == (1, 50, 100)
+@test write_paraview(test_cross, "profile_test") == nothing
-test_cross = cross_section(Data_set3D, Lat_level=35, dims=(50,100), Interpolate=true)
-@test size(test_cross.fields[3][2])==(50,1,100)
+test_cross = cross_section(Data_set3D, Lat_level = 35, dims = (50, 100), Interpolate = true)
+@test size(test_cross.fields[3][2]) == (50, 1, 100)
 # Diagonal cross-section
-test_cross = cross_section(Data_set3D, Start=(10,30), End=(20,40), dims=(50,100), Interpolate=true)
-@test size(test_cross.fields[3][2])==(50,100,1)
-@test write_paraview(test_cross, "profile_test")==nothing
+test_cross = cross_section(Data_set3D, Start = (10, 30), End = (20, 40), dims = (50, 100), Interpolate = true)
+@test size(test_cross.fields[3][2]) == (50, 100, 1)
+@test write_paraview(test_cross, "profile_test") == nothing
 #test_cross_rev = cross_section(Data_set3D_reverse, Start=(10,30), End=(20,40), dims=(50,100), Interpolate=true)
 #@test size(test_cross_rev.fields[3][2])==(50,100,1)
@@ -111,204 +111,204 @@ test_cross = cross_section(Data_set3D, Start=(10,30), End=(20,40), dims=(
 # Cross section of a topography
 depth_values = [rand(0:0.1:3.5)]
-Lon, Lat, Depth =lonlatdepth_grid(10:20, 30:40, depth_values[:]);
-Data_Topo = GeoData(Lon, Lat, Depth, (Depthdata=Depth,))
-Data_Topo_geo= cross_section(Data_Topo, Start=(10,30), End=(20,40), dims=(50,100), Interpolate=true)
+Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, depth_values[:]);
+Data_Topo = GeoData(Lon, Lat, Depth, (Depthdata = Depth,))
+Data_Topo_geo = cross_section(Data_Topo, Start = (10, 30), End = (20, 40), dims = (50, 100), Interpolate = true)
 @test Data_Topo_geo isa GeoData
-Lon,Lat,Depth = lonlatdepth_grid(5:25,20:50,0);
-Depth = cos.(Lon/5).*sin.(Lat)*10;
-Data_surf = GeoData(Lon,Lat,Depth,(Z=Depth,));
-Data_surf_cart = convert2CartData(Data_surf, proj);
-Data_surf_cross = cross_section(Data_surf_cart, Start=(-1693,2500), End=(-1000,3650), dims=(50,100), Interpolate=true)
+Lon, Lat, Depth = lonlatdepth_grid(5:25, 20:50, 0);
+Depth = cos.(Lon / 5) .* sin.(Lat) * 10;
+Data_surf = GeoData(Lon, Lat, Depth, (Z = Depth,));
+Data_surf_cart = convert2CartData(Data_surf, proj);
+Data_surf_cross = cross_section(Data_surf_cart, Start = (-1693, 2500), End = (-1000, 3650), dims = (50, 100), Interpolate = true)
 @test Data_surf_cross isa CartData
 # Cross-section with cartesian data
-test_cross = cross_section(Data_setCart3D, Lon_level=15, dims=(50,100), Interpolate=true)
-@test size(test_cross.fields[3][2])==(1,50,100)
-@test test_cross.x[1,2,3]==GeoUnit(15km)
+test_cross = cross_section(Data_setCart3D, Lon_level = 15, dims = (50, 100), Interpolate = true)
+@test size(test_cross.fields[3][2]) == (1, 50, 100)
+@test test_cross.x[1, 2, 3] == GeoUnit(15km)
 # Flatten diagonal 3D cross_section with CartData
 # Create 3D volume with some fake data
-Grid = create_CartGrid(size=(100,100,100), x=(0.0km, 99.9km), y=(-10.0km, 20.0km), z=(-40km,4km));
-X,Y,Z = xyz_grid(Grid.coord1D...);
-DataSet_Cart = CartData(X,Y,Z,(Depthdata=Z,))
+Grid = create_CartGrid(size = (100, 100, 100), x = (0.0km, 99.9km), y = (-10.0km, 20.0km), z = (-40km, 4km));
+X, Y, Z = xyz_grid(Grid.coord1D...);
+DataSet_Cart = CartData(X, Y, Z, (Depthdata = Z,))
-test_cross_cart = cross_section(DataSet_Cart, dims=(100,100), Interpolate=true, Start=(ustrip(Grid.min[1]),ustrip(Grid.max[2])), End=(ustrip(Grid.max[1]), ustrip(Grid.min[2])))
+test_cross_cart = cross_section(DataSet_Cart, dims = (100, 100), Interpolate = true, Start = (ustrip(Grid.min[1]), ustrip(Grid.max[2])), End = (ustrip(Grid.max[1]), ustrip(Grid.min[2])))
-flatten_cross = flatten_cross_section(test_cross_cart)
+flatten_cross = flatten_cross_section(test_cross_cart)
-@test flatten_cross[2][30]==1.0536089537226578
+@test flatten_cross[2][30] == 1.0536089537226578
 @test test_cross_cart.fields.FlatCrossSection[2][30] == flatten_cross[2][30] # should be added by default
 # Flatten 3D cross_section with GeoData
-Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,(-300:25:0)km);
-Data = Depth*2; # some data
-Data_set = GeoData(Lon,Lat,Depth,(Depthdata=Data,));
-Data_cross = cross_section(Data_set, Start=(10,39),End=(10,40))
-x_profile = flatten_cross_section(Data_cross)
+Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, (-300:25:0)km);
+Data = Depth * 2; # some data
+Data_set = GeoData(Lon, Lat, Depth, (Depthdata = Data,));
+Data_cross = cross_section(Data_set, Start = (10, 39), End = (10, 40))
+x_profile = flatten_cross_section(Data_cross)
-@test x_profile[100][100][1]==111.02363637836613
+@test x_profile[100][100][1] == 111.02363637836613
 # Extract sub-volume
 # with interpolation
-Data_sub_Interp = extract_subvolume(Data_set3D,Lon_level=(10,15), Lat_level=(30,32), Interpolate=true, dims=(51,21,32))
-@test Data_sub_Interp.fields[1][11]==-600km
-@test size(Data_sub_Interp.lat)==(51,21,32)
+Data_sub_Interp = extract_subvolume(Data_set3D, Lon_level = (10, 15), Lat_level = (30, 32), Interpolate = true, dims = (51, 21, 32))
+@test Data_sub_Interp.fields[1][11] == -600km
+@test size(Data_sub_Interp.lat) == (51, 21, 32)
-Data_sub_Interp_Cart = extract_subvolume(DataSet_Cart,X_level=(10,15), Y_level=(10,12), Interpolate=true, dims=(51,21,32))
-@test Data_sub_Interp_Cart.fields[1][11]==-40km
-@test size(Data_sub_Interp_Cart.x)==(51,21,32)
+Data_sub_Interp_Cart = extract_subvolume(DataSet_Cart, X_level = (10, 15), Y_level = (10, 12), Interpolate = true, dims = (51, 21, 32))
+@test Data_sub_Interp_Cart.fields[1][11] == -40km
+@test size(Data_sub_Interp_Cart.x) == (51, 21, 32)
-Data_cross_Interp_Cart = extract_subvolume(test_cross_cart,X_level=(10,50), Z_level=(-20,-5), dims=(51,61))
-@test Data_cross_Interp_Cart.fields[1][11]==18.0
-@test size(Data_cross_Interp_Cart.x)==(51,61,1)
+Data_cross_Interp_Cart = extract_subvolume(test_cross_cart, X_level = (10, 50), Z_level = (-20, -5), dims = (51, 61))
+@test Data_cross_Interp_Cart.fields[1][11] == 18.0
+@test size(Data_cross_Interp_Cart.x) == (51, 61, 1)
 # no interpolation
-Data_sub_NoInterp = extract_subvolume(Data_set3D,Lon_level=(10,15), Lat_level=(30,32), Interpolate=false, dims=(51,21,32))
-@test Data_sub_NoInterp.fields[1][11]==-600km
-@test size(Data_sub_NoInterp.lat)==(6,3,13)
+Data_sub_NoInterp = extract_subvolume(Data_set3D, Lon_level = (10, 15), Lat_level = (30, 32), Interpolate = false, dims = (51, 21, 32))
+@test Data_sub_NoInterp.fields[1][11] == -600km
+@test size(Data_sub_NoInterp.lat) == (6, 3, 13)
-Data_sub_Interp_Cart = extract_subvolume(DataSet_Cart,X_level=(10,15), Y_level=(10,12), Interpolate=false, dims=(51,21,32))
-@test Data_sub_Interp_Cart.fields[1][5]==-40km
-@test size(Data_sub_Interp_Cart.x)==(6,8,100)
+Data_sub_Interp_Cart = extract_subvolume(DataSet_Cart, X_level = (10, 15), Y_level = (10, 12), Interpolate = false, dims = (51, 21, 32))
+@test Data_sub_Interp_Cart.fields[1][5] == -40km
+@test size(Data_sub_Interp_Cart.x) == (6, 8, 100)
 # Extract subset of cross-section
-test_cross = cross_section(Data_set3D, Lat_level=35, dims=(50,100), Interpolate=true)
-Data_sub_cross = extract_subvolume(test_cross, Depth_level=(-100km,0km), Interpolate=false)
-@test Data_sub_cross.fields[1][11]==-200.00000000000003km
-@test size(Data_sub_cross.lat)==(50,1,34)
+test_cross = cross_section(Data_set3D, Lat_level = 35, dims = (50, 100), Interpolate = true)
+Data_sub_cross = extract_subvolume(test_cross, Depth_level = (-100km, 0km), Interpolate = false)
+@test Data_sub_cross.fields[1][11] == -200.00000000000003km
+@test size(Data_sub_cross.lat) == (50, 1, 34)
-test_cross_cart = cross_section(DataSet_Cart, Start=(0.0,-9.0), End=(90.0, 19.0)) # Cartesian cross-section
+test_cross_cart = cross_section(DataSet_Cart, Start = (0.0, -9.0), End = (90.0, 19.0)) # Cartesian cross-section
 # compute the mean velocity per depth in a 3D dataset and subtract the mean from the given velocities
-Data_pert = subtract_horizontalmean(ustrip(Data)) # 3D, no units
+Data_pert = subtract_horizontalmean(ustrip(Data)) # 3D, no units
 @test Data_pert[10] == 0.0
-Data_pert = subtract_horizontalmean(Data) # 3D with units
+Data_pert = subtract_horizontalmean(Data) # 3D with units
 @test Data_pert[10] == 0.0km
-Data_pert = subtract_horizontalmean(Data, Percentage=true) # 3D with units
+Data_pert = subtract_horizontalmean(Data, Percentage = true) # 3D with units
 @test Data_pert[1000] == 0.0
-Data2D = Data[:,1,:];
-Data_pert = subtract_horizontalmean(Data2D, Percentage=true) # 2D version with units [dp the same along a vertical profile]
+Data2D = Data[:, 1, :];
+Data_pert = subtract_horizontalmean(Data2D, Percentage = true) # 2D version with units [dp the same along a vertical profile]
-Data_set2D = GeoData(Lon,Lat,Depth,(Depthdata=Data,LonData=Lon,Pertdata=Data_pert ,Velocity=(Vx,Vy,Vz)))
-@test Data_set2D.fields[3][10,8,1] == 0
+Data_set2D = GeoData(Lon, Lat, Depth, (Depthdata = Data, LonData = Lon, Pertdata = Data_pert, Velocity = (Vx, Vy, Vz)))
+@test Data_set2D.fields[3][10, 8, 1] == 0
 # Create surface ("Moho")
-Lon,Lat,Depth = lonlatdepth_grid(10:20,30:40,-40km);
-Depth = Depth + Lon*km; # some fake topography on Moho
-Data_Moho = GeoData(Lon,Lat,Depth,(MohoDepth=Depth,LonData=Lon,TestData=(Depth,Depth,Depth)))
+Lon, Lat, Depth = lonlatdepth_grid(10:20, 30:40, -40km);
+Depth = Depth + Lon * km; # some fake topography on Moho
+Data_Moho = GeoData(Lon, Lat, Depth, (MohoDepth = Depth, LonData = Lon, TestData = (Depth, Depth, Depth)))
 # Test intersecting a surface with 2D or 3D data sets
-Above = above_surface(Data_set3D, Data_Moho); # 3D regular ordering
-@test Above[1,1,12]==true
-@test Above[1,1,11]==false
+Above = above_surface(Data_set3D, Data_Moho); # 3D regular ordering
+@test Above[1, 1, 12] == true
+@test Above[1, 1, 11] == false
-Above = above_surface(Data_set3D_reverse, Data_Moho); # 3D reverse depth ordering
-@test Above[1,1,2]==true
-@test Above[1,1,3]==false
+Above = above_surface(Data_set3D_reverse, Data_Moho); # 3D reverse depth ordering
+@test Above[1, 1, 2] == true
+@test Above[1, 1, 3] == false
-Above = above_surface(Data_sub_cross, Data_Moho); # 2D cross-section
-@test Above[end]==true
-@test Above[1]==false
+Above = above_surface(Data_sub_cross, Data_Moho); # 2D cross-section
+@test Above[end] == true
+@test Above[1] == false
 # test profile creation of surface data
-test_cross = cross_section(Data_Moho, dims=(101,), Lat_level=37.5)
-@test test_cross.fields.MohoDepth[8]==-29.3km
+test_cross = cross_section(Data_Moho, dims = (101,), Lat_level = 37.5)
+@test test_cross.fields.MohoDepth[8] == -29.3km
-test_cross = cross_section(Data_Moho, dims=(101,), Lon_level=15.8)
-@test test_cross.fields.MohoDepth[11]==-24.2km
+test_cross = cross_section(Data_Moho, dims = (101,), Lon_level = 15.8)
+@test test_cross.fields.MohoDepth[11] == -24.2km
-test_cross = cross_section(Data_Moho, dims=(101,), Start=(10,30), End=(20,40))
-@test test_cross.fields.MohoDepth[30]==-27.1km
+test_cross = cross_section(Data_Moho, dims = (101,), Start = (10, 30), End = (20, 40))
+@test test_cross.fields.MohoDepth[30] == -27.1km
 # Test VoteMaps
-Data_VoteMap = votemap(Data_set3D, "Depthdata<-560",dims=(10,10,10))
-@test Data_VoteMap.fields[:votemap][101]==0
-@test Data_VoteMap.fields[:votemap][100]==1
+Data_VoteMap = votemap(Data_set3D, "Depthdata<-560", dims = (10, 10, 10))
+@test Data_VoteMap.fields[:votemap][101] == 0
+@test Data_VoteMap.fields[:votemap][100] == 1
-Data_VoteMap = votemap(Data_set3D_reverse, "Depthdata<-560",dims=(10,10,10))
-@test Data_VoteMap.fields[:votemap][101]==0
-@test Data_VoteMap.fields[:votemap][100]==1
+Data_VoteMap = votemap(Data_set3D_reverse, "Depthdata<-560", dims = (10, 10, 10))
+@test Data_VoteMap.fields[:votemap][101] == 0
+@test Data_VoteMap.fields[:votemap][100] == 1
 # Combine 2 datasets
-Data_VoteMap = votemap([Data_set3D_reverse, Data_set3D], ["Depthdata<-560","LonData>19"],dims=(10,10,10))
-@test Data_VoteMap.fields[:votemap][10,9,1]==2
-@test Data_VoteMap.fields[:votemap][9 ,9,1]==1
-@test Data_VoteMap.fields[:votemap][9 ,9,2]==0
+Data_VoteMap = votemap([Data_set3D_reverse, Data_set3D], ["Depthdata<-560", "LonData>19"], dims = (10, 10, 10))
+@test Data_VoteMap.fields[:votemap][10, 9, 1] == 2
+@test Data_VoteMap.fields[:votemap][9, 9, 1] == 1
+@test Data_VoteMap.fields[:votemap][9, 9, 2] == 0
 # Test rotation routines
-X,Y,Z = lonlatdepth_grid(10:20,30:40,-50:-10);
-Data_C = ParaviewData(X,Y,Z,(Depth=Z,))
-Data_C1 = rotate_translate_scale(Data_C, Rotate=30);
+X, Y, Z = lonlatdepth_grid(10:20, 30:40, -50:-10);
+Data_C = ParaviewData(X, Y, Z, (Depth = Z,))
+Data_C1 = rotate_translate_scale(Data_C, Rotate = 30);
 @test Data_C1.x.val[10] ≈ 1.4544826719043336
 @test Data_C1.y.val[10] ≈ 35.48076211353316
 @test Data_C1.z.val[20] == -50
-Data_C1 = rotate_translate_scale(Data_C, Scale=10, Rotate=10, Translate=(1,2,3));
+Data_C1 = rotate_translate_scale(Data_C, Scale = 10, Rotate = 10, Translate = (1, 2, 3));
 @test Data_C1.x.val[10] ≈ 136.01901977224043
 @test Data_C1.y.val[10] ≈ 330.43547966037914
 @test Data_C1.z.val[20] == -497.0
 # create point data set (e.g. Earthquakes)
-Lon,Lat,Depth = lonlatdepth_grid(15:0.05:17,35:0.05:37,280km);
-Depth = Depth - 20*Lon*km; # some variation in depth
-Magnitude = rand(size(Depth,1),size(Depth,2),size(Depth,3))*6; # some magnitude
-TestVecField = (Magnitude[:],Magnitude[:],Magnitude[:])
+Lon, Lat, Depth = lonlatdepth_grid(15:0.05:17, 35:0.05:37, 280km);
+Depth = Depth - 20 * Lon * km; # some variation in depth
+Magnitude = rand(size(Depth, 1), size(Depth, 2), size(Depth, 3)) * 6; # some magnitude
+TestVecField = (Magnitude[:], Magnitude[:], Magnitude[:])
-Data_EQ = GeoData(Lon[:],Lat[:],Depth[:],(depth=Depth[:],Magnitude=Magnitude[:],VecField=TestVecField))
+Data_EQ = GeoData(Lon[:], Lat[:], Depth[:], (depth = Depth[:], Magnitude = Magnitude[:], VecField = TestVecField))
 # Test profile creation from point data set
-cross_tmp = cross_section(Data_EQ,Depth_level=-25km,section_width=10km)
+cross_tmp = cross_section(Data_EQ, Depth_level = -25km, section_width = 10km)
 @test cross_tmp.fields.depth_proj[10] == -25km # check if the projected depth level is actually the chosen one
-cross_tmp = cross_section(Data_EQ,Lat_level=36.2,section_width=10km)
-@test cross_tmp.fields.lat_proj[10]==36.2 # check if the projected latitude level is the chosen one
+cross_tmp = cross_section(Data_EQ, Lat_level = 36.2, section_width = 10km)
+@test cross_tmp.fields.lat_proj[10] == 36.2 # check if the projected latitude level is the chosen one
-cross_tmp = cross_section(Data_EQ,Lon_level=16.4,section_width=10km)
-@test cross_tmp.fields.lon_proj[10]==16.4 # check if the projected longitude level is the chosen one
-cross_tmp = cross_section(Data_EQ,Start=(15.0,35.0),End=(17.0,37.0),section_width=10km)
-@test cross_tmp.fields.lon_proj[20] ==15.314329874961091
+cross_tmp = cross_section(Data_EQ, Lon_level = 16.4, section_width = 10km)
+@test cross_tmp.fields.lon_proj[10] == 16.4 # check if the projected longitude level is the chosen one
+cross_tmp = cross_section(Data_EQ, Start = (15.0, 35.0), End = (17.0, 37.0), section_width = 10km)
+@test cross_tmp.fields.lon_proj[20] == 15.314329874961091
 @test cross_tmp.fields.lat_proj[20] == 35.323420618580585
 # test inPolygon
-PolyX = [-2.,-1,0,1,2,1,3,3,8,3,3,1,2,1,0,-1,-2,-1,-3,-3,-8,-3,-3,-1,-2]
-PolyY = [3.,3,8.01,3,3,1,2,1,0,-1,-2,-1,-3,-3,-8,-3,-3,-1,-2,-1,0,1,2,1,3]
-xvec = collect(-9:0.5:9); yvec = collect(-9:0.5:9); zvec = collect(1.:1.);
-X,Y,Z = meshgrid(xvec, yvec, zvec)
-X, Y = X[:,:,1], Y[:,:,1]
-yN = zeros(Bool, size(X))
-inpolygon!(yN, PolyX, PolyY, X, Y, fast=true)
+PolyX = [-2.0, -1, 0, 1, 2, 1, 3, 3, 8, 3, 3, 1, 2, 1, 0, -1, -2, -1, -3, -3, -8, -3, -3, -1, -2]
+PolyY = [3.0, 3, 8.01, 3, 3, 1, 2, 1, 0, -1, -2, -1, -3, -3, -8, -3, -3, -1, -2, -1, 0, 1, 2, 1, 3]
+xvec = collect(-9:0.5:9); yvec = collect(-9:0.5:9); zvec = collect(1.0:1.0);
+X, Y, Z = meshgrid(xvec, yvec, zvec)
+X, Y = X[:, :, 1], Y[:, :, 1]
+yN = zeros(Bool, size(X))
+inpolygon!(yN, PolyX, PolyY, X, Y, fast = true)
 @test sum(yN) == 194
 inpolygon!(yN, PolyX, PolyY, X, Y)
 @test sum(yN) == 217
 X, Y, yN = X[:], Y[:], yN[:]
-inpolygon!(yN, PolyX, PolyY, X, Y, fast=true)
+inpolygon!(yN, PolyX, PolyY, X, Y, fast = true)
 @test sum(yN) == 194
 inpolygon!(yN, PolyX, PolyY, X, Y)
 @test sum(yN) == 217
 # add cell and vertex fields
-q1_data = Q1Data(xyz_grid(1:10,1:10,1:8))
-q1_data = addfield(q1_data, (region=zeros(Int64,size(q1_data)),), cellfield=true)
+q1_data = Q1Data(xyz_grid(1:10, 1:10, 1:8))
+q1_data = addfield(q1_data, (region = zeros(Int64, size(q1_data)),), cellfield = true)
 @test keys(q1_data.fields) == (:Z,)
 @test keys(q1_data.cellfields) == (:region,) # Q1 data
-q1_data = addfield(q1_data, (T=ones(Float64,size(q1_data) .+1 ),))
-@test keys(q1_data.fields) == (:Z,:T)
+q1_data = addfield(q1_data, (T = ones(Float64, size(q1_data) .+ 1),))
+@test keys(q1_data.fields) == (:Z, :T)
 @test keys(q1_data.cellfields) == (:region,)
@@ -316,5 +316,5 @@ q1_data = addfield(q1_data, (T=ones(Float64,size(q1_data) .+1 ),))
 fe_data = convert2FEData(q1_data)
 @test size(fe_data.fields.Z) == (800,)
-fe_data = addfield(fe_data, (T1=ones(Float64,size(fe_data.fields.Z) ),))
-@test keys(fe_data.fields) == (:Z,:T, :T1)
+fe_data = addfield(fe_data, (T1 = ones(Float64, size(fe_data.fields.Z)),))
+@test keys(fe_data.fields) == (:Z, :T, :T1)
diff --git a/test/test_voxel_gravity.jl b/test/test_voxel_gravity.jl
index 2601a82d..e57fd9a6 100644
--- a/test/test_voxel_gravity.jl
+++ b/test/test_voxel_gravity.jl
@@ -2,97 +2,97 @@ using Test
 using GeophysicalModelGenerator
 function main_gravity()
-############# Input #############
-# survey
-x = [-20., 100.]
-y = [100., 200.]
-z = [-100., 10.]
-nx = 111
-ny = 101
-nz = 91
-
-# spheres
-centerX = [ 50., 10., 80.]
-centerY = [ 150., 150., 180.]
-centerZ = [ -30., -10., -50.]
-radius = [ 10., 3., 10.]
-rho = [2650., 2600., 2750.]
-
-# background density model
-background = [0. 2700.]
-#################################
-
-######### Set things up #########
-# constants
-G = 6.67408e-11
-mGal = 1e5
-
-# coordinates
-x_vec = LinRange(x[1],x[2],nx)
-y_vec = LinRange(y[1],y[2],ny)
-z_vec = LinRange(z[1],z[2],nz)
-Y,X,Z = meshgrid(y_vec,x_vec,z_vec)
-
-# reference model
-RefMod = zeros(nz)
-RefMod[findall(x->x>=0, z_vec)] .= background[1]
-RefMod[findall(x->x<0, z_vec)] .= background[2]
-
-# densities
-RHO = zeros(nx,ny,nz)
-RHO[findall(x->x>=0, Z)] .= background[1]
-RHO[findall(x->x<0, Z)] .= background[2]
-
-# put in spheres
-if !(length(centerX) == length(centerY) && length(centerX) == length(centerZ) && length(centerX) == length(radius))
- error("Sphere definition is wrong")
-end
+ ############# Input #############
+ # survey
+ x = [-20.0, 100.0]
+ y = [100.0, 200.0]
+ z = [-100.0, 10.0]
+ nx = 111
+ ny = 101
+ nz = 91
+
+ # spheres
+ centerX = [50.0, 10.0, 80.0]
+ centerY = [150.0, 150.0, 180.0]
+ centerZ = [-30.0, -10.0, -50.0]
+ radius = [10.0, 3.0, 10.0]
+ rho = [2650.0, 2600.0, 2750.0]
+
+ # background density model
+ background = [0.0 2700.0]
+ #################################
+
+ ######### Set things up #########
+ # constants
+ G = 6.67408e-11
+ mGal = 1.0e5
+
+ # coordinates
+ x_vec = LinRange(x[1], x[2], nx)
+ y_vec = LinRange(y[1], y[2], ny)
+ z_vec = LinRange(z[1], z[2], nz)
+ Y, X, Z = meshgrid(y_vec, x_vec, z_vec)
+
+ # reference model
+ RefMod = zeros(nz)
+ RefMod[findall(x -> x >= 0, z_vec)] .= background[1]
+ RefMod[findall(x -> x < 0, z_vec)] .= background[2]
+
+ # densities
+ RHO = zeros(nx, ny, nz)
+ RHO[findall(x -> x >= 0, Z)] .= background[1]
+ RHO[findall(x -> x < 0, Z)] .= background[2]
+
+ # put in spheres
+ if !(length(centerX) == length(centerY) && length(centerX) == length(centerZ) && length(centerX) == length(radius))
+ error("Sphere definition is wrong")
+ end
-numSpheres = length(rho)
+ numSpheres = length(rho)
-for i = 1 : numSpheres
- d = (X .- centerX[i]).^2 .+ (Y .- centerY[i]).^2 .+ (Z .- centerZ[i]).^2
- RHO[findall(x->x x < radius[i]^2, d)] .= rho[i]
+ end
+ #################################
+
+ ############ Compute ############
+ # Analytical Solution
+ ana = zeros(nx, ny)
+ for iS in 1:numSpheres
+ for iX in 1:nx
+ for iY in 1:ny
+ d = ((x_vec[iX] - centerX[iS])^2 + (y_vec[iY] - centerY[iS])^2 + (centerZ[iS])^2)^0.5
+ depth = -centerZ[iS]
+ ana[iX, iY] = ana[iX, iY] + (4 * π * G * (radius[iS]^3) * (rho[iS] - background[2]) * depth) / (3 * (d^3)) * mGal
+ end
 end
 end
-end
-# Voxel Code
-dg1, gradX1, gradY1 = voxel_grav(X, Y, Z, RHO, refMod = RefMod, outName = "Benchmark1", printing = false)
+ # Voxel Code
+ dg1, gradX1, gradY1 = voxel_grav(X, Y, Z, RHO, refMod = RefMod, outName = "Benchmark1", printing = false)
-# Other options (same result)
-dg2, gradX2, gradY2 = voxel_grav(X, Y, Z, RHO, rhoTol = 1, refMod = "SW", outName = "Benchmark2", printing = false)
+ # Other options (same result)
+ dg2, gradX2, gradY2 = voxel_grav(X, Y, Z, RHO, rhoTol = 1, refMod = "SW", outName = "Benchmark2", printing = false)
-# Other options (different result)
-dg3, gradX3, gradY3 = voxel_grav(X, Y, Z, RHO, rhoTol = 70, refMod = "NW", outName = "Benchmark3", printing = false)
-#################################
+ # Other options (different result)
+ dg3, gradX3, gradY3 = voxel_grav(X, Y, Z, RHO, rhoTol = 70, refMod = "NW", outName = "Benchmark3", printing = false)
+ #################################
-############# Test ##############
-maxVal = maximum(broadcast(abs, ana))
-maxErr = maxVal / 20
+ ############# Test ##############
+ maxVal = maximum(broadcast(abs, ana))
+ maxErr = maxVal / 20
-# check differences
-diff1 = broadcast(abs, ana-dg1)
-diff2 = broadcast(abs, ana-dg2)
-diff3 = broadcast(abs, ana-dg3)
+ # check differences
+ diff1 = broadcast(abs, ana - dg1)
+ diff2 = broadcast(abs, ana - dg2)
+ diff3 = broadcast(abs, ana - dg3)
-# check
-@test maximum(diff1) < maxErr
-@test maximum(diff2) < maxErr
-@test maximum(diff3) > maxErr
-#################################
+ # check
+ @test maximum(diff1) < maxErr
+ @test maximum(diff2) < maxErr
+ return @test maximum(diff3) > maxErr
+ #################################
 end
 main_gravity()