diff --git a/.gitignore b/.gitignore index 540dfa8..a908246 100644 --- a/.gitignore +++ b/.gitignore @@ -8,11 +8,13 @@ *.npz *.obj *.pdf +*.ply *.png *.so *.tif *.txt *.zip +*.mat *~ .DS_Store .DS_Store @@ -27,4 +29,6 @@ dist src/flux/cgal/aabb.cpp src/flux/thermal.c src/flux/view_factor/view_factor.c +src/flux/cgal/CGAL/ +src/flux/cgal/boost/ stats \ No newline at end of file diff --git a/README.md b/README.md index 79a1ea4..1250ffe 100644 --- a/README.md +++ b/README.md @@ -4,38 +4,52 @@ ## Installation ## -### Set up a virtual environment and clone the repository ### +If using Windows, first install Microsoft Visual Studio C++ Build Tools following +the [directions at this link](https://github.com/bycloudai/InstallVSBuildToolsWindows). This includes +an installation of MSVC, Windows SDK, and C++ CMake tools for Windows followed by adding MSBuild Tools +to your system path. -Make a new directory and clone this repository to it. Then, inside the -directory that you've just created, run `python -m venv .`. This will -create a "virtual environment", which is a useful tool that Python -provides for managing dependencies for projects. The new directory -"contains" the virtual environment. +### Setting up a virtual environment ### -### Activate the virtual environment ### - -To activate the virtual environment, run `source bin/activate` from -the directory containing it. Make sure to do this before doing -anything else below. +Using [Anaconda](https://www.anaconda.com/), +a virtual environment management tool for Python, create and activate a new conda environment and download +pip via the anaconda channel by running: +``` shell +conda create --name radiosity +conda activate radiosity +conda install -c anaconda pip +conda install -c anaconda cython +conda install -c conda-forge rioxarray +``` +Alternatively, run `python -m venv .` to create a virtual environment in standard Python without using Anaconda. Activating the virtual +environment (via `conda activate radiosity` or `source bin/activate`) means that all python and pip operations will be +managed using the packages installed into the environment. Therefore, the command +must be run before proceeding to the next steps (and before running any code). ### Getting the dependencies ### First, install [python-embree](https://github.com/sampotter/python-embree) in this -virtual environment: +conda environment. Begin by installing Embree from the [Embree website](https://www.embree.org/). Typically, the Embree binaries, +headers, and libraries will all be installed to C:\Program Files\Intel\Embree3 by default. Then, clone, build, and install python-embree +to the radiosity conda environment: ``` shell git clone https://github.com/sampotter/python-embree cd python-embree +python setup.py build_ext -I "/c/Program\ Files/Intel/Embree3/include" -L "/c/Program\ Files/Intel/Embree3/lib" python setup.py install -cd - +cd .. ``` -Next, install the rest of the dependencies: +Next, clone this repository and install the rest of the dependencies, including Boost and CGAL. ``` shell git clone https://github.com/sampotter/python-flux cd python-flux pip install -r requirements.txt ``` +Install the latest versions of [Boost](https://www.boost.org/) +and [CGAL](https://www.cgal.org/download.html) from the corresponding websites. Finally, +link the CGAL and boost directories in `python-flux/setup.py` (via the `include_dirs` argument of the `flux.cgal.aabb` extension). 
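As a concrete illustration of that last step, the extension declaration in `setup.py` might end up looking roughly like the sketch below. This is only a hedged example: the install paths are placeholders for wherever you unpacked CGAL and Boost, and the source file and other keyword arguments should be left exactly as they already appear in the repository's `setup.py`.

``` python
# Hypothetical sketch of the flux.cgal.aabb extension in python-flux/setup.py.
# Only include_dirs matters here; the sources entry and remaining arguments
# are illustrative and should match the existing definition in the repo.
from setuptools import Extension

aabb_ext = Extension(
    'flux.cgal.aabb',
    sources=['src/flux/cgal/aabb.pyx'],   # keep whatever the repo already lists
    include_dirs=[
        'C:/CGAL-5.5/include',    # placeholder: path to your CGAL headers
        'C:/boost_1_80_0',        # placeholder: path to your Boost headers
    ],
    language='c++',
)
```

After pointing `include_dirs` at your local CGAL and Boost header directories, proceed to the installation step below.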
### Installing this package ### @@ -44,24 +58,23 @@ Finally, from the cloned repository, run: python setup.py install ``` -## Running the unit tests ## - -To run python-flux's unit tests, just run: -``` shell -./run_tests.sh -``` -from the root of this repository. All this script does is execute `python -m unittest discover ./tests`; i.e., it discovers and runs all Python unit tests found in the directory `./tests`. - ## Running the examples ## The `examples` directory contains simple Python scripts illustrating -how to use this package. Each example directory contains a README with -more information about that particulra example. +how to use this package. The most up-to-date implementations can be +found in the `examples/shackleton_vary_outer` and `examples/blurred_south_pole` +sub-directories. For instance, try running: ``` shell -cd examples/lunar_south_pole -python haworth.py +conda install -c conda-forge arrow +conda install -c conda-forge tqdm + +cd examples/shackleton_vary_outer +python make_mesh.py 5.0 40 +python make_compressed_form_factor_matrix.py --compression_type "svd" --max_area 5.0 --outer_radius 40 --tol 1e-1 --k0 40 --add_residuals +python make_block_plots.py --compression_type "svd" --max_area 5.0 --outer_radius 40 --tol 1e-1 --k0 40 --add_residuals +python collect_time_dep_data_nomemory.py --compression_type "svd" --max_area 5.0 --outer_radius 40 --tol 1e-1 --k0 40 --add_residuals ``` after following the installation instructions above. diff --git a/examples/blurred_south_pole/make_block_plots.py b/examples/blurred_south_pole/make_block_plots.py new file mode 100644 index 0000000..e52f028 --- /dev/null +++ b/examples/blurred_south_pole/make_block_plots.py @@ -0,0 +1,175 @@ +import itertools as it +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np + +import flux.compressed_form_factors as cff +from flux.compressed_form_factors import CompressedFormFactorMatrix + +import scipy +import os +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd", choices=["svd", "rand_svd", "aca", "rand_id"]) +parser.add_argument('--max_area', type=float, default=3.0) +parser.add_argument('--outer_radius', type=int, default=80) +parser.add_argument('--sigma', type=float, default=5.0) + +parser.add_argument('--tol', type=float, default=1e-1) +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) +parser.add_argument('--compress_sparse', action='store_true') + +parser.add_argument('--add_residuals', action='store_true') +parser.add_argument('--k0', type=int, default=40) +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--cliques', action='store_true') +parser.add_argument('--n_cliques', type=int, default=25) +parser.add_argument('--obb', action='store_true') + +parser.add_argument('--plot_labels', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +def plot_blocks(block, fig, **kwargs): + + if 'figsize' in kwargs: + fig.set_size_inches(*kwargs['figsize']) + else: + fig.set_size_inches(12, 12) + + ax = fig.add_subplot() + ax.axis('off') + + ax.set_xlim(-0.001, 1.001) + ax.set_ylim(-0.001, 1.001) + + rect = patches.Rectangle((0, 0), 1, 1, linewidth=1, edgecolor='k', + facecolor='none') + ax.add_patch(rect) + + def get_ind_offsets(inds): + return np.concatenate([[0], np.cumsum([len(I) for I in inds])]) + + def 
add_rects(block, c0=(0, 0), w0=1, h0=1): + row_offsets = get_ind_offsets(block._row_block_inds) + col_offsets = get_ind_offsets(block._col_block_inds) + for (i, row), (j, col) in it.product( + enumerate(row_offsets[:-1]), + enumerate(col_offsets[:-1]) + ): + w = w0*(row_offsets[i + 1] - row)/row_offsets[-1] + h = h0*(col_offsets[j + 1] - col)/col_offsets[-1] + i0, j0 = c0 + c = (i0 + w0*row/row_offsets[-1], j0 + h0*col/col_offsets[-1]) + + child = block._blocks[i, j] + if child.is_leaf: + if isinstance(child, cff.FormFactorSvdBlock) or isinstance(child, cff.FormFactorSparseSvdBlock) or \ + isinstance(child, cff.FormFactorAcaBlock) or isinstance(child, cff.FormFactorSparseAcaBlock) or \ + isinstance(child, cff.FormFactorIdBlock) or isinstance(child, cff.FormFactorSparseIdBlock): + facecolor = 'cyan' if child.compressed else 'orange' + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor=facecolor) + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorZeroBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='black') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorSparseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='white') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorDenseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='magenta') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorNullBlock): + continue + else: + raise Exception('TODO: add %s to _plot_block' % type(child)) + + if args.plot_labels: + ax.text(c[0] + 0.5*w, c[1] + 0.5*h, "{:.0e}".format(np.prod(child.shape)), transform=ax.transAxes, fontsize=8, verticalalignment='center', horizontalalignment='center') + else: + add_rects(child, c, w, h) + + rect = patches.Rectangle( + c, w, h, linewidth=1, edgecolor='k', facecolor='none') + ax.add_patch(rect) + + add_rects(block) + + ax.invert_xaxis() + + return fig, ax + + + + +compression_type = args.compression_type +max_area_str = str(args.max_area) +outer_radius_str = str(args.outer_radius) +sigma_str = str(args.sigma) + +max_depth = args.max_depth if args.max_depth != 0 else None + +if compression_type == "svd" or compression_type == "aca": + if args.add_residuals: + FF_dir = "{}_resid_{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_area_str, outer_radius_str, sigma_str, args.tol, + args.k0) + else: + FF_dir = "{}_{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_area_str, outer_radius_str, sigma_str, args.tol, + args.k0) + +elif compression_type == "rand_svd" or compression_type == "rand_id": + if args.add_residuals: + FF_dir = "{}_resid_{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_area_str, outer_radius_str, sigma_str, args.tol, + args.p, args.q, args.k0) + else: + FF_dir = "{}_{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_area_str, outer_radius_str, sigma_str, args.tol, + args.p, args.q, args.k0) + + +if not args.cliques and args.min_depth != 1: + FF_dir += "_{}mindepth".format(args.min_depth) + +if max_depth is not None: + FF_dir += "_{}maxdepth".format(max_depth) + +if args.compress_sparse: + FF_dir += "_cs" + +result_dir = "results" +if args.cliques: + FF_dir += "_{}nc".format(args.n_cliques) + result_dir += "_cliques" + +if args.cliques and args.obb: + FF_dir = FF_dir + "_obb" + +if args.add_residuals: + FF_path = result_dir+"/"+ FF_dir + "/FF_{}_{}_{:.0e}_{}_resid.bin".format(max_area_str, outer_radius_str, args.tol, compression_type) +else: + FF_path = result_dir+"/"+ FF_dir + 
"/FF_{}_{}_{:.0e}_{}.bin".format(max_area_str, outer_radius_str, args.tol, compression_type) + +if not os.path.exists(FF_path): + print("PATH DOES NOT EXIST " + FF_path) + assert False +FF = CompressedFormFactorMatrix.from_file(FF_path) + + +fig = plt.figure(figsize=(18, 6)) # , figsize=(18, 6)) +print(f'- {FF_path}') +fig, ax = plot_blocks(FF._root, fig) +if args.plot_labels: + fig.savefig(result_dir+"/"+FF_dir+"/block_plot_labeled.png") +else: + fig.savefig(result_dir+"/"+FF_dir+"/block_plot.png") +plt.close(fig) \ No newline at end of file diff --git a/examples/blurred_south_pole/make_cliqued_form_factor_matrix.py b/examples/blurred_south_pole/make_cliqued_form_factor_matrix.py new file mode 100644 index 0000000..6bf5ced --- /dev/null +++ b/examples/blurred_south_pole/make_cliqued_form_factor_matrix.py @@ -0,0 +1,198 @@ +import numpy as np +import scipy.sparse + +from flux.form_factors import get_form_factor_matrix +from flux.compressed_form_factors import CompressedFormFactorMatrix, FormFactorPartitionBlock, FormFactorQuadtreeBlock, FormFactorObbQuadtreeBlock +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +import argparse +import arrow +import os + +from copy import copy + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd", choices=["svd", "rand_svd", "aca", "rand_id"]) +parser.add_argument('--max_area', type=float, default=3.0) +parser.add_argument('--outer_radius', type=int, default=80) +parser.add_argument('--sigma', type=float, default=5.0) + +parser.add_argument('--tol', type=float, default=1e-1) +parser.add_argument('--max_depth', type=int, default=0) +parser.add_argument('--compress_sparse', action='store_true') + +parser.add_argument('--add_residuals', action='store_true') +parser.add_argument('--k0', type=int, default=40) +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--n_cliques', type=int, default=25) +parser.add_argument('--obb', action='store_true') +parser.add_argument('--load_ff', action='store_true') + +parser.add_argument('--overwrite', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + +# produce compressed FF from shapemodel produced by make_mesh.py + +compression_type = args.compression_type +max_area_str = str(args.max_area) +outer_radius_str = str(args.outer_radius) +sigma_str = str(args.sigma) +tol_str = "{:.0e}".format(args.tol) +tol = args.tol + +max_depth = args.max_depth if args.max_depth != 0 else None + + +if compression_type == "svd" or compression_type == "aca": + compression_params = { + "k0": args.k0 + } + + prefix = compression_type + "_resid" if args.add_residuals else compression_type + + savedir = "{}_{}_{}_{}_{:.0e}_{}k0".format(prefix, max_area_str, outer_radius_str, sigma_str, tol, + args.k0) + + +elif compression_type == "rand_svd" or compression_type == "rand_id": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + prefix = compression_type + "_resid" if args.add_residuals else compression_type + + savedir = "{}_{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(prefix, max_area_str, outer_radius_str, sigma_str, tol, + args.p, args.q, args.k0) + + + + +if max_depth is not None: + savedir += "_{}maxdepth".format(max_depth) + +if args.compress_sparse: + savedir += "_cs" + +savedir = "results_cliques/"+savedir +savedir = savedir+"_{}nc".format(args.n_cliques) +if args.obb: + savedir += "_obb" +if not os.path.exists('results_cliques'): + 
os.mkdir('results_cliques') +if not os.path.exists(savedir): + os.mkdir(savedir) + + +if not args.overwrite: + if args.add_residuals: + if os.path.exists(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}_resid.bin'): + raise RuntimeError("Compressed FF already exists!") + else: + if os.path.exists(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}.bin'): + raise RuntimeError("Compressed FF already exists!") + + +verts = np.load(f'blurred_pole_verts_{max_area_str}_{outer_radius_str}_{sigma_str}.npy') +faces = np.load(f'blurred_pole_faces_{max_area_str}_{outer_radius_str}_{sigma_str}.npy') + +# convert verts from km to m +verts *= 1e3 + +normals = get_surface_normals(verts, faces) +normals[normals[:, 2] > 0] *= -1 + +shape_model = CgalTrimeshShapeModel(verts, faces, normals) + + +start_assembly_time = arrow.now() + +if args.load_ff: + path = f"results/true_{max_area_str}_{outer_radius_str}_{sigma_str}/FF_{max_area_str}_{outer_radius_str}.npz" + FF_uncompressed = scipy.sparse.load_npz(path) +else: + FF_uncompressed = get_form_factor_matrix(shape_model) + + +dmat = FF_uncompressed.A +FF_adj = np.zeros(FF_uncompressed.shape) +FF_adj[dmat >= np.quantile(dmat[dmat>0.], 0.0)] = 1. + +all_pseudo_cliques = [] +for i in np.random.permutation(FF_adj.shape[0]): + + # make sure the seed is not already in a clique + if np.array([i in _pseudo_clique for _pseudo_clique in all_pseudo_cliques]).any(): + continue + + nonzero_idx = list((FF_adj[i] > 0).nonzero()[0]) + nonzero_idx.append(i) + nonzero_idx = np.array(nonzero_idx) + + _nonzero_idx = [] + for j in nonzero_idx: + + if j == i: + _nonzero_idx.append(j) + + elif not np.array([j in _pseudo_clique for _pseudo_clique in all_pseudo_cliques]).any(): + this_nonzero_idx = list((FF_adj[j] > 0).nonzero()[0]) + this_nonzero_idx.append(j) + this_nonzero_idx = np.array(this_nonzero_idx) + num_intersecting = np.intersect1d(nonzero_idx, this_nonzero_idx).shape[0] + + if num_intersecting >= 0.5*min(nonzero_idx.shape[0], this_nonzero_idx.shape[0]): + _nonzero_idx.append(j) + nonzero_idx = np.array(_nonzero_idx) + + pseudo_clique = set(list(np.copy(nonzero_idx))) + all_pseudo_cliques.append(pseudo_clique) + +all_pseudo_clique_lists = [] +for i in range(len(all_pseudo_cliques)): + all_pseudo_clique_lists.append(np.array(list(all_pseudo_cliques[i]))) +ordered_pseudo_clique_list = [] +for new_idx in np.argsort([len(c) for c in all_pseudo_clique_lists])[::-1]: + ordered_pseudo_clique_list.append(all_pseudo_clique_lists[new_idx]) + + +culled_cliques = [] + +for i in range(args.n_cliques): + culled_cliques.append(np.copy(ordered_pseudo_clique_list[i])) + +all_small_cliques = [] +for i in range(args.n_cliques, len(ordered_pseudo_clique_list)): + all_small_cliques += list(np.copy(ordered_pseudo_clique_list[i])) +culled_cliques.append(np.array(all_small_cliques)) + +current_clique_list = copy(culled_cliques) + +if np.sum([len(this_clique) for this_clique in current_clique_list]) != FF_adj.shape[0]: + raise RuntimeError("Cliques do not cover index set!") +else: + print("Cliques cover index set!") + +if args.obb: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384, max_depth=max_depth, compression_type=compression_type, compression_params=compression_params, parts=current_clique_list, + RootBlock=FormFactorPartitionBlock, ChildBlock=FormFactorObbQuadtreeBlock, truncated_sparse=args.compress_sparse, add_residuals=args.add_residuals) +else: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, 
min_size=16384, max_depth=max_depth, compression_type=compression_type, compression_params=compression_params, parts=current_clique_list, + RootBlock=FormFactorPartitionBlock, ChildBlock=FormFactorQuadtreeBlock, truncated_sparse=args.compress_sparse, add_residuals=args.add_residuals) + +assembly_time = (arrow.now() - start_assembly_time).total_seconds() + +np.save(savedir+f'/FF_assembly_time.npy', np.array(assembly_time)) + +if args.add_residuals: + FF.save(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}_resid.bin') +else: + FF.save(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}.bin') \ No newline at end of file diff --git a/examples/blurred_south_pole/make_compressed_form_factor_matrix.py b/examples/blurred_south_pole/make_compressed_form_factor_matrix.py new file mode 100644 index 0000000..26b00dd --- /dev/null +++ b/examples/blurred_south_pole/make_compressed_form_factor_matrix.py @@ -0,0 +1,181 @@ +import numpy as np +import scipy.sparse + +from flux.form_factors import get_form_factor_matrix, get_form_factor_paige, get_form_factor_sparsified +from flux.compressed_form_factors import CompressedFormFactorMatrix, FormFactorMinDepthQuadtreeBlock +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +import argparse +import arrow +import os + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd", choices=["svd", "rand_svd", "aca", "rand_id", "paige", "sparse_tol", "sparse_k", "true_model"]) +parser.add_argument('--max_area', type=float, default=3.0) +parser.add_argument('--outer_radius', type=int, default=80) +parser.add_argument('--sigma', type=float, default=5.0) + +parser.add_argument('--tol', type=float, default=1e-1) +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) +parser.add_argument('--compress_sparse', action='store_true') + +parser.add_argument('--add_residuals', action='store_true') +parser.add_argument('--k0', type=int, default=40) +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--paige_mult', type=int, default=1) +parser.add_argument('--sparse_mult', type=int, default=1) + +parser.add_argument('--overwrite', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + +# produce compressed FF from shapemodel produced by make_mesh.py + +compression_type = args.compression_type +max_area_str = str(args.max_area) +outer_radius_str = str(args.outer_radius) +sigma_str = str(args.sigma) +tol_str = "{:.0e}".format(args.tol) +tol = args.tol + +max_depth = args.max_depth if args.max_depth != 0 else None + + +if compression_type == "true_model": + compression_params = {} + + savedir = "true_{}_{}_{}".format(max_area_str, outer_radius_str, sigma_str, sigma_str) + + +elif compression_type == "paige": + compression_params = {} + + savedir = "paige_{}_{}_{}_{}k".format(max_area_str, outer_radius_str, sigma_str, args.paige_mult) + + +elif compression_type == "sparse_tol": + compression_params = {} + + savedir = "sparse_{}_{}_{}_{:.0e}".format(max_area_str, outer_radius_str, sigma_str, args.tol) + + +elif compression_type == "sparse_k": + compression_params = {} + + savedir = "sparse_{}_{}_{}_{}k".format(max_area_str, outer_radius_str, sigma_str, args.sparse_mult) + + +elif compression_type == "svd" or compression_type == "aca": + compression_params = { + "k0": args.k0 + } + + prefix = compression_type + 
"_resid" if args.add_residuals else compression_type + + savedir = "{}_{}_{}_{}_{:.0e}_{}k0".format(prefix, max_area_str, outer_radius_str, sigma_str, tol, + args.k0) + + +elif compression_type == "rand_svd" or compression_type == "rand_id": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + prefix = compression_type + "_resid" if args.add_residuals else compression_type + + savedir = "{}_{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(prefix, max_area_str, outer_radius_str, sigma_str, tol, + args.p, args.q, args.k0) + + + + +if not (compression_type == "true_model" or compression_type == "paige" or compression_type == "sparse_tol" or compression_type == "sparse_k") and args.min_depth != 1: + savedir += "_{}mindepth".format(args.min_depth) + +if not (compression_type == "true_model" or compression_type == "paige" or compression_type == "sparse_tol" or compression_type == "sparse_k") and max_depth is not None: + savedir += "_{}maxdepth".format(max_depth) + +if not (compression_type == "true_model" or compression_type == "paige" or compression_type == "sparse_tol" or compression_type == "sparse_k") and args.compress_sparse: + savedir += "_cs" + +savedir = "results/"+savedir +if not os.path.exists('results'): + os.mkdir('results') +if not os.path.exists(savedir): + os.mkdir(savedir) + + +if not args.overwrite: + if compression_type == "true_model" or compression_type == "paige" or compression_type == "sparse_tol" or compression_type == "sparse_k": + if os.path.exists(savedir+f'/FF_{max_area_str}_{outer_radius_str}.npz'): + raise RuntimeError("Sparse FF already exists!") + else: + if args.add_residuals: + if os.path.exists(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}_resid.bin'): + raise RuntimeError("Compressed FF already exists!") + else: + if os.path.exists(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}.bin'): + raise RuntimeError("Compressed FF already exists!") + + +verts = np.load(f'blurred_pole_verts_{max_area_str}_{outer_radius_str}_{sigma_str}.npy') +faces = np.load(f'blurred_pole_faces_{max_area_str}_{outer_radius_str}_{sigma_str}.npy') + +# convert verts from km to m +verts *= 1e3 + +normals = get_surface_normals(verts, faces) +normals[normals[:, 2] > 0] *= -1 + +shape_model = CgalTrimeshShapeModel(verts, faces, normals) + + +start_assembly_time = arrow.now() + +if args.compression_type == "true_model": + FF = get_form_factor_matrix(shape_model) + +elif args.compression_type == "paige": + path = f'results/true_{max_area_str}_{outer_radius_str}_{sigma_str}/FF_{max_area_str}_{outer_radius_str}.npz' + full_sparse_FF = scipy.sparse.load_npz(path) + FF = get_form_factor_paige(shape_model, full_sparse_FF, args.paige_mult*np.sqrt(shape_model.F.shape[0])) + +elif args.compression_type == "sparse_tol": + path = f'results/true_{max_area_str}_{outer_radius_str}_{sigma_str}/FF_{max_area_str}_{outer_radius_str}.npz' + full_sparse_FF = scipy.sparse.load_npz(path) + FF = get_form_factor_sparsified(full_sparse_FF, tol=args.tol) + +elif args.compression_type == "sparse_k": + path = f'results/true_{max_area_str}_{outer_radius_str}_{sigma_str}/FF_{max_area_str}_{outer_radius_str}.npz' + full_sparse_FF = scipy.sparse.load_npz(path) + FF = get_form_factor_sparsified(full_sparse_FF, k=args.sparse_mult*shape_model.F.shape[0]) + +elif args.min_depth != 1: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384, max_depth=max_depth, compression_type=compression_type, compression_params=compression_params, + 
min_depth=args.min_depth, RootBlock=FormFactorMinDepthQuadtreeBlock, truncated_sparse=args.compress_sparse, add_residuals=args.add_residuals) + +else: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384, max_depth=max_depth, compression_type=compression_type, compression_params=compression_params, + truncated_sparse=args.compress_sparse, add_residuals=args.add_residuals) + +assembly_time = (arrow.now() - start_assembly_time).total_seconds() + +np.save(savedir+f'/FF_assembly_time.npy', np.array(assembly_time)) + + +if args.compression_type == "true_model" or args.compression_type == "paige" or compression_type == "sparse_tol" or compression_type == "sparse_k": + scipy.sparse.save_npz(savedir+f'/FF_{max_area_str}_{outer_radius_str}', FF) +else: + if args.add_residuals: + FF.save(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}_resid.bin') + else: + FF.save(savedir+f'/FF_{max_area_str}_{outer_radius_str}_{tol_str}_{compression_type}.bin') \ No newline at end of file diff --git a/examples/blurred_south_pole/make_mesh_downsampled.py b/examples/blurred_south_pole/make_mesh_downsampled.py new file mode 100644 index 0000000..3444db9 --- /dev/null +++ b/examples/blurred_south_pole/make_mesh_downsampled.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +import matplotlib.pyplot as plt +import meshio +import meshpy.triangle as triangle +import numpy as np +import sys +import rioxarray as rio +import scipy.ndimage + +path = 'ldem_60s_final_adj_240mpp_surf' +tif_path = path + '.tif' + +max_inner_area_str = sys.argv[1] +max_outer_area_str = max_inner_area_str + +R_roi_str = sys.argv[2] + +max_inner_area = float(max_inner_area_str) +max_outer_area = float(max_outer_area_str) + +Rp = 1737.4 + +rds = rio.open_rasterio(tif_path) + +xgrid = rds.coords['y'].values*-1.e-3 +ygrid = rds.coords['x'].values*1.e-3 +dem = rds.data[0].T[:,::-1] + +sigma_str = sys.argv[3] +sigma = float(sigma_str) +dem = scipy.ndimage.gaussian_filter(dem, sigma=sigma) + +nx = len(xgrid) +ny = len(ygrid) + +dx = rds.coords['x'].attrs["actual_range"][-1] - rds.coords['x'].attrs["actual_range"][0] +dy = rds.coords['y'].attrs["actual_range"][-1] - rds.coords['y'].attrs["actual_range"][0] +hx, hy = dx/nx, dy/ny + +xmin, xmax = xgrid.min(), xgrid.max() +ymin, ymax = ygrid.min(), ygrid.max() + +scale_x2ind = (nx - 1)/(xmax - xmin) +scale_y2ind = (ny - 1)/(ymax - ymin) + +def getdem(i, j): + return dem[i, j]/1e3 # we want this in km initially + +def getz(x, y): + '''linearly interpolate z from memory-mapped DEM''' + + assert xmin <= x <= xmax and ymin <= y <= ymax + + isub = scale_x2ind*(x - xmin) + jsub = scale_y2ind*(y - ymin) + + i0, i1 = int(np.floor(isub)), int(np.ceil(isub)) + j0, j1 = int(np.floor(jsub)), int(np.ceil(jsub)) + + # handle special cases first + + if i0 == i1 and j0 == j1: + return getdem(i0, j0) + + p1 = (y - ygrid[j0])/hy + assert 0 <= p1 <= 1 + if i0 == i1: + return (1 - p1)*getdem(i0, j0) + p1*getdem(i0, j1) + + p0 = (x - xgrid[i0])/hx + assert 0 <= p0 <= 1 + if j0 == j1: + return (1 - p0)*getdem(i0, j0) + p0*getdem(i1, j0) + + assert i1 == i0 + 1 and j1 == j0 + 1 + + z00 = getdem(i0, j0) + z01 = getdem(i0, j1) + z10 = getdem(i1, j0) + z11 = getdem(i1, j1) + z0 = (1 - p0)*z00 + p0*z10 + z1 = (1 - p0)*z01 + p0*z11 + z = (1 - p1)*z0 + p1*z1 + + return z # km + +def unproject_stereographic(x, y): + az0, el0, R = 0, -np.pi/2, Rp + if x == 0 and y == 0: + return az0, el0 + rho = np.sqrt(x**2 + y**2) + c = 2*np.arctan2(rho, 2*R) + c_c, s_c = np.cos(c), np.sin(c) + c_l, s_l = 
np.cos(el0), np.sin(el0) + az = az0 + np.arctan2(x*s_c, c_l*rho*c_c - s_l*y*s_c) + az = np.mod(az, 2*np.pi) + el = np.arcsin(c_c*s_l + c_l*y*s_c/rho) + return az, el + +def stereo2cart(x, y): + az, el = unproject_stereographic(x, y) + # print("x,y",x,y) + # start = time.time() + dR = getz(x, y) + # print("dR call took", time.time()-start) + # print(dR) + # start = time.time() + # xi = xr.DataArray([x], dims="z") + # yi = xr.DataArray([y], dims="z") + # dR = np.squeeze(rds.interp(x=xi, y=yi).data) + # print("rioxarray call took", time.time()-start) + # print(dR) + # exit() + r = Rp + dR + cos_el = np.cos(el) + return r*np.array([cos_el*np.cos(az), cos_el*np.sin(az), np.sin(el)]) + +xc_roi, yc_roi = 8, -6 +r_roi, R_roi = 10, int(R_roi_str) + +x0_roi, x1_roi = xc_roi - R_roi, xc_roi + R_roi +y0_roi, y1_roi = yc_roi - R_roi, yc_roi + R_roi + +def should_refine(verts, _): + verts = np.array(verts) + assert verts.shape[0] == 3 + + P = np.empty((3, 3)) + for i, (x, y) in enumerate(verts): + P[i] = stereo2cart(x, y) + + tri_area = np.linalg.norm(np.cross(P[1] - P[0], P[2] - P[0]))/2 + + xm, ym = np.mean(verts, 0) + rm = np.sqrt((xm - xc_roi)**2 + (ym - yc_roi)**2) + + if rm < r_roi: + max_area = max_inner_area + else: + max_area = ((rm - r_roi)/(R_roi - r_roi))*max_inner_area \ + + (rm/(R_roi - r_roi))*max_outer_area + + return tri_area > max_area + +points = np.array( + [(x0_roi, y0_roi), (x1_roi, y0_roi), (x1_roi, y1_roi), (x0_roi, y1_roi)], + dtype=np.float32) +facets = np.array([(0, 1), (1, 2), (2, 3), (3, 0)], dtype=np.intc) + +info = triangle.MeshInfo() +info.set_points(points) +info.set_facets(facets) + +mesh = triangle.build(info, refinement_func=should_refine) + +verts_stereo = np.array(mesh.points) +verts = np.array([stereo2cart(*_) for _ in np.array(mesh.points)]).squeeze() +faces = np.array(mesh.elements) + +print(f' * {verts.shape[0]} vertices and {faces.shape[0]} faces') + +area_str = f'{max_inner_area_str}_{R_roi_str}_{sigma_str}' +np.save(f'blurred_pole_verts_stereo_{area_str}', verts_stereo) +np.save(f'blurred_pole_verts_{area_str}', verts) +np.save(f'blurred_pole_faces_{area_str}', faces) diff --git a/examples/faustini/collect_time_dep_data_faustini.py b/examples/faustini/collect_time_dep_data_faustini.py new file mode 100644 index 0000000..dfd13fd --- /dev/null +++ b/examples/faustini/collect_time_dep_data_faustini.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python +import json_numpy as json +import scipy.sparse +import sys +import glob +import pyvista as pv +import os + +import meshio +import numpy as np +from tqdm import tqdm + +from spice_util import get_sunvec +from flux.compressed_form_factors_nmf import CompressedFormFactorMatrix +from flux.model import ThermalModel +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +import argparse +import arrow + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "true_model"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', 
type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.set_defaults(feature=False) + +args = parser.parse_args() + +# some useful routines +# transform cartesian to spherical (meters, radians) +def cart2sph(xyz): + + rtmp = np.linalg.norm(np.array(xyz).reshape(-1, 3), axis=1) + lattmp = np.arcsin(np.array(xyz).reshape(-1, 3)[:, 2] / rtmp) + lontmp = np.arctan2(np.array(xyz).reshape(-1, 3)[:, 1], np.array(xyz).reshape(-1, 3)[:, 0]) + + return rtmp, lattmp, lontmp + +def sind(x): + return np.sin(np.deg2rad(x)) + + +def cosd(x): + return np.cos(np.deg2rad(x)) + +def project_stereographic(lon, lat, lon0, lat0, R=1): + """ + project cylindrical coordinates to stereographic xy from central lon0/lat0 + :param lon: array of input longitudes (deg) + :param lat: array of input latitudes (deg) + :param lon0: center longitude for the projection (deg) + :param lat0: center latitude for the projection (deg) + :param R: planetary radius (km) + :return: stereographic projection xy coord from center (km) + """ + + cosd_lat = cosd(lat) + cosd_lon_lon0 = cosd(lon - lon0) + sind_lat = sind(lat) + + k = (2. * R) / (1. + sind(lat0) * sind_lat + cosd(lat0) * cosd_lat * cosd_lon_lon0) + x = k * cosd_lat * sind(lon - lon0) + y = k * (cosd(lat0) * sind_lat - sind(lat0) * cosd_lat * cosd_lon_lon0) + + return x, y + +# ============================================================ +# main code + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) +tol_str = "{:.0e}".format(args.tol) + +max_depth = args.max_depth if args.max_depth != 0 else None + + +if compression_type == "true_model": + FF_dir = "true_{}_{}".format(max_inner_area_str, max_outer_area_str) + +elif compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == "wsnmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +if not compression_type == "true_model" and 
args.min_depth != 1: + FF_dir += "_{}mindepth".format(args.min_depth) + +if not compression_type == "true_model" and max_depth is not None: + FF_dir += "_{}maxdepth".format(max_depth) + + +FF_dir = "results/"+FF_dir +if not os.path.exists(FF_dir): + print("PATH DOES NOT EXIST "+FF_dir) + assert False +savedir = FF_dir + "/T_frames" +if not os.path.exists(savedir): + os.mkdir(savedir) + + +# read shapemodel and form-factor matrix generated by make_compressed_form_factor_matrix.py +if compression_type == 'true_model': + path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}.npz' + FF = scipy.sparse.load_npz(path) + V = np.load(f'faustini_verts_{max_inner_area_str}_{max_outer_area_str}.npy') + F = np.load(f'faustini_faces_{max_inner_area_str}_{max_outer_area_str}.npy') + N = get_surface_normals(V, F) + N[N[:, 2] > 0] *= -1 + shape_model = CgalTrimeshShapeModel(V, F, N) +else: + path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}_{compression_type}.bin' + FF = CompressedFormFactorMatrix.from_file(path) + shape_model = FF.shape_model + +print(' * loaded form factor matrix and (cartesian) shape model') + +# choose simulation parameters +with open('params.json') as f: + params = json.load(f) + + +# Define time window (it can be done either with dates or with utc0 - initial epoch - and np.linspace of epochs) + +# utc0 = '2011 MAR 01 00:00:00.00' +# utc1 = '2011 MAR 02 00:00:00.00' +# num_frames = 100 +# stepet = 86400/100 +# sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple.furnsh", +# target='SUN', observer='MOON', frame='MOON_ME') +# t = np.linspace(0, 86400, num_frames + 1) + +utc0 = '2011 MAR 01 00:00:00.00' +utc1 = '2011 MAR 31 00:00:00.00' +num_frames = 3000 +stepet = 2592000/3000 +sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple.furnsh", + target='SUN', observer='MOON', frame='MOON_ME') +t = np.linspace(0, 2592000, num_frames + 1) + +D = sun_vecs/np.linalg.norm(sun_vecs, axis=1)[:, np.newaxis] +D = D.copy(order='C') + +print(' * got sun positions from SPICE') + +z = np.linspace(0, 3e-3, 31) + +print(' * set up thermal model') +thermal_model = ThermalModel( + FF, t, D, + F0=np.repeat(1365, len(D)), rho=0.11, method='1mvp', + z=z, T0=100, ti=120, rhoc=9.6e5, emiss=0.95, + Fgeotherm=0.2, bcond='Q', shape_model=shape_model) + +Tmin, Tmax = np.inf, -np.inf +vmin, vmax = 90, 310 + +sim_start_time = arrow.now() +for frame_index, T in tqdm(enumerate(thermal_model), total=D.shape[0], desc='thermal models time-steps'): + # print(f' + {frame_index + 1}/{D.shape[0]}') + path = savedir+"/T{:03d}.npy".format(frame_index) + np.save(path, T) +sim_duration = (arrow.now()-sim_start_time).total_seconds() +print(' * thermal model run completed in {:.2f} seconds'.format(sim_duration)) + +np.save(savedir+f'/sim_duration.npy', np.array(sim_duration)) + +# retrieve T at surface for all epochs and compute max and mean along epochs +Tfiles = glob.glob(savedir+f"/T*.npy") + +Tsurf = [] +for f in Tfiles: + Tsurf.append(np.load(f)[:, 0]) +Tsurf = np.vstack(Tsurf) + +Tsurf_max = np.max(Tsurf, axis=0) # max along epochs +Tsurf_mean = np.mean(Tsurf, axis=0) # mean along epochs + +# plot +# generate stereographic counterpart to shape_model (for plotting) +Rp = 1737.4e3 +r, lat, lon = cart2sph(shape_model.V) +x, y = project_stereographic(np.rad2deg(lon), np.rad2deg(lat), lon0=0, lat0=-90., R=1737.4e3) +V_st = np.vstack([x, y, r-Rp]).T +shape_model_st = CgalTrimeshShapeModel(V_st.copy(order='C'), shape_model.F) + +# save mesh to file 
+mesh = meshio.Mesh(shape_model_st.V, [('triangle', shape_model_st.F)]) +fout = f'faustini_{max_inner_area_str}_{max_outer_area_str}_st.ply' +mesh.write(fout) + +# read with pyvista and plot Tmax and Tmean at surface +grid = pv.read(fout) +grid.cell_data['Tmax'] = Tsurf_max +grid.plot() + +grid.cell_data['Tmean'] = Tsurf_mean +grid.plot() diff --git a/examples/faustini/make_block_plots.py b/examples/faustini/make_block_plots.py new file mode 100644 index 0000000..f3ac6f4 --- /dev/null +++ b/examples/faustini/make_block_plots.py @@ -0,0 +1,186 @@ +import itertools as it +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np + +import flux.compressed_form_factors_nmf as cff +from flux.compressed_form_factors_nmf import CompressedFormFactorMatrix + +import scipy +import os +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "true_model"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.add_argument('--plot_labels', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +def plot_blocks(block, fig, **kwargs): + + if 'figsize' in kwargs: + fig.set_size_inches(*kwargs['figsize']) + else: + fig.set_size_inches(12, 12) + + ax = fig.add_subplot() + ax.axis('off') + + ax.set_xlim(-0.001, 1.001) + ax.set_ylim(-0.001, 1.001) + + rect = patches.Rectangle((0, 0), 1, 1, linewidth=1, edgecolor='k', + facecolor='none') + ax.add_patch(rect) + + def get_ind_offsets(inds): + return np.concatenate([[0], np.cumsum([len(I) for I in inds])]) + + def add_rects(block, c0=(0, 0), w0=1, h0=1): + row_offsets = get_ind_offsets(block._row_block_inds) + col_offsets = get_ind_offsets(block._col_block_inds) + for (i, row), (j, col) in it.product( + enumerate(row_offsets[:-1]), + enumerate(col_offsets[:-1]) + ): + w = w0*(row_offsets[i + 1] - row)/row_offsets[-1] + h = h0*(col_offsets[j + 1] - col)/col_offsets[-1] + i0, j0 = c0 + c = (i0 + w0*row/row_offsets[-1], j0 + h0*col/col_offsets[-1]) + + child = block._blocks[i, j] + if child.is_leaf: + if isinstance(child, cff.FormFactorSvdBlock) or isinstance(child, cff.FormFactorSparseSvdBlock) or isinstance(child, cff.FormFactorNmfBlock) or isinstance(child, cff.FormFactorSparseNmfBlock): + facecolor = 'cyan' if child.compressed else 'orange' + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor=facecolor) + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorZeroBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='black') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorSparseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='white') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorDenseBlock): + rect = patches.Rectangle( + c, w, h, 
edgecolor='none', facecolor='magenta') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorNullBlock): + continue + else: + raise Exception('TODO: add %s to _plot_block' % type(child)) + + if args.plot_labels: + ax.text(c[0] + 0.5*w, c[1] + 0.5*h, "{:.0e}".format(np.prod(child.shape)), transform=ax.transAxes, fontsize=8, verticalalignment='center', horizontalalignment='center') + else: + add_rects(child, c, w, h) + + rect = patches.Rectangle( + c, w, h, linewidth=1, edgecolor='k', facecolor='none') + ax.add_patch(rect) + + add_rects(block) + + ax.invert_xaxis() + + return fig, ax + + + + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) + +max_depth = args.max_depth if args.max_depth != 0 else None + +if compression_type == "true_model": + FF_dir = "true_{}_{}".format(max_inner_area_str, max_outer_area_str) + +elif compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == "wsnmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +if not compression_type == "true_model" and args.min_depth != 1: + FF_dir += "_{}mindepth".format(args.min_depth) + +if not compression_type == "true_model" and max_depth is not None: + FF_dir += "_{}maxdepth".format(max_depth) + + +if compression_type == 'true_model': + FF_path = 'results/' + FF_dir + f'/FF_{max_inner_area_str}_{max_outer_area_str}.npz' + if not os.path.exists(FF_path): + print("PATH DOES NOT EXIST " + FF_path) + assert False + FF = scipy.sparse.load_npz(FF_path) +else: + FF_path = "results/"+ FF_dir + "/FF_{}_{}_{:.0e}_{}.bin".format(max_inner_area_str, max_outer_area_str, args.tol, compression_type) + if not os.path.exists(FF_path): + print("PATH DOES NOT EXIST " + FF_path) + assert False + FF = CompressedFormFactorMatrix.from_file(FF_path) + + +fig = plt.figure(figsize=(18, 6)) # , figsize=(18, 6)) +print(f'- {FF_path}') +fig, ax = plot_blocks(FF._root, fig) +if args.plot_labels: + fig.savefig("results/"+FF_dir+"/block_plot_labeled.png") 
+else: + fig.savefig("results/"+FF_dir+"/block_plot.png") +plt.close(fig) \ No newline at end of file diff --git a/examples/faustini/make_compressed_form_factor_matrix_faustini.py b/examples/faustini/make_compressed_form_factor_matrix_faustini.py new file mode 100644 index 0000000..2d42228 --- /dev/null +++ b/examples/faustini/make_compressed_form_factor_matrix_faustini.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +import numpy as np +import scipy.sparse + +from flux.form_factors import get_form_factor_matrix +from flux.compressed_form_factors_nmf import CompressedFormFactorMatrix, FormFactorMinDepthQuadtreeBlock +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +import argparse +import arrow +import os + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "true_model"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.set_defaults(feature=False) + +args = parser.parse_args() + +# produce compressed FF from shapemodel produced by make_mesh.py + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) +tol_str = "{:.0e}".format(args.tol) +tol = args.tol + +max_depth = args.max_depth if args.max_depth != 0 else None + + +if compression_type == "true_model": + compression_params = {} + + savedir = "true_{}_{}".format(max_inner_area_str, max_outer_area_str) + + +elif compression_type == "svd": + compression_params = { + "k0": args.k0 + } + + savedir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.k0) + + +elif compression_type == "ssvd": + compression_params = { + "k0": args.k0 + } + + savedir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.k0) + + +elif compression_type == "rand_svd": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.p, args.q, args.k0) + + +elif compression_type == "rand_ssvd": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.p, args.q, args.k0) + + +elif compression_type == "nmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "beta_loss": args.nmf_beta_loss + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +elif compression_type == "snmf": + compression_params = { + "max_iters": args.nmf_max_iters, + 
"nmf_tol": args.nmf_tol, + "k0": args.k0, + "beta_loss": args.nmf_beta_loss + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +elif compression_type == "rand_snmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + + +elif compression_type == "wsnmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "beta_loss": args.nmf_beta_loss + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +if not compression_type == "true_model" and args.min_depth != 1: + savedir += "_{}mindepth".format(args.min_depth) + +if not compression_type == "true_model" and max_depth is not None: + savedir += "_{}maxdepth".format(max_depth) + + +savedir = "results/"+savedir +if not os.path.exists('results'): + os.mkdir('results') +if not os.path.exists(savedir): + os.mkdir(savedir) + + +verts = np.load(f'faustini_verts_{max_inner_area_str}_{max_outer_area_str}.npy') +faces = np.load(f'faustini_faces_{max_inner_area_str}_{max_outer_area_str}.npy') + +# convert verts from km to m +verts *= 1e3 + +normals = get_surface_normals(verts, faces) +normals[normals[:, 2] > 0] *= -1 + +shape_model = CgalTrimeshShapeModel(verts, faces, normals) + + +start_assembly_time = arrow.now() + +if args.compression_type == "true_model": + FF = get_form_factor_matrix(shape_model) +elif args.min_depth != 1: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384, max_depth=max_depth, compression_type=compression_type, compression_params=compression_params, + min_depth=args.min_depth, RootBlock=FormFactorMinDepthQuadtreeBlock) +else: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384, max_depth=max_depth, compression_type=compression_type, compression_params=compression_params) + +assembly_time = (arrow.now() - start_assembly_time).total_seconds() + +np.save(savedir+f'/FF_assembly_time.npy', np.array(assembly_time)) + + +if args.compression_type == "true_model": + scipy.sparse.save_npz(savedir+f'/FF_{max_inner_area_str}_{max_outer_area_str}', FF) +else: + FF.save(savedir+f'/FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}_{compression_type}.bin') \ No newline at end of file diff --git a/examples/faustini/make_mesh_faustini.py b/examples/faustini/make_mesh_faustini.py new file mode 100644 index 0000000..69751a4 --- /dev/null +++ b/examples/faustini/make_mesh_faustini.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +import matplotlib.pyplot as plt +import meshio +import meshpy.triangle as triangle +import numpy as np +import sys +import rioxarray as rio + +path = 'ldem_80s_final_adj_80mpp_surf_wPyramids' # '/home/sberton2/Lavoro/code/nac_kplo/aux/ldem_20mpp_Stefano' # 'LDEM_80S_150M' # +tif_path = path + '.tif' + +max_inner_area_str = sys.argv[1] +max_outer_area_str = sys.argv[2] + +max_inner_area = float(max_inner_area_str) +max_outer_area = float(max_outer_area_str) + +Rp = 1737.4 + +rds = rio.open_rasterio(tif_path) + +xgrid = 
rds.coords['y'].values*-1.e-3 +ygrid = rds.coords['x'].values*1.e-3 +dem = rds.data[0].T[:,::-1] + +nx = len(xgrid) +ny = len(ygrid) + +dx = rds.coords['x'].attrs["actual_range"][-1] - rds.coords['x'].attrs["actual_range"][0] +dy = rds.coords['y'].attrs["actual_range"][-1] - rds.coords['y'].attrs["actual_range"][0] +hx, hy = dx/nx, dy/ny + +xmin, xmax = xgrid.min(), xgrid.max() +ymin, ymax = ygrid.min(), ygrid.max() + +scale_x2ind = (nx - 1)/(xmax - xmin) +scale_y2ind = (ny - 1)/(ymax - ymin) + +def getdem(i, j): + return dem[i, j]/1e3 # we want this in km initially + +def getz(x, y): + '''linearly interpolate z from memory-mapped DEM''' + + assert xmin <= x <= xmax and ymin <= y <= ymax + + isub = scale_x2ind*(x - xmin) + jsub = scale_y2ind*(y - ymin) + + i0, i1 = int(np.floor(isub)), int(np.ceil(isub)) + j0, j1 = int(np.floor(jsub)), int(np.ceil(jsub)) + + # handle special cases first + + if i0 == i1 and j0 == j1: + return getdem(i0, j0) + + p1 = (y - ygrid[j0])/hy + assert 0 <= p1 <= 1 + if i0 == i1: + return (1 - p1)*getdem(i0, j0) + p1*getdem(i0, j1) + + p0 = (x - xgrid[i0])/hx + assert 0 <= p0 <= 1 + if j0 == j1: + return (1 - p0)*getdem(i0, j0) + p0*getdem(i1, j0) + + assert i1 == i0 + 1 and j1 == j0 + 1 + + z00 = getdem(i0, j0) + z01 = getdem(i0, j1) + z10 = getdem(i1, j0) + z11 = getdem(i1, j1) + z0 = (1 - p0)*z00 + p0*z10 + z1 = (1 - p0)*z01 + p0*z11 + z = (1 - p1)*z0 + p1*z1 + + return z # km + +def unproject_stereographic(x, y): + az0, el0, R = 0, -np.pi/2, Rp + if x == 0 and y == 0: + return az0, el0 + rho = np.sqrt(x**2 + y**2) + c = 2*np.arctan2(rho, 2*R) + c_c, s_c = np.cos(c), np.sin(c) + c_l, s_l = np.cos(el0), np.sin(el0) + az = az0 + np.arctan2(x*s_c, c_l*rho*c_c - s_l*y*s_c) + az = np.mod(az, 2*np.pi) + el = np.arcsin(c_c*s_l + c_l*y*s_c/rho) + return az, el + +def stereo2cart(x, y): + az, el = unproject_stereographic(x, y) + # print("x,y",x,y) + # start = time.time() + dR = getz(x, y) + # print("dR call took", time.time()-start) + # print(dR) + # start = time.time() + # xi = xr.DataArray([x], dims="z") + # yi = xr.DataArray([y], dims="z") + # dR = np.squeeze(rds.interp(x=xi, y=yi).data) + # print("rioxarray call took", time.time()-start) + # print(dR) + # exit() + r = Rp + dR + cos_el = np.cos(el) + return r*np.array([cos_el*np.cos(az), cos_el*np.sin(az), np.sin(el)]) + +xc_roi, yc_roi = 82.5, 7.5 +r_roi, R_roi = 25, 60 + +x0_roi, x1_roi = xc_roi - R_roi, xc_roi + R_roi +y0_roi, y1_roi = yc_roi - R_roi, yc_roi + R_roi + +def should_refine(verts, _): + verts = np.array(verts) + assert verts.shape[0] == 3 + + P = np.empty((3, 3)) + for i, (x, y) in enumerate(verts): + P[i] = stereo2cart(x, y) + + tri_area = np.linalg.norm(np.cross(P[1] - P[0], P[2] - P[0]))/2 + + xm, ym = np.mean(verts, 0) + rm = np.sqrt((xm - xc_roi)**2 + (ym - yc_roi)**2) + + if rm < r_roi: + max_area = max_inner_area + else: + max_area = ((rm - r_roi)/(R_roi - r_roi))*max_inner_area \ + + (rm/(R_roi - r_roi))*max_outer_area + + return tri_area > max_area + +points = np.array( + [(x0_roi, y0_roi), (x1_roi, y0_roi), (x1_roi, y1_roi), (x0_roi, y1_roi)], + dtype=np.float32) +facets = np.array([(0, 1), (1, 2), (2, 3), (3, 0)], dtype=np.intc) + +info = triangle.MeshInfo() +info.set_points(points) +info.set_facets(facets) + +mesh = triangle.build(info, refinement_func=should_refine) + +verts_stereo = np.array(mesh.points) +verts = np.array([stereo2cart(*_) for _ in np.array(mesh.points)]).squeeze() +faces = np.array(mesh.elements) + +print(f' * {verts.shape[0]} vertices and {faces.shape[0]} faces') + 
+area_str = f'{max_inner_area_str}_{max_outer_area_str}' +np.save(f'faustini_verts_stereo_{area_str}', verts_stereo) +np.save(f'faustini_verts_{area_str}', verts) +np.save(f'faustini_faces_{area_str}', faces) diff --git a/examples/faustini/simple.furnsh b/examples/faustini/simple.furnsh new file mode 100644 index 0000000..9d057ba --- /dev/null +++ b/examples/faustini/simple.furnsh @@ -0,0 +1,22 @@ +\begindata + + PATH_VALUES = ( '../kernels' ) + + PATH_SYMBOLS = ( 'KERNELS_COMMON' ) + + KERNELS_TO_LOAD = ( + '$KERNELS_COMMON/pck00010.tpc', + '$KERNELS_COMMON/moon_080317.tf', + '$KERNELS_COMMON/moon_assoc_me.tf', + '$KERNELS_COMMON/moon_pa_de421_1900-2050.bpc', + '$KERNELS_COMMON/naif0012.tls', + '$KERNELS_COMMON/de421.bsp', + '$KERNELS_COMMON/spinup_Sun_MOONME_ONEYEAR_m1p5deg.bsp' + ) + +\begintext + '$KERNELS_COMMON/pck00010_msgr_v23.tpc' + '$KERNELS_COMMON/spinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp' + + + diff --git a/examples/faustini/spice_util.py b/examples/faustini/spice_util.py new file mode 100644 index 0000000..e9cfe2e --- /dev/null +++ b/examples/faustini/spice_util.py @@ -0,0 +1,47 @@ +def get_sunvec(utc0, target, observer, frame, utc1=0, stepet=1, et_linspace=None, path_to_furnsh='aux/simple.furnsh'): + '''This script uses SPICE to compute a trajectory for the sun, loads a + shape model discretizing a patch of the lunar south pole (made using + lsp_make_obj.py), and a compressed form factor matrix for that + shape model (computed using lsp_compress_form_factor_matrix.py). + It then proceeds to compute the steady state temperature at each sun + position, writing a plot of the temperature to disk for each sun + position. + + ''' + import numpy as np + import spiceypy as spice + + clktol = '10:000' + spice.kclear() + + spice.furnsh(path_to_furnsh) + et0 = spice.str2et(utc0) + if np.max(et_linspace) == None: + et1 = spice.str2et(utc1) + nbet = int(np.ceil((et1 - et0) / stepet)) + et = np.linspace(et0, et1, nbet) + else: + # add first epoch to array of time steps + et = et0 + et_linspace + + # Sun positions over time period + # TODO I would usually do this, maybe you want to + # TODO add something similar to the thermal model options/config? + # possun = spice.spkpos('SUN', et, FluxOpt.get("frame"), 'LT+S', FluxOpt.get("body"))[0] + possun = spice.spkpos(target, et, frame, 'LT+S', observer)[0] + possun = possun + + # TODO why am I doing this conversion to lon/lat if then I'm reconstructing the cartesian version? 
+ lonsun = np.arctan2(possun[:, 1], possun[:, 0]) + lonsun = np.mod(lonsun, 2 * np.pi) + radsun = np.sqrt(np.sum(possun[:, :] ** 2, axis=1)) + latsun = np.arcsin(possun[:, 2]/radsun) + + sun_vec = radsun[:,np.newaxis] * \ + np.array([ + np.cos(lonsun) * np.cos(latsun), + np.sin(lonsun) * np.cos(latsun), + np.sin(latsun) + ]).T + + return sun_vec diff --git a/examples/gerlache/clean_T_spinup.py b/examples/gerlache/clean_T_spinup.py index 174a5ae..07c56c6 100755 --- a/examples/gerlache/clean_T_spinup.py +++ b/examples/gerlache/clean_T_spinup.py @@ -101,7 +101,7 @@ def get_values_using_raytracing(field): max_inner_area_str = sys.argv[1] max_outer_area_str = sys.argv[2] tol_str = sys.argv[3] - niter = 20 + niter = 2 # nsteps = 863 path_to_remove = f"T_frames/{max_inner_area_str}_{max_outer_area_str}_{tol_str}/" @@ -163,33 +163,33 @@ def get_values_using_raytracing(field): # .astype(float) - # vmax = max(Tmax_grid.max(), Tavg_grid.max()) - # with open('params.json') as f: - # params = json.load(f) - # - # xmin = params['xmin'] - # xmax = params['xmax'] - # ymin = params['ymin'] - # ymax = params['ymax'] - # extent = xmin, xmax, ymin, ymax - # - # plt.figure(figsize=(12, 5)) - # plt.subplot(1, 2, 1) - # plt.title('$T$ (true)') - # plt.imshow(Tmax_grid, interpolation='none', extent=extent, - # vmin=0, vmax=Tmax_grid.max(), cmap=cc.cm.fire) - # plt.ylabel('$x$') - # plt.xlabel('$y$') - # plt.colorbar() - # plt.gca().set_aspect('equal') - # plt.subplot(1, 2, 2) - # plt.title(r'$T$ (tol = %s - true)' % tol_str) - # plt.imshow(Tavg_grid, interpolation='none', extent=extent, - # vmin=0, vmax=Tavg_grid.max(), cmap=cc.cm.fire, zorder=1) - # plt.ylabel('$x$') - # plt.xlabel('$y$') - # plt.colorbar() - # plt.gca().set_aspect('equal') - # plt.tight_layout() - # plt.show() - # plt.close() + vmax = max(Tmax_grid.max(), Tavg_grid.max()) + with open('params.json') as f: + params = json.load(f) + + xmin = params['xmin'] + xmax = params['xmax'] + ymin = params['ymin'] + ymax = params['ymax'] + extent = xmin, xmax, ymin, ymax + + plt.figure(figsize=(12, 5)) + plt.subplot(1, 2, 1) + plt.title('$T$ (true)') + plt.imshow(Tmax_grid, interpolation='none', extent=extent, + vmin=0, vmax=Tmax_grid.max(), cmap=cc.cm.fire) + plt.ylabel('$x$') + plt.xlabel('$y$') + plt.colorbar() + plt.gca().set_aspect('equal') + plt.subplot(1, 2, 2) + plt.title(r'$T$ (tol = %s - true)' % tol_str) + plt.imshow(Tavg_grid, interpolation='none', extent=extent, + vmin=0, vmax=Tavg_grid.max(), cmap=cc.cm.fire, zorder=1) + plt.ylabel('$x$') + plt.xlabel('$y$') + plt.colorbar() + plt.gca().set_aspect('equal') + plt.tight_layout() + plt.show() + plt.close() diff --git a/examples/gerlache/collect_equil_T_data.py b/examples/gerlache/collect_equil_T_data.py index ffedd24..282a324 100755 --- a/examples/gerlache/collect_equil_T_data.py +++ b/examples/gerlache/collect_equil_T_data.py @@ -55,6 +55,7 @@ def get_values_using_raytracing(field): T = compute_steady_state_temp(FF, E, rho, emiss) time_T = toc() + print(time_T) T_grid = get_values_using_raytracing(T) @@ -72,9 +73,11 @@ def get_values_using_raytracing(field): # get data for compressed FF matrices +# FF_paths = glob.glob('FF_0.8_3.0_1e-2.bin') FF_paths = glob.glob('FF_*_*_*.bin') -for path in FF_paths: +for path in FF_paths[:]: + max_inner_area_str, max_outer_area_str, tol_str = path[3:-4].split('_') print(max_inner_area_str, max_outer_area_str, tol_str) @@ -99,9 +102,10 @@ def get_values_using_raytracing(field): # get data from true FF matrices -FF_true_paths = glob.glob('FF_*.npz') +# 
FF_true_paths = glob.glob('FF_0.8_3.0.npz') +FF_true_paths = glob.glob('FF_*_*.npz') -for path in FF_true_paths: +for path in FF_true_paths[:]: max_inner_area, max_outer_area = map(float, areas_str.split('_')) print(max_inner_area, max_outer_area) diff --git a/examples/gerlache/collect_time_dep_data.py b/examples/gerlache/collect_time_dep_data.py index 8f83e07..89abffb 100755 --- a/examples/gerlache/collect_time_dep_data.py +++ b/examples/gerlache/collect_time_dep_data.py @@ -1,25 +1,60 @@ #!/usr/bin/env python - -import os - -import colorcet as cc -import itertools as it import json_numpy as json -import matplotlib.pyplot as plt -import numpy as np import scipy.sparse import sys +import glob +import pyvista as pv +from pathlib import Path + +import meshio +import numpy as np +from tqdm import tqdm from spice_util import get_sunvec from flux.compressed_form_factors import CompressedFormFactorMatrix -from flux.compressed_form_factors import CompressedKernelMatrix from flux.model import ThermalModel from flux.shape import CgalTrimeshShapeModel, get_surface_normals -from flux.solve import solve_radiosity -from flux.util import tic, toc -from pathlib import Path +# some useful routines +# transform cartesian to spherical (meters, radians) +def cart2sph(xyz): + + rtmp = np.linalg.norm(np.array(xyz).reshape(-1, 3), axis=1) + lattmp = np.arcsin(np.array(xyz).reshape(-1, 3)[:, 2] / rtmp) + lontmp = np.arctan2(np.array(xyz).reshape(-1, 3)[:, 1], np.array(xyz).reshape(-1, 3)[:, 0]) + + return rtmp, lattmp, lontmp + +def sind(x): + return np.sin(np.deg2rad(x)) + +def cosd(x): + return np.cos(np.deg2rad(x)) + +def project_stereographic(lon, lat, lon0, lat0, R=1): + """ + project cylindrical coordinates to stereographic xy from central lon0/lat0 + :param lon: array of input longitudes (deg) + :param lat: array of input latitudes (deg) + :param lon0: center longitude for the projection (deg) + :param lat0: center latitude for the projection (deg) + :param R: planetary radius (km) + :return: stereographic projection xy coord from center (km) + """ + + cosd_lat = cosd(lat) + cosd_lon_lon0 = cosd(lon - lon0) + sind_lat = sind(lat) + + k = (2. * R) / (1. 
+ sind(lat0) * sind_lat + cosd(lat0) * cosd_lat * cosd_lon_lon0) + x = k * cosd_lat * sind(lon - lon0) + y = k * (cosd(lat0) * sind_lat - sind(lat0) * cosd_lat * cosd_lon_lon0) + + return x, y + +# ============================================================ +# main code outdir = Path('T_frames') if not outdir.exists(): outdir.mkdir() @@ -28,6 +63,7 @@ max_outer_area_str = sys.argv[2] tol_str = sys.argv[3] +# read shapemodel and form-factor matrix generated by make_compressed_form_factor_matrix.py if tol_str == 'true': path = f'FF_{max_inner_area_str}_{max_outer_area_str}.npz' FF = scipy.sparse.load_npz(path) @@ -41,9 +77,9 @@ FF = CompressedFormFactorMatrix.from_file(path) shape_model = FF.shape_model -print(' * loaded form factor matrix and shape model') - +print(' * loaded form factor matrix and (cartesian) shape model') +# choose simulation parameters with open('params.json') as f: params = json.load(f) @@ -60,36 +96,66 @@ utc1 = '2011 MAR 02 00:00:00.00' num_frames = 100 stepet = 86400/100 -sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet) +sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple.furnsh", + target='SUN', observer='MOON', frame='MOON_ME') t = np.linspace(0, 86400, num_frames + 1) -D = sun_vecs/np.linalg.norm(sun_vecs,axis=1)[:,np.newaxis] +D = sun_vecs/np.linalg.norm(sun_vecs, axis=1)[:, np.newaxis] D = D.copy(order='C') print(' * got sun positions from SPICE') z = np.linspace(0, 3e-3, 31) +print(' * set up thermal model') thermal_model = ThermalModel( FF, t, D, - F0=1365, rho=0.11, method='1mvp', + F0=np.repeat(1365, len(D)), rho=0.11, method='1mvp', z=z, T0=100, ti=120, rhoc=9.6e5, emiss=0.95, Fgeotherm=0.2, bcond='Q', shape_model=shape_model) -print(' * set up thermal model') - -plot_layers = [0, 1, 2, 3] - Tmin, Tmax = np.inf, -np.inf vmin, vmax = 90, 310 path_fmt = f'T%0{len(str(num_frames))}d.npy' -print(' * time stepping the thermal model:') -for frame_index, T in enumerate(thermal_model): - print(f' + {frame_index + 1}/{D.shape[0]}') +for frame_index, T in tqdm(enumerate(thermal_model), total=D.shape[0], desc='thermal models time-steps'): + # print(f' + {frame_index + 1}/{D.shape[0]}') path = outdir/f'{max_inner_area_str}_{max_outer_area_str}_{tol_str}' if not path.exists(): path.mkdir() path = path/(path_fmt % frame_index) np.save(path, T) +print(' * thermal model run completed') + +# retrieve T at surface for all epochs and compute max and mean along epochs +Tfiles = glob.glob(f"T_frames/{max_inner_area_str}_{max_outer_area_str}_{tol_str}/T*.npy") + +Tsurf = [] +for f in Tfiles: + Tsurf.append(np.load(f)[:, 0]) +Tsurf = np.vstack(Tsurf) + +Tsurf_max = np.max(Tsurf, axis=0) # max along epochs +Tsurf_mean = np.mean(Tsurf, axis=0) # mean along epochs + +# plot +# generate stereographic counterpart to shape_model (for plotting) +Rp = 1737.4e3 +r, lat, lon = cart2sph(shape_model.V) +x, y = project_stereographic(np.rad2deg(lon), np.rad2deg(lat), lon0=0, lat0=-90., R=1737.4e3) +V_st = np.vstack([x, y, r-Rp]).T +shape_model_st = CgalTrimeshShapeModel(V_st.copy(order='C'), shape_model.F) + +# save mesh to file +mesh = meshio.Mesh(shape_model_st.V, [('triangle', shape_model_st.F)]) +fout = f'gerlache_{max_inner_area_str}_{max_outer_area_str}_st.ply' +mesh.write(fout) + +# read with pyvista and plot Tmax and Tmean at surface +grid = pv.read(fout) +grid.cell_data['Tmax'] = Tsurf_max +grid.plot() + +grid.cell_data['Tmean'] = Tsurf_mean +grid.plot() diff --git a/examples/gerlache/collect_time_dep_data_cliques.py 
b/examples/gerlache/collect_time_dep_data_cliques.py new file mode 100644 index 0000000..11e8631 --- /dev/null +++ b/examples/gerlache/collect_time_dep_data_cliques.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python +import json_numpy as json +import scipy.sparse +import sys +import glob +import pyvista as pv +import os + +import meshio +import numpy as np +from tqdm import tqdm + +from spice_util import get_sunvec +from flux.compressed_form_factors_cliques import CliquedFormFactorMatrix +from flux.model import ThermalModel +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +import argparse +import arrow + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "saca","sbrp","rand_sid"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.set_defaults(feature=False) + +args = parser.parse_args() + +# some useful routines +# transform cartesian to spherical (meters, radians) +def cart2sph(xyz): + + rtmp = np.linalg.norm(np.array(xyz).reshape(-1, 3), axis=1) + lattmp = np.arcsin(np.array(xyz).reshape(-1, 3)[:, 2] / rtmp) + lontmp = np.arctan2(np.array(xyz).reshape(-1, 3)[:, 1], np.array(xyz).reshape(-1, 3)[:, 0]) + + return rtmp, lattmp, lontmp + +def sind(x): + return np.sin(np.deg2rad(x)) + + +def cosd(x): + return np.cos(np.deg2rad(x)) + +def project_stereographic(lon, lat, lon0, lat0, R=1): + """ + project cylindrical coordinates to stereographic xy from central lon0/lat0 + :param lon: array of input longitudes (deg) + :param lat: array of input latitudes (deg) + :param lon0: center longitude for the projection (deg) + :param lat0: center latitude for the projection (deg) + :param R: planetary radius (km) + :return: stereographic projection xy coord from center (km) + """ + + cosd_lat = cosd(lat) + cosd_lon_lon0 = cosd(lon - lon0) + sind_lat = sind(lat) + + k = (2. * R) / (1. 
+ sind(lat0) * sind_lat + cosd(lat0) * cosd_lat * cosd_lon_lon0) + x = k * cosd_lat * sind(lon - lon0) + y = k * (cosd(lat0) * sind_lat - sind(lat0) * cosd_lat * cosd_lon_lon0) + + return x, y + +# ============================================================ +# main code + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) +tol_str = "{:.0e}".format(args.tol) + + +if compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == "wsnmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "saca": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "sbrp": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_sid": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + + + +FF_dir = "results_cliques/"+FF_dir +if not os.path.exists(FF_dir): + print("PATH DOES NOT EXIST "+FF_dir) + assert False +savedir = FF_dir + "/T_frames" +if not os.path.exists(savedir): + os.mkdir(savedir) + + +# read shapemodel and form-factor matrix generated by make_compressed_form_factor_matrix.py +path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}_{compression_type}.bin' +FF = CliquedFormFactorMatrix.from_file(path) +shape_model = FF.shape_model + +print(' * loaded form factor matrix and (cartesian) shape model') + +# choose simulation parameters +with open('params.json') as f: + params = json.load(f) + + +# Define time window (it can be done either with dates or with utc0 - initial epoch - and np.linspace of epochs) + +# utc0 = '2011 MAR 01 00:00:00.00' +# utc1 = '2011 MAR 02 00:00:00.00' +# num_frames = 100 +# stepet = 86400/100 +# sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, 
path_to_furnsh="simple.furnsh", +# target='SUN', observer='MOON', frame='MOON_ME') +# t = np.linspace(0, 86400, num_frames + 1) + +utc0 = '2011 MAR 01 00:00:00.00' +utc1 = '2011 MAR 31 00:00:00.00' +num_frames = 3000 +stepet = 2592000/3000 +sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple.furnsh", + target='SUN', observer='MOON', frame='MOON_ME') +t = np.linspace(0, 2592000, num_frames + 1) + +D = sun_vecs/np.linalg.norm(sun_vecs, axis=1)[:, np.newaxis] +D = D.copy(order='C') + +print(' * got sun positions from SPICE') + +z = np.linspace(0, 3e-3, 31) + +print(' * set up thermal model') +thermal_model = ThermalModel( + FF, t, D, + F0=np.repeat(1365, len(D)), rho=0.11, method='1mvp', + z=z, T0=100, ti=120, rhoc=9.6e5, emiss=0.95, + Fgeotherm=0.2, bcond='Q', shape_model=shape_model) + +Tmin, Tmax = np.inf, -np.inf +vmin, vmax = 90, 310 + +sim_start_time = arrow.now() +for frame_index, T in tqdm(enumerate(thermal_model), total=D.shape[0], desc='thermal models time-steps'): + # print(f' + {frame_index + 1}/{D.shape[0]}') + path = savedir+"/T{:03d}.npy".format(frame_index) + np.save(path, T) +sim_duration = (arrow.now()-sim_start_time).total_seconds() +print(' * thermal model run completed in {:.2f} seconds'.format(sim_duration)) + +np.save(savedir+f'/sim_duration.npy', np.array(sim_duration)) + +# retrieve T at surface for all epochs and compute max and mean along epochs +Tfiles = glob.glob(savedir+f"/T*.npy") + +Tsurf = [] +for f in Tfiles: + Tsurf.append(np.load(f)[:, 0]) +Tsurf = np.vstack(Tsurf) + +Tsurf_max = np.max(Tsurf, axis=0) # max along epochs +Tsurf_mean = np.mean(Tsurf, axis=0) # mean along epochs + +# plot +# generate stereographic counterpart to shape_model (for plotting) +Rp = 1737.4e3 +r, lat, lon = cart2sph(shape_model.V) +x, y = project_stereographic(np.rad2deg(lon), np.rad2deg(lat), lon0=0, lat0=-90., R=1737.4e3) +V_st = np.vstack([x, y, r-Rp]).T +shape_model_st = CgalTrimeshShapeModel(V_st.copy(order='C'), shape_model.F) + +# save mesh to file +mesh = meshio.Mesh(shape_model_st.V, [('triangle', shape_model_st.F)]) +fout = f'gerlache_{max_inner_area_str}_{max_outer_area_str}_st.ply' +mesh.write(fout) + +# read with pyvista and plot Tmax and Tmean at surface +grid = pv.read(fout) +grid.cell_data['Tmax'] = Tsurf_max +grid.plot() + +grid.cell_data['Tmean'] = Tsurf_mean +grid.plot() diff --git a/examples/gerlache/collect_time_dep_data_long_spinup_phase1.py b/examples/gerlache/collect_time_dep_data_long_spinup_phase1.py new file mode 100644 index 0000000..1e47abd --- /dev/null +++ b/examples/gerlache/collect_time_dep_data_long_spinup_phase1.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +import json_numpy as json +import scipy.sparse +import sys +import glob +import pyvista as pv +import os + +import meshio +import numpy as np +from tqdm import tqdm + +from spice_util import get_sunvec +from flux.compressed_form_factors_nmf import CompressedFormFactorMatrix +from flux.model import ThermalModel +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +from flux.thermal import setgrid + +import argparse +import arrow + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "saca","sbrp","rand_sid", + "stoch_radiosity", + "true_model"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, 
default=1e-1) + +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.add_argument('--roi', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) +tol_str = "{:.0e}".format(args.tol) + +max_depth = args.max_depth if args.max_depth != 0 else None + + +if compression_type == "true_model": + FF_dir = "true_{}_{}".format(max_inner_area_str, max_outer_area_str) + +elif compression_type == "stoch_radiosity": + FF_dir = "stoch_rad_{}_{}_{}k0".format(max_inner_area_str, max_outer_area_str, + args.k0) + +elif compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == "wsnmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "saca": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "sbrp": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_sid": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + + +if not (compression_type == "true_model" or compression_type == "stoch_radiosity") and args.min_depth != 1: + FF_dir += "_{}mindepth".format(args.min_depth) + +if not (compression_type == "true_model" or compression_type == "stoch_radiosity") and max_depth is not None: + FF_dir += 
"_{}maxdepth".format(max_depth) + +if args.roi: + FF_dir = "roi_" + FF_dir + + +FF_dir = "results/"+FF_dir +if not os.path.exists(FF_dir): + print("PATH DOES NOT EXIST "+FF_dir) + assert False +savedir = FF_dir + + +# read shapemodel and form-factor matrix generated by make_compressed_form_factor_matrix.py +if compression_type == 'true_model' or compression_type == 'stoch_radiosity': + path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}.npz' + FF = scipy.sparse.load_npz(path) + V = np.load(f'gerlache_verts_{max_inner_area_str}_{max_outer_area_str}.npy') + F = np.load(f'gerlache_faces_{max_inner_area_str}_{max_outer_area_str}.npy') + N = get_surface_normals(V, F) + N[N[:, 2] > 0] *= -1 + shape_model = CgalTrimeshShapeModel(V, F, N) +else: + path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}_{compression_type}.bin' + FF = CompressedFormFactorMatrix.from_file(path) + shape_model = FF.shape_model + +print(' * loaded form factor matrix and (cartesian) shape model') + +# Define time window (it can be done either with dates or with utc0 - initial epoch - and np.linspace of epochs) + +utc0 = '2000 JAN 01 12:00:00.00' +utc1 = '2000 JUL 26 00:20:00.00' +num_frames = 20161 +stepet = 885 +sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple_long_spinup.furnsh", + target='SUN', observer='MOON', frame='MOON_ME') +t = np.linspace(0, num_frames*stepet, num_frames + 1) + +print(t.shape, sun_vecs.shape) + +D = sun_vecs/np.linalg.norm(sun_vecs, axis=1)[:, np.newaxis] +D = D.copy(order='C') + +print(' * got sun positions from SPICE') + +# z = np.linspace(0, 3e-3, 31) +nz = 60 +zfac = 1.05 +zmax = 2.5 +z = setgrid(nz=nz, zfac=zfac, zmax=zmax) +z = np.hstack([0, z]) # add surface layer + +print(' * set up thermal model') +thermal_model = ThermalModel( + FF, t, D, + F0=np.repeat(1365, len(D)), rho=0.11, method='1mvp', + z=z, T0=100, ti=120, rhoc=9.6e5, emiss=0.95, + Fgeotherm=0.2, bcond='Q', shape_model=shape_model) + +T_list = [] +sim_start_time = arrow.now() +for frame_index, T in tqdm(enumerate(thermal_model), total=D.shape[0], desc='thermal models time-steps'): + if frame_index > 2880*6: + T_list.append(T) + pass +path = savedir+"/T_long_spinup.npy" + +T_list = np.array(T_list) +np.save(path, T_list.mean(axis=0)) +sim_duration = (arrow.now()-sim_start_time).total_seconds() +print(' * thermal model spinup completed in {:.2f} seconds'.format(sim_duration)) + +np.save(savedir+f'/long_spinup_duration.npy', np.array(sim_duration)) \ No newline at end of file diff --git a/examples/gerlache/collect_time_dep_data_long_spinup_phase2.py b/examples/gerlache/collect_time_dep_data_long_spinup_phase2.py new file mode 100644 index 0000000..9d734e8 --- /dev/null +++ b/examples/gerlache/collect_time_dep_data_long_spinup_phase2.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +import json_numpy as json +import scipy.sparse +import sys +import glob +import pyvista as pv +import os + +import meshio +import numpy as np +from tqdm import tqdm + +from spice_util import get_sunvec +from flux.compressed_form_factors_nmf import CompressedFormFactorMatrix +from flux.model import ThermalModel +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +from flux.thermal import setgrid + +import argparse +import arrow + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "saca","sbrp","rand_sid", + "stoch_radiosity", + "true_model"]) 
+parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.add_argument('--roi', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) +tol_str = "{:.0e}".format(args.tol) + +max_depth = args.max_depth if args.max_depth != 0 else None + + +if compression_type == "true_model": + FF_dir = "true_{}_{}".format(max_inner_area_str, max_outer_area_str) + +elif compression_type == "stoch_radiosity": + FF_dir = "stoch_rad_{}_{}_{}k0".format(max_inner_area_str, max_outer_area_str, + args.k0) + +elif compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == "wsnmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "saca": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "sbrp": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_sid": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + + +if not (compression_type == "true_model" or compression_type == "stoch_radiosity") and args.min_depth != 1: + 
FF_dir += "_{}mindepth".format(args.min_depth) + +if not (compression_type == "true_model" or compression_type == "stoch_radiosity") and max_depth is not None: + FF_dir += "_{}maxdepth".format(max_depth) + +if args.roi: + FF_dir = "roi_" + FF_dir + + +FF_dir = "results/"+FF_dir +if not os.path.exists(FF_dir): + print("PATH DOES NOT EXIST "+FF_dir) + assert False +savedir = FF_dir + "/T_frames_long" +if not os.path.exists(savedir): + os.mkdir(savedir) + + +# read shapemodel and form-factor matrix generated by make_compressed_form_factor_matrix.py +if compression_type == 'true_model' or compression_type == 'stoch_radiosity': + path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}.npz' + FF = scipy.sparse.load_npz(path) + V = np.load(f'gerlache_verts_{max_inner_area_str}_{max_outer_area_str}.npy') + F = np.load(f'gerlache_faces_{max_inner_area_str}_{max_outer_area_str}.npy') + N = get_surface_normals(V, F) + N[N[:, 2] > 0] *= -1 + shape_model = CgalTrimeshShapeModel(V, F, N) +else: + path = FF_dir+f'/FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}_{compression_type}.bin' + FF = CompressedFormFactorMatrix.from_file(path) + shape_model = FF.shape_model + +print(' * loaded form factor matrix and (cartesian) shape model') + +# Define time window (it can be done either with dates or with utc0 - initial epoch - and np.linspace of epochs) + +if not os.path.exists(FF_dir + "/T_long_spinup.npy"): + raise RuntimeError("Need to run step 1 of spin up first!") +else: + T_spinup = np.load(FF_dir + "/T_long_spinup.npy") + T_surf_mean = T_spinup[:,0] + T_init = np.repeat(T_surf_mean[:,np.newaxis], T_spinup.shape[1], axis=-1) + +utc0 = '2000 JAN 01 12:00:00.00' +utc1 = '2000 OCT 22 12:20:00.00' +num_frames = 28801 +stepet = 885 +sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple_long_spinup.furnsh", + target='SUN', observer='MOON', frame='MOON_ME') +t = np.linspace(0, num_frames*stepet, num_frames + 1) + +print(t.shape, sun_vecs.shape) + +D = sun_vecs/np.linalg.norm(sun_vecs, axis=1)[:, np.newaxis] +D = D.copy(order='C') + +print(' * got sun positions from SPICE') + +print(sun_vecs[0], sun_vecs[2880], sun_vecs[5760]) + +# z = np.linspace(0, 3e-3, 31) +nz = 60 +zfac = 1.05 +zmax = 2.5 +z = setgrid(nz=nz, zfac=zfac, zmax=zmax) +z = np.hstack([0, z]) # add surface layer + +print(' * set up thermal model') +thermal_model = ThermalModel( + FF, t, D, + F0=np.repeat(1365, len(D)), rho=0.11, method='1mvp', + z=z, T0=T_init, ti=120, rhoc=9.6e5, emiss=0.95, + Fgeotherm=0.2, bcond='Q', shape_model=shape_model, return_flux=True) + +sim_start_time = arrow.now() +for frame_index, (T, E, _, _) in tqdm(enumerate(thermal_model), total=D.shape[0], desc='thermal models time-steps'): + if (frame_index % 2880) == 0 or ((frame_index < 2880) and (frame_index % 20 == 0)): + + path = savedir+"/T{:03d}.npy".format(frame_index) + np.save(path, T) + + path = savedir+"/E{:03d}.npy".format(frame_index) + np.save(path, E) +sim_duration = (arrow.now()-sim_start_time).total_seconds() +print(' * thermal model run completed in {:.2f} seconds'.format(sim_duration)) + +np.save(savedir+f'/sim_duration_long.npy', np.array(sim_duration)) \ No newline at end of file diff --git a/examples/gerlache/get_weights.py b/examples/gerlache/get_weights.py new file mode 100644 index 0000000..75e202f --- /dev/null +++ b/examples/gerlache/get_weights.py @@ -0,0 +1,54 @@ +import numpy as np +import scipy.sparse +import scipy + +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +from flux.util import 
nbytes, get_sunvec + +import argparse +import os + +parser = argparse.ArgumentParser() +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) + +verts = np.load(f'gerlache_verts_{max_inner_area_str}_{max_outer_area_str}.npy') +faces = np.load(f'gerlache_faces_{max_inner_area_str}_{max_outer_area_str}.npy') + +# convert verts from km to m +verts *= 1e3 + +normals = get_surface_normals(verts, faces) +normals[normals[:, 2] > 0] *= -1 + +shape_model = CgalTrimeshShapeModel(verts, faces, normals) + + +# Construct a weight matrix based on direct irradiance in the shape model +utc0 = '2011 MAR 01 00:00:00.00' +utc1 = '2011 MAR 30 00:00:00.00' +num_frames = 29*4 +stepet = (3600*24*29)/num_frames +sun_vecs = get_sunvec(utc0=utc0, utc1=utc1, stepet=stepet, path_to_furnsh="simple.furnsh", + target='SUN', observer='MOON', frame='MOON_ME') + +D = sun_vecs/np.linalg.norm(sun_vecs, axis=1)[:, np.newaxis] +D = D.copy(order='C') + +E_t = [] +for t in range(D.shape[0]): + E_t.append(shape_model.get_direct_irradiance(1365, D[t])) +E_t = np.array(E_t) +weights = E_t.mean(axis=0) + +np.save(f'weights_{max_inner_area_str}_{max_outer_area_str}.npy', weights) +scipy.io.savemat(f'weights_{max_inner_area_str}_{max_outer_area_str}.mat', {'weights':weights}) \ No newline at end of file diff --git a/examples/gerlache/ldem_87s_50mpp.tif b/examples/gerlache/ldem_87s_50mpp.tif new file mode 100644 index 0000000..dd38393 Binary files /dev/null and b/examples/gerlache/ldem_87s_50mpp.tif differ diff --git a/examples/gerlache/longspinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp b/examples/gerlache/longspinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp new file mode 100644 index 0000000..5b6ea3c Binary files /dev/null and b/examples/gerlache/longspinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp differ diff --git a/examples/gerlache/make_FF_comparison_plots.py b/examples/gerlache/make_FF_comparison_plots.py index 3d9f2c6..a2f8349 100755 --- a/examples/gerlache/make_FF_comparison_plots.py +++ b/examples/gerlache/make_FF_comparison_plots.py @@ -52,9 +52,12 @@ FF_nbytes[area_str] = dict() for tol_str in tol_strs: - path = next(_ for _ in FF_paths if area_str in _ and tol_str in _) - FF = CompressedFormFactorMatrix.from_file(path) - FF_nbytes[area_str][tol_str] = FF.nbytes + try: + path = next(_ for _ in FF_paths if area_str in _ and tol_str in _) + FF = CompressedFormFactorMatrix.from_file(path) + FF_nbytes[area_str][tol_str] = FF.nbytes + except: + print(f"Failed for {tol_str}") path = next(_ for _ in FF_true_paths if area_str in _) FF = scipy.sparse.load_npz(path) @@ -86,19 +89,29 @@ for area_str in area_strs: FF_T_times[area_str] = dict() for tol_str in tol_strs: - path = next(_ for _ in FF_paths if area_str in _ and tol_str in _) - FF_T_times[area_str][tol_str] = data[path]['time_T'] + try: + path = next(_ for _ in FF_paths if area_str in _ and tol_str in _) + FF_T_times[area_str][tol_str] = data[path]['time_T'] + except: + print(f"Failed on FFtim {tol_str}") path = next(_ for _ in FF_true_paths if area_str in _) FF_T_times[area_str]['true'] = data[path]['time_T'] df_T_times = pandas.DataFrame(FF_T_times).T plt.clf() for tol_str in tol_strs + ['true']: - plt.loglog(num_faces, df_assembly_times[tol_str], marker='*', - label=f'assembly ({tol_str})') + try: + plt.loglog(num_faces, df_assembly_times[tol_str], marker='*', + 
label=f'assembly ({tol_str})') + except: + print(f"Failed on FFtim plot {tol_str}") for tol_str in tol_strs + ['true']: - plt.loglog(num_faces, df_T_times[tol_str], marker='*', linestyle='--', + try: + print(tol_str, num_faces, df_T_times[tol_str]) + plt.loglog(num_faces, df_T_times[tol_str], marker='*', linestyle='--', label=f'compute T ({tol_str})') + except: + print(f"Failed on Ttim plot {tol_str}") plt.ylabel('Time [s]') plt.xlabel('$N$') plt.legend() diff --git a/examples/gerlache/make_FF_depth_comparison_plots.py b/examples/gerlache/make_FF_depth_comparison_plots.py new file mode 100755 index 0000000..f7315aa --- /dev/null +++ b/examples/gerlache/make_FF_depth_comparison_plots.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python +import glob +import itertools + +import json_numpy as json +import matplotlib.pyplot as plt +import numpy as np +import pandas +import scipy.sparse + +plt.ion() + +from pathlib import Path + +from flux.compressed_form_factors import CompressedFormFactorMatrix +from flux.util import nbytes + +plt.figure() + +plotdir = Path('gerlache_plots_depth') + +if not plotdir.exists(): + plotdir.mkdir() + +equil_T_data_maxdepth = sorted(glob.glob('*lev/equil_T_data.json'), key=lambda x:int(x.split('/')[0][:-3])) + +df_sizes = {} +df_times = {} +for data_depth in equil_T_data_maxdepth: + maxdepth = data_depth.split('/')[0][:-3] + + with open(data_depth) as f: + data = json.load(f) + # print(data) +# exit() + + + FF_paths = [_ for _ in data.keys() if _[-4:] == '.bin'] + FF_true_paths = [_ for _ in data.keys() if _[-4:] == '.npz'] + assert len(FF_paths) + len(FF_true_paths) == len(data) + + # get list of area strings sorted in decreasing order of max inner + # area (i.e., in increasing order of problem size and mesh fineness) + area_strs = [_[3:-4] for _ in FF_true_paths] + area_strs = sorted(area_strs, key=lambda _:float(_.split('_')[0]), reverse=True) + + max_inner_areas = [float(_.split('_')[0]) for _ in area_strs] + + # get list of tolerance strings in decreasing order (i.e., in + # increasing order of accuracy) + tol_strs = {_[3:-4].split('_')[-1] for _ in FF_paths} + tol_strs = sorted(tol_strs, key=float, reverse=True) + + # figure out the number of faces for each triangle mesh + num_faces = [] + for area_str in area_strs: + F = np.load(f'gerlache_faces_{area_str}.npy') + num_faces.append(F.shape[0]) + + # PLOT: SIZE OF FORM FACTOR MATRIX + + FF_nbytes = dict() + for area_str in area_strs: + FF_nbytes[area_str] = dict() + + for tol_str in tol_strs: + try: + path = next(_ for _ in FF_paths if area_str in _ and tol_str in _) + FF = CompressedFormFactorMatrix.from_file(path) + FF_nbytes[area_str][tol_str] = FF.nbytes + except: + print(f"Failed for {tol_str}") + + path = next(_ for _ in FF_true_paths if area_str in _) + FF = scipy.sparse.load_npz(path) + FF_nbytes[area_str]['true'] = nbytes(FF) + + df = pandas.DataFrame(FF_nbytes).T + df_sizes[maxdepth] = df + + # plt.clf() + # for tol_str in tol_strs + ['true']: + # plt.loglog(num_faces, df[tol_str]/1024**2, marker='*', label=tol_str) + # plt.ylabel('Size [MB]') + # plt.xlabel('$N$') + # plt.legend() + # plt.savefig(plotdir/'FF_sizes.png') + + # PLOT: FORM FACTOR MATRIX TIMINGS + + FF_assembly_times = dict() + with open('FF_assembly_times.txt', 'r') as f: + for line in f: + tol_str, max_inner_area_str, max_outer_area_str, time_str = line.split() + area_str = max_inner_area_str + '_' + max_outer_area_str + if area_str not in FF_assembly_times: + FF_assembly_times[area_str] = dict() + FF_assembly_times[area_str][tol_str] = 
float(time_str) + df_assembly_times = pandas.DataFrame(FF_assembly_times).T + + FF_T_times = dict() + for area_str in area_strs: + FF_T_times[area_str] = dict() + for tol_str in tol_strs: + try: + path = next(_ for _ in FF_paths if area_str in _ and tol_str in _) + FF_T_times[area_str][tol_str] = data[path]['time_T'] + except: + print(f"Failed on FFtim {tol_str}") + path = next(_ for _ in FF_true_paths if area_str in _) + FF_T_times[area_str]['true'] = data[path]['time_T'] + df_T_times = pandas.DataFrame(FF_T_times).T + + df_times[maxdepth] = df_T_times + +# print(df_sizes) +# print(df_times) +# exit() + +plt.close() +plt.figure() + +print(tol_str, num_faces, df_times[maxdepth]['true'], f"{plotdir}times.png") +plt.loglog(num_faces, df_times[maxdepth]['true'], marker='*', linestyle='-', color='k', + label=f'(true)') +colors = itertools.cycle(np.repeat(["r", "b", "g", "c"],2)) +for maxdepth, df_T_times in df_times.items(): + for tol_str in tol_strs: + print(tol_str, num_faces, df_T_times[tol_str]) + if tol_str == '1e-1': + plt.loglog(num_faces, df_T_times[tol_str], marker='*', linestyle='-', color=next(colors), + label=f'({tol_str}, {maxdepth})') + else: + plt.loglog(num_faces, df_T_times[tol_str], marker='*', linestyle='--', color=next(colors), + label=f'({tol_str}, {maxdepth})') + +plt.title("compute T") +plt.ylabel('Time [s]') +plt.xlabel('$N$') +plt.legend() +plt.savefig(f"{plotdir}/times.png") +plt.close() + +plt.figure() +plt.loglog(num_faces, df_sizes[maxdepth]['true']/1024**2, marker='*', linestyle='-', color='k', + label=f'(true)') +for maxdepth, sizes in df_sizes.items(): + for tol_str in tol_strs: + print(tol_str, num_faces, sizes[tol_str]/1024**2) + if tol_str == '1e-1': + plt.loglog(num_faces, sizes[tol_str]/1024**2, marker='*', linestyle='-', color=next(colors), + label=f'({tol_str}, {maxdepth})') + else: + plt.loglog(num_faces, sizes[tol_str]/1024**2, marker='*', linestyle='--', color=next(colors), + label=f'({tol_str}, {maxdepth})') + +plt.title("FF sizes") +plt.ylabel('Size [MB]') +plt.xlabel('$N$') +plt.legend() +plt.savefig(f"{plotdir}/sizes.png") diff --git a/examples/gerlache/make_block_plots.py b/examples/gerlache/make_block_plots.py index e1df730..0419cbb 100755 --- a/examples/gerlache/make_block_plots.py +++ b/examples/gerlache/make_block_plots.py @@ -8,10 +8,86 @@ from flux.compressed_form_factors import CompressedFormFactorMatrix from flux.plot import plot_blocks -for FF_path in glob('FF_*_*_*.bin'): - print(f'- {FF_path}') - FF = CompressedFormFactorMatrix.from_file(FF_path) - fig, ax = plot_blocks(FF._root) - plot_path = FF_path[:-4] + '.png' - fig.savefig(Path('gerlache_plots')/plot_path) - plt.close(fig) +levels = [2,3,5,9] +tols = [1e-1,1e-2] +num_mesh_res = len(glob('2lev/FF_*_1e-1.bin')) +fig, axes = plt.subplots(len(levels)*2, num_mesh_res) # , figsize=(18, 6)) +for i, lev in enumerate(levels): + for j, FF_path in enumerate(glob(f'{lev}lev/FF_*_*_1e-1.bin')): + print(f'- {FF_path}') + FF = CompressedFormFactorMatrix.from_file(FF_path) + fig, ax = plot_blocks(FF._root, fig) + +plt.show() +exit() +plot_path = FF_path[:-4] + '.png' +fig.savefig(Path('gerlache_plots')/plot_path) +plt.close(fig) + +def plot_blocks(block, fig, **kwargs): + + if 'figsize' in kwargs: + fig.set_size_inches(*kwargs['figsize']) + else: + fig.set_size_inches(12, 12) + + ax = fig.add_subplot() + ax.axis('off') + + ax.set_xlim(-0.001, 1.001) + ax.set_ylim(-0.001, 1.001) + + rect = patches.Rectangle((0, 0), 1, 1, linewidth=1, edgecolor='k', + facecolor='none') + ax.add_patch(rect) + + def 
get_ind_offsets(inds): + return np.concatenate([[0], np.cumsum([len(I) for I in inds])]) + + def add_rects(block, c0=(0, 0), w0=1, h0=1): + row_offsets = get_ind_offsets(block._row_block_inds) + col_offsets = get_ind_offsets(block._col_block_inds) + for (i, row), (j, col) in it.product( + enumerate(row_offsets[:-1]), + enumerate(col_offsets[:-1]) + ): + w = w0*(row_offsets[i + 1] - row)/row_offsets[-1] + h = h0*(col_offsets[j + 1] - col)/col_offsets[-1] + i0, j0 = c0 + c = (i0 + w0*row/row_offsets[-1], j0 + h0*col/col_offsets[-1]) + + child = block._blocks[i, j] + if child.is_leaf: + if isinstance(child, cff.FormFactorSvdBlock): + facecolor = 'cyan' if child.compressed else 'orange' + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor=facecolor) + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorZeroBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='black') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorSparseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='white') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorDenseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='magenta') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorNullBlock): + continue + else: + raise Exception('TODO: add %s to _plot_block' % type(child)) + else: + add_rects(child, c, w, h) + + rect = patches.Rectangle( + c, w, h, linewidth=1, edgecolor='k', facecolor='none') + ax.add_patch(rect) + + add_rects(block) + + ax.invert_xaxis() + + return fig, ax \ No newline at end of file diff --git a/examples/gerlache/make_block_plots_cliques.py b/examples/gerlache/make_block_plots_cliques.py new file mode 100644 index 0000000..47f0765 --- /dev/null +++ b/examples/gerlache/make_block_plots_cliques.py @@ -0,0 +1,178 @@ +import itertools as it +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np + +import flux.compressed_form_factors_nmf as cff +from flux.compressed_form_factors_cliques import CliquedFormFactorMatrix + +import scipy +import os +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "saca","sbrp","rand_sid"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.add_argument('--plot_labels', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +def plot_blocks(block, fig, **kwargs): + + if 'figsize' in kwargs: + fig.set_size_inches(*kwargs['figsize']) + else: + fig.set_size_inches(12, 12) + + ax = fig.add_subplot() + ax.axis('off') + + ax.set_xlim(-0.001, 1.001) + ax.set_ylim(-0.001, 1.001) + + rect = patches.Rectangle((0, 0), 1, 1, linewidth=1, edgecolor='k', + facecolor='none') + ax.add_patch(rect) + + def get_ind_offsets(inds): + return np.concatenate([[0], np.cumsum([len(I) for I in inds])]) + + def add_rects(block, c0=(0, 0), w0=1, h0=1): + row_offsets = 
get_ind_offsets(block._row_block_inds) + col_offsets = get_ind_offsets(block._col_block_inds) + for (i, row), (j, col) in it.product( + enumerate(row_offsets[:-1]), + enumerate(col_offsets[:-1]) + ): + w = w0*(row_offsets[i + 1] - row)/row_offsets[-1] + h = h0*(col_offsets[j + 1] - col)/col_offsets[-1] + i0, j0 = c0 + c = (i0 + w0*row/row_offsets[-1], j0 + h0*col/col_offsets[-1]) + + child = block._blocks[i, j] + if child.is_leaf: + if isinstance(child, cff.FormFactorSvdBlock) or isinstance(child, cff.FormFactorSparseSvdBlock) or \ + isinstance(child, cff.FormFactorNmfBlock) or isinstance(child, cff.FormFactorSparseNmfBlock) or \ + isinstance(child, cff.FormFactorSparseAcaBlock) or isinstance(child, cff.FormFactorSparseBrpBlock) or isinstance(child, cff.FormFactorSparseIdBlock): + facecolor = 'cyan' if child.compressed else 'orange' + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor=facecolor) + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorZeroBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='black') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorSparseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='white') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorDenseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='magenta') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorNullBlock): + continue + else: + raise Exception('TODO: add %s to _plot_block' % type(child)) + + if args.plot_labels: + ax.text(c[0] + 0.5*w, c[1] + 0.5*h, "{:.0e}".format(np.prod(child.shape)), transform=ax.transAxes, fontsize=8, verticalalignment='center', horizontalalignment='center') + else: + add_rects(child, c, w, h) + + rect = patches.Rectangle( + c, w, h, linewidth=1, edgecolor='k', facecolor='none') + ax.add_patch(rect) + + add_rects(block) + + ax.invert_xaxis() + + return fig, ax + + + + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) + +if compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == "wsnmf": + FF_dir = 
"{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "saca": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "sbrp": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_sid": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + + +FF_path = "results_cliques/"+ FF_dir + "/FF_{}_{}_{:.0e}_{}.bin".format(max_inner_area_str, max_outer_area_str, args.tol, compression_type) +if not os.path.exists(FF_path): + print("PATH DOES NOT EXIST " + FF_path) + assert False +FF = CliquedFormFactorMatrix.from_file(FF_path) + + +fig = plt.figure(figsize=(18, 6)) # , figsize=(18, 6)) +print(f'- {FF_path}') +fig, ax = plot_blocks(FF._root, fig) +if args.plot_labels: + fig.savefig("results_cliques/"+FF_dir+"/block_plot_labeled.png") +else: + fig.savefig("results_cliques/"+FF_dir+"/block_plot.png") +plt.close(fig) \ No newline at end of file diff --git a/examples/gerlache/make_block_plots_nmf.py b/examples/gerlache/make_block_plots_nmf.py new file mode 100644 index 0000000..bfc1c90 --- /dev/null +++ b/examples/gerlache/make_block_plots_nmf.py @@ -0,0 +1,206 @@ +import itertools as it +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np + +import flux.compressed_form_factors_nmf as cff +from flux.compressed_form_factors_nmf import CompressedFormFactorMatrix + +import scipy +import os +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "saca","sbrp","rand_sid", + "true_model"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--min_depth', type=int, default=1) +parser.add_argument('--max_depth', type=int, default=0) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.add_argument('--roi', action='store_true') + +parser.add_argument('--plot_labels', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + + +def plot_blocks(block, fig, **kwargs): + + if 'figsize' in kwargs: + fig.set_size_inches(*kwargs['figsize']) + else: + fig.set_size_inches(12, 12) + + ax = fig.add_subplot() + ax.axis('off') + + ax.set_xlim(-0.001, 1.001) + ax.set_ylim(-0.001, 1.001) + + rect = patches.Rectangle((0, 0), 1, 1, linewidth=1, edgecolor='k', + facecolor='none') + ax.add_patch(rect) + + def get_ind_offsets(inds): + return np.concatenate([[0], np.cumsum([len(I) for I in inds])]) + + def add_rects(block, c0=(0, 0), w0=1, h0=1): + row_offsets = get_ind_offsets(block._row_block_inds) + col_offsets = get_ind_offsets(block._col_block_inds) + for (i, row), (j, col) in it.product( + 
enumerate(row_offsets[:-1]), + enumerate(col_offsets[:-1]) + ): + w = w0*(row_offsets[i + 1] - row)/row_offsets[-1] + h = h0*(col_offsets[j + 1] - col)/col_offsets[-1] + i0, j0 = c0 + c = (i0 + w0*row/row_offsets[-1], j0 + h0*col/col_offsets[-1]) + + child = block._blocks[i, j] + if child.is_leaf: + if isinstance(child, cff.FormFactorSvdBlock) or isinstance(child, cff.FormFactorSparseSvdBlock) or \ + isinstance(child, cff.FormFactorNmfBlock) or isinstance(child, cff.FormFactorSparseNmfBlock) or \ + isinstance(child, cff.FormFactorSparseAcaBlock) or isinstance(child, cff.FormFactorSparseBrpBlock) or isinstance(child, cff.FormFactorSparseIdBlock): + facecolor = 'cyan' if child.compressed else 'orange' + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor=facecolor) + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorZeroBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='black') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorSparseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='white') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorDenseBlock): + rect = patches.Rectangle( + c, w, h, edgecolor='none', facecolor='magenta') + ax.add_patch(rect) + elif isinstance(child, cff.FormFactorNullBlock): + continue + else: + raise Exception('TODO: add %s to _plot_block' % type(child)) + + if args.plot_labels: + ax.text(c[0] + 0.5*w, c[1] + 0.5*h, "{:.0e}".format(np.prod(child.shape)), transform=ax.transAxes, fontsize=8, verticalalignment='center', horizontalalignment='center') + else: + add_rects(child, c, w, h) + + rect = patches.Rectangle( + c, w, h, linewidth=1, edgecolor='k', facecolor='none') + ax.add_patch(rect) + + add_rects(block) + + ax.invert_xaxis() + + return fig, ax + + + + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) + +max_depth = args.max_depth if args.max_depth != 0 else None + +if compression_type == "true_model": + FF_dir = "true_{}_{}".format(max_inner_area_str, max_outer_area_str) + +elif compression_type == "svd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_svd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "rand_ssvd": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + +elif compression_type == "nmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "rand_snmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + +elif compression_type == 
"wsnmf": + FF_dir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, args.tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + +elif compression_type == "saca": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "sbrp": + FF_dir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.k0) + +elif compression_type == "rand_sid": + FF_dir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, args.tol, + args.p, args.q, args.k0) + + +if not compression_type == "true_model" and args.min_depth != 1: + FF_dir += "_{}mindepth".format(args.min_depth) + +if not compression_type == "true_model" and max_depth is not None: + FF_dir += "_{}maxdepth".format(max_depth) + +if args.roi: + FF_dir = "roi_" + FF_dir + + +if compression_type == 'true_model': + FF_path = 'results/' + FF_dir + f'/FF_{max_inner_area_str}_{max_outer_area_str}.npz' + if not os.path.exists(FF_path): + print("PATH DOES NOT EXIST " + FF_path) + assert False + FF = scipy.sparse.load_npz(FF_path) +else: + FF_path = "results/"+ FF_dir + "/FF_{}_{}_{:.0e}_{}.bin".format(max_inner_area_str, max_outer_area_str, args.tol, compression_type) + if not os.path.exists(FF_path): + print("PATH DOES NOT EXIST " + FF_path) + assert False + FF = CompressedFormFactorMatrix.from_file(FF_path) + + +fig = plt.figure(figsize=(18, 6)) # , figsize=(18, 6)) +print(f'- {FF_path}') +fig, ax = plot_blocks(FF._root, fig) +if args.plot_labels: + fig.savefig("results/"+FF_dir+"/block_plot_labeled.png") +else: + fig.savefig("results/"+FF_dir+"/block_plot.png") +plt.close(fig) \ No newline at end of file diff --git a/examples/gerlache/make_cliqued_form_factor_matrix.py b/examples/gerlache/make_cliqued_form_factor_matrix.py new file mode 100644 index 0000000..462a9a7 --- /dev/null +++ b/examples/gerlache/make_cliqued_form_factor_matrix.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +import numpy as np +import scipy.sparse + +from flux.form_factors import get_form_factor_matrix +from flux.compressed_form_factors_cliques import CliquedFormFactorMatrix +from flux.shape import CgalTrimeshShapeModel, get_surface_normals + +import argparse +import arrow +import os + +parser = argparse.ArgumentParser() +parser.add_argument('--compression_type', type=str, default="svd",choices=["nmf","snmf","wsnmf", + "svd","ssvd", + "rand_svd","rand_ssvd","rand_snmf", + "saca","sbrp","rand_sid"]) +parser.add_argument('--max_inner_area', type=float, default=0.8) +parser.add_argument('--max_outer_area', type=float, default=3.0) +parser.add_argument('--tol', type=float, default=1e-1) + +parser.add_argument('--nmf_max_iters', type=int, default=int(1e4)) +parser.add_argument('--nmf_tol', type=float, default=1e-2) + +parser.add_argument('--k0', type=int, default=40) + +parser.add_argument('--p', type=int, default=5) +parser.add_argument('--q', type=int, default=1) + +parser.add_argument('--nmf_beta_loss', type=int, default=2, choices=[1,2]) + +parser.add_argument('--load_ff', action='store_true') + +parser.set_defaults(feature=False) + +args = parser.parse_args() + +# produce compressed FF from shapemodel produced by make_mesh.py + +compression_type = args.compression_type +max_inner_area_str = str(args.max_inner_area) +max_outer_area_str = str(args.max_outer_area) +tol_str = "{:.0e}".format(args.tol) +tol = 
args.tol + + +if compression_type == "svd": + compression_params = { + "k0": args.k0 + } + + savedir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.k0) + + +elif compression_type == "ssvd": + compression_params = { + "k0": args.k0 + } + + savedir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.k0) + + +elif compression_type == "rand_svd": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.p, args.q, args.k0) + + +elif compression_type == "rand_ssvd": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.p, args.q, args.k0) + + +elif compression_type == "nmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "beta_loss": args.nmf_beta_loss + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "klnmf", max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +elif compression_type == "snmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "beta_loss": args.nmf_beta_loss + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "sklnmf", max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +elif compression_type == "rand_snmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.p, args.q, args.k0) + + +elif compression_type == "wsnmf": + compression_params = { + "max_iters": args.nmf_max_iters, + "nmf_tol": args.nmf_tol, + "k0": args.k0, + "beta_loss": args.nmf_beta_loss + } + + savedir = "{}_{}_{}_{:.0e}_{:.0e}it_{:.0e}tol_{}k0".format(compression_type if args.nmf_beta_loss==2 else "wsklnmf", max_inner_area_str, max_outer_area_str, tol, + args.nmf_max_iters, args.nmf_tol, args.k0) + + +elif compression_type == "saca": + compression_params = { + "k0": args.k0 + } + + savedir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.k0) + + +elif compression_type == "sbrp": + compression_params = { + "k0": args.k0 + } + + savedir = "{}_{}_{}_{:.0e}_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.k0) + + +elif compression_type == "rand_sid": + compression_params = { + "k0": args.k0, + "p": args.p, + "q": args.q + } + + savedir = "{}_{}_{}_{:.0e}_{}p_{}q_{}k0".format(compression_type, max_inner_area_str, max_outer_area_str, tol, + args.p, args.q, args.k0) + + + +savedir = "results_cliques/"+savedir +if not os.path.exists('results_cliques'): + os.mkdir('results_cliques') +if not os.path.exists(savedir): + os.mkdir(savedir) + + +verts = np.load(f'gerlache_verts_{max_inner_area_str}_{max_outer_area_str}.npy') +faces = np.load(f'gerlache_faces_{max_inner_area_str}_{max_outer_area_str}.npy') + +# convert verts from km to m +verts *= 1e3 + +normals = 
get_surface_normals(verts, faces) +normals[normals[:, 2] > 0] *= -1 + +shape_model = CgalTrimeshShapeModel(verts, faces, normals) + + +start_assembly_time = arrow.now() + +if args.load_ff: + path = f"results/true_{max_inner_area_str}_{max_outer_area_str}/FF_{max_inner_area_str}_{max_outer_area_str}.npz" + FF_uncompressed = scipy.sparse.load_npz(path) +else: + FF_uncompressed = get_form_factor_matrix(shape_model) + +FF = CliquedFormFactorMatrix( + shape_model, FF_uncompressed, tol=tol, min_size=16384, compression_type=compression_type, compression_params=compression_params) + +assembly_time = (arrow.now() - start_assembly_time).total_seconds() + +np.save(savedir+f'/FF_assembly_time.npy', np.array(assembly_time)) + +FF.save(savedir+f'/FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}_{compression_type}.bin') \ No newline at end of file diff --git a/examples/gerlache/make_compressed_form_factor_matrix.py b/examples/gerlache/make_compressed_form_factor_matrix.py index 400bcba..f8d4f6a 100755 --- a/examples/gerlache/make_compressed_form_factor_matrix.py +++ b/examples/gerlache/make_compressed_form_factor_matrix.py @@ -5,15 +5,18 @@ import sys from flux.form_factors import get_form_factor_matrix -from flux.compressed_form_factors import CompressedFormFactorMatrix +from flux.compressed_form_factors import CompressedFormFactorMatrix, FormFactorPartitionBlock from flux.shape import CgalTrimeshShapeModel, get_surface_normals from flux.util import tic, toc +# produce compressed FF from shapemodel produced by make_mesh.py + max_inner_area_str = sys.argv[1] max_outer_area_str = sys.argv[2] tol_str = sys.argv[3] tol = float(tol_str) +parts = None verts = np.load(f'gerlache_verts_{max_inner_area_str}_{max_outer_area_str}.npy') faces = np.load(f'gerlache_faces_{max_inner_area_str}_{max_outer_area_str}.npy') @@ -26,9 +29,25 @@ shape_model = CgalTrimeshShapeModel(verts, faces, normals) +# take 4 parts among faces +# parts = np.array_split(range(faces.shape[0]),4,axis=0) +# parts = None + # use quadtree by default tic() -FF = CompressedFormFactorMatrix(shape_model, tol=tol, min_size=16384) +if parts is None: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384) #, max_depth=5) #, force_max_depth=True) +else: + FF = CompressedFormFactorMatrix( + shape_model, tol=tol, parts=parts, min_size=16384, + RootBlock=FormFactorPartitionBlock) + +if False: + FF_std = CompressedFormFactorMatrix( + shape_model, tol=tol, min_size=16384) + assert FF_std == FF + assembly_time = toc() with open('FF_assembly_times.txt', 'a') as f: diff --git a/examples/gerlache/make_equil_T_plots.py b/examples/gerlache/make_equil_T_plots.py index 6941028..bec6065 100755 --- a/examples/gerlache/make_equil_T_plots.py +++ b/examples/gerlache/make_equil_T_plots.py @@ -18,10 +18,10 @@ xmax = params['xmax'] ymin = params['ymin'] ymax = params['ymax'] -extent = [ymin, ymax, xmin, xmax] +extent = [xmin, xmax, ymin, ymax] r_ROI = params['ROI_radius'] -x_ROI, y_ROI = (xmin + xmax)/2, (ymin + ymax)/2 +x_ROI, y_ROI = (ymin + ymax)/2, (xmin + xmax)/2 with open('equil_T_data.json') as f: data = json.load(f) @@ -68,75 +68,84 @@ def plot_ROI_circle(**kwargs): rel_l2_errors[area_str] = dict() for tol_str in tol_strs: - print(f'- tol: {tol_str}') - - FF_data = get_FF_data(area_str, tol_str) - - P_st, A = FF_data['P_st'], FF_data['A'] - - I_ROI = (P_st[:, 0] - x_ROI)**2 + (P_st[:, 1] - y_ROI)**2 < r_ROI**2 - - T = FF_data['T'] - T_true = FF_true_data['T'] - - rel_l2_err = np.linalg.norm((T[I_ROI] - T_true[I_ROI])*A[I_ROI]) \ - / 
np.linalg.norm(T_true[I_ROI]*A[I_ROI]) - - print(f' * relative l2 error: {rel_l2_err}') - - rel_l2_errors[area_str][tol_str] = rel_l2_err - - T_grid = FF_data['T_grid'] - T_true_grid = FF_true_data['T_grid'] - - vmax = max(T_grid.max(), T_true_grid.max()) - - plt.figure(figsize=(12, 5)) - plt.subplot(1, 2, 1) - plt.title('$T$ (true)') - plt.imshow(T_true_grid, interpolation='none', extent=extent, - vmin=0, vmax=vmax, cmap=cc.cm.fire) - plot_ROI_circle(c='cyan', linewidth=1, linestyle='--', zorder=2) - plt.ylabel('$x$') - plt.xlabel('$y$') - plt.colorbar() - plt.gca().set_aspect('equal') - plt.subplot(1, 2, 2) - plt.title(r'$T$ (tol = %s - true)' % tol_str) - plt.imshow(T_grid-T_true_grid, interpolation='none', extent=extent, - vmin=0, vmax=1, cmap=cc.cm.fire, zorder=1) - plot_ROI_circle(c='cyan', linewidth=1, linestyle='--', zorder=2) - plt.ylabel('$x$') - plt.xlabel('$y$') - plt.colorbar() - plt.gca().set_aspect('equal') - plt.tight_layout() - plt.savefig(plotdir/f'{area_str}_{tol_str}_T.png') - plt.close() - - T_error_grid = T_grid - T_true_grid - T_max_grid = np.maximum(T_grid, T_true_grid) - T_max_grid[T_max_grid == 0] = np.inf - rel_error_grid = abs(T_error_grid)/T_max_grid - - plt.figure() - plt.imshow(rel_error_grid, extent=extent, interpolation='none', - vmin=0, vmax=max_tol, cmap=cc.cm.bmw) - plot_ROI_circle(c='cyan', linewidth=1, linestyle='--', zorder=2) - plt.xlabel('$y$') - plt.ylabel('$x$') - plt.colorbar() - plt.gca().set_aspect('equal') - plt.tight_layout() - plt.savefig(plotdir/f'{area_str}_{tol_str}_rel_error.png') - plt.close() + + try: + print(f'- tol: {tol_str}') + + FF_data = get_FF_data(area_str, tol_str) + + P_st, A = FF_data['P_st'], FF_data['A'] + + I_ROI = (P_st[:, 0] - x_ROI)**2 + (P_st[:, 1] - y_ROI)**2 < r_ROI**2 + + T = FF_data['T'] + T_true = FF_true_data['T'] + + rel_l2_err = np.linalg.norm((T[I_ROI] - T_true[I_ROI])*A[I_ROI]) \ + / np.linalg.norm(T_true[I_ROI]*A[I_ROI]) + + print(f' * relative l2 error: {rel_l2_err}') + + rel_l2_errors[area_str][tol_str] = rel_l2_err + + T_grid = FF_data['T_grid'].T[::-1,:] + T_true_grid = FF_true_data['T_grid'].T[::-1,:] + + vmax = max(T_grid.max(), T_true_grid.max()) + + plt.figure(figsize=(12, 5)) + plt.subplot(1, 2, 1) + plt.title('$T$ (true)') + plt.imshow(T_true_grid, interpolation='none', extent=extent, + vmin=0, vmax=vmax, cmap=cc.cm.fire) + plot_ROI_circle(c='cyan', linewidth=1, linestyle='--', zorder=2) + plt.ylabel('$y$') + plt.xlabel('$x$') + plt.colorbar() + plt.gca().set_aspect('equal') + plt.subplot(1, 2, 2) + plt.title(f'$T$ (tol = {tol_str} - true), ' + # f'max:{round(np.max(np.abs(T_grid-T_true_grid)),2)}, ' + f'median:{round(np.median(np.abs(T_grid-T_true_grid)),2)}') + plt.imshow(T_grid-T_true_grid, interpolation='none', extent=extent, + vmin=0, vmax=2, cmap=cc.cm.fire, zorder=1) + plot_ROI_circle(c='cyan', linewidth=1, linestyle='--', zorder=2) + plt.ylabel('$y$') + plt.xlabel('$x$') + plt.colorbar() + plt.gca().set_aspect('equal') + plt.tight_layout() + plt.savefig(plotdir/f'{area_str}_{tol_str}_T.png') + plt.close() + + T_error_grid = T_grid - T_true_grid + T_max_grid = np.maximum(T_grid, T_true_grid) + T_max_grid[T_max_grid == 0] = np.inf + rel_error_grid = abs(T_error_grid)/T_max_grid + + plt.figure() + plt.imshow(rel_error_grid, extent=extent, interpolation='none', + vmin=0, vmax=max_tol, cmap=cc.cm.bmw) + plot_ROI_circle(c='cyan', linewidth=1, linestyle='--', zorder=2) + plt.xlabel('$y$') + plt.ylabel('$x$') + plt.colorbar() + plt.gca().set_aspect('equal') + plt.tight_layout() + 
plt.savefig(plotdir/f'{area_str}_{tol_str}_rel_error.png')
+        plt.close()
+    except Exception:
+        print(f"Plotting failed for tol = {tol_str}; skipping")

 # make relative l2 errors plot
 plt.figure()
 for tol_str in tol_strs:
-    tol = float(tol_str)
-    errs = [rel_l2_errors[area_str][tol_str] for area_str in area_strs]
-    plt.axhline(y=tol, c='k', linewidth=1, linestyle='--', zorder=1)
-    plt.loglog(max_inner_areas, errs, label=tol_str, marker='*', zorder=2)
+    try:
+        tol = float(tol_str)
+        errs = [rel_l2_errors[area_str][tol_str] for area_str in area_strs]
+        plt.axhline(y=tol, c='k', linewidth=1, linestyle='--', zorder=1)
+        plt.loglog(max_inner_areas, errs, label=tol_str, marker='*', zorder=2)
+    except Exception:
+        print(f"Missing data for tol = {tol_str}; skipping")

 plt.legend()
 plt.savefig(plotdir/'rel_l2_errors_area_vs_tol.png')
diff --git a/examples/gerlache/make_mesh.py b/examples/gerlache/make_mesh.py
index 604dc0c..33add5d 100755
--- a/examples/gerlache/make_mesh.py
+++ b/examples/gerlache/make_mesh.py
@@ -1,10 +1,12 @@
 #!/usr/bin/env python
+import matplotlib.pyplot as plt
+import meshio
 import meshpy.triangle as triangle
 import numpy as np
 import sys
 import rioxarray as rio

-path = 'ldem_87s_5mpp' # 'LDEM_80S_150M' #
+path = 'ldem_87s_50mpp' # '/home/sberton2/Lavoro/code/nac_kplo/aux/ldem_20mpp_Stefano' # 'LDEM_80S_150M' #
 tif_path = path + '.tif'

 max_inner_area_str = sys.argv[1]
@@ -106,8 +108,8 @@ def stereo2cart(x, y):
     cos_el = np.cos(el)
     return r*np.array([cos_el*np.cos(az), cos_el*np.sin(az), np.sin(el)])

-xc_roi, yc_roi = -45, 0
-r_roi, R_roi = 20, 40
+xc_roi, yc_roi = 8, -6
+r_roi, R_roi = 10, 30

 x0_roi, x1_roi = xc_roi - R_roi, xc_roi + R_roi
 y0_roi, y1_roi = yc_roi - R_roi, yc_roi + R_roi
@@ -153,4 +155,4 @@ def should_refine(verts, _):
 area_str = f'{max_inner_area_str}_{max_outer_area_str}'
 np.save(f'gerlache_verts_stereo_{area_str}', verts_stereo)
 np.save(f'gerlache_verts_{area_str}', verts)
-np.save(f'gerlache_faces_{area_str}', faces)
\ No newline at end of file
+np.save(f'gerlache_faces_{area_str}', faces)
diff --git a/examples/gerlache/make_time_dep_plots.py b/examples/gerlache/make_time_dep_plots.py
index 0165f94..338d41d 100755
--- a/examples/gerlache/make_time_dep_plots.py
+++ b/examples/gerlache/make_time_dep_plots.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+import glob

 import json_numpy as json
 import matplotlib.pyplot as plt
@@ -22,12 +23,15 @@
 # ymax = -35

 # extent = [xmin, xmax, ymin, ymax]

-with open('equil_T_data.json') as f:
-    data = json.load(f)
+# with open('equil_T_data.json') as f:
+#     data = json.load(f)
+
+FF_paths = glob.glob("FF_*_*_*-1.bin")[1:] # [_ for _ in data.keys() if _[-4:] == '.bin']
+FF_true_paths = glob.glob("FF_*_*.npz")[1:] # [_ for _ in data.keys() if _[-4:] == '.npz']
+# assert len(FF_paths) + len(FF_true_paths) == len(data)
+print(FF_paths)
+print(FF_true_paths)

-FF_paths = [_ for _ in data.keys() if _[-4:] == '.bin']
-FF_true_paths = [_ for _ in data.keys() if _[-4:] == '.npz']
-assert len(FF_paths) + len(FF_true_paths) == len(data)

 # get list of area strings sorted in decreasing order of max inner
 # area (i.e., in increasing order of problem size and mesh fineness)
@@ -43,7 +47,9 @@
 T_frames_path = Path('T_frames')
 T_frame_dirs = list(T_frames_path.iterdir())

-frame_names = [_.name for _ in T_frame_dirs[0].glob('T*.npy')]
+frame_names = [_.name for _ in T_frame_dirs[0].glob('T*_29.npy')]
+frame_names = sorted(frame_names, key=lambda _:float(_.split('_')[0].split('T')[-1]))
+print(frame_names)

 rel_l2_errors = dict()
 T_avgs = dict()
@@ -113,7 +119,7 @@

 plt.legend()
 plt.title(f'Relative $\ell_2$ error in 
temperature field') plt.ylabel(r'$\|\hat{T}_n - T_n\|_2/\|T_n\|$') -plt.xlabel('$n$ (Iteration)') +plt.xlabel('$n$ (time-step)') plt.savefig('gerlache_plots/time_dep_error.png') plt.figure() @@ -126,5 +132,5 @@ plt.legend() plt.title(f'Average temperature [K]') plt.ylabel(r'$$') -plt.xlabel('$n$ (Iteration)') +plt.xlabel('$n$ (time-step)') plt.savefig('gerlache_plots/time_dep_T_avg.png') diff --git a/examples/gerlache/plot_T_spinup.py b/examples/gerlache/plot_T_spinup.py index 24d77bf..f6b945b 100644 --- a/examples/gerlache/plot_T_spinup.py +++ b/examples/gerlache/plot_T_spinup.py @@ -32,7 +32,7 @@ V = np.hstack([V,np.repeat(1737.4,V.shape[0])[:,np.newaxis]]) # count number of spin-up iterations - niter = len(glob.glob(f"T_frames/{max_inner_area_str}_{max_outer_area_str}_{tol_str}/T00_*.npy")) + niter = len(glob.glob(f"T_frames/{max_inner_area_str}_{max_outer_area_str}_{tol_str}/T000_*.npy")) print(f"- Processing #{niter} spin-up iterations") # loop over all spin-up iterations diff --git a/examples/gerlache/run_example.sh b/examples/gerlache/run_example.sh index 643e14f..126e68a 100755 --- a/examples/gerlache/run_example.sh +++ b/examples/gerlache/run_example.sh @@ -1,12 +1,12 @@ #!/usr/bin/env bash -MAX_INNER_AREAS=("0.05") # "1.6" "0.8" "0.4" "0.2" "0.1") +MAX_INNER_AREAS=("1.6" "0.8" "0.4" "0.2") # "0.1") MAX_OUTER_AREA="3.0" # very coarse TOLS=(1e-1 1e-2) NITER=20 FROM_ITER=0 -if true; then +if false; then if [ ! -f ldem_87s_5mpp.tif ]; then echo "didn't find DEM. downloading it from PGDA..." @@ -26,6 +26,7 @@ do echo "- max inner area: $MAX_INNER_AREA, max outer area: $MAX_OUTER_AREA" ./make_mesh.py $MAX_INNER_AREA $MAX_OUTER_AREA done +fi echo "computing FF matrices" rm -f FF_assembly_times.txt @@ -40,7 +41,19 @@ do echo " * computed compressed FF matrix (tol = $TOL)" done done -fi +#fi + +## make block matrix plots +./make_block_plots.py + +## collect data to make equilibrium temperature plots +./collect_equil_T_data.py + +## make plots comparing form factor matrices +./make_equil_T_plots.py +./make_FF_comparison_plots.py + +exit # collect data for time-dependent plots echo "collecting time-dependent data" @@ -57,20 +70,15 @@ do ./clean_T_spinup.py $MAX_INNER_AREA $MAX_OUTER_AREA $TOL done done +fi -## collect data to make equilibrium temperature plots -#./collect_equil_T_data.py # ## make equilibrium temperature plots -#./make_equil_T_plots.py -./make_equil_T_post_spinup_plots.py +#./make_equil_T_post_spinup_plots.py # -## make plots comparing form factor matrices -#./make_FF_comparison_plots.py ## make time-dependent plots #./make_time_dep_plots.py # -## make block matrix plots -#./make_block_plots.py + diff --git a/examples/gerlache/run_example_partition.py b/examples/gerlache/run_example_partition.py new file mode 100644 index 0000000..d2a537a --- /dev/null +++ b/examples/gerlache/run_example_partition.py @@ -0,0 +1,82 @@ +import sys + +import numpy as np +import submitit as submitit + +from flux.compressed_form_factors import CompressedFormFactorMatrix, FormFactorPartitionBlock +from flux.quadtree import get_quadrant_order +from flux.shape import get_surface_normals, CgalTrimeshShapeModel +from flux.util import tic, toc + +radius = 1737.4 # km + +if __name__ == '__main__': + + # executor is the submission interface (logs are dumped in the folder) + executor = submitit.AutoExecutor(folder="log_slurm") + # set timeout in min, and partition for running the job + executor.update_parameters( # slurm_account='j1010', + slurm_name="flux_part", + slurm_cpus_per_task=10, + 
slurm_nodes=1,
+        slurm_time=60 * 99,  # minutes
+        slurm_mem='90G',
+        slurm_array_parallelism=5)

+    max_inner_area_str = sys.argv[1]
+    max_outer_area_str = sys.argv[2]
+
+    tol_str = sys.argv[3]
+    tol = float(tol_str)
+    try:
+        num_parts = int(sys.argv[4])
+        print(num_parts)
+    except (IndexError, ValueError):
+        num_parts = None
+        parts = None
+        print("- no valid num_parts given, falling back to quadtree ordering")
+
+    verts = np.load(f'gerlache_verts_{max_inner_area_str}_{max_outer_area_str}.npy')
+    faces = np.load(f'gerlache_faces_{max_inner_area_str}_{max_outer_area_str}.npy')
+
+    # convert verts from km to m
+    verts *= 1e3
+
+    normals = get_surface_normals(verts, faces)
+    normals[normals[:, 2] > 0] *= -1
+
+    shape_model = CgalTrimeshShapeModel(verts, faces, normals)
+
+    if num_parts is not None:
+        # split mesh in 4 equal sets of faces (row-wise)
+        # parts = np.array_split(range(faces.shape[0]), num_parts, axis=0)
+        # or follow quadtree scheme
+        P = shape_model.P
+        PI = P[:, :2] # get (x,y)
+        parts = [I for I in get_quadrant_order(PI)]
+        print(parts[0])
+        # apply the same for each part in parts to get higher order division
+        if num_parts == 4: # TODO extend for num_parts > 4
+            parts2 = []
+            for p in parts:
+                JJ = [I for I in get_quadrant_order(PI[p])]
+                parts2.extend([p[J] for J in JJ])
+        elif num_parts > 2:
+            print("Please use num_parts in [None, 2, 4]. Exiting.")
+            exit()
+
+    # use quadtree by default
+    tic()
+    if parts is None:
+        FF = CompressedFormFactorMatrix(
+            shape_model, tol=tol, min_size=16384)
+    else:
+        FF = CompressedFormFactorMatrix(
+            shape_model, tol=tol, parts=parts, min_size=16384,
+            RootBlock=FormFactorPartitionBlock) #, slurm=executor)
+    assembly_time = toc()
+
+    with open('FF_assembly_times.txt', 'a') as f:
+        print(f'{tol_str} {max_inner_area_str} {max_outer_area_str} {assembly_time}', file=f)
+
+    FF.save(f'FF_{max_inner_area_str}_{max_outer_area_str}_{tol_str}.bin')
diff --git a/examples/gerlache/simple_abs.furnsh b/examples/gerlache/simple_abs.furnsh
new file mode 100644
index 0000000..4dcf85c
--- /dev/null
+++ b/examples/gerlache/simple_abs.furnsh
@@ -0,0 +1,22 @@
+\begindata
+
+  PATH_VALUES = ( 'temperature_computation/python-flux/examples/kernels' )
+
+  PATH_SYMBOLS = ( 'KERNELS_COMMON' )
+
+  KERNELS_TO_LOAD = (
+    '$KERNELS_COMMON/pck00010.tpc',
+    '$KERNELS_COMMON/moon_080317.tf',
+    '$KERNELS_COMMON/moon_assoc_me.tf',
+    '$KERNELS_COMMON/moon_pa_de421_1900-2050.bpc',
+    '$KERNELS_COMMON/naif0012.tls',
+    '$KERNELS_COMMON/de421.bsp',
+    '$KERNELS_COMMON/spinup_Sun_MOONME_ONEYEAR_m1p5deg.bsp'
+  )
+
+\begintext
+  '$KERNELS_COMMON/pck00010_msgr_v23.tpc'
+  '$KERNELS_COMMON/spinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp'
+
+
+
diff --git a/examples/gerlache/simple_long_spinup.furnsh b/examples/gerlache/simple_long_spinup.furnsh
new file mode 100644
index 0000000..3b88f1e
--- /dev/null
+++ b/examples/gerlache/simple_long_spinup.furnsh
@@ -0,0 +1,22 @@
+\begindata
+
+  PATH_VALUES = ( '../kernels' )
+
+  PATH_SYMBOLS = ( 'KERNELS_COMMON' )
+
+  KERNELS_TO_LOAD = (
+    '$KERNELS_COMMON/pck00010.tpc',
+    '$KERNELS_COMMON/moon_080317.tf',
+    '$KERNELS_COMMON/moon_assoc_me.tf',
+    '$KERNELS_COMMON/moon_pa_de421_1900-2050.bpc',
+    '$KERNELS_COMMON/naif0012.tls',
+    '$KERNELS_COMMON/de421.bsp',
+    '$KERNELS_COMMON/longspinup_Sun_MOONME_m1p5deg.bsp'
+  )
+
+\begintext
+  '$KERNELS_COMMON/pck00010_msgr_v23.tpc'
+  '$KERNELS_COMMON/spinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp'
+
+
+
diff --git a/examples/gerlache/spice_util.py b/examples/gerlache/spice_util.py
index 0746b76..e9cfe2e 100644
--- a/examples/gerlache/spice_util.py
+++ b/examples/gerlache/spice_util.py
@@ 
-1,4 +1,4 @@ -def get_sunvec(utc0, target, observer, frame, utc1=0, stepet=1, et_linspace=None): +def get_sunvec(utc0, target, observer, frame, utc1=0, stepet=1, et_linspace=None, path_to_furnsh='aux/simple.furnsh'): '''This script uses SPICE to compute a trajectory for the sun, loads a shape model discretizing a patch of the lunar south pole (made using lsp_make_obj.py), and a compressed form factor matrix for that @@ -14,7 +14,7 @@ def get_sunvec(utc0, target, observer, frame, utc1=0, stepet=1, et_linspace=None clktol = '10:000' spice.kclear() - spice.furnsh(f'simple.furnsh') + spice.furnsh(path_to_furnsh) et0 = spice.str2et(utc0) if np.max(et_linspace) == None: et1 = spice.str2et(utc1) diff --git a/examples/kernels/longspinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp b/examples/kernels/longspinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp new file mode 100644 index 0000000..5b6ea3c Binary files /dev/null and b/examples/kernels/longspinup_Sun_MOONME_ONEMONTH_m1p5deg.bsp differ diff --git a/examples/kernels/longspinup_Sun_MOONME_m1p5deg.bsp b/examples/kernels/longspinup_Sun_MOONME_m1p5deg.bsp new file mode 100644 index 0000000..2f7959d Binary files /dev/null and b/examples/kernels/longspinup_Sun_MOONME_m1p5deg.bsp differ diff --git a/examples/notebooks/Clique Clustering on Mesh.ipynb b/examples/notebooks/Clique Clustering on Mesh.ipynb new file mode 100644 index 0000000..7212742 --- /dev/null +++ b/examples/notebooks/Clique Clustering on Mesh.ipynb @@ -0,0 +1,334 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "2b3892e7", + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import scipy\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib.colors import LogNorm\n", + "from flux.shape import CgalTrimeshShapeModel, get_surface_normals\n", + "\n", + "import pyvista as pv" + ] + }, + { + "cell_type": "markdown", + "id": "1d2cded9", + "metadata": {}, + "source": [ + "# Clique Clustering on Mesh\n", + "\n", + "Using the adjacency matrix induced by the uncompressed form factor, extract \"cliques\" which are \"almost\" fully-connected sub-graphs in the induced graph. The cliques should correspond to sets of triangles which are \"mostly\" mutually visible.\n", + "\n", + "### Use the full form factor to extract cliques." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a89c77e1", + "metadata": {}, + "outputs": [], + "source": [ + "mesh_dir = 'python-flux/examples/shackleton_vary_outer/'\n", + "mesh_name = 'shackleton'" + ] + }, + { + "cell_type": "code", + "execution_count": 82, + "id": "39af45ab", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "4446052\n" + ] + } + ], + "source": [ + "FF = scipy.sparse.load_npz(f\"{mesh_dir}/results/true_3.0_80/FF_3.0_80.npz\")\n", + "FF_arr = FF.A\n", + "adjacency_FF = np.zeros(FF_arr.shape)\n", + "adjacency_FF[FF_arr > 0] = 1.\n", + "unweighted_FF = np.copy(adjacency_FF)\n", + "FF.nnz" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "id": "d08fb033", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████| 11125/11125 [01:10<00:00, 157.69it/s]\n" + ] + } + ], + "source": [ + "from tqdm import tqdm\n", + "\n", + "all_pseudo_cliques = []\n", + "for i in tqdm(np.random.permutation(unweighted_FF.shape[0])):\n", + " \n", + " # make sure the seed index is not already in a clique\n", + " if np.array([i in _pseudo_clique for _pseudo_clique in all_pseudo_cliques]).any():\n", + " continue\n", + " \n", + " nonzero_idx = list((unweighted_FF[i] > 0).nonzero()[0])\n", + " nonzero_idx.append(i)\n", + " nonzero_idx = np.array(nonzero_idx)\n", + " \n", + " # find all neighbors (visible triangles) which share a substantial number of neighbors with seed index i\n", + " _nonzero_idx = []\n", + " for j in nonzero_idx:\n", + " \n", + " if j == i:\n", + " _nonzero_idx.append(j)\n", + " \n", + " elif not np.array([j in _pseudo_clique for _pseudo_clique in all_pseudo_cliques]).any():\n", + " this_nonzero_idx = list((unweighted_FF[j] > 0).nonzero()[0])\n", + " this_nonzero_idx.append(j)\n", + " this_nonzero_idx = np.array(this_nonzero_idx)\n", + " num_intersecting = np.intersect1d(nonzero_idx, this_nonzero_idx).shape[0]\n", + " \n", + " if num_intersecting >= 0.5*min(nonzero_idx.shape[0], this_nonzero_idx.shape[0]):\n", + " _nonzero_idx.append(j)\n", + " nonzero_idx = np.array(_nonzero_idx)\n", + " \n", + " pseudo_clique = set(list(np.copy(nonzero_idx))) \n", + " all_pseudo_cliques.append(pseudo_clique)" + ] + }, + { + "cell_type": "markdown", + "id": "d59b9c49", + "metadata": {}, + "source": [ + "### Load the shape model and overlay the cliques which have at least 25 triangles." + ] + }, + { + "cell_type": "code", + "execution_count": 90, + "id": "f7f60ca1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(11125, 3)" + ] + }, + "execution_count": 90, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "V = np.load(f'{mesh_dir}/{mesh_name}_verts_3.0_80.npy')\n", + "F = np.load(f'{mesh_dir}/{mesh_name}_faces_3.0_80.npy')\n", + "\n", + "# convert verts from km to m\n", + "V *= 1e3\n", + "\n", + "N = get_surface_normals(V, F)\n", + "N[N[:, 2] > 0] *= -1\n", + "\n", + "faces_padded = np.concatenate([3*np.ones(F.shape[0],dtype=int).reshape(-1,1), F], axis=1)\n", + "\n", + "shape_model = CgalTrimeshShapeModel(V, F, N)\n", + "F.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "28ffde3b", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Widget(value=\"