diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..8b0e0fdafb --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,9 @@ +{ + "name": "MFC Container", + "image": "sbryngelson/mfc:latest-cpu", + "workspaceFolder": "/opt/MFC", + "settings": { + "terminal.integrated.shell.linux": "/bin/bash", + "editor.formatOnSave": true + }, +} \ No newline at end of file diff --git a/.github/.dockerignore b/.github/.dockerignore new file mode 100644 index 0000000000..57de78cc35 --- /dev/null +++ b/.github/.dockerignore @@ -0,0 +1,79 @@ +node_modules/ +package.json +yarn.lock + +.venv/ +.vscode/ +src/*/autogen/ + +*.swo +*.swp + +*:Zone.Identifier + +.nfs* + +__pycache__ + +*.egg-info + +.DS_Store + +# NVIDIA Nsight Compute +*.nsys-rep +*.sqlite + +docs/*/initial* +docs/*/result* +docs/documentation/*-example.png +docs/documentation/examples.md + +examples/*batch/*/ +examples/**/D/* +examples/**/p* +examples/**/D_* +examples/**/*.inf +examples/**/*.inp +examples/**/*.o* +examples/**/silo* +examples/**/restart_data* +examples/**/*.out +examples/**/binary +examples/**/fort.1 +examples/**/*.sh +examples/**/*.err +examples/**/viz/ +examples/*.jpg +examples/*.png +examples/*/workloads/ +examples/*/run-*/ +examples/*/logs/ +examples/**/*.f90 +workloads/ + +benchmarks/*batch/*/ +benchmarks/*/D/* +benchmarks/*/p* +benchmarks/*/D_* +benchmarks/*/*.inf +benchmarks/*/*.inp +benchmarks/*/*.dat +benchmarks/*/*.o* +benchmarks/*/silo* +benchmarks/*/restart_data* +benchmarks/*/*.out +benchmarks/*/binary +benchmarks/*/fort.1 +benchmarks/*/*.sh +benchmarks/*/*.err +benchmarks/*/viz/ +benchmarks/*.jpg +benchmarks/*.png + +*.mod + +# Video Files +*.mp4 +*.mov +*.mkv +*.avi diff --git a/.github/Dockerfile b/.github/Dockerfile new file mode 100644 index 0000000000..c7ddcd95a7 --- /dev/null +++ b/.github/Dockerfile @@ -0,0 +1,55 @@ +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +ARG TARGET +ARG CC_COMPILER +ARG CXX_COMPILER +ARG 
FC_COMPILER +ARG COMPILER_PATH +ARG COMPILER_LD_LIBRARY_PATH + +RUN apt-get update -y && \ + if [ "$TARGET" != "gpu" ]; then \ + apt-get install -y \ + build-essential git make cmake gcc g++ gfortran bc\ + python3 python3-venv python3-pip \ + openmpi-bin libopenmpi-dev libfftw3-dev \ + mpich libmpich-dev; \ + else \ + apt-get install -y \ + build-essential git make cmake bc\ + python3 python3-venv python3-pip \ + libfftw3-dev \ + openmpi-bin libopenmpi-dev; \ + fi && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +ENV OMPI_ALLOW_RUN_AS_ROOT=1 +ENV OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 +ENV PATH="/opt/MFC:$PATH" + +COPY ../ /opt/MFC + +ENV CC=${CC_COMPILER} +ENV CXX=${CXX_COMPILER} +ENV FC=${FC_COMPILER} +ENV PATH="${COMPILER_PATH}:$PATH" +ENV LD_LIBRARY_PATH="${COMPILER_LD_LIBRARY_PATH}:${LD_LIBRARY_PATH:-}" + +RUN echo "TARGET=$TARGET CC=$CC_COMPILER FC=$FC_COMPILER" && \ + cd /opt/MFC && \ + if [ "$TARGET" = "gpu" ]; then \ + ./mfc.sh build --gpu -j $(nproc); \ + else \ + ./mfc.sh build -j $(nproc); \ + fi + +RUN cd /opt/MFC && \ + if [ "$TARGET" = "gpu" ]; then \ + ./mfc.sh test -a --dry-run --gpu -j $(nproc); \ + else \ + ./mfc.sh test -a --dry-run -j $(nproc); \ + fi + +WORKDIR /opt/MFC +ENTRYPOINT ["tail", "-f", "/dev/null"] \ No newline at end of file diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml new file mode 100644 index 0000000000..b12c6cdc5f --- /dev/null +++ b/.github/workflows/docker.yml @@ -0,0 +1,132 @@ +name: Containerization + +on: + release: + types: [published] + workflow_dispatch: + inputs: + tag: + description: 'tag to containerize' + required: true + +concurrency: + group: Containerization + cancel-in-progress: false + +jobs: + Container: + strategy: + matrix: + config: + - { name: 'cpu', runner: 'ubuntu-22.04', base_image: 'ubuntu:22.04' } + - { name: 'gpu', runner: 'ubuntu-22.04', base_image: 'nvcr.io/nvidia/nvhpc:23.11-devel-cuda_multi-ubuntu22.04' } + - { name: 'gpu', runner: 'ubuntu-22.04-arm', base_image: 
'nvcr.io/nvidia/nvhpc:23.11-devel-cuda_multi-ubuntu22.04' } + runs-on: ${{ matrix.config.runner }} + outputs: + tag: ${{ steps.clone.outputs.tag }} + steps: + - name: Free Disk Space + uses: jlumbroso/free-disk-space@main + with: + tool-cache: false + android: true + dotnet: true + haskell: true + large-packages: true + docker-images: true + swap-storage: true + + - name: Login + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Setup Buildx + uses: docker/setup-buildx-action@v3 + + - name: Setup QEMU + uses: docker/setup-qemu-action@v3 + + - name: Clone + id: clone + run: | + TAG="${{ github.event.inputs.tag || github.ref_name }}" + echo "tag=$TAG" >> $GITHUB_OUTPUT + echo "TAG=$TAG" >> $GITHUB_ENV + git clone --branch "$TAG" --depth 1 https://github.com/MFlowCode/MFC.git mfc + + - name: Stage + run: | + sudo fallocate -l 8G /swapfile + sudo chmod 600 /swapfile + sudo mkswap /swapfile + sudo swapon /swapfile + sudo mkdir -p /home/runner/tmp + export TMPDIR=/home/runner/tmp + free -h + sudo mkdir -p /mnt/share + sudo chmod 777 /mnt/share + cp -r mfc/* /mnt/share/ + cp -r mfc/.git /mnt/share/.git + cp mfc/.github/Dockerfile /mnt/share/ + cp mfc/.github/.dockerignore /mnt/share/ + docker buildx create --name mfcbuilder --driver docker-container --use + + - name: Build and push image (cpu) + if: ${{ matrix.config.name == 'cpu' }} + uses: docker/build-push-action@v6 + with: + builder: mfcbuilder + context: /mnt/share + file: /mnt/share/Dockerfile + platforms: linux/amd64,linux/arm64 + build-args: | + BASE_IMAGE=${{ matrix.config.base_image }} + TARGET=${{ matrix.config.name }} + CC_COMPILER=${{ 'gcc' }} + CXX_COMPILER=${{ 'g++' }} + FC_COMPILER=${{ 'gfortran' }} + COMPILER_PATH=${{ '/usr/bin' }} + COMPILER_LD_LIBRARY_PATH=${{ '/usr/lib' }} + tags: ${{ secrets.DOCKERHUB_USERNAME }}/mfc:${{ env.TAG }}-${{ matrix.config.name }} + push: true + + - name: Build and push image (gpu) + 
if: ${{ matrix.config.name == 'gpu' }} + uses: docker/build-push-action@v5 + with: + builder: default + context: /mnt/share + file: /mnt/share/Dockerfile + build-args: | + BASE_IMAGE=${{ matrix.config.base_image }} + TARGET=${{ matrix.config.name }} + CC_COMPILER=${{ 'nvc' }} + CXX_COMPILER=${{ 'nvc++' }} + FC_COMPILER=${{ 'nvfortran' }} + COMPILER_PATH=${{ '/opt/nvidia/hpc_sdk/Linux_x86_64/compilers/bin' }} + COMPILER_LD_LIBRARY_PATH=${{ '/opt/nvidia/hpc_sdk/Linux_x86_64/compilers/lib' }} + tags: ${{ secrets.DOCKERHUB_USERNAME }}/mfc:${{ env.TAG }}-${{ matrix.config.name }}-${{ matrix.config.runner}} + push: true + + manifests: + runs-on: ubuntu-latest + needs: Container + steps: + - name: Login + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_PASSWORD }} + + - name: Create and Push Manifest Lists + env: + TAG: ${{ needs.Container.outputs.tag }} + REGISTRY: ${{ secrets.DOCKERHUB_USERNAME }}/mfc + run: | + docker buildx imagetools create -t $REGISTRY:latest-cpu $REGISTRY:$TAG-cpu + docker manifest create $REGISTRY:$TAG-gpu $REGISTRY:$TAG-gpu-ubuntu-22.04 $REGISTRY:$TAG-gpu-ubuntu-22.04-arm + docker manifest create $REGISTRY:latest-gpu $REGISTRY:$TAG-gpu-ubuntu-22.04 $REGISTRY:$TAG-gpu-ubuntu-22.04-arm + docker manifest push $REGISTRY:$TAG-gpu + docker manifest push $REGISTRY:latest-gpu \ No newline at end of file diff --git a/README.md b/README.md index 196315ee20..3c77b7f317 100644 --- a/README.md +++ b/README.md @@ -27,8 +27,9 @@ **Welcome!** MFC simulates compressible multi-phase flows, [among other things](#what-else-can-this-thing-do). -It uses metaprogramming to stay short and portable (~20K lines). -MFC conducted the largest known, open CFD simulation at 200 trillion grid points, and 1 quadrillion degrees of freedom (as of September 2025), and is a 2025 Gordon Bell Prize finalist. +It uses metaprogramming and is short (20K lines) and portable. 
+MFC conducted the largest known CFD simulation at 200 trillion grid points, and 1 quadrillion degrees of freedom (as of September 2025). +MFC is a 2025 Gordon Bell Prize Finalist.

@@ -76,7 +77,7 @@ This one simulates high-Mach flow over an airfoil: Airfoil Example

-And here is a high amplitude acoustic wave reflecting and emerging through a circular orifice: +And here is a high-amplitude acoustic wave reflecting and emerging through a circular orifice:

Orifice Example
@@ -85,15 +86,23 @@ And here is a high amplitude acoustic wave reflecting and emerging through a cir ## Getting started -You can navigate [to this webpage](https://mflowcode.github.io/documentation/md_getting-started.html) to get started using MFC! +For a _very_ quick start, open a GitHub Codespace to load a pre-configured Docker container and familiarize yourself with MFC commands. +Click <> Code (green button at top right) → Codespaces (right tab) → + (create a codespace). + +> **Note:** Codespaces is a free service with a monthly quota of compute time and storage usage. +> It is recommended for testing commands, troubleshooting, and running simple case files without installing dependencies or building MFC on your device. +> Don't conduct any critical work here! +> To learn more, please see [how Docker & Containers work](https://mflowcode.github.io/documentation/docker.html). + +You can navigate [to this webpage](https://mflowcode.github.io/documentation/md_getting-started.html) to get started using MFC on your local machine, cluster, or supercomputer! It's rather straightforward. -We'll give a brief intro. here for MacOS. +We'll give a brief introduction for MacOS below. Using [brew](https://brew.sh), install MFC's dependencies: ```shell brew install coreutils python cmake fftw hdf5 gcc boost open-mpi lapack ``` You're now ready to build and test MFC! -Put it to a convenient directory via +Put it in a local directory via ```shell git clone https://github.com/MFlowCode/MFC cd MFC @@ -123,17 +132,14 @@ You can visualize the output data in `examples/3d_shockdroplet/silo_hdf5` via Pa ## Is this _really_ exascale? [OLCF Frontier](https://www.olcf.ornl.gov/frontier/) is the first exascale supercomputer. -The weak scaling of MFC on this machine shows near-ideal utilization. +The weak scaling of MFC on this machine shows near-ideal utilization. +We also scale ideally to >98% of LLNL El Capitan.

Scaling

- -## What else can this thing do - -MFC has many features. -They are organized below. +## What else can this thing do? ### Physics @@ -209,7 +215,7 @@ They are organized below. If you use MFC, consider citing it as below. Ref. 1 includes all modern MFC features, including GPU acceleration and many new physics features. -If referencing MFC's (GPU) performance, consider citing ref. 1 and 2, which describe the solver and how it was crafted. +If referencing MFC's (GPU) performance, consider citing ref. 1 and 2, which describe the solver and its design. The original open-source release of MFC is ref. 3, which should be cited for provenance as appropriate. ```bibtex @@ -249,11 +255,11 @@ MFC is under the MIT license (see [LICENSE](LICENSE) for full text). ## Acknowledgements -Federal sponsors have supported MFC development, including the US Department of Defense (DOD), the National Institutes of Health (NIH), the Department of Energy (DOE), and the National Science Foundation (NSF). +Federal sponsors have supported MFC development, including the US Department of Defense (DOD), the National Institutes of Health (NIH), the Department of Energy (DOE) and National Nuclear Security Administration (NNSA), and the National Science Foundation (NSF). MFC computations have used many supercomputing systems. A partial list is below - * OLCF Frontier and Summit, and testbeds Wombat, Crusher, and Spock (allocation CFD154, PI Bryngelson) - * LLNL El Capitan, Tuolumne, and Lassen; El Capitan early access system Tioga + * OLCF Frontier and Summit, and testbeds Wombat, Crusher, and Spock (allocation CFD154, PI Bryngelson). + * LLNL El Capitan, Tuolumne, and Lassen; El Capitan early access system Tioga. * NCSA Delta and DeltaAI, PSC Bridges(1/2), SDSC Comet and Expanse, Purdue Anvil, TACC Stampede(1-3), and TAMU ACES via ACCESS-CI allocations from Bryngelson, Colonius, Rodriguez, and more. 
- * DOD systems Blueback, Onyx, Carpenter, Nautilus, and Narwhal via the DOD HPCMP program - * Sandia National Labs systems Doom and Attaway and testbed systems Weaver and Vortex + * DOD systems Blueback, Onyx, Carpenter, Nautilus, and Narwhal via the DOD HPCMP program. + * Sandia National Labs systems Doom and Attaway, and testbed systems Weaver and Vortex. diff --git a/docs/documentation/docker.md b/docs/documentation/docker.md new file mode 100644 index 0000000000..168a1b8ee6 --- /dev/null +++ b/docs/documentation/docker.md @@ -0,0 +1,188 @@ +# Containers + +## Navigating Docker + +Install Docker on +* [MacOS](https://docs.docker.com/desktop/setup/install/mac-install/) +* [Windows](https://docs.docker.com/desktop/setup/install/windows-install/) +* [Linux](https://docs.docker.com/desktop/setup/install/linux/) + +### Docker Desktop GUI + +- Search for the [sbryngelson/mfc](https://hub.docker.com/r/sbryngelson/mfc) repository. All the MFC containers are stored here. + +- Find and pull a release tag (e.g., `latest-cpu`). + + * Read through the **Tag Details** below to distinguish between them. Docker Desktop's left sidebar has two key tabs: **Images** stores your program copies, and **Containers** shows instances of those images. You can launch multiple containers from a single image. + +- Start a container by navigating to the `Images` tab and clicking the `Run` button. + + * Use the *Exec* section to interact with MFC directly via terminal, the *Files* section to transfer files between your device and container, and the *Stats* section to display resource usage. + +### Docker CLI + +You can navigate Docker entirely from the command line. 
+From a bash-like shell, pull from the [sbryngelson/mfc](https://hub.docker.com/r/sbryngelson/mfc) repository and run the latest MFC container: +```bash +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` + +**Selecting OS/ARCH:** Docker selects the compatible architecture by default when pulling and running a container. +You can manually specify your platform if something seems to go wrong, as Docker may suggest doing so. +For example, `linux/amd64` handles many *nix-based x86 architectures, and `linux/arm64` handles Apple Silicon and Arm-based *nix devices. +You can specify it like this: +```bash +docker run -it --rm --entrypoint bash --platform linux/amd64 sbryngelson/mfc:latest-cpu +``` + +**What's Next?** + +Once a container has started, the primary working directory is `/opt/MFC`, and all necessary files are located there. +You can check out the usual MFC documentation, such as the [Example Cases](https://mflowcode.github.io/documentation/md_examples.html), to get familiar with running cases. +Then, review the [Case Files](https://mflowcode.github.io/documentation/md_case.html) to write a custom case file. + +## Details on Running Containers + +Let's take a closer look at running MFC within a container. +Kick off a CPU container: +```bash +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` +Or, start a GPU container. +```bash +docker run -it --rm --gpus all --entrypoint bash sbryngelson/mfc:latest-gpu +``` + +**Note:** `--gpus all` exposes the container to available GPUs, and _only NVIDIA GPUs are currently supported_. +[Ensure your device's CUDA version is at least 12.3](https://stackoverflow.com/questions/9727688/how-to-get-the-cuda-version) to avoid backward compatibility issues. + +**Mounting Directory** + +Mount a directory to `mnt` inside the container to easily transfer files between your host computer and the separate container. 
+For example, `cp -r <source> /mnt/<destination>` moves something from your source computer to the container (reverse the order for the reverse to happen!). +```bash +docker run -it --rm --entrypoint bash -v "$PWD":/mnt sbryngelson/mfc:latest-cpu +``` + +**Shared Memory** + +If you run a job with multiple MPI ranks, you could run into _MPI memory binding errors_. +This can manifest as a failed test (launched via `./mfc.sh test`) and running cases with `./mfc.sh run <case> -n X` where `X > 1`. +To avoid this issue, you can increase the shared memory size (to keep MPI working): +```bash +docker run -it --rm --entrypoint bash --shm-size=<size> sbryngelson/mfc:latest-cpu +``` +or avoid MPI altogether via `./mfc.sh build --no-mpi`. + + +### Portability + +On the source machine, pull and save the image: +```bash +docker pull sbryngelson/mfc:latest-cpu +docker save -o mfc:latest-cpu.tar sbryngelson/mfc:latest-cpu +``` +On the target machine, load and run the image: +```bash +docker load -i mfc:latest-cpu.tar +docker run -it --rm mfc:latest-cpu +``` + +## Using Supercomputers/Clusters via Apptainer/Singularity + +### Interactive Shell + +```bash +apptainer shell --nv --fakeroot --writable-tmpfs --bind "$PWD":/mnt docker://sbryngelson/mfc:latest-gpu +Apptainer> cd /opt/MFC +``` +or +```bash +apptainer exec --nv --fakeroot --writable-tmpfs --bind "$PWD":/mnt docker://sbryngelson/mfc:latest-gpu bash -c "cd /opt/MFC && bash" +``` +To run MFC on CPUs, omit `--nv` and use the `mfc:latest-cpu` container image. + +### For Portability + +On the source machine, pull and translate the image into `.sif` format: +```bash +apptainer build mfc:latest-gpu.sif docker://sbryngelson/mfc:latest-gpu +``` +On the target machine, load and start an interactive shell: +```bash +apptainer shell --nv --fakeroot --writable-tmpfs --bind "$PWD":/mnt mfc:latest-gpu.sif +``` + +### Slurm Job + +Below is an example Slurm batch job script. +Refer to your machine's user guide for instructions on properly loading and using Apptainer. 
+```bash +#!/bin/bash +#SBATCH --job-name=mfc-sim +#SBATCH --nodes=2 +#SBATCH --ntasks-per-node=12 +#SBATCH --time=06:00:00 +#SBATCH --partition=<partition> +#SBATCH --output=mfc-sim-%j.out +#SBATCH --error=mfc-sim-%j.err + +cd $SLURM_SUBMIT_DIR + +# Define container image +CONTAINER="mfc:latest-gpu.sif" + +apptainer exec --nv --fakeroot --writable-tmpfs \ +--bind "$PWD":/mnt \ + $CONTAINER \ + bash -c "cd /opt/MFC && ./mfc.sh run sim/case.py -- -c <computer>" +``` + +In the above, +* The `/sim` directory should have all the simulation files, including the case setup (`case.py`). +* The `--nv --fakeroot --writable-tmpfs` set of flags is needed to + - Grant access to the host system's NVIDIA GPUs and its CUDA libraries. + - Enable root-like permissions inside the container without actual root access. + - Allow temporary write access to the container filesystem. + +## Tag Details + +### Base Images +- CPU images (v4.3.0-latest releases) are built on **Ubuntu 22.04**. +- GPU images (v4.3.0-latest releases) are built on **NVHPC SDK 23.11 (CUDA 12.3) & Ubuntu 22.04**. + +### Tag Structure +- **`vx.x.x`** - Official MFC release versions (recommended: use `latest` release) +- **`cpu/gpu`** - Build configurations for CPU or GPU acceleration. +- **`ubuntu-xx.xx`** - Base Ubuntu version (standard = `amd64`, `-arm` = `arm64`) + +### Example Tags + +```shell +mfc:latest-xxx # Latest version (amd64 & arm64) +mfc:vx.x.x-cpu # CPU version (amd64 & arm64) +mfc:vx.x.x-gpu # GPU version (amd64 & arm64) +mfc:vx.x.x-xxx-ubuntu-xx.xx # amd64 natively-supported version +mfc:vx.x.x-xxx-ubuntu-xx.xx-arm # arm64 natively-supported version +``` + +### Architecture Support + +You can specify your architecture with `--platform <os>/<arch>`, typically either `linux/amd64` or `linux/arm64`. +If you are unsure, Docker automatically selects the compatible image with your system architecture. +If native support isn't available, QEMU emulation is enabled for the following architectures, albeit with degraded performance. 
+``` +linux/amd64 +linux/amd64/v2 +linux/amd64/v3 +linux/arm64 +linux/riscv64 +linux/ppc64le +linux/s390x +linux/386 +linux/mips64le +linux/mips64 +linux/loong64 +linux/arm/v7 +linux/arm/v6 +``` diff --git a/docs/documentation/getting-started.md b/docs/documentation/getting-started.md index 885c528cb9..9478a72ce2 100644 --- a/docs/documentation/getting-started.md +++ b/docs/documentation/getting-started.md @@ -12,14 +12,14 @@ cd MFC ## Build Environment MFC can be built in multiple ways on various operating systems. -Please select your desired configuration from the list bellow: +Please select your desired configuration from the list below:

*nix

- **On supported clusters:** Load environment modules ```shell -. ./mfc.sh load +source ./mfc.sh load ``` - **Via Aptitude:** @@ -34,8 +34,7 @@ sudo apt install tar wget make cmake gcc g++ \ libblas-dev liblapack-dev ``` -If you wish to build MFC using [NVIDIA's NVHPC SDK](https://developer.nvidia.com/hpc-sdk), -first follow the instructions [here](https://developer.nvidia.com/nvidia-hpc-sdk-downloads). +If you wish to build MFC using [NVIDIA's NVHPC SDK](https://developer.nvidia.com/hpc-sdk), first follow the instructions [here](https://developer.nvidia.com/nvidia-hpc-sdk-downloads).

Windows

@@ -116,12 +115,12 @@ MFC can be built with support for various (compile-time) features: | Feature | Enable | Disable | Default | Description | | :----------------: | :---------: | :------------: | :-----: | --------------------------------------------------------------- | -| **MPI** | `--mpi` | `--no-mpi` | On | Lets MFC run on multiple processors (and nodes) simultaneously. | +| **MPI** | `--mpi` | `--no-mpi` | On | Allows MFC to run on multiple processors (and nodes). | | **GPU** | `--gpu` | `--no-gpu` | Off | Enables GPU acceleration via OpenACC. | | **Debug** | `--debug` | `--no-debug` | Off | Requests the compiler build MFC in debug mode. | -| **GCov** | `--gcov` | `--no-gcov` | Off | Builds MFC with coverage flags on. | -| **Unified Memory** | `--unified` | `--no-unified` | Off | Builds MFC with unified CPU/GPU memory (GH-200 superchip only) | -| **Single** | `--single` | `--no-single` | Off | Builds MFC in single precision +| **GCov** | `--gcov` | `--no-gcov` | Off | Build MFC with coverage flags on. | +| **Unified Memory** | `--unified` | `--no-unified` | Off | Build MFC with unified CPU/GPU memory (GH200 superchip only) | +| **Single** | `--single` | `--no-single` | Off | Build MFC in single precision _⚠️ The `--gpu` option requires that your compiler supports OpenACC for Fortran for your target GPU architecture._ @@ -139,16 +138,23 @@ Most first-time users will want to build MFC using 8 threads (or more!) with MPI ./mfc.sh build -j 8 ``` -Examples: - +Some examples: - Build MFC using 8 threads with MPI and GPU acceleration: `./mfc.sh build --gpu -j 8`. - Build MFC using a single thread without MPI, GPU, and Debug support: `./mfc.sh build --no-mpi`. - Build MFC's `simulation` code in Debug mode with MPI and GPU support: `./mfc.sh build --debug --gpu -t simulation`. +## Using Containers + +Instead of building MFC from scratch, you can use containers to quickly access a pre-built version of MFC and its dependencies. 
+In brief, you can run the latest MFC container: +```bash +docker run -it --rm --entrypoint bash sbryngelson/mfc:latest-cpu +``` +Please refer to the [Docker](https://mflowcode.github.io/documentation/docker.html) document for more information. + ## Running the Test Suite Run MFC's test suite with 8 threads: - ```shell ./mfc.sh test -j 8 ```