-
Notifications
You must be signed in to change notification settings - Fork 27
Expand file tree
/
Copy pathgdal_batch_job_parallel.sh
More file actions
20 lines (17 loc) · 1.24 KB
/
gdal_batch_job_parallel.sh
File metadata and controls
20 lines (17 loc) · 1.24 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
#!/bin/bash
# SLURM batch job: run a GDAL helper script in parallel over DEM .tif tiles
# using GNU parallel, with one worker per allocated CPU.
# ToDo: change project name in the row below
#SBATCH --account=project_2015299       # Choose the project to be billed
#SBATCH --reservation=geocomputing_day1 # Only available during the course
#SBATCH --output=slurm-%j.out           # File to write the standard output to. %j is replaced by the job ID.
#SBATCH --error=slurm-%j.err            # File to write the standard error to. %j is replaced by the job ID. Defaults to slurm-%j.out if not provided.
#SBATCH --time=0:05:00                  # Maximum run time (h:mm:ss).
#SBATCH --partition=small               # Which queue to use. Defines maximum time, memory, tasks, nodes and local storage for job
#SBATCH --nodes=1                       # Number of compute nodes. Upper limit depends on partition.
#SBATCH --cpus-per-task=4               # How many processors work on one task. Upper limit depends on number of CPUs per node.
#SBATCH --mem-per-cpu=300               # Minimum memory required per usable allocated CPU. Default units are megabytes.

# Load geoconda module to have GDAL commandline tools available,
# and GNU parallel for the fan-out below.
module load parallel geoconda

# Find the files that have .tif ending; we do not want to process the
# .tif.aux.xml files in the same folders. Run the GDAL script for each of
# the found files, with as many simultaneous jobs as CPUs were allocated.
# -print0 / --null make the pipeline safe for file names containing
# spaces or other special characters.
find /appl/data/geo/mml/dem10m/2019/W3/W33 -name '*.tif' -print0 | \
  parallel --null -j "$SLURM_CPUS_PER_TASK" bash gdal_parallel.sh {}