|
| 1 | +import math |
| 2 | + |
| 3 | +import reframe as rfm |
| 4 | +import reframe.utility.sanity as sn |
| 5 | +from reframe.core.launchers.registry import getlauncher |
| 6 | + |
| 7 | + |
@rfm.simple_test
class SparkAnalyticsCheck(rfm.RunOnlyRegressionTest):
    """Estimate pi with Spark's bundled ``SparkPi`` example.

    The test starts a standalone Spark cluster on the allocated nodes
    (``start-all.sh``), submits the ``SparkPi`` job through
    ``spark-submit`` and checks that the value reported on stdout is
    within 0.01 of ``math.pi``.
    """

    def __init__(self):
        super().__init__()
        self.descr = 'Simple calculation of pi with Spark'
        self.valid_systems = ['daint:gpu', 'daint:mc',
                              'dom:gpu', 'dom:mc']
        self.valid_prog_environs = ['PrgEnv-cray']
        self.modules = ['Spark']
        # Nothing to stage: the example jar ships with the Spark module.
        self.sourcesdir = None
        self.num_tasks = 2
        self.num_tasks_per_node = 1
        self.variables = {
            'SPARK_WORKER_CORES': '36',
            'SPARK_LOCAL_DIRS': '"/tmp"',
        }
        # `SPARK_CONF` needs to be defined after running `start-all.sh`.
        spark_conf = ' '.join([
            'SPARK_CONF="--conf spark.default.parallelism=10',
            '--conf spark.executor.cores=8',
            '--conf spark.executor.memory=15g"',
        ])
        self.pre_run = ['start-all.sh', spark_conf]
        self.executable = ' '.join([
            'spark-submit ${SPARK_CONF} --master $SPARKURL',
            '--class org.apache.spark.examples.SparkPi',
            '$EBROOTSPARK/examples/jars/spark-examples_2.11-2.3.1.jar 10000;',
        ])
        self.post_run = ['stop-all.sh']
        # Sanity: the reported estimate must be close to the true pi.
        pi_value = sn.extractsingle(r'Pi is roughly\s+(?P<pi>\S+)',
                                    self.stdout, 'pi', float)
        self.sanity_patterns = sn.assert_lt(sn.abs(pi_value - math.pi), 0.01)
        self.maintainers = ['TM', 'TR']
        self.tags = {'production'}

    def setup(self, partition, environ, **job_opts):
        """Set up the job, then force the local launcher.

        The job launcher has to be changed since the `start_analytics`
        script is not used with srun.
        """
        super().setup(partition, environ, **job_opts)
        self.job.launcher = getlauncher('local')()
0 commit comments