Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 30 additions & 1 deletion benchmarks/multiplatform/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,39 @@ Alternatively you may open `iosApp/iosApp` project in XCode and run the app from
- `./gradlew :benchmarks:runReleaseExecutableMacosArm64` (Works on Arm64 processors)
- `./gradlew :benchmarks:runReleaseExecutableMacosX64` (Works on Intel processors)

## Run K/Wasm target in D8:
`./gradlew :benchmarks:wasmJsD8ProductionRun`

or with arguments:

`./gradlew :benchmarks:wasmJsD8ProductionRun -PrunArguments=benchmarks=AnimatedVisibility`

## To build and run a K/Wasm D8 distribution for Jetstream3-like runs:
`./gradlew :benchmarks:buildD8Distribution --rerun-tasks`

then in a distribution directory run using your D8 binary:

`~/.gradle/d8/v8-mac-arm64-rel-11.9.85/d8 --module launcher_jetstream3.mjs -- AnimatedVisibility 1000`

## Run in web browser:

Please run your browser with manual GC enabled before running the benchmark; for example, for Google Chrome:

`open -a Google\ Chrome --args --js-flags="--expose-gc"`

- `./gradlew :benchmarks:wasmJsBrowserProductionRun` (you can see the results printed on the page itself)
- `./gradlew clean :benchmarks:wasmJsBrowserProductionRun` (you can see the results printed on the page itself)


# Benchmarks description

| Benchmark Name | File Path | Description |
|------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------|
| AnimatedVisibility | [benchmarks/src/commonMain/kotlin/benchmarks/animation/AnimatedVisibility.kt](benchmarks/src/commonMain/kotlin/benchmarks/animation/AnimatedVisibility.kt) | Tests the performance of the AnimatedVisibility component by repeatedly toggling the visibility of a PNG image. |
| LazyGrid | [benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt](benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt) | Tests the performance of the LazyVerticalGrid component with 12,000 items and jumps to specific items multiple times while running. |
| LazyGrid-ItemLaunchedEffect | [benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt](benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt) | Same as LazyGrid but adds a LaunchedEffect in each grid item that simulates an async task. |
| LazyGrid-SmoothScroll | [benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt](benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt) | Same as LazyGrid but uses smooth scrolling instead of jumping to items. |
| LazyGrid-SmoothScroll-ItemLaunchedEffect | [benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt](benchmarks/src/commonMain/kotlin/benchmarks/lazygrid/LazyGrid.kt) | Combines smooth scrolling with LaunchedEffect in each item. |
| VisualEffects | [benchmarks/src/commonMain/kotlin/benchmarks/visualeffects/HappyNY.kt](benchmarks/src/commonMain/kotlin/benchmarks/visualeffects/HappyNY.kt) | Tests the performance of complex animations and visual effects including snow flakes, stars, and rocket particles. |
| LazyList | [benchmarks/src/commonMain/kotlin/benchmarks/complexlazylist/components/MainUI.kt](benchmarks/src/commonMain/kotlin/benchmarks/complexlazylist/components/MainUI.kt) | Tests the performance of a complex LazyColumn implementation with features like pull-to-refresh, loading more items, and continuous scrolling. |
| MultipleComponents                        | [benchmarks/src/commonMain/kotlin/benchmarks/multipleComponents/MultipleComponents.kt](benchmarks/src/commonMain/kotlin/benchmarks/multipleComponents/MultipleComponents.kt)                               | Tests the performance of a comprehensive UI that showcases various Compose components including layouts, animations, and styled text.            |
| MultipleComponents-NoVectorGraphics       | [benchmarks/src/commonMain/kotlin/benchmarks/multipleComponents/MultipleComponents.kt](benchmarks/src/commonMain/kotlin/benchmarks/multipleComponents/MultipleComponents.kt)                               | Same as MultipleComponents but skips the Composables with vector graphics rendering.                                                             |
50 changes: 49 additions & 1 deletion benchmarks/multiplatform/benchmarks/build.gradle.kts
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import org.jetbrains.kotlin.gradle.ExperimentalWasmDsl
import org.jetbrains.kotlin.gradle.targets.js.binaryen.BinaryenRootEnvSpec
import org.jetbrains.kotlin.gradle.targets.js.d8.D8Exec
import org.jetbrains.kotlin.gradle.targets.js.webpack.KotlinWebpack
import kotlin.text.replace

Expand Down Expand Up @@ -46,7 +48,14 @@ kotlin {
@OptIn(ExperimentalWasmDsl::class)
wasmJs {
binaries.executable()
browser ()
d8 {
compilerOptions.freeCompilerArgs.add("-Xwasm-attach-js-exception")
runTask {
// It aborts even on coroutine cancellation exceptions:
// d8Args.add("--abort-on-uncaught-exception")
}
}
browser()
}

sourceSets {
Expand Down Expand Up @@ -113,4 +122,43 @@ gradle.taskGraph.whenReady {
open = "http://localhost:8080?$args"
)
}

@OptIn(ExperimentalWasmDsl::class)
tasks.withType<D8Exec>().configureEach {
inputFileProperty.set(rootProject.layout.buildDirectory.file(
"js/packages/compose-benchmarks-benchmarks-wasm-js/kotlin/launcher.mjs")
)

args(appArgs)
}
}


tasks.register("buildD8Distribution", Zip::class.java) {
dependsOn("wasmJsProductionExecutableCompileSync")
from(rootProject.layout.buildDirectory.file("js/packages/compose-benchmarks-benchmarks-wasm-js/kotlin"))
archiveFileName.set("d8-distribution.zip")
destinationDirectory.set(rootProject.layout.buildDirectory.dir("distributions"))
}

tasks.withType<org.jetbrains.kotlin.gradle.targets.js.binaryen.BinaryenExec>().configureEach {
binaryenArgs.add("-g") // keep the readable names
}

@OptIn(ExperimentalWasmDsl::class)
rootProject.the<BinaryenRootEnvSpec>().apply {
// version = "122" // change only if needed
}

val jsOrWasmRegex = Regex("js|wasm")

configurations.all {
resolutionStrategy.eachDependency {
if (requested.group.startsWith("org.jetbrains.skiko") &&
jsOrWasmRegex.containsMatchIn(requested.name)
) {
// to keep the readable names from Skiko
useVersion(requested.version!! + "+profiling")
}
}
}
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

This file was deleted.

125 changes: 103 additions & 22 deletions benchmarks/multiplatform/benchmarks/src/commonMain/kotlin/Args.kt
Original file line number Diff line number Diff line change
Expand Up @@ -4,18 +4,6 @@ enum class Mode {
}

object Args {
private val modes = mutableSetOf<Mode>()

private val benchmarks = mutableMapOf<String, Int>()

var versionInfo: String? = null
private set

var saveStatsToCSV: Boolean = false
private set

var saveStatsToJSON: Boolean = false
private set

private fun argToSet(arg: String): Set<String> = arg.substring(arg.indexOf('=') + 1)
.split(",").filter{!it.isEmpty()}.map{it.uppercase()}.toSet()
Expand All @@ -30,14 +18,26 @@ object Args {
}
}

private fun String.decodeArg() = replace("%20", " ")

/**
* Parses command line arguments to determine modes and benchmarks settings.
* Parses command line arguments to create a [Config] for benchmarks run.
*
* @param args an array of strings representing the command line arguments.
* Each argument can specify either "modes" or "benchmarks" settings,
* with values separated by commas.
* Each argument can specify either of these settings:
* modes, benchmarks, disabledBenchmarks - comma separated values,
* versionInfo, saveStatsToCSV, saveStatsToJSON - single values.
*
* Example: benchmarks=AnimatedVisibility(100),modes=SIMPLE,versionInfo=Kotlin_2_1_20,saveStatsToCSV=true
*/
fun parseArgs(args: Array<String>) {
fun parseArgs(args: Array<String>): Config {
val modes = mutableSetOf<Mode>()
val benchmarks = mutableMapOf<String, Int>()
val disabledBenchmarks = mutableSetOf<String>()
var versionInfo: String? = null
var saveStatsToCSV: Boolean = false
var saveStatsToJSON: Boolean = false

for (arg in args) {
if (arg.startsWith("modes=", ignoreCase = true)) {
modes.addAll(argToSet(arg.decodeArg()).map { Mode.valueOf(it) })
Expand All @@ -49,18 +49,99 @@ object Args {
saveStatsToCSV = arg.substringAfter("=").toBoolean()
} else if (arg.startsWith("saveStatsToJSON=", ignoreCase = true)) {
saveStatsToJSON = arg.substringAfter("=").toBoolean()
} else if (arg.startsWith("disabledBenchmarks=", ignoreCase = true)) {
disabledBenchmarks += argToMap(arg.decodeArg()).keys
}
}
}

private fun String.decodeArg() = replace("%20", " ")
return Config(
modes = modes,
benchmarks = benchmarks,
disabledBenchmarks = disabledBenchmarks,
versionInfo = versionInfo,
saveStatsToCSV = saveStatsToCSV,
saveStatsToJSON = saveStatsToJSON
)
}
}

/**
* Represents the benchmarks configuration parsed from command line arguments or configured programmatically.
*
* @property modes The set of enabled execution modes. If empty, all modes are considered enabled by default checks.
* @property benchmarks A map of explicitly mentioned benchmarks to their specific problem sizes.
* A value of -1 indicates the benchmark is enabled but should use its default size.
* If the map is empty, all benchmarks are considered enabled by default checks.
* @property disabledBenchmarks A set of benchmarks to skip.
* @property versionInfo Optional string containing version information.
* @property saveStatsToCSV Flag indicating whether statistics should be saved to a CSV file.
* @property saveStatsToJSON Flag indicating whether statistics should be saved to a JSON file.
*/
data class Config(
val modes: Set<Mode> = emptySet(),
val benchmarks: Map<String, Int> = emptyMap(), // Name -> Problem Size (-1 for default)
val disabledBenchmarks: Set<String> = emptySet(),
val versionInfo: String? = null,
val saveStatsToCSV: Boolean = false,
val saveStatsToJSON: Boolean = false
) {
/**
* Checks if a specific mode is enabled based on the configuration.
* A mode is considered enabled if no modes were specified (default) or if it's explicitly listed.
*/
fun isModeEnabled(mode: Mode): Boolean = modes.isEmpty() || modes.contains(mode)

fun isBenchmarkEnabled(benchmark: String): Boolean = benchmarks.isEmpty() || benchmarks.contains(benchmark.uppercase())
/**
* Checks if a specific benchmark is enabled
*/
fun isBenchmarkEnabled(benchmark: String): Boolean {
val normalizedName = benchmark.uppercase()
// Enabled if the benchmarks map is empty OR if the specific benchmark is present
return (benchmarks.isEmpty() || benchmarks.containsKey(normalizedName))
&& !disabledBenchmarks.contains(normalizedName)
&& !disabledBenchmarks.contains(benchmark)
}

/**
* Returns the problem size configured for [benchmark], or [default] if not set.
*
* @param benchmark Benchmark name (case-insensitive).
* @param default Fallback size when no configuration is found.
* @return The problem size to use.
*/
fun getBenchmarkProblemSize(benchmark: String, default: Int): Int {
val result = benchmarks[benchmark.uppercase()]?: -1
return if (result == -1) default else result
val normalizedName = benchmark.uppercase()
val problemSize = benchmarks[normalizedName] ?: -1
return if (problemSize == -1) default else problemSize
}
}

companion object {
private var global: Config = Config()

val versionInfo: String?
get() = global.versionInfo

val saveStatsToCSV: Boolean
get() = global.saveStatsToCSV

val saveStatsToJSON: Boolean
get() = global.saveStatsToJSON

fun setGlobal(global: Config) {
this.global = global
}

fun setGlobalFromArgs(args: Array<String>) {
this.global = Args.parseArgs(args)
}

fun isModeEnabled(mode: Mode): Boolean =
global.isModeEnabled(mode)

fun isBenchmarkEnabled(benchmark: String): Boolean =
global.isBenchmarkEnabled(benchmark)

fun getBenchmarkProblemSize(benchmark: String, default: Int): Int =
global.getBenchmarkProblemSize(benchmark, default)
}
}
Loading