Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 12 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ Options
--debug, -d Run a development build instead of a production build to aid debugging.
--devtools, -t Run Chrome in windowed mode with the devtools open.
--cpuThrottle=X Run Chrome with CPU throttled X times.
--ram, -r Also measures RAM consumption.
--version Prints the version.
--help Prints this message.

Expand Down Expand Up @@ -77,7 +78,7 @@ Path to the benchmark file to run. See the [Usage](#usage) section for more deta
#### options

Type: `Object`
Default: `{ debug: false, devtools: false, cpuThrottle: 1 }`
Default: `{ debug: false, devtools: false, cpuThrottle: 1, isRamMeasured: false }`

Optional object containing additional options.

Expand All @@ -102,6 +103,16 @@ Default: `1`

Run Chrome with CPU throttled X times. Useful to receive more precise results between runs.

##### isRamMeasured

Type: `boolean`<br>
Default: `false`

If `true`, RAM measurement is enabled. In this case, two metrics are recorded between the runs:

- Heap size (`JSHeapUsedSize`) represents how much RAM was consumed at the end of a test iteration.
- `Object.prototype` represents how many objects were created in RAM at the end of a test iteration. **Why is it interesting?** The JS engine sometimes optimizes code in different ways, which in turn changes its memory footprint. [source / see the "Counting all the objects" section](https://media-codings.com/articles/automatically-detect-memory-leaks-with-puppeteer)

### Events

#### webpack
Expand Down
52 changes: 46 additions & 6 deletions lib/chrome.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,17 +2,39 @@
const EventEmitter = require('events')
const puppeteer = require('puppeteer')

/**
 * Counts the live objects on the page whose prototype chain includes
 * `Object.prototype` — a proxy for the page's in-memory object count.
 * https://media-codings.com/articles/automatically-detect-memory-leaks-with-puppeteer
 * @param {Object} page Puppeteer page to inspect.
 * @returns {Promise<number>} Number of live objects found.
 */
const countObjects = async (page) => {
  // Grab a handle on Object.prototype inside the page context...
  const protoHandle = await page.evaluateHandle(() => Object.prototype)
  // ...then query every object reachable from that prototype.
  const objectsHandle = await page.queryObjects(protoHandle)
  const count = await page.evaluate(
    (instances) => instances.length,
    objectsHandle
  )

  // Release both handles so the measurement itself doesn't leak memory.
  await protoHandle.dispose()
  await objectsHandle.dispose()

  return count
}

module.exports = class Chrome extends EventEmitter {
constructor() {
super()

this.chrome = null
}

async start(port, devtools, { cpuThrottle }) {
async start(port, devtools, { cpuThrottle, isRamMeasured }) {
let completed = false

this.chrome = await puppeteer.launch({ devtools })
const chromeArgs = []
if (isRamMeasured) {
chromeArgs.push('--js-flags=--expose-gc')
}
this.chrome = await puppeteer.launch({ devtools, args: chromeArgs })
const heapSizeMeasurements = []
const objectCountMeasurements = []
const page = await this.chrome.newPage()
const client = await page.target().createCDPSession()

Expand Down Expand Up @@ -57,15 +79,33 @@ module.exports = class Chrome extends EventEmitter {
this.emit('error', error)
})

page.exposeFunction('benchmarkProgress', (data) => {
page.exposeFunction('benchmarkProgress', async (data) => {
const benchmark = JSON.parse(data)
this.emit('progress', benchmark)
if (isRamMeasured) {
// eslint-disable-next-line no-undef
await page.evaluate(() => gc())
const { JSHeapUsedSize } = await page.metrics()
heapSizeMeasurements.push(JSHeapUsedSize)
const n = await countObjects(page)
objectCountMeasurements.push(n)
}
this.emit(
'progress',
benchmark,
heapSizeMeasurements,
objectCountMeasurements
)
})

page.exposeFunction('benchmarkComplete', (data) => {
const benchmark = JSON.parse(data)
completed = true
this.emit('complete', benchmark)
this.emit(
'complete',
benchmark,
heapSizeMeasurements,
objectCountMeasurements
)
})

this.emit('start')
Expand Down
15 changes: 11 additions & 4 deletions lib/cli.js
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@ Options
--debug, -d Run a development build instead of a production build to aid debugging.
--devtools, -t Run Chrome in windowed mode with the devtools open.
--cpuThrottle=X Run Chrome with CPU throttled X times.
--ram, -r Also measures RAM consumption.
--version Prints the version.
--help Prints this message.

Expand All @@ -37,6 +38,11 @@ Examples
type: 'number',
default: 1,
},
ram: {
type: 'boolean',
default: false,
alias: 'r',
},
},
})

Expand All @@ -49,7 +55,7 @@ async function main() {
}

const [filepath] = cli.input
const { debug, devtools, cpuThrottle } = cli.flags
const { debug, devtools, cpuThrottle, ram } = cli.flags

spinner = ora().start()

Expand All @@ -72,8 +78,8 @@ async function main() {
spinner.text = 'Starting benchmark '
})

reactBenchmark.on('progress', (benchmark) => {
spinner.text = formatBenchmark(benchmark)
reactBenchmark.on('progress', (...a) => {
spinner.text = formatBenchmark(...a)
})

reactBenchmark.on('console', (log) => {
Expand All @@ -87,10 +93,11 @@ async function main() {
debug,
devtools,
cpuThrottle,
isRamMeasured: ram,
})

spinner.stop()
console.log(formatBenchmark(result))
console.log(formatBenchmark(...result))
}

main().catch((error) => {
Expand Down
98 changes: 96 additions & 2 deletions lib/format-benchmark.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,105 @@
const humanizeNumber = require('humanize-number')
const pluralize = require('pluralize')

module.exports = (benchmark) => {
/**
 * Computes the arithmetic mean of a sample.
 * https://github.com/bestiejs/benchmark.js/blob/42f3b732bac3640eddb3ae5f50e445f3141016fd/benchmark.js
 * @private
 * @param {Array} sample The sample.
 * @returns {number} The mean, or `0` for an empty/missing sample
 *   (the original returned `0 / 0 === NaN` in that case).
 */
function getMean(sample = []) {
  if (sample.length === 0) return 0
  return sample.reduce((sum, x) => sum + x, 0) / sample.length
}

/**
 * Formats a byte count as a human-readable size, e.g. `1536` -> `'2 KB'`.
 * @private
 * @param {number} bytes Non-negative number of bytes.
 * @returns {string} Value rounded to the nearest integer plus a unit suffix.
 */
function bytesToSize(bytes) {
  const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB']
  if (bytes === 0) return '0 Byte'
  // Clamp so inputs >= 1024^5 still map to the largest unit we know (TB)
  // instead of indexing past the end of `sizes`.
  const i = Math.min(
    Math.floor(Math.log(bytes) / Math.log(1024)),
    sizes.length - 1
  )
  // Note: Math.round takes a single argument — the original passed a bogus
  // second argument (`, 2`) that was silently ignored, so output has always
  // been rounded to an integer. That behavior is kept.
  return Math.round(bytes / Math.pow(1024, i)) + ' ' + sizes[i]
}

/**
 * T-Distribution two-tailed critical values for 95% confidence.
 * Keys are degrees of freedom; anything above 30 falls back to `infinity`.
 * For more info see http://www.itl.nist.gov/div898/handbook/eda/section3/eda3672.htm.
 */
const tTable = {
  1: 12.706,
  2: 4.303,
  3: 3.182,
  4: 2.776,
  5: 2.571,
  6: 2.447,
  7: 2.365,
  8: 2.306,
  9: 2.262,
  10: 2.228,
  11: 2.201,
  12: 2.179,
  13: 2.16,
  14: 2.145,
  15: 2.131,
  16: 2.12,
  17: 2.11,
  18: 2.101,
  19: 2.093,
  20: 2.086,
  21: 2.08,
  22: 2.074,
  23: 2.069,
  24: 2.064,
  25: 2.06,
  26: 2.056,
  27: 2.052,
  28: 2.048,
  29: 2.045,
  30: 2.042,
  infinity: 1.96,
}

/**
 * Computes the relative margin of error (in percent) of a sample's mean.
 * https://github.com/bestiejs/benchmark.js/blob/42f3b732bac3640eddb3ae5f50e445f3141016fd/benchmark.js
 * @private
 * @param {Array} sample The sample.
 * @param {number} mean The sample's arithmetic mean.
 * @returns {number} The relative margin of error as a percentage.
 */
function getRme(sample, mean) {
  const degreesOfFreedom = sample.length - 1
  // Sum of squared deviations from the mean.
  let squaredDeviations = 0
  for (const value of sample) {
    squaredDeviations += Math.pow(value - mean, 2)
  }
  // Sample variance (estimate of the population variance); `|| 0` guards the
  // single-element case, where dividing by zero degrees of freedom yields NaN.
  const variance = squaredDeviations / degreesOfFreedom || 0
  // Standard error of the mean: sample standard deviation over sqrt(n).
  const standardError = Math.sqrt(variance) / Math.sqrt(sample.length)
  // Critical t value for these degrees of freedom (fall back to the
  // large-sample value when df exceeds the table).
  const critical = tTable[Math.round(degreesOfFreedom) || 1] || tTable.infinity
  // Margin of error, then expressed relative to the mean as a percentage.
  const marginOfError = standardError * critical
  return (marginOfError / mean) * 100 || 0
}

module.exports = (benchmark, heapSizeMeasurements, objectCountMeasurements) => {
const ops = benchmark.hz // Can be null on the first run if it executes really quickly
? humanizeNumber(benchmark.hz.toFixed(benchmark.hz < 100 ? 2 : 0))
: 0
const marginOfError = benchmark.stats.rme.toFixed(2)
const runs = pluralize('run', benchmark.stats.sample.length, true)
return `${ops} ops/sec ±${marginOfError}% (${runs} sampled)`
let s = `${runs} sampled: ${ops} ops/sec ±${marginOfError}%`
if (heapSizeMeasurements && heapSizeMeasurements.length) {
const averageRam = getMean(heapSizeMeasurements)
const ramMarginOfError = getRme(heapSizeMeasurements, averageRam).toFixed(2)
s += ` / RAM: ${bytesToSize(averageRam)} ±${ramMarginOfError}%`
}
if (objectCountMeasurements && objectCountMeasurements.length) {
const averageObjectsCount = getMean(objectCountMeasurements)
const objectsCountMarginOfError = getRme(
objectCountMeasurements,
averageObjectsCount
).toFixed(2)
s += ` / Objects: ${averageObjectsCount.toFixed(
0
)} ±${objectsCountMarginOfError}%`
}
return s
}
7 changes: 7 additions & 0 deletions lib/index.d.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,13 @@ export interface RunOptions {
* @default 1
*/
cpuThrottle?: number
/**
* If `true`, RAM measurement is enabled. In this case, two metrics are recorded between the runs:
* - Heap size (`JSHeapUsedSize`) represents how much RAM was consumed at the end of a test iteration.
* - `Object.prototype` represents how many objects were created in RAM at the end of a test iteration.
* @default false
*/
isRamMeasured?: boolean
}

export default class ReactBenchmark extends EventEmitter {
Expand Down
17 changes: 11 additions & 6 deletions lib/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@ module.exports = class ReactBenchmark extends EventEmitter {
this.chrome.on('start', () => {
this.emit('start')
})
this.chrome.on('progress', (benchmark) => {
this.emit('progress', benchmark)
this.chrome.on('progress', (...a) => {
this.emit('progress', ...a)
})
this.chrome.on('console', (log) => {
this.emit('console', log)
Expand All @@ -36,7 +36,12 @@ module.exports = class ReactBenchmark extends EventEmitter {

async run(
filepath,
{ debug = false, devtools = false, cpuThrottle = 1 } = {}
{
debug = false,
devtools = false,
cpuThrottle = 1,
isRamMeasured = false,
} = {}
) {
if (this.running) {
throw new Error('Benchmark is already running')
Expand All @@ -61,11 +66,11 @@ module.exports = class ReactBenchmark extends EventEmitter {
const port = await this.server.start(outputPath)

return new Promise((resolve, reject) => {
this.chrome.once('complete', async (benchmark) => {
this.chrome.once('complete', async (...a) => {
if (!devtools) {
await this._shutdown()
}
resolve(benchmark)
resolve([...a])
})

this.chrome.once('error', async (err) => {
Expand All @@ -77,7 +82,7 @@ module.exports = class ReactBenchmark extends EventEmitter {

this.emit('chrome')

this.chrome.start(port, devtools, { cpuThrottle })
this.chrome.start(port, devtools, { cpuThrottle, isRamMeasured })
})
}
}
18 changes: 6 additions & 12 deletions test/cli.js
Original file line number Diff line number Diff line change
Expand Up @@ -8,23 +8,17 @@ test('runs benchmark', async (t) => {

const result = await execa(binPath, [fixturePath])

t.regex(result.stdout, /[0-9,]+ ops\/sec ±[0-9.]+% \(\d+ runs sampled\)/)
t.regex(result.stdout, /\d+ runs sampled: [0-9,]+ ops\/sec ±[0-9.]+%/)
})

test('throttles CPU', async (t) => {
const getOpsSec = (resultString) => {
return parseInt(
resultString.match(/([\d,]+) ops\/sec/)[1].replace(/,/g, '')
)
}
test('measures RAM', async (t) => {
const binPath = path.resolve(__dirname, '../lib/cli.js')
const fixturePath = path.resolve(__dirname, 'fixtures/benchmark.js')

const woutT = (await execa(binPath, [fixturePath])).stdout
const withT = (await execa(binPath, [fixturePath, '--cpuThrottle=4'])).stdout
const result = await execa(binPath, [fixturePath, '-r'])

t.assert(
getOpsSec(withT) < getOpsSec(woutT),
'The difference between throttled and not throttled execution is less then normal'
t.regex(
result.stdout,
/\d+ runs sampled: [0-9,]+ ops\/sec ±[0-9.]+% \/ RAM: [0-9]+ MB ±[0-9.]+% \/ Objects: [0-9]+ ±[0-9.]+%/
)
})
Loading