diff --git a/cli/js/40_bench.js b/cli/js/40_bench.js
index fb0e86463d01a9..83f1384a500851 100644
--- a/cli/js/40_bench.js
+++ b/cli/js/40_bench.js
@@ -16,13 +16,32 @@ const {
   op_bench_now,
 } = core.ops;
 const {
-  ArrayPrototypePush,
+  Array,
+  ArrayPrototypeSort,
+  ArrayPrototypeSlice,
   Error,
+  MathMax,
+  MathMin,
   MathCeil,
   SymbolToStringTag,
   TypeError,
 } = primordials;
 
+/**
+ * @typedef {{
+ *   id: number,
+ *   name: string,
+ *   fn: BenchFunction,
+ *   origin: string,
+ *   ignore: boolean,
+ *   only: boolean,
+ *   n?: number,
+ *   warmup?: number,
+ *   sanitizeExit: boolean,
+ *   permissions: PermissionOptions,
+ * }} BenchDescription
+ */
+
 /** @type {number | null} */
 let currentBenchId = null;
 // These local variables are used to track time measurements at
@@ -199,37 +218,43 @@ function benchStats(
   min,
   max,
   all,
+  allLength,
 ) {
   return {
     n,
     min,
     max,
-    p75: all[MathCeil(n * (75 / 100)) - 1],
-    p99: all[MathCeil(n * (99 / 100)) - 1],
-    p995: all[MathCeil(n * (99.5 / 100)) - 1],
-    p999: all[MathCeil(n * (99.9 / 100)) - 1],
+    p75: all[MathCeil(allLength * (75 / 100)) - 1],
+    p99: all[MathCeil(allLength * (99 / 100)) - 1],
+    p995: all[MathCeil(allLength * (99.5 / 100)) - 1],
+    p999: all[MathCeil(allLength * (99.9 / 100)) - 1],
     avg: !highPrecision ? (avg / n) : MathCeil(avg / n),
     highPrecision,
     usedExplicitTimers,
   };
 }
 
-async function benchMeasure(timeBudget, fn, async, context) {
+// reuse the same array across all benchmarks
+// and cap the length so that we don't spend
+// too much time sorting
+const allMaxLength = 10_000_000;
+let all = new Array(allMaxLength);
+const lowPrecisionThresholdInNs = 1e4;
+
+async function benchMeasure(timeBudget, fn, desc, context) {
   let n = 0;
   let avg = 0;
   let wavg = 0;
   let usedExplicitTimers = false;
-  const all = [];
   let min = Infinity;
   let max = -Infinity;
-  const lowPrecisionThresholdInNs = 1e4;
 
   // warmup step
   let c = 0;
-  let iterations = 20;
+  let iterations = desc.warmup > 0 ? desc.warmup : 20;
   let budget = 10 * 1e6;
 
-  if (!async) {
+  if (!desc.async) {
     while (budget > 0 || iterations-- > 0) {
       const t1 = benchNow();
       fn(context);
@@ -272,11 +297,11 @@ async function benchMeasure(timeBudget, fn, async, context) {
   wavg /= c;
 
   // measure step
-  if (wavg > lowPrecisionThresholdInNs) {
-    let iterations = 10;
-    let budget = timeBudget * 1e6;
+  iterations = desc.n > 0 ? desc.n : 10;
+  budget = timeBudget * 1e6;
 
-    if (!async) {
+  if (wavg > lowPrecisionThresholdInNs) {
+    if (!desc.async) {
       while (budget > 0 || iterations-- > 0) {
         const t1 = benchNow();
         fn(context);
@@ -292,10 +317,13 @@ async function benchMeasure(timeBudget, fn, async, context) {
           currentBenchUserExplicitEnd = null;
         }
 
+        if (n < allMaxLength) {
+          all[n] = measuredTime;
+        }
+
         n++;
         avg += measuredTime;
         budget -= totalTime;
-        ArrayPrototypePush(all, measuredTime);
         if (measuredTime < min) min = measuredTime;
         if (measuredTime > max) max = measuredTime;
       }
@@ -315,10 +343,13 @@ async function benchMeasure(timeBudget, fn, async, context) {
           currentBenchUserExplicitEnd = null;
         }
 
+        if (n < allMaxLength) {
+          all[n] = measuredTime;
+        }
+
         n++;
         avg += measuredTime;
         budget -= totalTime;
-        ArrayPrototypePush(all, measuredTime);
         if (measuredTime < min) min = measuredTime;
         if (measuredTime > max) max = measuredTime;
       }
@@ -326,10 +357,9 @@ async function benchMeasure(timeBudget, fn, async, context) {
   } else {
     context.start = function start() {};
     context.end = function end() {};
-    let iterations = 10;
-    let budget = timeBudget * 1e6;
+    iterations = MathMax(MathCeil(iterations / lowPrecisionThresholdInNs), 10);
 
-    if (!async) {
+    if (!desc.async) {
       while (budget > 0 || iterations-- > 0) {
         const t1 = benchNow();
         for (let c = 0; c < lowPrecisionThresholdInNs; c++) {
@@ -337,9 +367,12 @@ async function benchMeasure(timeBudget, fn, async, context) {
         }
         const iterationTime = (benchNow() - t1) / lowPrecisionThresholdInNs;
 
+        if (n < allMaxLength) {
+          all[n] = iterationTime;
+        }
+
         n++;
         avg += iterationTime;
-        ArrayPrototypePush(all, iterationTime);
         if (iterationTime < min) min = iterationTime;
         if (iterationTime > max) max = iterationTime;
         budget -= iterationTime * lowPrecisionThresholdInNs;
@@ -354,9 +387,12 @@ async function benchMeasure(timeBudget, fn, async, context) {
        }
         const iterationTime = (benchNow() - t1) / lowPrecisionThresholdInNs;
 
+        if (n < allMaxLength) {
+          all[n] = iterationTime;
+        }
+
         n++;
         avg += iterationTime;
-        ArrayPrototypePush(all, iterationTime);
         if (iterationTime < min) min = iterationTime;
         if (iterationTime > max) max = iterationTime;
         budget -= iterationTime * lowPrecisionThresholdInNs;
@@ -364,7 +400,10 @@ async function benchMeasure(timeBudget, fn, async, context) {
     }
   }
 
-  all.sort(compareMeasurements);
+  const allLength = MathMin(allMaxLength, n);
+  const allSlice = ArrayPrototypeSlice(all, 0, allLength);
+  ArrayPrototypeSort(allSlice, compareMeasurements);
+
   return benchStats(
     n,
     wavg > lowPrecisionThresholdInNs,
@@ -372,7 +411,8 @@ async function benchMeasure(timeBudget, fn, async, context) {
     avg,
     min,
     max,
-    all,
+    allSlice,
+    allLength,
   );
 }
 
@@ -440,7 +480,7 @@ function wrapBenchmark(desc) {
     const stats = await benchMeasure(
       benchTimeInMs,
      fn,
-      desc.async,
+      desc,
      context,
    );
 
diff --git a/cli/js/40_test.js b/cli/js/40_test.js
index 3dbb7ec3402f72..befc171254f8a1 100644
--- a/cli/js/40_test.js
+++ b/cli/js/40_test.js
@@ -75,17 +75,6 @@ const DenoNs = globalThis.Deno;
  *   completed: boolean,
  *   failed: boolean,
  * }} TestStepState
- *
- * @typedef {{
- *   id: number,
- *   name: string,
- *   fn: BenchFunction
- *   origin: string,
- *   ignore: boolean,
- *   only: boolean.
- *   sanitizeExit: boolean,
- *   permissions: PermissionOptions,
- * }} BenchDescription
  */
 
 /** @type {Map} */
diff --git a/cli/tsc/dts/lib.deno.ns.d.ts b/cli/tsc/dts/lib.deno.ns.d.ts
index a03b35a197295b..8765a82fcca061 100644
--- a/cli/tsc/dts/lib.deno.ns.d.ts
+++ b/cli/tsc/dts/lib.deno.ns.d.ts
@@ -1242,6 +1242,10 @@ declare namespace Deno {
     /** If at least one bench has `only` set to true, only run benches that have
      * `only` set to `true` and fail the bench suite. */
     only?: boolean;
+    /** Number of iterations to perform. */
+    n?: number;
+    /** Number of warmup iterations to perform before running the benchmark. */
+    warmup?: number;
     /** Ensure the bench case does not prematurely cause the process to exit,
      * for example via a call to {@linkcode Deno.exit}.
      *
diff --git a/tests/specs/bench/iterations/__test__.jsonc b/tests/specs/bench/iterations/__test__.jsonc
new file mode 100644
index 00000000000000..d49fb09c0352e2
--- /dev/null
+++ b/tests/specs/bench/iterations/__test__.jsonc
@@ -0,0 +1,4 @@
+{
+  "args": "bench",
+  "output": "bench.out"
+}
diff --git a/tests/specs/bench/iterations/bench.out b/tests/specs/bench/iterations/bench.out
new file mode 100644
index 00000000000000..4ae1927daf1a9a
--- /dev/null
+++ b/tests/specs/bench/iterations/bench.out
@@ -0,0 +1,12 @@
+Check file:///[WILDLINE]/main.bench.ts
+    CPU | [WILDLINE]
+Runtime | [WILDLINE]
+
+file:///[WILDLINE]main.bench.ts
+
+benchmark                      time/iter (avg)        iter/s      (min … max)           p75      p99     p995
+----------------------------- ----------------------------- --------------------- --------------------------
+above 10,000,000 iterations   [WILDLINE]
+below 10,000,000 iterations   [WILDLINE]
+negative iterations           [WILDLINE]
+
diff --git a/tests/specs/bench/iterations/main.bench.ts b/tests/specs/bench/iterations/main.bench.ts
new file mode 100644
index 00000000000000..12918215ec3754
--- /dev/null
+++ b/tests/specs/bench/iterations/main.bench.ts
@@ -0,0 +1,17 @@
+Deno.bench("above 10,000,000 iterations", {
+  n: 10_000_001,
+  warmup: 10,
+}, () => {
+});
+
+Deno.bench("below 10,000,000 iterations", {
+  n: 1,
+  warmup: 10,
+}, () => {
+});
+
+Deno.bench("negative iterations", {
+  n: -10,
+  warmup: -10,
+}, () => {
+});
diff --git a/tests/specs/test/exit_code/main.out b/tests/specs/test/exit_code/main.out
index d5fe6c4751c57f..6a2136e6d5af4a 100644
--- a/tests/specs/test/exit_code/main.out
+++ b/tests/specs/test/exit_code/main.out
@@ -5,8 +5,8 @@ Deno.exitCode ... FAILED ([WILDCARD])
 
 Deno.exitCode => ./main.js:1:6
 error: Error: Test case finished with exit code set to 42
-    at exitSanitizer (ext:cli/40_test.js:113:15)
-    at async outerWrapped (ext:cli/40_test.js:134:14)
+    at exitSanitizer ([WILDLINE])
+    at async outerWrapped ([WILDLINE])
 
 FAILURES
 
diff --git a/tests/specs/test/exit_code2/main.out b/tests/specs/test/exit_code2/main.out
index 494384652027e3..74978b3056440d 100644
--- a/tests/specs/test/exit_code2/main.out
+++ b/tests/specs/test/exit_code2/main.out
@@ -12,8 +12,8 @@ error: Error
 
 success => ./main.js:6:6
 error: Error: Test case finished with exit code set to 5
-    at exitSanitizer (ext:cli/40_test.js:113:15)
-    at async outerWrapped (ext:cli/40_test.js:134:14)
+    at exitSanitizer ([WILDLINE])
+    at async outerWrapped ([WILDLINE])
 
 FAILURES
 
diff --git a/tests/specs/test/exit_code3/main.out b/tests/specs/test/exit_code3/main.out
index a461db2f6f5b03..3228421da98219 100644
--- a/tests/specs/test/exit_code3/main.out
+++ b/tests/specs/test/exit_code3/main.out
@@ -6,8 +6,8 @@ success ... ok ([WILDCARD])
 
 Deno.exitCode => ./main.js:1:6
 error: Error: Test case finished with exit code set to 42
-    at exitSanitizer (ext:cli/40_test.js:113:15)
-    at async outerWrapped (ext:cli/40_test.js:134:14)
+    at exitSanitizer ([WILDLINE])
+    at async outerWrapped ([WILDLINE])
 
 FAILURES
 
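
For reference, a minimal TypeScript usage sketch of the new options. The benchmark name and body are illustrative only; the option names and the fallback behavior follow the BenchDefinition fields added in cli/tsc/dts/lib.deno.ns.d.ts and the `desc.n > 0 ? desc.n : 10` / `desc.warmup > 0 ? desc.warmup : 20` checks in benchMeasure.

// Requests at least 1_000 measured iterations after 100 warmup iterations.
// Non-positive values fall back to the benchMeasure defaults (10 measured,
// 20 warmup), which is what the "negative iterations" spec above exercises.
Deno.bench("illustrative example", { n: 1_000, warmup: 100 }, () => {
  JSON.parse('{"a": 1}');
});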