@@ -1,4 +1,5 @@
 import * as fs from 'fs-extra';
+import * as path from 'path';
 import * as yargs from 'yargs';
 
 import { benchmarks } from '../lib';
@@ -39,43 +40,72 @@ interface ResultsJson {
   /* eslint-disable-next-line @typescript-eslint/await-thenable */
   const argv = await yargs
     .command('$0', 'Runs jsii benchmark tests and displays results', (argv) =>
-      argv.option('output', {
-        type: 'string',
-        desc: 'location of benchmark results json file, does not output to file if not specified.',
-      }),
+      argv
+        .option('output', {
+          type: 'string',
+          desc: 'location of benchmark results json file, does not output to file if not specified.',
+        })
+        .option('profile-dir', {
+          type: 'string',
+          desc: 'directory to write benchmark profiles to',
+        }),
     )
     .help().argv;
 
   // Run list of benchmarks in sequence
-  const resultsJson: ResultsJson[] = await benchmarks.reduce(
-    async (
-      accum: Promise<ResultsJson[]>,
-      benchmark: Benchmark<any>,
-    ): Promise<ResultsJson[]> => {
-      const prev = await accum;
-      const result = await benchmark.run();
-      const extra = `${result.name} averaged ${result.average} milliseconds over ${result.iterations.length} runs`;
-      console.log(extra);
-      return [
-        ...prev,
-        {
-          name: result.name,
-          unit: 'milliseconds',
-          value: result.average,
-          range: result.variance,
-          extra,
-        },
-      ];
-    },
-    Promise.resolve([]),
-  );
+  try {
+    const resultsJson: ResultsJson[] = await benchmarks.reduce(
+      async (
+        accum: Promise<ResultsJson[]>,
+        benchmark: Benchmark<any>,
+      ): Promise<ResultsJson[]> => {
+        const bench = argv.profileDir ? benchmark.profile() : benchmark;
+        const prev = await accum;
+        const result = await bench.run();
 
-  if (argv.output) {
-    await fs.writeJson(argv.output, resultsJson, { spaces: 2 });
-    console.log(`results written to ${argv.output}`);
-  }
+        // Output summary to console
+        const extra = `${result.name} averaged ${result.average} milliseconds over ${result.iterations.length} runs`;
+        console.log(extra);
+
+        // Write profiles if enabled
+        if (argv.profileDir) {
+          const dirName = result.name.replace(/[/\\:*?"<>]/g, '');
+          const profilesTargetDir = path.join(argv.profileDir, dirName);
+          await fs.mkdir(profilesTargetDir);
+
+          await Promise.all(
+            result.iterations.map(async (iter, idx) => {
+              const profileFile = path.join(profilesTargetDir, `${idx}.json`);
+              await fs.writeJson(profileFile, iter.profile);
+            }),
+          );
+          console.log(`profiles written to ${profilesTargetDir} directory`);
+        }
 
-  return resultsJson;
+        return [
+          ...prev,
+          {
+            name: result.name,
+            unit: 'milliseconds',
+            value: result.average,
+            range: result.variance,
+            extra,
+          },
+        ];
+      },
+      Promise.resolve([]),
+    );
+
+    if (argv.output) {
+      await fs.writeJson(argv.output, resultsJson, { spaces: 2 });
+      console.log(`results written to ${argv.output}`);
+    }
+
+    return resultsJson;
+  } catch (e) {
+    console.error(e);
+    throw e;
+  }
 })()
   .then((results) => {
     console.log(`successfully completed ${results.length} benchmarks`);
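
For reference, the entries returned by the reducer above follow the ResultsJson interface named in the hunk header. A minimal sketch inferred from the fields written in this diff (field names come from the code above; the types and optionality are assumptions, as the real declaration sits earlier in this file):

// Sketch only: shape implied by the fields pushed into resultsJson above.
interface ResultsJson {
  name: string; // benchmark name (result.name)
  unit: string; // always 'milliseconds' in this script
  value: number; // result.average
  range: number; // result.variance (assumed numeric)
  extra: string; // human-readable summary line logged to the console
}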