1
+ import fs from 'node:fs'
1
2
import c from 'picocolors'
3
+ import * as pathe from 'pathe'
2
4
import type { TaskResultPack } from '@vitest/runner'
3
5
import type { UserConsoleLog } from '../../../../types/general'
4
6
import { BaseReporter } from '../../base'
5
- import { getFullName } from '../../../../utils'
7
+ import type { BenchmarkResult , File } from '../../../../types'
8
+ import { getFullName , getTasks } from '../../../../utils'
6
9
import { getStateSymbol } from '../../renderers/utils'
7
10
import { type TableRendererOptions , createTableRenderer , renderTree } from './tableRender'
8
11
@@ -20,11 +23,24 @@ export class TableReporter extends BaseReporter {
20
23
super . onWatcherStart ( )
21
24
}
22
25
23
- onCollected ( ) {
26
+ async onCollected ( ) {
27
+ this . rendererOptions . logger = this . ctx . logger
28
+ this . rendererOptions . showHeap = this . ctx . config . logHeapUsage
29
+ this . rendererOptions . slowTestThreshold = this . ctx . config . slowTestThreshold
30
+ if ( this . ctx . config . benchmark ?. compare ) {
31
+ const compareFile = pathe . resolve ( this . ctx . config . root , this . ctx . config . benchmark ?. compare )
32
+ try {
33
+ this . rendererOptions . compare = flattenFormattedBenchamrkReport (
34
+ JSON . parse (
35
+ await fs . promises . readFile ( compareFile , 'utf-8' ) ,
36
+ ) ,
37
+ )
38
+ }
39
+ catch ( e ) {
40
+ this . ctx . logger . error ( `Failed to read '${ compareFile } '` , e )
41
+ }
42
+ }
24
43
if ( this . isTTY ) {
25
- this . rendererOptions . logger = this . ctx . logger
26
- this . rendererOptions . showHeap = this . ctx . config . logHeapUsage
27
- this . rendererOptions . slowTestThreshold = this . ctx . config . slowTestThreshold
28
44
const files = this . ctx . state . getFiles ( this . watchFilters )
29
45
if ( ! this . renderer )
30
46
this . renderer = createTableRenderer ( files , this . rendererOptions ) . start ( )
@@ -56,6 +72,18 @@ export class TableReporter extends BaseReporter {
56
72
await this . stopListRender ( )
57
73
this . ctx . logger . log ( )
58
74
await super . onFinished ( files , errors )
75
+
76
+ // write output for future comparison
77
+ let outputFile = this . ctx . config . benchmark ?. outputJson
78
+ if ( outputFile ) {
79
+ outputFile = pathe . resolve ( this . ctx . config . root , outputFile )
80
+ const outputDirectory = pathe . dirname ( outputFile )
81
+ if ( ! fs . existsSync ( outputDirectory ) )
82
+ await fs . promises . mkdir ( outputDirectory , { recursive : true } )
83
+ const output = createFormattedBenchamrkReport ( files )
84
+ await fs . promises . writeFile ( outputFile , JSON . stringify ( output , null , 2 ) )
85
+ this . ctx . logger . log ( `Benchmark report written to ${ outputFile } ` )
86
+ }
59
87
}
60
88
61
89
async onWatcherStart ( ) {
@@ -80,3 +108,70 @@ export class TableReporter extends BaseReporter {
80
108
super . onUserConsoleLog ( log )
81
109
}
82
110
}
111
+
112
// Shape of the JSON benchmark report written when `benchmark.outputJson` is
// set, and read back when `benchmark.compare` points at a previous report.
// NOTE(review): "Benchamrk" is a typo, but the name is exported — keep it
// for API compatibility (a rename would break external consumers).
export interface FormattedBenchamrkReport {
  files: {
    filepath: string
    groups: FormattedBenchmarkGroup[]
  }[]
}
118
+
119
// Flat results with TaskId as a key — the form assigned to
// `rendererOptions.compare`, allowing O(1) lookup of a previous run's result.
export interface FlatBenchmarkReport {
  [id: string]: FormattedBenchmarkResult
}
123
+
124
// All benchmark results belonging to one suite; `fullName` is the suite's
// hierarchical name as produced by `getFullName(task, ' > ')`.
interface FormattedBenchmarkGroup {
  fullName: string
  benchmarks: FormattedBenchmarkResult[]
}
128
+
129
// A single benchmark result in serialized form: the raw `samples` array is
// dropped (presumably to keep the JSON small — TODO confirm) and only its
// length survives as `sampleCount`. `id` is the task id used as the
// comparison key in FlatBenchmarkReport.
export type FormattedBenchmarkResult = Omit<BenchmarkResult, 'samples'> & {
  id: string
  sampleCount: number
}
133
+
134
+ function createFormattedBenchamrkReport ( files : File [ ] ) {
135
+ const report : FormattedBenchamrkReport = { files : [ ] }
136
+ for ( const file of files ) {
137
+ const groups : FormattedBenchmarkGroup [ ] = [ ]
138
+ for ( const task of getTasks ( file ) ) {
139
+ if ( task && task . type === 'suite' ) {
140
+ const benchmarks : FormattedBenchmarkResult [ ] = [ ]
141
+ for ( const t of task . tasks ) {
142
+ const benchmark = t . meta . benchmark && t . result ?. benchmark
143
+ if ( benchmark ) {
144
+ const { samples, ...rest } = benchmark
145
+ benchmarks . push ( {
146
+ id : t . id ,
147
+ sampleCount : samples . length ,
148
+ ...rest ,
149
+ } )
150
+ }
151
+ }
152
+ if ( benchmarks . length ) {
153
+ groups . push ( {
154
+ fullName : getFullName ( task , ' > ' ) ,
155
+ benchmarks,
156
+ } )
157
+ }
158
+ }
159
+ }
160
+ report . files . push ( {
161
+ filepath : file . filepath ,
162
+ groups,
163
+ } )
164
+ }
165
+ return report
166
+ }
167
+
168
+ function flattenFormattedBenchamrkReport ( report : FormattedBenchamrkReport ) : FlatBenchmarkReport {
169
+ const flat : FlatBenchmarkReport = { }
170
+ for ( const file of report . files ) {
171
+ for ( const group of file . groups ) {
172
+ for ( const t of group . benchmarks )
173
+ flat [ t . id ] = t
174
+ }
175
+ }
176
+ return flat
177
+ }
0 commit comments