@@ -473,7 +473,12 @@ func (c *linkerContext) generateChunksInParallel(additionalFiles []graph.OutputF
 	c.timer.Begin("Generate chunks")
 	defer c.timer.End("Generate chunks")
 
-	// Generate each chunk on a separate goroutine
+	// Generate each chunk on a separate goroutine. When a chunk needs to
+	// reference the path of another chunk, it will use a temporary path called
+	// the "uniqueKey" since the final path hasn't been computed yet (and is
+	// in general uncomputable at this point because paths have hashes that
+	// include information about chunk dependencies, and chunk dependencies
+	// can be cyclic due to dynamic imports).
 	generateWaitGroup := sync.WaitGroup{}
 	generateWaitGroup.Add(len(c.chunks))
 	for chunkIndex := range c.chunks {
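To make the "uniqueKey" idea concrete, here is a minimal, self-contained Go sketch. The names and placeholder format are illustrative, not esbuild's actual implementation: each chunk is first rendered with a placeholder token wherever it references another chunk, and the placeholders are only swapped for real paths once the content hashes are known.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Two chunks that reference each other via dynamic imports, so neither
	// final (hashed) path can be computed before the other. Each chunk gets
	// a temporary placeholder key instead.
	uniqueKeys := []string{"__KEY_0__", "__KEY_1__"}
	contents := []string{
		`import("./` + uniqueKeys[1] + `");`, // chunk 0 imports chunk 1
		`import("./` + uniqueKeys[0] + `");`, // chunk 1 imports chunk 0
	}

	// Later, once the hashes are known, substitute the final paths.
	finalPaths := []string{"chunk-ABC123.js", "chunk-DEF456.js"}
	for i, text := range contents {
		for j, key := range uniqueKeys {
			text = strings.ReplaceAll(text, key, finalPaths[j])
		}
		fmt.Printf("chunk %d: %s\n", i, text)
	}
}
```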
@@ -487,9 +492,9 @@ func (c *linkerContext) generateChunksInParallel(additionalFiles []graph.OutputF
 	c.enforceNoCyclicChunkImports()
 	generateWaitGroup.Wait()
 
-	// Compute the final hashes of each chunk. This can technically be done in
-	// parallel but it probably doesn't matter so much because we're not hashing
-	// that much data.
+	// Compute the final hashes of each chunk, then use those to create the final
+	// paths of each chunk. This can technically be done in parallel but it
+	// probably doesn't matter so much because we're not hashing that much data.
 	visited := make([]uint32, len(c.chunks))
 	var finalBytes []byte
 	for chunkIndex := range c.chunks {
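The cycle-tolerant hash pass described above can be sketched as follows (illustrative types, not the actual esbuild code): a chunk's final hash folds in the isolated hashes of everything it transitively imports, and the `visited` slice, holding a per-pass generation value, both deduplicates work and terminates on cyclic imports.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"hash"
)

// Illustrative chunk record; the real type has many more fields.
type chunkInfo struct {
	isolatedHash []byte   // hash of this chunk's own contents
	imports      []uint32 // indices of chunks this chunk imports
}

// Fold the isolated hashes of a chunk and everything it transitively imports
// into "final". A chunk is skipped if its visited entry already equals the
// current generation, which also breaks cyclic import chains.
func appendHashes(final hash.Hash, chunks []chunkInfo, visited []uint32, index, generation uint32) {
	if visited[index] == generation {
		return
	}
	visited[index] = generation
	for _, importIndex := range chunks[index].imports {
		appendHashes(final, chunks, visited, importIndex, generation)
	}
	final.Write(chunks[index].isolatedHash)
}

func main() {
	// Two chunks that import each other (possible with dynamic imports).
	chunks := []chunkInfo{
		{isolatedHash: []byte("hash-a"), imports: []uint32{1}},
		{isolatedHash: []byte("hash-b"), imports: []uint32{0}},
	}
	visited := make([]uint32, len(chunks))
	for i := range chunks {
		h := sha256.New()
		// Generation i+1 is always nonzero, so a zeroed visited slice can
		// be reused across passes without clearing it.
		appendHashes(h, chunks, visited, uint32(i), uint32(i)+1)
		fmt.Printf("chunk %d final hash: %x\n", i, h.Sum(nil)[:8])
	}
}
```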
@@ -512,7 +517,9 @@ func (c *linkerContext) generateChunksInParallel(additionalFiles []graph.OutputF
 		}))
 	}
 
-	// Generate the final output files by joining file pieces together
+	// Generate the final output files by joining file pieces together and
+	// substituting the temporary paths for the final paths. This substitution
+	// can be done in parallel for each chunk.
 	c.timer.Begin("Generate final output files")
 	var resultsWaitGroup sync.WaitGroup
 	results := make([][]graph.OutputFile, len(c.chunks))
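A hedged sketch of why this substitution parallelizes cleanly (chunks simplified to plain strings; the real chunks are lists of pieces): each goroutine writes only to its own index in a pre-sized results slice, so no mutex is needed, mirroring the `results` pattern in the hunk above.

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

func main() {
	// Hypothetical inputs: chunk text still containing placeholder keys.
	contents := []string{`import("./__KEY_1__");`, `export {};`}
	finalPaths := map[string]string{"__KEY_1__": "chunk-DEF456.js"}

	// One slot per chunk; goroutine i only ever touches results[i].
	results := make([]string, len(contents))
	var wg sync.WaitGroup
	wg.Add(len(contents))
	for i, text := range contents {
		go func(i int, text string) {
			defer wg.Done()
			for key, path := range finalPaths {
				text = strings.ReplaceAll(text, key, path)
			}
			results[i] = text
		}(i, text)
	}
	wg.Wait()
	fmt.Println(results)
}
```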
@@ -522,7 +529,7 @@ func (c *linkerContext) generateChunksInParallel(additionalFiles []graph.OutputF
 		var outputFiles []graph.OutputFile
 
 		// Each file may optionally contain additional files to be copied to the
-		// output directory. This is used by the "file" loader.
+		// output directory. This is used by the "file" and "copy" loaders.
 		var commentPrefix string
 		var commentSuffix string
 		switch chunkRepr := chunk.chunkRepr.(type) {
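The collection step this comment describes might look roughly like the following sketch (an illustrative stand-in for `graph.OutputFile`, not the real struct): a chunk's own output is emitted alongside any extra assets, such as those produced by the "file" or "copy" loaders, that must also land in the output directory.

```go
package main

import "fmt"

// Illustrative stand-in; the real graph.OutputFile has more fields.
type OutputFile struct {
	AbsPath  string
	Contents []byte
}

// Append any additional asset files before the chunk's own output file.
func collect(chunkFile OutputFile, additionalFiles []OutputFile) []OutputFile {
	outputFiles := append([]OutputFile{}, additionalFiles...)
	return append(outputFiles, chunkFile)
}

func main() {
	files := collect(
		OutputFile{AbsPath: "out/chunk-ABC123.js", Contents: []byte("...")},
		[]OutputFile{{AbsPath: "out/photo-XYZ789.png", Contents: []byte{0x89}}},
	)
	for _, f := range files {
		fmt.Println(f.AbsPath)
	}
}
```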
@@ -1330,7 +1337,8 @@ func (c *linkerContext) scanImportsAndExports() {
 				}
 
 			case ast.ImportRequire:
-				// Files that are imported with require() must be CommonJS modules
+				// Files that are imported with require() must be wrapped so that
+				// they can be lazily-evaluated
 				if otherRepr.AST.ExportsKind == js_ast.ExportsESM {
 					otherRepr.Meta.Wrap = graph.WrapESM
 				} else {
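As a Go analogy for the wrapping described above (this models the behavior of esbuild's generated JavaScript wrappers; it is not esbuild code): a wrapped module defers its side effects until the first require() call and caches its exports for every call after that.

```go
package main

import (
	"fmt"
	"sync"
)

// A lazily-evaluated module: the body runs once, on first require().
type module struct {
	once    sync.Once
	exports map[string]any
	body    func(exports map[string]any)
}

func (m *module) require() map[string]any {
	m.once.Do(func() {
		m.exports = map[string]any{}
		m.body(m.exports) // side effects are deferred until this point
	})
	return m.exports
}

func main() {
	foo := &module{body: func(exports map[string]any) {
		fmt.Println("evaluating foo") // printed only once
		exports["answer"] = 42
	}}
	fmt.Println(foo.require()["answer"])
	fmt.Println(foo.require()["answer"]) // cached; body does not rerun
}
```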
@@ -1341,7 +1349,7 @@ func (c *linkerContext) scanImportsAndExports() {
 			case ast.ImportDynamic:
 				if !c.options.CodeSplitting {
 					// If we're not splitting, then import() is just a require() that
-					// returns a promise, so the imported file must be a CommonJS module
+					// returns a promise, so the imported file must also be wrapped
 					if otherRepr.AST.ExportsKind == js_ast.ExportsESM {
 						otherRepr.Meta.Wrap = graph.WrapESM
 					} else {
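Under this no-splitting lowering, import() can be rewritten to a promise around the wrapped module's lazy initializer. A small hedged sketch that emits such a rewrite as text (the wrapper and helper names are illustrative; esbuild's actual generated names differ):

```go
package main

import "fmt"

// lowerDynamicImport returns roughly what import("./foo") becomes when the
// bundle isn't split: a promise that lazily evaluates the wrapped module.
func lowerDynamicImport(wrapperName string) string {
	return "Promise.resolve().then(() => " + wrapperName + "())"
}

func main() {
	fmt.Println(lowerDynamicImport("require_foo"))
	// Output: Promise.resolve().then(() => require_foo())
}
```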
@@ -3289,7 +3297,7 @@ func (c *linkerContext) computeChunks() {
 		sortedChunks = append(sortedChunks, chunk)
 	}
 
-	// Map from the entry point file to this chunk. We will need this later if
+	// Map from the entry point file to its chunk. We will need this later if
 	// a file contains a dynamic import to this entry point, since we'll need
 	// to look up the path for this chunk to use with the import.
 	for chunkIndex, chunk := range sortedChunks {
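A minimal sketch of the map being described (field and variable names are illustrative, not esbuild's): record which chunk each entry point file ended up in, so that a dynamic import() of that entry point can later resolve the chunk's path.

```go
package main

import "fmt"

// Illustrative chunk record; the real one has many more fields.
type chunk struct {
	isEntryPoint bool
	sourceIndex  uint32 // the entry point file, when isEntryPoint is true
}

func main() {
	sortedChunks := []chunk{
		{isEntryPoint: true, sourceIndex: 3},
		{isEntryPoint: false},
		{isEntryPoint: true, sourceIndex: 7},
	}

	// Map each entry point file to the index of the chunk that holds it.
	entryPointChunkIndices := make(map[uint32]uint32)
	for chunkIndex, ch := range sortedChunks {
		if ch.isEntryPoint {
			entryPointChunkIndices[ch.sourceIndex] = uint32(chunkIndex)
		}
	}
	fmt.Println(entryPointChunkIndices) // map[3:0 7:2]
}
```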