/*
	MIT License http://www.opensource.org/licenses/mit-license.php
	Author Tobias Koppers @sokra
*/

"use strict";

const AsyncDependencyToInitialChunkError = require("./AsyncDependencyToInitialChunkError");
const { connectChunkGroupParentAndChild } = require("./GraphHelpers");
const ModuleGraphConnection = require("./ModuleGraphConnection");

/** @typedef {import("./AsyncDependenciesBlock")} AsyncDependenciesBlock */
/** @typedef {import("./Chunk")} Chunk */
/** @typedef {import("./ChunkGroup")} ChunkGroup */
/** @typedef {import("./Compilation")} Compilation */
/** @typedef {import("./DependenciesBlock")} DependenciesBlock */
/** @typedef {import("./Dependency")} Dependency */
/** @typedef {import("./Entrypoint")} Entrypoint */
/** @typedef {import("./Module")} Module */
/** @typedef {import("./ModuleGraph")} ModuleGraph */
/** @typedef {import("./ModuleGraphConnection").ConnectionState} ConnectionState */
/** @typedef {import("./logging/Logger").Logger} Logger */

/**
 * @typedef {Object} QueueItem
 * @property {number} action
 * @property {DependenciesBlock} block
 * @property {Module} module
 * @property {Chunk} chunk
 * @property {ChunkGroup} chunkGroup
 * @property {ChunkGroupInfo} chunkGroupInfo
 */

/** @typedef {Set<Module> & { plus: Set<Module> }} ModuleSetPlus */
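// Illustrative note (not part of the original source): a ModuleSetPlus is a
// plain Set carrying an extra, shared `plus` Set, so membership is checked as
// `set.has(m) || set.plus.has(m)`. A hypothetical construction:
//   const set = /** @type {ModuleSetPlus} */ (new Set([moduleA]));
//   set.plus = parentModules; // reuse the parent's set by reference
// The split lets large parent sets be shared instead of copied.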

/**
 * @typedef {Object} ChunkGroupInfo
 * @property {ChunkGroup} chunkGroup the chunk group
 * @property {ModuleSetPlus} minAvailableModules current minimal set of modules available at this point
 * @property {boolean} minAvailableModulesOwned true, if minAvailableModules is owned and can be modified
 * @property {ModuleSetPlus[]} availableModulesToBeMerged enqueued updates to the minimal set of available modules
 * @property {Set<Module>=} skippedItems modules that were skipped because the module is already available in parent chunks (need to reconsider when minAvailableModules is shrinking)
 * @property {ModuleSetPlus} resultingAvailableModules set of modules available including modules from this chunk group
 * @property {Set<ChunkGroupInfo>} children set of child chunk groups that will be revisited when availableModules shrink
 * @property {Set<ChunkGroupInfo>} availableSources set of chunk groups that are the source for minAvailableModules
 * @property {Set<ChunkGroupInfo>} availableChildren set of chunk groups which depend on this chunk group as availableSource
 * @property {number} preOrderIndex next pre order index
 * @property {number} postOrderIndex next post order index
 */

/**
 * @typedef {Object} BlockChunkGroupConnection
 * @property {ChunkGroupInfo} originChunkGroupInfo origin chunk group
 * @property {ChunkGroup} chunkGroup referenced chunk group
 */

const EMPTY_SET = /** @type {ModuleSetPlus} */ (new Set());
EMPTY_SET.plus = EMPTY_SET;

/**
 * @param {ModuleSetPlus} a first set
 * @param {ModuleSetPlus} b second set
 * @returns {number} cmp
 */
const bySetSize = (a, b) => {
	return b.size + b.plus.size - a.size - a.plus.size;
};
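// Illustrative note (not part of the original source): bySetSize orders
// ModuleSetPlus instances by combined size, largest first. With hypothetical
// sets a = {m1} plus {m2} (combined size 2) and b = {m1, m2, m3} plus {}
// (combined size 3), bySetSize(a, b) === 1, so sorting places b before a.
// processChunkGroupsForMerging below sorts availableModulesToBeMerged with
// this comparator before merging.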

/**
 * Extracts block to modules mapping from all modules
 * @param {Compilation} compilation the compilation
 * @returns {Map<DependenciesBlock, Map<Module, Exclude<ConnectionState, false>>>} the mapping block to modules
 */
const extractBlockModulesMap = compilation => {
	const { moduleGraph } = compilation;

	/** @type {Map<DependenciesBlock, Map<Module, Exclude<ConnectionState, false>>>} */
	const blockModulesMap = new Map();

	const blockQueue = new Set();

	for (const module of compilation.modules) {
		/** @type {WeakMap<Dependency, [Module, Exclude<ConnectionState, false>]>} */
		let moduleMap;

		for (const connection of moduleGraph.getOutgoingConnections(module)) {
			const d = connection.dependency;
			// We skip connections without dependency
			if (!d) continue;
			const m = connection.module;
			// We skip connections without Module pointer
			if (!m) continue;
			// We skip weak connections
			if (connection.weak) continue;
			const state = connection.getActiveState(undefined);
			// We skip inactive connections
			if (state === false) continue;
			// Store the Dependency to Module mapping in a local map
			// to allow accessing it faster compared to
			// moduleGraph.getConnection()
			if (moduleMap === undefined) {
				moduleMap = new WeakMap();
			}
			moduleMap.set(connection.dependency, [m, state]);
		}

		blockQueue.clear();
		blockQueue.add(module);
		for (const block of blockQueue) {
			let modules;

			if (moduleMap !== undefined && block.dependencies) {
				for (const dep of block.dependencies) {
					const map = moduleMap.get(dep);
					if (map !== undefined) {
						const [module, state] = map;
						if (modules === undefined) {
							modules = new Map();
							blockModulesMap.set(block, modules);
						}
						let merged = state;
						if (merged !== true) {
							const old = modules.get(module);
							if (old !== undefined) {
								merged = /** @type {Exclude<ConnectionState, false>} */ (ModuleGraphConnection.addConnectionStates(
									old,
									merged
								));
								if (merged === old) {
									continue;
								}
							}
						}
						modules.set(module, merged);
					}
				}
			}

			if (block.blocks) {
				for (const b of block.blocks) {
					blockQueue.add(b);
				}
			}
		}
	}

	return blockModulesMap;
};
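// Illustrative note (not part of the original source): the returned map keys a
// DependenciesBlock to the modules reachable through its dependencies plus the
// merged connection state, e.g. roughly
//   Map { entryModule => Map { ./a.js => true, ./b.js => <ConnectionState> } }
// (module names here are hypothetical). Only blocks with at least one active,
// non-weak connection get an entry.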

/**
 *
 * @param {Logger} logger a logger
 * @param {Compilation} compilation the compilation
 * @param {Map<Entrypoint, Module[]>} inputEntrypointsAndModules chunk groups which are processed with the modules
 * @param {Map<ChunkGroup, ChunkGroupInfo>} chunkGroupInfoMap mapping from chunk group to available modules
 * @param {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} blockConnections connection for blocks
 * @param {Set<DependenciesBlock>} blocksWithNestedBlocks flag for blocks that have nested blocks
 * @param {Set<ChunkGroup>} allCreatedChunkGroups filled with all chunk groups that are created here
 */
const visitModules = (
	logger,
	compilation,
	inputEntrypointsAndModules,
	chunkGroupInfoMap,
	blockConnections,
	blocksWithNestedBlocks,
	allCreatedChunkGroups
) => {
	const { moduleGraph, chunkGraph } = compilation;

	logger.time("visitModules: prepare");
	const blockModulesMap = extractBlockModulesMap(compilation);

	let statProcessedQueueItems = 0;
	let statProcessedBlocks = 0;
	let statConnectedChunkGroups = 0;
	let statProcessedChunkGroupsForMerging = 0;
	let statMergedAvailableModuleSets = 0;
	let statForkedAvailableModules = 0;
	let statForkedAvailableModulesCount = 0;
	let statForkedAvailableModulesCountPlus = 0;
	let statForkedMergedModulesCount = 0;
	let statForkedMergedModulesCountPlus = 0;
	let statForkedResultModulesCount = 0;
	let statChunkGroupInfoUpdated = 0;
	let statChildChunkGroupsReconnected = 0;

	let nextChunkGroupIndex = 0;
	let nextFreeModulePreOrderIndex = 0;
	let nextFreeModulePostOrderIndex = 0;

	/** @type {Map<DependenciesBlock, ChunkGroupInfo>} */
	const blockChunkGroups = new Map();

	/** @type {Map<string, ChunkGroupInfo>} */
	const namedChunkGroups = new Map();

	/** @type {Map<string, ChunkGroupInfo>} */
	const namedAsyncEntrypoints = new Map();

	const ADD_AND_ENTER_ENTRY_MODULE = 0;
	const ADD_AND_ENTER_MODULE = 1;
	const ENTER_MODULE = 2;
	const PROCESS_BLOCK = 3;
	const PROCESS_ENTRY_BLOCK = 4;
	const LEAVE_MODULE = 5;
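	// Illustrative note (not part of the original source): a module entering the
	// queue typically flows through these actions via the fall-through cases in
	// processQueue below, roughly
	//   ADD_AND_ENTER_MODULE -> ENTER_MODULE -> PROCESS_BLOCK ... LEAVE_MODULE
	// with ADD_AND_ENTER_ENTRY_MODULE / PROCESS_ENTRY_BLOCK used for (async)
	// entrypoints instead.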

	/** @type {QueueItem[]} */
	let queue = [];

	/** @type {Map<ChunkGroupInfo, Set<ChunkGroupInfo>>} */
	const queueConnect = new Map();
	/** @type {Set<ChunkGroupInfo>} */
	const chunkGroupsForCombining = new Set();

	// Fill queue with entrypoint modules
	// Create ChunkGroupInfo for entrypoints
	for (const [chunkGroup, modules] of inputEntrypointsAndModules) {
		/** @type {ChunkGroupInfo} */
		const chunkGroupInfo = {
			chunkGroup,
			minAvailableModules: undefined,
			minAvailableModulesOwned: false,
			availableModulesToBeMerged: [],
			skippedItems: undefined,
			resultingAvailableModules: undefined,
			children: undefined,
			availableSources: undefined,
			availableChildren: undefined,
			preOrderIndex: 0,
			postOrderIndex: 0
		};
		chunkGroup.index = nextChunkGroupIndex++;
		if (chunkGroup.getNumberOfParents() > 0) {
			// minAvailableModules for child entrypoints are unknown yet, set to undefined.
			// This means no module is added until other sets are merged into
			// this minAvailableModules (by the parent entrypoints)
			const skippedItems = new Set();
			for (const module of modules) {
				skippedItems.add(module);
			}
			chunkGroupInfo.skippedItems = skippedItems;
			chunkGroupsForCombining.add(chunkGroupInfo);
		} else {
			// The application may start here: We start with an empty list of available modules
			chunkGroupInfo.minAvailableModules = EMPTY_SET;
			const chunk = chunkGroup.getEntrypointChunk();
			for (const module of modules) {
				queue.push({
					action: ADD_AND_ENTER_MODULE,
					block: module,
					module,
					chunk,
					chunkGroup,
					chunkGroupInfo
				});
			}
		}
		chunkGroupInfoMap.set(chunkGroup, chunkGroupInfo);
		if (chunkGroup.name) {
			namedChunkGroups.set(chunkGroup.name, chunkGroupInfo);
		}
	}
	// Fill availableSources with parent-child dependencies between entrypoints
	for (const chunkGroupInfo of chunkGroupsForCombining) {
		const { chunkGroup } = chunkGroupInfo;
		chunkGroupInfo.availableSources = new Set();
		for (const parent of chunkGroup.parentsIterable) {
			const parentChunkGroupInfo = chunkGroupInfoMap.get(parent);
			chunkGroupInfo.availableSources.add(parentChunkGroupInfo);
			if (parentChunkGroupInfo.availableChildren === undefined) {
				parentChunkGroupInfo.availableChildren = new Set();
			}
			parentChunkGroupInfo.availableChildren.add(chunkGroupInfo);
		}
	}
	// pop() is used to read from the queue,
	// so it needs to be reversed to be iterated in
	// the correct order
	queue.reverse();

	/** @type {Set<ChunkGroupInfo>} */
	const outdatedChunkGroupInfo = new Set();
	/** @type {Set<ChunkGroupInfo>} */
	const chunkGroupsForMerging = new Set();
	/** @type {QueueItem[]} */
	let queueDelayed = [];

	logger.timeEnd("visitModules: prepare");

	/** @type {Module[]} */
	const skipBuffer = [];
	/** @type {QueueItem[]} */
	const queueBuffer = [];

	/** @type {Module} */
	let module;
	/** @type {Chunk} */
	let chunk;
	/** @type {ChunkGroup} */
	let chunkGroup;
	/** @type {DependenciesBlock} */
	let block;
	/** @type {ChunkGroupInfo} */
	let chunkGroupInfo;

	// For each async Block in graph
	/**
	 * @param {AsyncDependenciesBlock} b iterating over each Async DepBlock
	 * @returns {void}
	 */
	const iteratorBlock = b => {
		// 1. We create a chunk group with single chunk in it for this Block
		// but only once (blockChunkGroups map)
		let cgi = blockChunkGroups.get(b);
		/** @type {ChunkGroup} */
		let c;
		/** @type {Entrypoint} */
		let entrypoint;
		const entryOptions = b.groupOptions && b.groupOptions.entryOptions;
		if (cgi === undefined) {
			const chunkName = (b.groupOptions && b.groupOptions.name) || b.chunkName;
			if (entryOptions) {
				cgi = namedAsyncEntrypoints.get(chunkName);
				if (!cgi) {
					entrypoint = compilation.addAsyncEntrypoint(
						entryOptions,
						module,
						b.loc,
						b.request
					);
					entrypoint.index = nextChunkGroupIndex++;
					cgi = {
						chunkGroup: entrypoint,
						minAvailableModules: EMPTY_SET,
						minAvailableModulesOwned: false,
						availableModulesToBeMerged: [],
						skippedItems: undefined,
						resultingAvailableModules: undefined,
						children: undefined,
						availableSources: undefined,
						availableChildren: undefined,
						preOrderIndex: 0,
						postOrderIndex: 0
					};
					chunkGroupInfoMap.set(entrypoint, cgi);

					chunkGraph.connectBlockAndChunkGroup(b, entrypoint);
					if (chunkName) {
						namedAsyncEntrypoints.set(chunkName, cgi);
					}
				} else {
					entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
					// TODO merge entryOptions
					entrypoint.addOrigin(module, b.loc, b.request);
					chunkGraph.connectBlockAndChunkGroup(b, entrypoint);
				}

				// 2. We enqueue the DependenciesBlock for traversal
				queueDelayed.push({
					action: PROCESS_ENTRY_BLOCK,
					block: b,
					module: module,
					chunk: entrypoint.chunks[0],
					chunkGroup: entrypoint,
					chunkGroupInfo: cgi
				});
			} else {
				cgi = namedChunkGroups.get(chunkName);
				if (!cgi) {
					c = compilation.addChunkInGroup(
						b.groupOptions || b.chunkName,
						module,
						b.loc,
						b.request
					);
					c.index = nextChunkGroupIndex++;
					cgi = {
						chunkGroup: c,
						minAvailableModules: undefined,
						minAvailableModulesOwned: undefined,
						availableModulesToBeMerged: [],
						skippedItems: undefined,
						resultingAvailableModules: undefined,
						children: undefined,
						availableSources: undefined,
						availableChildren: undefined,
						preOrderIndex: 0,
						postOrderIndex: 0
					};
					allCreatedChunkGroups.add(c);
					chunkGroupInfoMap.set(c, cgi);
					if (chunkName) {
						namedChunkGroups.set(chunkName, cgi);
					}
				} else {
					c = cgi.chunkGroup;
					if (c.isInitial()) {
						compilation.errors.push(
							new AsyncDependencyToInitialChunkError(chunkName, module, b.loc)
						);
						c = chunkGroup;
					}
					c.addOptions(b.groupOptions);
					c.addOrigin(module, b.loc, b.request);
				}
				blockConnections.set(b, []);
			}
			blockChunkGroups.set(b, cgi);
		} else if (entryOptions) {
			entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
		} else {
			c = cgi.chunkGroup;
		}

		if (c !== undefined) {
			// 2. We store the connection for the block
			// to connect it later if needed
			blockConnections.get(b).push({
				originChunkGroupInfo: chunkGroupInfo,
				chunkGroup: c
			});

			// 3. We enqueue the chunk group info creation/updating
			let connectList = queueConnect.get(chunkGroupInfo);
			if (connectList === undefined) {
				connectList = new Set();
				queueConnect.set(chunkGroupInfo, connectList);
			}
			connectList.add(cgi);

			// TODO check if this really needs to be done for each traversal
			// or if it is enough when it's queued when created
			// 4. We enqueue the DependenciesBlock for traversal
			queueDelayed.push({
				action: PROCESS_BLOCK,
				block: b,
				module: module,
				chunk: c.chunks[0],
				chunkGroup: c,
				chunkGroupInfo: cgi
			});
		} else {
			chunkGroupInfo.chunkGroup.addAsyncEntrypoint(entrypoint);
		}
	};

	/**
	 * @param {DependenciesBlock} block the block
	 * @returns {void}
	 */
	const processBlock = block => {
		statProcessedBlocks++;
		// get prepared block info
		const blockModules = blockModulesMap.get(block);

		if (blockModules !== undefined) {
			const { minAvailableModules } = chunkGroupInfo;
			// Buffer items because the order needs to be reversed to get indices correct
			// Traverse all referenced modules
			for (const [refModule, activeState] of blockModules) {
				if (chunkGraph.isModuleInChunk(refModule, chunk)) {
					// skip early if already connected
					continue;
				}
				if (
					minAvailableModules.has(refModule) ||
					minAvailableModules.plus.has(refModule)
				) {
					// already in parent chunks, skip it for now
					skipBuffer.push(refModule);
					continue;
				}
				// enqueue, then add and enter to be in the correct order
				// this is relevant with circular dependencies
				queueBuffer.push({
					action: activeState === true ? ADD_AND_ENTER_MODULE : PROCESS_BLOCK,
					block: refModule,
					module: refModule,
					chunk,
					chunkGroup,
					chunkGroupInfo
				});
			}
			// Add buffered items in reverse order
			if (skipBuffer.length > 0) {
				let { skippedItems } = chunkGroupInfo;
				if (skippedItems === undefined) {
					chunkGroupInfo.skippedItems = skippedItems = new Set();
				}
				for (let i = skipBuffer.length - 1; i >= 0; i--) {
					skippedItems.add(skipBuffer[i]);
				}
				skipBuffer.length = 0;
			}
			if (queueBuffer.length > 0) {
				for (let i = queueBuffer.length - 1; i >= 0; i--) {
					queue.push(queueBuffer[i]);
				}
				queueBuffer.length = 0;
			}
		}

		// Traverse all Blocks
		for (const b of block.blocks) {
			iteratorBlock(b);
		}

		if (block.blocks.length > 0 && module !== block) {
			blocksWithNestedBlocks.add(block);
		}
	};

	/**
	 * @param {DependenciesBlock} block the block
	 * @returns {void}
	 */
	const processEntryBlock = block => {
		statProcessedBlocks++;
		// get prepared block info
		const blockModules = blockModulesMap.get(block);

		if (blockModules !== undefined) {
			// Traverse all referenced modules
			for (const [refModule, activeState] of blockModules) {
				// enqueue, then add and enter to be in the correct order
				// this is relevant with circular dependencies
				queueBuffer.push({
					action:
						activeState === true ? ADD_AND_ENTER_ENTRY_MODULE : PROCESS_BLOCK,
					block: refModule,
					module: refModule,
					chunk,
					chunkGroup,
					chunkGroupInfo
				});
			}
			// Add buffered items in reverse order
			if (queueBuffer.length > 0) {
				for (let i = queueBuffer.length - 1; i >= 0; i--) {
					queue.push(queueBuffer[i]);
				}
				queueBuffer.length = 0;
			}
		}

		// Traverse all Blocks
		for (const b of block.blocks) {
			iteratorBlock(b);
		}

		if (block.blocks.length > 0 && module !== block) {
			blocksWithNestedBlocks.add(block);
		}
	};

	const processQueue = () => {
		while (queue.length) {
			statProcessedQueueItems++;
			const queueItem = queue.pop();
			module = queueItem.module;
			block = queueItem.block;
			chunk = queueItem.chunk;
			chunkGroup = queueItem.chunkGroup;
			chunkGroupInfo = queueItem.chunkGroupInfo;

			switch (queueItem.action) {
				case ADD_AND_ENTER_ENTRY_MODULE:
					chunkGraph.connectChunkAndEntryModule(
						chunk,
						module,
						/** @type {Entrypoint} */ (chunkGroup)
					);
				// fallthrough
				case ADD_AND_ENTER_MODULE: {
					if (chunkGraph.isModuleInChunk(module, chunk)) {
						// already connected, skip it
						break;
					}
					// We connect Module and Chunk
					chunkGraph.connectChunkAndModule(chunk, module);
				}
				// fallthrough
				case ENTER_MODULE: {
					const index = chunkGroup.getModulePreOrderIndex(module);
					if (index === undefined) {
						chunkGroup.setModulePreOrderIndex(
							module,
							chunkGroupInfo.preOrderIndex++
						);
					}

					if (
						moduleGraph.setPreOrderIndexIfUnset(
							module,
							nextFreeModulePreOrderIndex
						)
					) {
						nextFreeModulePreOrderIndex++;
					}

					// reuse queueItem
					queueItem.action = LEAVE_MODULE;
					queue.push(queueItem);
				}
				// fallthrough
				case PROCESS_BLOCK: {
					processBlock(block);
					break;
				}
				case PROCESS_ENTRY_BLOCK: {
					processEntryBlock(block);
					break;
				}
				case LEAVE_MODULE: {
					const index = chunkGroup.getModulePostOrderIndex(module);
					if (index === undefined) {
						chunkGroup.setModulePostOrderIndex(
							module,
							chunkGroupInfo.postOrderIndex++
						);
					}

					if (
						moduleGraph.setPostOrderIndexIfUnset(
							module,
							nextFreeModulePostOrderIndex
						)
					) {
						nextFreeModulePostOrderIndex++;
					}
					break;
				}
			}
		}
	};

	const calculateResultingAvailableModules = chunkGroupInfo => {
		if (chunkGroupInfo.resultingAvailableModules)
			return chunkGroupInfo.resultingAvailableModules;

		const minAvailableModules = chunkGroupInfo.minAvailableModules;

		// Create a new Set of available modules at this point
		// We want to be as lazy as possible. There are multiple ways of doing this:
		// Note that resultingAvailableModules is stored as "(a) + (b)" as it's a ModuleSetPlus
		// - resultingAvailableModules = (modules of chunk) + (minAvailableModules + minAvailableModules.plus)
		// - resultingAvailableModules = (minAvailableModules + modules of chunk) + (minAvailableModules.plus)
		// We choose one depending on the size of minAvailableModules vs minAvailableModules.plus

		let resultingAvailableModules;
		if (minAvailableModules.size > minAvailableModules.plus.size) {
			// resultingAvailableModules = (modules of chunk) + (minAvailableModules + minAvailableModules.plus)
			resultingAvailableModules = /** @type {Set<Module> & {plus: Set<Module>}} */ (new Set());
			for (const module of minAvailableModules.plus)
				minAvailableModules.add(module);
			minAvailableModules.plus = EMPTY_SET;
			resultingAvailableModules.plus = minAvailableModules;
			chunkGroupInfo.minAvailableModulesOwned = false;
		} else {
			// resultingAvailableModules = (minAvailableModules + modules of chunk) + (minAvailableModules.plus)
			resultingAvailableModules = /** @type {Set<Module> & {plus: Set<Module>}} */ (new Set(
				minAvailableModules
			));
			resultingAvailableModules.plus = minAvailableModules.plus;
		}

		// add the modules from the chunk group to the set
		for (const chunk of chunkGroupInfo.chunkGroup.chunks) {
			for (const m of chunkGraph.getChunkModulesIterable(chunk)) {
				resultingAvailableModules.add(m);
			}
		}
		return (chunkGroupInfo.resultingAvailableModules = resultingAvailableModules);
	};
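	// Illustrative note (not part of the original source), with hypothetical
	// sets: if minAvailableModules = {a, b, c} and its plus part is {d}, the
	// first branch above folds the small plus part into the large set and reuses
	// it by reference, giving (chunk modules) + {a, b, c, d} without copying the
	// large part. If instead minAvailableModules = {a} and plus = {b, c, d}, the
	// second branch copies only the small normal part: ({a} + chunk modules) +
	// {b, c, d}.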

	const processConnectQueue = () => {
		// Figure out new parents for chunk groups
		// to get new available modules for these children
		for (const [chunkGroupInfo, targets] of queueConnect) {
			// 1. Add new targets to the list of children
			if (chunkGroupInfo.children === undefined) {
				chunkGroupInfo.children = targets;
			} else {
				for (const target of targets) {
					chunkGroupInfo.children.add(target);
				}
			}

			// 2. Calculate resulting available modules
			const resultingAvailableModules = calculateResultingAvailableModules(
				chunkGroupInfo
			);

			// 3. Update chunk group info
			for (const target of targets) {
				target.availableModulesToBeMerged.push(resultingAvailableModules);
				chunkGroupsForMerging.add(target);
			}

			statConnectedChunkGroups += targets.size;
		}
		queueConnect.clear();
	};

	const processChunkGroupsForMerging = () => {
		statProcessedChunkGroupsForMerging += chunkGroupsForMerging.size;

		// Execute the merge
		for (const info of chunkGroupsForMerging) {
			const availableModulesToBeMerged = info.availableModulesToBeMerged;
			let cachedMinAvailableModules = info.minAvailableModules;

			statMergedAvailableModuleSets += availableModulesToBeMerged.length;

			// 1. Get minimal available modules
			// It doesn't make sense to traverse a chunk again with more available modules.
			// This step calculates the minimal available modules and skips traversal when
			// the list didn't shrink.
			if (availableModulesToBeMerged.length > 1) {
				availableModulesToBeMerged.sort(bySetSize);
			}
			let changed = false;
			merge: for (const availableModules of availableModulesToBeMerged) {
				if (cachedMinAvailableModules === undefined) {
					cachedMinAvailableModules = availableModules;
					info.minAvailableModules = cachedMinAvailableModules;
					info.minAvailableModulesOwned = false;
					changed = true;
				} else {
					if (info.minAvailableModulesOwned) {
						// We own it and can modify it
						if (cachedMinAvailableModules.plus === availableModules.plus) {
							for (const m of cachedMinAvailableModules) {
								if (!availableModules.has(m)) {
									cachedMinAvailableModules.delete(m);
									changed = true;
								}
							}
						} else {
							for (const m of cachedMinAvailableModules) {
								if (!availableModules.has(m) && !availableModules.plus.has(m)) {
									cachedMinAvailableModules.delete(m);
									changed = true;
								}
							}
							for (const m of cachedMinAvailableModules.plus) {
								if (!availableModules.has(m) && !availableModules.plus.has(m)) {
									// We can't remove modules from the plus part
									// so we need to merge plus into the normal part to allow modifying it
									const iterator = cachedMinAvailableModules.plus[
										Symbol.iterator
									]();
									// fast forward add all modules until m
									/** @type {IteratorResult<Module>} */
									let it;
									while (!(it = iterator.next()).done) {
										const module = it.value;
										if (module === m) break;
										cachedMinAvailableModules.add(module);
									}
									// check the remaining modules before adding
									while (!(it = iterator.next()).done) {
										const module = it.value;
										if (
											availableModules.has(module) ||
											availableModules.plus.has(module)
										) {
											cachedMinAvailableModules.add(module);
										}
									}
									cachedMinAvailableModules.plus = EMPTY_SET;
									changed = true;
									continue merge;
								}
							}
						}
					} else if (cachedMinAvailableModules.plus === availableModules.plus) {
						// Common and fast case when the plus part is shared
						// We only need to care about the normal part
						if (availableModules.size < cachedMinAvailableModules.size) {
							// the new availableModules is smaller so it's faster to
							// fork from the new availableModules
							statForkedAvailableModules++;
							statForkedAvailableModulesCount += availableModules.size;
							statForkedMergedModulesCount += cachedMinAvailableModules.size;
							// construct a new Set as intersection of cachedMinAvailableModules and availableModules
							const newSet = /** @type {ModuleSetPlus} */ (new Set());
							newSet.plus = availableModules.plus;
							for (const m of availableModules) {
								if (cachedMinAvailableModules.has(m)) {
									newSet.add(m);
								}
							}
							statForkedResultModulesCount += newSet.size;
							cachedMinAvailableModules = newSet;
							info.minAvailableModulesOwned = true;
							info.minAvailableModules = newSet;
							changed = true;
							continue merge;
						}
						for (const m of cachedMinAvailableModules) {
							if (!availableModules.has(m)) {
								// cachedMinAvailableModules needs to be modified
								// but we don't own it
								statForkedAvailableModules++;
								statForkedAvailableModulesCount +=
									cachedMinAvailableModules.size;
								statForkedMergedModulesCount += availableModules.size;
								// construct a new Set as intersection of cachedMinAvailableModules and availableModules
								// as the plus part is equal we can just take over this one
								const newSet = /** @type {ModuleSetPlus} */ (new Set());
								newSet.plus = availableModules.plus;
								const iterator = cachedMinAvailableModules[Symbol.iterator]();
								// fast forward add all modules until m
								/** @type {IteratorResult<Module>} */
								let it;
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (module === m) break;
									newSet.add(module);
								}
								// check the remaining modules before adding
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (availableModules.has(module)) {
										newSet.add(module);
									}
								}
								statForkedResultModulesCount += newSet.size;
								cachedMinAvailableModules = newSet;
								info.minAvailableModulesOwned = true;
								info.minAvailableModules = newSet;
								changed = true;
								continue merge;
							}
						}
					} else {
						for (const m of cachedMinAvailableModules) {
							if (!availableModules.has(m) && !availableModules.plus.has(m)) {
								// cachedMinAvailableModules needs to be modified
								// but we don't own it
								statForkedAvailableModules++;
								statForkedAvailableModulesCount +=
									cachedMinAvailableModules.size;
								statForkedAvailableModulesCountPlus +=
									cachedMinAvailableModules.plus.size;
								statForkedMergedModulesCount += availableModules.size;
								statForkedMergedModulesCountPlus += availableModules.plus.size;
								// construct a new Set as intersection of cachedMinAvailableModules and availableModules
								const newSet = /** @type {ModuleSetPlus} */ (new Set());
								newSet.plus = EMPTY_SET;
								const iterator = cachedMinAvailableModules[Symbol.iterator]();
								// fast forward add all modules until m
								/** @type {IteratorResult<Module>} */
								let it;
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (module === m) break;
									newSet.add(module);
								}
								// check the remaining modules before adding
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (
										availableModules.has(module) ||
										availableModules.plus.has(module)
									) {
										newSet.add(module);
									}
								}
								// also check all modules in cachedMinAvailableModules.plus
								for (const module of cachedMinAvailableModules.plus) {
									if (
										availableModules.has(module) ||
										availableModules.plus.has(module)
									) {
										newSet.add(module);
									}
								}
								statForkedResultModulesCount += newSet.size;
								cachedMinAvailableModules = newSet;
								info.minAvailableModulesOwned = true;
								info.minAvailableModules = newSet;
								changed = true;
								continue merge;
							}
						}
						for (const m of cachedMinAvailableModules.plus) {
							if (!availableModules.has(m) && !availableModules.plus.has(m)) {
								// cachedMinAvailableModules needs to be modified
								// but we don't own it
								statForkedAvailableModules++;
								statForkedAvailableModulesCount +=
									cachedMinAvailableModules.size;
								statForkedAvailableModulesCountPlus +=
									cachedMinAvailableModules.plus.size;
								statForkedMergedModulesCount += availableModules.size;
								statForkedMergedModulesCountPlus += availableModules.plus.size;
								// construct a new Set as intersection of cachedMinAvailableModules and availableModules
								// we already know that all modules directly from cachedMinAvailableModules are in availableModules too
								const newSet = /** @type {ModuleSetPlus} */ (new Set(
									cachedMinAvailableModules
								));
								newSet.plus = EMPTY_SET;
								const iterator = cachedMinAvailableModules.plus[
									Symbol.iterator
								]();
								// fast forward add all modules until m
								/** @type {IteratorResult<Module>} */
								let it;
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (module === m) break;
									newSet.add(module);
								}
								// check the remaining modules before adding
								while (!(it = iterator.next()).done) {
									const module = it.value;
									if (
										availableModules.has(module) ||
										availableModules.plus.has(module)
									) {
										newSet.add(module);
									}
								}
								statForkedResultModulesCount += newSet.size;
								cachedMinAvailableModules = newSet;
								info.minAvailableModulesOwned = true;
								info.minAvailableModules = newSet;
								changed = true;
								continue merge;
							}
						}
					}
				}
			}
			availableModulesToBeMerged.length = 0;
			if (changed) {
				info.resultingAvailableModules = undefined;
				outdatedChunkGroupInfo.add(info);
			}
		}
		chunkGroupsForMerging.clear();
	};
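	// Illustrative note (not part of the original source): the net effect of the
	// merge above is an intersection over the combined sets. With hypothetical
	// sets cachedMinAvailableModules = {a, b} plus {c, d} and an incoming
	// availableModules = {a, c} plus {} (different plus parts, not owned), the
	// first missing module (b) triggers a fork and the result is the owned set
	// {a, c} with an empty plus part.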

	const processChunkGroupsForCombining = () => {
		loop: for (const info of chunkGroupsForCombining) {
			for (const source of info.availableSources) {
				if (!source.minAvailableModules) continue loop;
			}
			const availableModules = /** @type {ModuleSetPlus} */ (new Set());
			availableModules.plus = EMPTY_SET;
			const mergeSet = set => {
				if (set.size > availableModules.plus.size) {
					for (const item of availableModules.plus) availableModules.add(item);
					availableModules.plus = set;
				} else {
					for (const item of set) availableModules.add(item);
				}
			};
			// combine minAvailableModules from all resultingAvailableModules
			for (const source of info.availableSources) {
				const resultingAvailableModules = calculateResultingAvailableModules(
					source
				);
				mergeSet(resultingAvailableModules);
				mergeSet(resultingAvailableModules.plus);
			}
			info.minAvailableModules = availableModules;
			info.minAvailableModulesOwned = false;
			info.resultingAvailableModules = undefined;
			outdatedChunkGroupInfo.add(info);
		}
		chunkGroupsForCombining.clear();
	};

	const processOutdatedChunkGroupInfo = () => {
		statChunkGroupInfoUpdated += outdatedChunkGroupInfo.size;
		// Revisit skipped elements
		for (const info of outdatedChunkGroupInfo) {
			// 1. Reconsider skipped items
			if (info.skippedItems !== undefined) {
				const { minAvailableModules } = info;
				for (const module of info.skippedItems) {
					if (
						!minAvailableModules.has(module) &&
						!minAvailableModules.plus.has(module)
					) {
						queue.push({
							action: ADD_AND_ENTER_MODULE,
							block: module,
							module,
							chunk: info.chunkGroup.chunks[0],
							chunkGroup: info.chunkGroup,
							chunkGroupInfo: info
						});
						info.skippedItems.delete(module);
					}
				}
			}

			// 2. Reconsider children chunk groups
			if (info.children !== undefined) {
				statChildChunkGroupsReconnected += info.children.size;
				for (const cgi of info.children) {
					let connectList = queueConnect.get(info);
					if (connectList === undefined) {
						connectList = new Set();
						queueConnect.set(info, connectList);
					}
					connectList.add(cgi);
				}
			}

			// 3. Reconsider chunk groups for combining
			if (info.availableChildren !== undefined) {
				for (const cgi of info.availableChildren) {
					chunkGroupsForCombining.add(cgi);
				}
			}
		}
		outdatedChunkGroupInfo.clear();
	};

	// Iterative traversal of the Module graph
	// Recursive would be simpler to write but could result in Stack Overflows
	while (queue.length || queueConnect.size) {
		logger.time("visitModules: visiting");
		processQueue();
		logger.timeEnd("visitModules: visiting");

		if (chunkGroupsForCombining.size > 0) {
			logger.time("visitModules: combine available modules");
			processChunkGroupsForCombining();
			logger.timeEnd("visitModules: combine available modules");
		}

		if (queueConnect.size > 0) {
			logger.time("visitModules: calculating available modules");
			processConnectQueue();
			logger.timeEnd("visitModules: calculating available modules");

			if (chunkGroupsForMerging.size > 0) {
				logger.time("visitModules: merging available modules");
				processChunkGroupsForMerging();
				logger.timeEnd("visitModules: merging available modules");
			}
		}

		if (outdatedChunkGroupInfo.size > 0) {
			logger.time("visitModules: check modules for revisit");
			processOutdatedChunkGroupInfo();
			logger.timeEnd("visitModules: check modules for revisit");
		}

		// Run queueDelayed when all items of the queue are processed
		// This is important to get the global indexing correct
		// Async blocks should be processed after all sync blocks are processed
		if (queue.length === 0) {
			const tempQueue = queue;
			queue = queueDelayed.reverse();
			queueDelayed = tempQueue;
		}
	}

	logger.log(
		`${statProcessedQueueItems} queue items processed (${statProcessedBlocks} blocks)`
	);
	logger.log(`${statConnectedChunkGroups} chunk groups connected`);
	logger.log(
		`${statProcessedChunkGroupsForMerging} chunk groups processed for merging (${statMergedAvailableModuleSets} module sets, ${statForkedAvailableModules} forked, ${statForkedAvailableModulesCount} + ${statForkedAvailableModulesCountPlus} modules forked, ${statForkedMergedModulesCount} + ${statForkedMergedModulesCountPlus} modules merged into fork, ${statForkedResultModulesCount} resulting modules)`
	);
	logger.log(
		`${statChunkGroupInfoUpdated} chunk group info updated (${statChildChunkGroupsReconnected} already connected chunk groups reconnected)`
	);
};

/**
 *
 * @param {Compilation} compilation the compilation
 * @param {Set<DependenciesBlock>} blocksWithNestedBlocks flag for blocks that have nested blocks
 * @param {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} blockConnections connection for blocks
 * @param {Map<ChunkGroup, ChunkGroupInfo>} chunkGroupInfoMap mapping from chunk group to available modules
 */
const connectChunkGroups = (
	compilation,
	blocksWithNestedBlocks,
	blockConnections,
	chunkGroupInfoMap
) => {
	const { chunkGraph } = compilation;

	/**
	 * Helper function to check if all modules of a chunk are available
	 *
	 * @param {ChunkGroup} chunkGroup the chunkGroup to scan
	 * @param {ModuleSetPlus} availableModules the comparator set
	 * @returns {boolean} return true if all modules of a chunk are available
	 */
	const areModulesAvailable = (chunkGroup, availableModules) => {
		for (const chunk of chunkGroup.chunks) {
			for (const module of chunkGraph.getChunkModulesIterable(chunk)) {
				if (!availableModules.has(module) && !availableModules.plus.has(module))
					return false;
			}
		}
		return true;
	};

	// For each edge in the basic chunk graph
	for (const [block, connections] of blockConnections) {
		// 1. Check if connection is needed
		// When none of the dependencies need to be connected
		// we can skip all of them
		// It's not possible to filter each item so it doesn't create inconsistent
		// connections and modules can only create one version
		// TODO maybe decide this per runtime
		if (
			// TODO is this needed?
			!blocksWithNestedBlocks.has(block) &&
			connections.every(({ chunkGroup, originChunkGroupInfo }) =>
				areModulesAvailable(
					chunkGroup,
					originChunkGroupInfo.resultingAvailableModules
				)
			)
		) {
			continue;
		}

		// 2. Foreach edge
		for (let i = 0; i < connections.length; i++) {
			const { chunkGroup, originChunkGroupInfo } = connections[i];

			// 3. Connect block with chunk
			chunkGraph.connectBlockAndChunkGroup(block, chunkGroup);

			// 4. Connect chunk with parent
			connectChunkGroupParentAndChild(
				originChunkGroupInfo.chunkGroup,
				chunkGroup
			);
		}
	}
};

/**
 * Remove all unconnected chunk groups
 * @param {Compilation} compilation the compilation
 * @param {Iterable<ChunkGroup>} allCreatedChunkGroups all chunk groups that were created before
 */
const cleanupUnconnectedGroups = (compilation, allCreatedChunkGroups) => {
	const { chunkGraph } = compilation;

	for (const chunkGroup of allCreatedChunkGroups) {
		if (chunkGroup.getNumberOfParents() === 0) {
			for (const chunk of chunkGroup.chunks) {
				compilation.chunks.delete(chunk);
				chunkGraph.disconnectChunk(chunk);
			}
			chunkGraph.disconnectChunkGroup(chunkGroup);
			chunkGroup.remove();
		}
	}
};

/**
 * This method creates the Chunk graph from the Module graph
 * @param {Compilation} compilation the compilation
 * @param {Map<Entrypoint, Module[]>} inputEntrypointsAndModules chunk groups which are processed with the modules
 * @returns {void}
 */
const buildChunkGraph = (compilation, inputEntrypointsAndModules) => {
	const logger = compilation.getLogger("webpack.buildChunkGraph");

	// SHARED STATE

	/** @type {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} */
	const blockConnections = new Map();

	/** @type {Set<ChunkGroup>} */
	const allCreatedChunkGroups = new Set();

	/** @type {Map<ChunkGroup, ChunkGroupInfo>} */
	const chunkGroupInfoMap = new Map();

	/** @type {Set<DependenciesBlock>} */
	const blocksWithNestedBlocks = new Set();

	// PART ONE

	logger.time("visitModules");
	visitModules(
		logger,
		compilation,
		inputEntrypointsAndModules,
		chunkGroupInfoMap,
		blockConnections,
		blocksWithNestedBlocks,
		allCreatedChunkGroups
	);
	logger.timeEnd("visitModules");

	// PART TWO

	logger.time("connectChunkGroups");
	connectChunkGroups(
		compilation,
		blocksWithNestedBlocks,
		blockConnections,
		chunkGroupInfoMap
	);
	logger.timeEnd("connectChunkGroups");

	// Cleanup work

	logger.time("cleanup");
	cleanupUnconnectedGroups(compilation, allCreatedChunkGroups);
	logger.timeEnd("cleanup");
};

module.exports = buildChunkGraph;
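
// Illustrative usage sketch (not part of the original source): within webpack,
// Compilation.seal() is expected to call this roughly as
//   buildChunkGraph(compilation, chunkGraphInit);
// where chunkGraphInit is a Map<Entrypoint, Module[]> built from the entry
// dependencies; it is not intended to be called directly from user code.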