/*
  MIT License http://www.opensource.org/licenses/mit-license.php
  Author Tobias Koppers @sokra
*/

"use strict";

const AsyncDependencyToInitialChunkError = require("./AsyncDependencyToInitialChunkError");
const { connectChunkGroupParentAndChild } = require("./GraphHelpers");
const ModuleGraphConnection = require("./ModuleGraphConnection");
const { getEntryRuntime, mergeRuntime } = require("./util/runtime");

/** @typedef {import("./AsyncDependenciesBlock")} AsyncDependenciesBlock */
/** @typedef {import("./Chunk")} Chunk */
/** @typedef {import("./ChunkGroup")} ChunkGroup */
/** @typedef {import("./Compilation")} Compilation */
/** @typedef {import("./DependenciesBlock")} DependenciesBlock */
/** @typedef {import("./Dependency")} Dependency */
/** @typedef {import("./Entrypoint")} Entrypoint */
/** @typedef {import("./Module")} Module */
/** @typedef {import("./ModuleGraph")} ModuleGraph */
/** @typedef {import("./ModuleGraphConnection").ConnectionState} ConnectionState */
/** @typedef {import("./logging/Logger").Logger} Logger */
/** @typedef {import("./util/runtime").RuntimeSpec} RuntimeSpec */

/**
 * @typedef {Object} QueueItem
 * @property {number} action
 * @property {DependenciesBlock} block
 * @property {Module} module
 * @property {Chunk} chunk
 * @property {ChunkGroup} chunkGroup
 * @property {ChunkGroupInfo} chunkGroupInfo
 */

/** @typedef {Set<Module> & { plus: Set<Module> }} ModuleSetPlus */
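// Illustrative sketch (not part of this file's logic): a ModuleSetPlus is a
// plain Set with a second, shared "plus" Set attached, so a large parent set
// can be reused by reference instead of being copied. Membership has to be
// checked against both parts. With hypothetical Module instances a and b:
//
//   const set = /** @type {ModuleSetPlus} */ (new Set([a]));
//   set.plus = new Set([b]); // shared part, treated as read-only here
//   const has = m => set.has(m) || set.plus.has(m); // true for both a and b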

/**
 * @typedef {Object} ChunkGroupInfo
 * @property {ChunkGroup} chunkGroup the chunk group
 * @property {RuntimeSpec} runtime the runtimes
 * @property {ModuleSetPlus} minAvailableModules current minimal set of modules available at this point
 * @property {boolean} minAvailableModulesOwned true, if minAvailableModules is owned and can be modified
 * @property {ModuleSetPlus[]} availableModulesToBeMerged enqueued updates to the minimal set of available modules
 * @property {Set<Module>=} skippedItems modules that were skipped because the module is already available in parent chunks (need to reconsider when minAvailableModules is shrinking)
 * @property {Set<[Module, ModuleGraphConnection[]]>=} skippedModuleConnections referenced modules that were skipped because they were not active in this runtime
 * @property {ModuleSetPlus} resultingAvailableModules set of modules available including modules from this chunk group
 * @property {Set<ChunkGroupInfo>} children set of children chunk groups that will be revisited when availableModules shrinks
 * @property {Set<ChunkGroupInfo>} availableSources set of chunk groups that are the source for minAvailableModules
 * @property {Set<ChunkGroupInfo>} availableChildren set of chunk groups which depend on this chunk group as availableSource
 * @property {number} preOrderIndex next pre order index
 * @property {number} postOrderIndex next post order index
 */

/**
 * @typedef {Object} BlockChunkGroupConnection
 * @property {ChunkGroupInfo} originChunkGroupInfo origin chunk group
 * @property {ChunkGroup} chunkGroup referenced chunk group
 */

const EMPTY_SET = /** @type {ModuleSetPlus} */ (new Set());
EMPTY_SET.plus = EMPTY_SET;

/**
 * @param {ModuleSetPlus} a first set
 * @param {ModuleSetPlus} b second set
 * @returns {number} cmp
 */
const bySetSize = (a, b) => {
  return b.size + b.plus.size - a.size - a.plus.size;
};
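// Example (assumed sizes): used with Array#sort, this comparator orders the
// set with the largest combined size first. For combined sizes [5, 2, 9] the
// result is [9, 5, 2], mirroring [5, 2, 9].sort((a, b) => b - a). The merge
// loop in processChunkGroupsForMerging below relies on this descending order.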

/**
 *
 * @param {ModuleGraphConnection[]} connections list of connections
 * @param {RuntimeSpec} runtime for which runtime
 * @returns {ConnectionState} connection state
 */
const getActiveStateOfConnections = (connections, runtime) => {
  let merged = connections[0].getActiveState(runtime);
  if (merged === true) return true;
  for (let i = 1; i < connections.length; i++) {
    const c = connections[i];
    merged = ModuleGraphConnection.addConnectionStates(
      merged,
      c.getActiveState(runtime)
    );
    if (merged === true) return true;
  }
  return merged;
};
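// Rough sketch of the merge semantics (an assumption based on
// ModuleGraphConnection.addConnectionStates, not verified here): as soon as
// one connection is fully active for the given runtime the combined state is
// `true` and the loop short-circuits; only when no connection is active does
// the merged state stay `false`; anything else yields an intermediate
// ConnectionState such as a transitive-only marker.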

/**
 * Extracts block to modules mapping from all modules
 * @param {Compilation} compilation the compilation
 * @returns {Map<DependenciesBlock, Map<Module, ModuleGraphConnection[]>>} the mapping block to modules
 */
const extractBlockModulesMap = compilation => {
  const { moduleGraph } = compilation;

  /** @type {Map<DependenciesBlock, Map<Module, ModuleGraphConnection[]>>} */
  const blockModulesMap = new Map();

  const blockQueue = new Set();

  for (const module of compilation.modules) {
    /** @type {WeakMap<Dependency, ModuleGraphConnection>} */
    let moduleMap;

    for (const connection of moduleGraph.getOutgoingConnections(module)) {
      const d = connection.dependency;
      // We skip connections without dependency
      if (!d) continue;
      const m = connection.module;
      // We skip connections without Module pointer
      if (!m) continue;
      // We skip weak connections
      if (connection.weak) continue;
      const state = connection.getActiveState(undefined);
      // We skip inactive connections
      if (state === false) continue;
      // Store Dependency to Module mapping in local map
      // to allow to access it faster compared to
      // moduleGraph.getConnection()
      if (moduleMap === undefined) {
        moduleMap = new WeakMap();
      }
      moduleMap.set(connection.dependency, connection);
    }

    blockQueue.clear();
    blockQueue.add(module);
    for (const block of blockQueue) {
      let modules;

      if (moduleMap !== undefined && block.dependencies) {
        for (const dep of block.dependencies) {
          const connection = moduleMap.get(dep);
          if (connection !== undefined) {
            const { module } = connection;
            if (modules === undefined) {
              modules = new Map();
              blockModulesMap.set(block, modules);
            }
            const old = modules.get(module);
            if (old !== undefined) {
              old.push(connection);
            } else {
              modules.set(module, [connection]);
            }
          }
        }
      }

      if (block.blocks) {
        for (const b of block.blocks) {
          blockQueue.add(b);
        }
      }
    }
  }

  return blockModulesMap;
};
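// Shape of the result for a hypothetical module `a` that imports `b` through
// two dependencies and has one async block requesting `c` (names are purely
// illustrative):
//
//   Map {
//     a        => Map { b => [connectionB1, connectionB2] },
//     blockOfA => Map { c => [connectionC] }
//   }
//
// i.e. every DependenciesBlock maps to the modules it references, each with
// all of its non-weak, possibly-active connections.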

/**
 *
 * @param {Logger} logger a logger
 * @param {Compilation} compilation the compilation
 * @param {Map<Entrypoint, Module[]>} inputEntrypointsAndModules chunk groups which are processed with the modules
 * @param {Map<ChunkGroup, ChunkGroupInfo>} chunkGroupInfoMap mapping from chunk group to available modules
 * @param {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} blockConnections connection for blocks
 * @param {Set<DependenciesBlock>} blocksWithNestedBlocks flag for blocks that have nested blocks
 * @param {Set<ChunkGroup>} allCreatedChunkGroups filled with all chunk groups that are created here
 */
const visitModules = (
  logger,
  compilation,
  inputEntrypointsAndModules,
  chunkGroupInfoMap,
  blockConnections,
  blocksWithNestedBlocks,
  allCreatedChunkGroups
) => {
  const { moduleGraph, chunkGraph } = compilation;

  logger.time("visitModules: prepare");
  const blockModulesMap = extractBlockModulesMap(compilation);

  let statProcessedQueueItems = 0;
  let statProcessedBlocks = 0;
  let statConnectedChunkGroups = 0;
  let statProcessedChunkGroupsForMerging = 0;
  let statMergedAvailableModuleSets = 0;
  let statForkedAvailableModules = 0;
  let statForkedAvailableModulesCount = 0;
  let statForkedAvailableModulesCountPlus = 0;
  let statForkedMergedModulesCount = 0;
  let statForkedMergedModulesCountPlus = 0;
  let statForkedResultModulesCount = 0;
  let statChunkGroupInfoUpdated = 0;
  let statChildChunkGroupsReconnected = 0;

  let nextChunkGroupIndex = 0;
  let nextFreeModulePreOrderIndex = 0;
  let nextFreeModulePostOrderIndex = 0;

  /** @type {Map<DependenciesBlock, ChunkGroupInfo>} */
  const blockChunkGroups = new Map();

  /** @type {Map<string, ChunkGroupInfo>} */
  const namedChunkGroups = new Map();

  /** @type {Map<string, ChunkGroupInfo>} */
  const namedAsyncEntrypoints = new Map();

  const ADD_AND_ENTER_ENTRY_MODULE = 0;
  const ADD_AND_ENTER_MODULE = 1;
  const ENTER_MODULE = 2;
  const PROCESS_BLOCK = 3;
  const PROCESS_ENTRY_BLOCK = 4;
  const LEAVE_MODULE = 5;
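  // Rough lifecycle of a queue item, inferred from the switch in processQueue
  // below (the numeric values are only tags on QueueItem.action):
  //
  //   ADD_AND_ENTER_ENTRY_MODULE -> ADD_AND_ENTER_MODULE -> ENTER_MODULE
  //     -> PROCESS_BLOCK / PROCESS_ENTRY_BLOCK ... -> LEAVE_MODULE
  //
  // The fallthrough cases in that switch implement the arrows above.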

  /** @type {QueueItem[]} */
  let queue = [];

  /** @type {Map<ChunkGroupInfo, Set<ChunkGroupInfo>>} */
  const queueConnect = new Map();
  /** @type {Set<ChunkGroupInfo>} */
  const chunkGroupsForCombining = new Set();

  // Fill queue with entrypoint modules
  // Create ChunkGroupInfo for entrypoints
  for (const [chunkGroup, modules] of inputEntrypointsAndModules) {
    const runtime = getEntryRuntime(
      compilation,
      chunkGroup.name,
      chunkGroup.options
    );
    /** @type {ChunkGroupInfo} */
    const chunkGroupInfo = {
      chunkGroup,
      runtime,
      minAvailableModules: undefined,
      minAvailableModulesOwned: false,
      availableModulesToBeMerged: [],
      skippedItems: undefined,
      resultingAvailableModules: undefined,
      children: undefined,
      availableSources: undefined,
      availableChildren: undefined,
      preOrderIndex: 0,
      postOrderIndex: 0
    };
    chunkGroup.index = nextChunkGroupIndex++;
    if (chunkGroup.getNumberOfParents() > 0) {
      // minAvailableModules for child entrypoints are unknown yet, set to undefined.
      // This means no module is added until other sets are merged into
      // this minAvailableModules (by the parent entrypoints)
      const skippedItems = new Set();
      for (const module of modules) {
        skippedItems.add(module);
      }
      chunkGroupInfo.skippedItems = skippedItems;
      chunkGroupsForCombining.add(chunkGroupInfo);
    } else {
      // The application may start here: We start with an empty list of available modules
      chunkGroupInfo.minAvailableModules = EMPTY_SET;
      const chunk = chunkGroup.getEntrypointChunk();
      for (const module of modules) {
        queue.push({
          action: ADD_AND_ENTER_MODULE,
          block: module,
          module,
          chunk,
          chunkGroup,
          chunkGroupInfo
        });
      }
    }
    chunkGroupInfoMap.set(chunkGroup, chunkGroupInfo);
    if (chunkGroup.name) {
      namedChunkGroups.set(chunkGroup.name, chunkGroupInfo);
    }
  }
  // Fill availableSources with parent-child dependencies between entrypoints
  for (const chunkGroupInfo of chunkGroupsForCombining) {
    const { chunkGroup } = chunkGroupInfo;
    chunkGroupInfo.availableSources = new Set();
    for (const parent of chunkGroup.parentsIterable) {
      const parentChunkGroupInfo = chunkGroupInfoMap.get(parent);
      chunkGroupInfo.availableSources.add(parentChunkGroupInfo);
      if (parentChunkGroupInfo.availableChildren === undefined) {
        parentChunkGroupInfo.availableChildren = new Set();
      }
      parentChunkGroupInfo.availableChildren.add(chunkGroupInfo);
    }
  }
  // pop() is used to read from the queue
  // so it needs to be reversed to be iterated in
  // the correct order
  queue.reverse();

  /** @type {Set<ChunkGroupInfo>} */
  const outdatedChunkGroupInfo = new Set();
  /** @type {Set<ChunkGroupInfo>} */
  const chunkGroupsForMerging = new Set();
  /** @type {QueueItem[]} */
  let queueDelayed = [];

  logger.timeEnd("visitModules: prepare");

  /** @type {[Module, ModuleGraphConnection[]][]} */
  const skipConnectionBuffer = [];
  /** @type {Module[]} */
  const skipBuffer = [];
  /** @type {QueueItem[]} */
  const queueBuffer = [];

  /** @type {Module} */
  let module;
  /** @type {Chunk} */
  let chunk;
  /** @type {ChunkGroup} */
  let chunkGroup;
  /** @type {DependenciesBlock} */
  let block;
  /** @type {ChunkGroupInfo} */
  let chunkGroupInfo;

  // For each async Block in graph
  /**
   * @param {AsyncDependenciesBlock} b iterating over each Async DepBlock
   * @returns {void}
   */
  const iteratorBlock = b => {
    // 1. We create a chunk group with a single chunk in it for this Block
    // but only once (blockChunkGroups map)
    let cgi = blockChunkGroups.get(b);
    /** @type {ChunkGroup} */
    let c;
    /** @type {Entrypoint} */
    let entrypoint;
    const entryOptions = b.groupOptions && b.groupOptions.entryOptions;
    if (cgi === undefined) {
      const chunkName = (b.groupOptions && b.groupOptions.name) || b.chunkName;
      if (entryOptions) {
        cgi = namedAsyncEntrypoints.get(chunkName);
        if (!cgi) {
          entrypoint = compilation.addAsyncEntrypoint(
            entryOptions,
            module,
            b.loc,
            b.request
          );
          entrypoint.index = nextChunkGroupIndex++;
          cgi = {
            chunkGroup: entrypoint,
            runtime: entrypoint.options.runtime || entrypoint.name,
            minAvailableModules: EMPTY_SET,
            minAvailableModulesOwned: false,
            availableModulesToBeMerged: [],
            skippedItems: undefined,
            resultingAvailableModules: undefined,
            children: undefined,
            availableSources: undefined,
            availableChildren: undefined,
            preOrderIndex: 0,
            postOrderIndex: 0
          };
          chunkGroupInfoMap.set(entrypoint, cgi);

          chunkGraph.connectBlockAndChunkGroup(b, entrypoint);
          if (chunkName) {
            namedAsyncEntrypoints.set(chunkName, cgi);
          }
        } else {
          entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
          // TODO merge entryOptions
          entrypoint.addOrigin(module, b.loc, b.request);
          chunkGraph.connectBlockAndChunkGroup(b, entrypoint);
        }

        // 2. We enqueue the DependenciesBlock for traversal
        queueDelayed.push({
          action: PROCESS_ENTRY_BLOCK,
          block: b,
          module: module,
          chunk: entrypoint.chunks[0],
          chunkGroup: entrypoint,
          chunkGroupInfo: cgi
        });
      } else {
        cgi = namedChunkGroups.get(chunkName);
        if (!cgi) {
          c = compilation.addChunkInGroup(
            b.groupOptions || b.chunkName,
            module,
            b.loc,
            b.request
          );
          c.index = nextChunkGroupIndex++;
          cgi = {
            chunkGroup: c,
            runtime: chunkGroupInfo.runtime,
            minAvailableModules: undefined,
            minAvailableModulesOwned: undefined,
            availableModulesToBeMerged: [],
            skippedItems: undefined,
            resultingAvailableModules: undefined,
            children: undefined,
            availableSources: undefined,
            availableChildren: undefined,
            preOrderIndex: 0,
            postOrderIndex: 0
          };
          allCreatedChunkGroups.add(c);
          chunkGroupInfoMap.set(c, cgi);
          if (chunkName) {
            namedChunkGroups.set(chunkName, cgi);
          }
        } else {
          c = cgi.chunkGroup;
          if (c.isInitial()) {
            compilation.errors.push(
              new AsyncDependencyToInitialChunkError(chunkName, module, b.loc)
            );
            c = chunkGroup;
          }
          c.addOptions(b.groupOptions);
          c.addOrigin(module, b.loc, b.request);
        }
        blockConnections.set(b, []);
      }
      blockChunkGroups.set(b, cgi);
    } else if (entryOptions) {
      entrypoint = /** @type {Entrypoint} */ (cgi.chunkGroup);
    } else {
      c = cgi.chunkGroup;
    }

    if (c !== undefined) {
      // 2. We store the connection for the block
      // to connect it later if needed
      blockConnections.get(b).push({
        originChunkGroupInfo: chunkGroupInfo,
        chunkGroup: c
      });

      // 3. We enqueue the chunk group info creation/updating
      let connectList = queueConnect.get(chunkGroupInfo);
      if (connectList === undefined) {
        connectList = new Set();
        queueConnect.set(chunkGroupInfo, connectList);
      }
      connectList.add(cgi);

      // TODO check if this really needs to be done for each traversal
      // or if it is enough when it's queued when created
      // 4. We enqueue the DependenciesBlock for traversal
      queueDelayed.push({
        action: PROCESS_BLOCK,
        block: b,
        module: module,
        chunk: c.chunks[0],
        chunkGroup: c,
        chunkGroupInfo: cgi
      });
    } else {
      chunkGroupInfo.chunkGroup.addAsyncEntrypoint(entrypoint);
    }
  };

  /**
   * @param {DependenciesBlock} block the block
   * @returns {void}
   */
  const processBlock = block => {
    statProcessedBlocks++;
    // get prepared block info
    const blockModules = blockModulesMap.get(block);

    if (blockModules !== undefined) {
      const { minAvailableModules, runtime } = chunkGroupInfo;
      // Buffer items because the order needs to be reversed to get indices correct
      // Traverse all referenced modules
      for (const entry of blockModules) {
        const [refModule, connections] = entry;
        if (chunkGraph.isModuleInChunk(refModule, chunk)) {
          // skip early if already connected
          continue;
        }
        const activeState = getActiveStateOfConnections(connections, runtime);
        if (activeState !== true) {
          skipConnectionBuffer.push(entry);
          if (activeState === false) continue;
        }
        if (
          activeState === true &&
          (minAvailableModules.has(refModule) ||
            minAvailableModules.plus.has(refModule))
        ) {
          // already in parent chunks, skip it for now
          skipBuffer.push(refModule);
          continue;
        }
        // enqueue, then add and enter to be in the correct order
        // this is relevant with circular dependencies
        queueBuffer.push({
          action: activeState === true ? ADD_AND_ENTER_MODULE : PROCESS_BLOCK,
          block: refModule,
          module: refModule,
          chunk,
          chunkGroup,
          chunkGroupInfo
        });
      }
      // Add buffered items in reverse order
      if (skipConnectionBuffer.length > 0) {
        let { skippedModuleConnections } = chunkGroupInfo;
        if (skippedModuleConnections === undefined) {
          chunkGroupInfo.skippedModuleConnections = skippedModuleConnections = new Set();
        }
        for (let i = skipConnectionBuffer.length - 1; i >= 0; i--) {
          skippedModuleConnections.add(skipConnectionBuffer[i]);
        }
        skipConnectionBuffer.length = 0;
      }
      if (skipBuffer.length > 0) {
        let { skippedItems } = chunkGroupInfo;
        if (skippedItems === undefined) {
          chunkGroupInfo.skippedItems = skippedItems = new Set();
        }
        for (let i = skipBuffer.length - 1; i >= 0; i--) {
          skippedItems.add(skipBuffer[i]);
        }
        skipBuffer.length = 0;
      }
      if (queueBuffer.length > 0) {
        for (let i = queueBuffer.length - 1; i >= 0; i--) {
          queue.push(queueBuffer[i]);
        }
        queueBuffer.length = 0;
      }
    }

    // Traverse all Blocks
    for (const b of block.blocks) {
      iteratorBlock(b);
    }

    if (block.blocks.length > 0 && module !== block) {
      blocksWithNestedBlocks.add(block);
    }
  };

  /**
   * @param {DependenciesBlock} block the block
   * @returns {void}
   */
  const processEntryBlock = block => {
    statProcessedBlocks++;
    // get prepared block info
    const blockModules = blockModulesMap.get(block);

    if (blockModules !== undefined) {
      // Traverse all referenced modules
      for (const [refModule, connections] of blockModules) {
        const activeState = getActiveStateOfConnections(connections, undefined);
        // enqueue, then add and enter to be in the correct order
        // this is relevant with circular dependencies
        queueBuffer.push({
          action:
            activeState === true ? ADD_AND_ENTER_ENTRY_MODULE : PROCESS_BLOCK,
          block: refModule,
          module: refModule,
          chunk,
          chunkGroup,
          chunkGroupInfo
        });
      }
      // Add buffered items in reverse order
      if (queueBuffer.length > 0) {
        for (let i = queueBuffer.length - 1; i >= 0; i--) {
          queue.push(queueBuffer[i]);
        }
        queueBuffer.length = 0;
      }
    }

    // Traverse all Blocks
    for (const b of block.blocks) {
      iteratorBlock(b);
    }

    if (block.blocks.length > 0 && module !== block) {
      blocksWithNestedBlocks.add(block);
    }
  };

  const processQueue = () => {
    while (queue.length) {
      statProcessedQueueItems++;
      const queueItem = queue.pop();
      module = queueItem.module;
      block = queueItem.block;
      chunk = queueItem.chunk;
      chunkGroup = queueItem.chunkGroup;
      chunkGroupInfo = queueItem.chunkGroupInfo;

      switch (queueItem.action) {
        case ADD_AND_ENTER_ENTRY_MODULE:
          chunkGraph.connectChunkAndEntryModule(
            chunk,
            module,
            /** @type {Entrypoint} */ (chunkGroup)
          );
        // fallthrough
        case ADD_AND_ENTER_MODULE: {
          if (chunkGraph.isModuleInChunk(module, chunk)) {
            // already connected, skip it
            break;
          }
          // We connect Module and Chunk
          chunkGraph.connectChunkAndModule(chunk, module);
        }
        // fallthrough
        case ENTER_MODULE: {
          const index = chunkGroup.getModulePreOrderIndex(module);
          if (index === undefined) {
            chunkGroup.setModulePreOrderIndex(
              module,
              chunkGroupInfo.preOrderIndex++
            );
          }

          if (
            moduleGraph.setPreOrderIndexIfUnset(
              module,
              nextFreeModulePreOrderIndex
            )
          ) {
            nextFreeModulePreOrderIndex++;
          }

          // reuse queueItem
          queueItem.action = LEAVE_MODULE;
          queue.push(queueItem);
        }
        // fallthrough
        case PROCESS_BLOCK: {
          processBlock(block);
          break;
        }
        case PROCESS_ENTRY_BLOCK: {
          processEntryBlock(block);
          break;
        }
        case LEAVE_MODULE: {
          const index = chunkGroup.getModulePostOrderIndex(module);
          if (index === undefined) {
            chunkGroup.setModulePostOrderIndex(
              module,
              chunkGroupInfo.postOrderIndex++
            );
          }

          if (
            moduleGraph.setPostOrderIndexIfUnset(
              module,
              nextFreeModulePostOrderIndex
            )
          ) {
            nextFreeModulePostOrderIndex++;
          }
          break;
        }
      }
    }
  };

  const calculateResultingAvailableModules = chunkGroupInfo => {
    if (chunkGroupInfo.resultingAvailableModules)
      return chunkGroupInfo.resultingAvailableModules;

    const minAvailableModules = chunkGroupInfo.minAvailableModules;

    // Create a new Set of available modules at this point
    // We want to be as lazy as possible. There are multiple ways of doing this:
    // Note that resultingAvailableModules is stored as "(a) + (b)" as it's a ModuleSetPlus
    // - resultingAvailableModules = (modules of chunk) + (minAvailableModules + minAvailableModules.plus)
    // - resultingAvailableModules = (minAvailableModules + modules of chunk) + (minAvailableModules.plus)
    // We choose one depending on the size of minAvailableModules vs minAvailableModules.plus

    let resultingAvailableModules;
    if (minAvailableModules.size > minAvailableModules.plus.size) {
      // resultingAvailableModules = (modules of chunk) + (minAvailableModules + minAvailableModules.plus)
      resultingAvailableModules = /** @type {Set<Module> & {plus: Set<Module>}} */ (new Set());
      for (const module of minAvailableModules.plus)
        minAvailableModules.add(module);
      minAvailableModules.plus = EMPTY_SET;
      resultingAvailableModules.plus = minAvailableModules;
      chunkGroupInfo.minAvailableModulesOwned = false;
    } else {
      // resultingAvailableModules = (minAvailableModules + modules of chunk) + (minAvailableModules.plus)
      resultingAvailableModules = /** @type {Set<Module> & {plus: Set<Module>}} */ (new Set(
        minAvailableModules
      ));
      resultingAvailableModules.plus = minAvailableModules.plus;
    }

    // add the modules from the chunk group to the set
    for (const chunk of chunkGroupInfo.chunkGroup.chunks) {
      for (const m of chunkGraph.getChunkModulesIterable(chunk)) {
        resultingAvailableModules.add(m);
      }
    }
    return (chunkGroupInfo.resultingAvailableModules = resultingAvailableModules);
  };
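  // Illustrative example (hypothetical modules a, b, c): with
  // minAvailableModules = {a}, minAvailableModules.plus = {b} and a chunk
  // group whose chunks contain c, the second branch above yields
  //
  //   resultingAvailableModules      -> Set {a, c}
  //   resultingAvailableModules.plus -> the existing plus set {b}
  //
  // so the (typically large) plus part is shared by reference, not copied.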

  const processConnectQueue = () => {
    // Figure out new parents for chunk groups
    // to get new available modules for these children
    for (const [chunkGroupInfo, targets] of queueConnect) {
      // 1. Add new targets to the list of children
      if (chunkGroupInfo.children === undefined) {
        chunkGroupInfo.children = targets;
      } else {
        for (const target of targets) {
          chunkGroupInfo.children.add(target);
        }
      }

      // 2. Calculate resulting available modules
      const resultingAvailableModules = calculateResultingAvailableModules(
        chunkGroupInfo
      );

      const runtime = chunkGroupInfo.runtime;

      // 3. Update chunk group info
      for (const target of targets) {
        target.availableModulesToBeMerged.push(resultingAvailableModules);
        chunkGroupsForMerging.add(target);
        const oldRuntime = target.runtime;
        const newRuntime = mergeRuntime(oldRuntime, runtime);
        if (oldRuntime !== newRuntime) {
          target.runtime = newRuntime;
          outdatedChunkGroupInfo.add(target);
        }
      }

      statConnectedChunkGroups += targets.size;
    }
    queueConnect.clear();
  };

  const processChunkGroupsForMerging = () => {
    statProcessedChunkGroupsForMerging += chunkGroupsForMerging.size;

    // Execute the merge
    for (const info of chunkGroupsForMerging) {
      const availableModulesToBeMerged = info.availableModulesToBeMerged;
      let cachedMinAvailableModules = info.minAvailableModules;

      statMergedAvailableModuleSets += availableModulesToBeMerged.length;

      // 1. Get minimal available modules
      // It doesn't make sense to traverse a chunk again with more available modules.
      // This step calculates the minimal available modules and skips traversal when
      // the list didn't shrink.
      if (availableModulesToBeMerged.length > 1) {
        availableModulesToBeMerged.sort(bySetSize);
      }
      let changed = false;
      merge: for (const availableModules of availableModulesToBeMerged) {
        if (cachedMinAvailableModules === undefined) {
          cachedMinAvailableModules = availableModules;
          info.minAvailableModules = cachedMinAvailableModules;
          info.minAvailableModulesOwned = false;
          changed = true;
        } else {
          if (info.minAvailableModulesOwned) {
            // We own it and can modify it
            if (cachedMinAvailableModules.plus === availableModules.plus) {
              for (const m of cachedMinAvailableModules) {
                if (!availableModules.has(m)) {
                  cachedMinAvailableModules.delete(m);
                  changed = true;
                }
              }
            } else {
              for (const m of cachedMinAvailableModules) {
                if (!availableModules.has(m) && !availableModules.plus.has(m)) {
                  cachedMinAvailableModules.delete(m);
                  changed = true;
                }
              }
              for (const m of cachedMinAvailableModules.plus) {
                if (!availableModules.has(m) && !availableModules.plus.has(m)) {
                  // We can't remove modules from the plus part
                  // so we need to merge plus into the normal part to allow modifying it
                  const iterator = cachedMinAvailableModules.plus[
                    Symbol.iterator
                  ]();
                  // fast forward add all modules until m
                  /** @type {IteratorResult<Module>} */
                  let it;
                  while (!(it = iterator.next()).done) {
                    const module = it.value;
                    if (module === m) break;
                    cachedMinAvailableModules.add(module);
                  }
                  // check the remaining modules before adding
                  while (!(it = iterator.next()).done) {
                    const module = it.value;
                    if (
                      availableModules.has(module) ||
                      availableModules.plus.has(module)
                    ) {
                      cachedMinAvailableModules.add(module);
                    }
                  }
                  cachedMinAvailableModules.plus = EMPTY_SET;
                  changed = true;
                  continue merge;
                }
              }
            }
          } else if (cachedMinAvailableModules.plus === availableModules.plus) {
            // Common and fast case when the plus part is shared
            // We only need to care about the normal part
            if (availableModules.size < cachedMinAvailableModules.size) {
              // the new availableModules is smaller so it's faster to
              // fork from the new availableModules
              statForkedAvailableModules++;
              statForkedAvailableModulesCount += availableModules.size;
              statForkedMergedModulesCount += cachedMinAvailableModules.size;
              // construct a new Set as intersection of cachedMinAvailableModules and availableModules
              const newSet = /** @type {ModuleSetPlus} */ (new Set());
              newSet.plus = availableModules.plus;
              for (const m of availableModules) {
                if (cachedMinAvailableModules.has(m)) {
                  newSet.add(m);
                }
              }
              statForkedResultModulesCount += newSet.size;
              cachedMinAvailableModules = newSet;
              info.minAvailableModulesOwned = true;
              info.minAvailableModules = newSet;
              changed = true;
              continue merge;
            }
            for (const m of cachedMinAvailableModules) {
              if (!availableModules.has(m)) {
                // cachedMinAvailableModules needs to be modified
                // but we don't own it
                statForkedAvailableModules++;
                statForkedAvailableModulesCount +=
                  cachedMinAvailableModules.size;
                statForkedMergedModulesCount += availableModules.size;
                // construct a new Set as intersection of cachedMinAvailableModules and availableModules
                // as the plus part is equal we can just take over this one
                const newSet = /** @type {ModuleSetPlus} */ (new Set());
                newSet.plus = availableModules.plus;
                const iterator = cachedMinAvailableModules[Symbol.iterator]();
                // fast forward add all modules until m
                /** @type {IteratorResult<Module>} */
                let it;
                while (!(it = iterator.next()).done) {
                  const module = it.value;
                  if (module === m) break;
                  newSet.add(module);
                }
                // check the remaining modules before adding
                while (!(it = iterator.next()).done) {
                  const module = it.value;
                  if (availableModules.has(module)) {
                    newSet.add(module);
                  }
                }
                statForkedResultModulesCount += newSet.size;
                cachedMinAvailableModules = newSet;
                info.minAvailableModulesOwned = true;
                info.minAvailableModules = newSet;
                changed = true;
                continue merge;
              }
            }
          } else {
            for (const m of cachedMinAvailableModules) {
              if (!availableModules.has(m) && !availableModules.plus.has(m)) {
                // cachedMinAvailableModules needs to be modified
                // but we don't own it
                statForkedAvailableModules++;
                statForkedAvailableModulesCount +=
                  cachedMinAvailableModules.size;
                statForkedAvailableModulesCountPlus +=
                  cachedMinAvailableModules.plus.size;
                statForkedMergedModulesCount += availableModules.size;
                statForkedMergedModulesCountPlus += availableModules.plus.size;
                // construct a new Set as intersection of cachedMinAvailableModules and availableModules
                const newSet = /** @type {ModuleSetPlus} */ (new Set());
                newSet.plus = EMPTY_SET;
                const iterator = cachedMinAvailableModules[Symbol.iterator]();
                // fast forward add all modules until m
                /** @type {IteratorResult<Module>} */
                let it;
                while (!(it = iterator.next()).done) {
                  const module = it.value;
                  if (module === m) break;
                  newSet.add(module);
                }
                // check the remaining modules before adding
                while (!(it = iterator.next()).done) {
                  const module = it.value;
                  if (
                    availableModules.has(module) ||
                    availableModules.plus.has(module)
                  ) {
                    newSet.add(module);
                  }
                }
                // also check all modules in cachedMinAvailableModules.plus
                for (const module of cachedMinAvailableModules.plus) {
                  if (
                    availableModules.has(module) ||
                    availableModules.plus.has(module)
                  ) {
                    newSet.add(module);
                  }
                }
                statForkedResultModulesCount += newSet.size;
                cachedMinAvailableModules = newSet;
                info.minAvailableModulesOwned = true;
                info.minAvailableModules = newSet;
                changed = true;
                continue merge;
              }
            }
            for (const m of cachedMinAvailableModules.plus) {
              if (!availableModules.has(m) && !availableModules.plus.has(m)) {
                // cachedMinAvailableModules needs to be modified
                // but we don't own it
                statForkedAvailableModules++;
                statForkedAvailableModulesCount +=
                  cachedMinAvailableModules.size;
                statForkedAvailableModulesCountPlus +=
                  cachedMinAvailableModules.plus.size;
                statForkedMergedModulesCount += availableModules.size;
                statForkedMergedModulesCountPlus += availableModules.plus.size;
                // construct a new Set as intersection of cachedMinAvailableModules and availableModules
                // we already know that all modules directly from cachedMinAvailableModules are in availableModules too
                const newSet = /** @type {ModuleSetPlus} */ (new Set(
                  cachedMinAvailableModules
                ));
                newSet.plus = EMPTY_SET;
                const iterator = cachedMinAvailableModules.plus[
                  Symbol.iterator
                ]();
                // fast forward add all modules until m
                /** @type {IteratorResult<Module>} */
                let it;
                while (!(it = iterator.next()).done) {
                  const module = it.value;
                  if (module === m) break;
                  newSet.add(module);
                }
                // check the remaining modules before adding
                while (!(it = iterator.next()).done) {
                  const module = it.value;
                  if (
                    availableModules.has(module) ||
                    availableModules.plus.has(module)
                  ) {
                    newSet.add(module);
                  }
                }
                statForkedResultModulesCount += newSet.size;
                cachedMinAvailableModules = newSet;
                info.minAvailableModulesOwned = true;
                info.minAvailableModules = newSet;
                changed = true;
                continue merge;
              }
            }
          }
        }
      }
      availableModulesToBeMerged.length = 0;
      if (changed) {
        info.resultingAvailableModules = undefined;
        outdatedChunkGroupInfo.add(info);
      }
    }
    chunkGroupsForMerging.clear();
  };
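  // Worked example of the merge above (hypothetical modules a, b, c): if
  // info.minAvailableModules is {a, b} with an empty plus part and an incoming
  // availableModules set is {a, c}, the intersection {a} becomes the new,
  // owned minAvailableModules, `changed` is set, and the chunk group is added
  // to outdatedChunkGroupInfo so its skipped items and children get revisited.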

  const processChunkGroupsForCombining = () => {
    for (const info of chunkGroupsForCombining) {
      for (const source of info.availableSources) {
        if (!source.minAvailableModules) {
          chunkGroupsForCombining.delete(info);
          break;
        }
      }
    }
    for (const info of chunkGroupsForCombining) {
      const availableModules = /** @type {ModuleSetPlus} */ (new Set());
      availableModules.plus = EMPTY_SET;
      const mergeSet = set => {
        if (set.size > availableModules.plus.size) {
          for (const item of availableModules.plus) availableModules.add(item);
          availableModules.plus = set;
        } else {
          for (const item of set) availableModules.add(item);
        }
      };
      // combine minAvailableModules from all resultingAvailableModules
      for (const source of info.availableSources) {
        const resultingAvailableModules = calculateResultingAvailableModules(
          source
        );
        mergeSet(resultingAvailableModules);
        mergeSet(resultingAvailableModules.plus);
      }
      info.minAvailableModules = availableModules;
      info.minAvailableModulesOwned = false;
      info.resultingAvailableModules = undefined;
      outdatedChunkGroupInfo.add(info);
    }
    chunkGroupsForCombining.clear();
  };
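  // Sketch of mergeSet's strategy (assumed sizes): when combining parent
  // result sets of sizes 1000 and 3, the size-1000 set is kept by reference
  // as availableModules.plus and only the 3 small entries are copied into the
  // owned part, which keeps combining cheap for large parents.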

  const processOutdatedChunkGroupInfo = () => {
    statChunkGroupInfoUpdated += outdatedChunkGroupInfo.size;
    // Revisit skipped elements
    for (const info of outdatedChunkGroupInfo) {
      // 1. Reconsider skipped items
      if (info.skippedItems !== undefined) {
        const { minAvailableModules } = info;
        for (const module of info.skippedItems) {
          if (
            !minAvailableModules.has(module) &&
            !minAvailableModules.plus.has(module)
          ) {
            queue.push({
              action: ADD_AND_ENTER_MODULE,
              block: module,
              module,
              chunk: info.chunkGroup.chunks[0],
              chunkGroup: info.chunkGroup,
              chunkGroupInfo: info
            });
            info.skippedItems.delete(module);
          }
        }
      }

      // 2. Reconsider skipped connections
      if (info.skippedModuleConnections !== undefined) {
        const { minAvailableModules, runtime } = info;
        for (const entry of info.skippedModuleConnections) {
          const [module, connections] = entry;
          const activeState = getActiveStateOfConnections(connections, runtime);
          if (activeState === false) continue;
          if (activeState === true) {
            info.skippedModuleConnections.delete(entry);
          }
          if (
            activeState === true &&
            (minAvailableModules.has(module) ||
              minAvailableModules.plus.has(module))
          ) {
            info.skippedItems.add(module);
            continue;
          }
          queue.push({
            action: activeState === true ? ADD_AND_ENTER_MODULE : PROCESS_BLOCK,
            block: module,
            module,
            chunk: info.chunkGroup.chunks[0],
            chunkGroup: info.chunkGroup,
            chunkGroupInfo: info
          });
        }
      }

      // 3. Reconsider children chunk groups
      if (info.children !== undefined) {
        statChildChunkGroupsReconnected += info.children.size;
        for (const cgi of info.children) {
          let connectList = queueConnect.get(info);
          if (connectList === undefined) {
            connectList = new Set();
            queueConnect.set(info, connectList);
          }
          connectList.add(cgi);
        }
      }

      // 4. Reconsider chunk groups for combining
      if (info.availableChildren !== undefined) {
        for (const cgi of info.availableChildren) {
          chunkGroupsForCombining.add(cgi);
        }
      }
    }
    outdatedChunkGroupInfo.clear();
  };

  // Iterative traversal of the Module graph
  // Recursive would be simpler to write but could result in Stack Overflows
  while (queue.length || queueConnect.size) {
    logger.time("visitModules: visiting");
    processQueue();
    logger.timeEnd("visitModules: visiting");

    if (chunkGroupsForCombining.size > 0) {
      logger.time("visitModules: combine available modules");
      processChunkGroupsForCombining();
      logger.timeEnd("visitModules: combine available modules");
    }

    if (queueConnect.size > 0) {
      logger.time("visitModules: calculating available modules");
      processConnectQueue();
      logger.timeEnd("visitModules: calculating available modules");

      if (chunkGroupsForMerging.size > 0) {
        logger.time("visitModules: merging available modules");
        processChunkGroupsForMerging();
        logger.timeEnd("visitModules: merging available modules");
      }
    }

    if (outdatedChunkGroupInfo.size > 0) {
      logger.time("visitModules: check modules for revisit");
      processOutdatedChunkGroupInfo();
      logger.timeEnd("visitModules: check modules for revisit");
    }

    // Run queueDelayed when all items of the queue are processed
    // This is important to get the global indexing correct
    // Async blocks should be processed after all sync blocks are processed
    if (queue.length === 0) {
      const tempQueue = queue;
      queue = queueDelayed.reverse();
      queueDelayed = tempQueue;
    }
  }

  logger.log(
    `${statProcessedQueueItems} queue items processed (${statProcessedBlocks} blocks)`
  );
  logger.log(`${statConnectedChunkGroups} chunk groups connected`);
  logger.log(
    `${statProcessedChunkGroupsForMerging} chunk groups processed for merging (${statMergedAvailableModuleSets} module sets, ${statForkedAvailableModules} forked, ${statForkedAvailableModulesCount} + ${statForkedAvailableModulesCountPlus} modules forked, ${statForkedMergedModulesCount} + ${statForkedMergedModulesCountPlus} modules merged into fork, ${statForkedResultModulesCount} resulting modules)`
  );
  logger.log(
    `${statChunkGroupInfoUpdated} chunk group info updated (${statChildChunkGroupsReconnected} already connected chunk groups reconnected)`
  );
};

/**
 *
 * @param {Compilation} compilation the compilation
 * @param {Set<DependenciesBlock>} blocksWithNestedBlocks flag for blocks that have nested blocks
 * @param {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} blockConnections connection for blocks
 * @param {Map<ChunkGroup, ChunkGroupInfo>} chunkGroupInfoMap mapping from chunk group to available modules
 */
const connectChunkGroups = (
  compilation,
  blocksWithNestedBlocks,
  blockConnections,
  chunkGroupInfoMap
) => {
  const { chunkGraph } = compilation;

  /**
   * Helper function to check if all modules of a chunk are available
   *
   * @param {ChunkGroup} chunkGroup the chunkGroup to scan
   * @param {ModuleSetPlus} availableModules the comparator set
   * @returns {boolean} return true if all modules of a chunk are available
   */
  const areModulesAvailable = (chunkGroup, availableModules) => {
    for (const chunk of chunkGroup.chunks) {
      for (const module of chunkGraph.getChunkModulesIterable(chunk)) {
        if (!availableModules.has(module) && !availableModules.plus.has(module))
          return false;
      }
    }
    return true;
  };
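  // Example (hypothetical): if availableModules is {a} plus {b} and the chunk
  // group's only chunk contains exactly a and b, this returns true and the
  // block-to-chunk-group connection below can be skipped, because loading the
  // referenced chunk group would add no new modules.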

  // For each edge in the basic chunk graph
  for (const [block, connections] of blockConnections) {
    // 1. Check if connection is needed
    // When none of the dependencies need to be connected
    // we can skip all of them
    // It's not possible to filter each item so it doesn't create inconsistent
    // connections and modules can only create one version
    // TODO maybe decide this per runtime
    if (
      // TODO is this needed?
      !blocksWithNestedBlocks.has(block) &&
      connections.every(({ chunkGroup, originChunkGroupInfo }) =>
        areModulesAvailable(
          chunkGroup,
          originChunkGroupInfo.resultingAvailableModules
        )
      )
    ) {
      continue;
    }

    // 2. Foreach edge
    for (let i = 0; i < connections.length; i++) {
      const { chunkGroup, originChunkGroupInfo } = connections[i];

      // 3. Connect block with chunk
      chunkGraph.connectBlockAndChunkGroup(block, chunkGroup);

      // 4. Connect chunk with parent
      connectChunkGroupParentAndChild(
        originChunkGroupInfo.chunkGroup,
        chunkGroup
      );
    }
  }
};

/**
 * Remove all unconnected chunk groups
 * @param {Compilation} compilation the compilation
 * @param {Iterable<ChunkGroup>} allCreatedChunkGroups all chunk groups that were created before
 */
const cleanupUnconnectedGroups = (compilation, allCreatedChunkGroups) => {
  const { chunkGraph } = compilation;

  for (const chunkGroup of allCreatedChunkGroups) {
    if (chunkGroup.getNumberOfParents() === 0) {
      for (const chunk of chunkGroup.chunks) {
        compilation.chunks.delete(chunk);
        chunkGraph.disconnectChunk(chunk);
      }
      chunkGraph.disconnectChunkGroup(chunkGroup);
      chunkGroup.remove();
    }
  }
};

/**
 * This method creates the Chunk graph from the Module graph
 * @param {Compilation} compilation the compilation
 * @param {Map<Entrypoint, Module[]>} inputEntrypointsAndModules chunk groups which are processed with the modules
 * @returns {void}
 */
const buildChunkGraph = (compilation, inputEntrypointsAndModules) => {
  const logger = compilation.getLogger("webpack.buildChunkGraph");

  // SHARED STATE

  /** @type {Map<AsyncDependenciesBlock, BlockChunkGroupConnection[]>} */
  const blockConnections = new Map();

  /** @type {Set<ChunkGroup>} */
  const allCreatedChunkGroups = new Set();

  /** @type {Map<ChunkGroup, ChunkGroupInfo>} */
  const chunkGroupInfoMap = new Map();

  /** @type {Set<DependenciesBlock>} */
  const blocksWithNestedBlocks = new Set();

  // PART ONE

  logger.time("visitModules");
  visitModules(
    logger,
    compilation,
    inputEntrypointsAndModules,
    chunkGroupInfoMap,
    blockConnections,
    blocksWithNestedBlocks,
    allCreatedChunkGroups
  );
  logger.timeEnd("visitModules");

  // PART TWO

  logger.time("connectChunkGroups");
  connectChunkGroups(
    compilation,
    blocksWithNestedBlocks,
    blockConnections,
    chunkGroupInfoMap
  );
  logger.timeEnd("connectChunkGroups");

  for (const [chunkGroup, chunkGroupInfo] of chunkGroupInfoMap) {
    for (const chunk of chunkGroup.chunks)
      chunk.runtime = mergeRuntime(chunk.runtime, chunkGroupInfo.runtime);
  }

  // Cleanup work

  logger.time("cleanup");
  cleanupUnconnectedGroups(compilation, allCreatedChunkGroups);
  logger.timeEnd("cleanup");
};

module.exports = buildChunkGraph;
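
// Hedged usage sketch (not part of this file): webpack's Compilation calls
// this during seal(), roughly like
//
//   const buildChunkGraph = require("./buildChunkGraph");
//   // inputEntrypointsAndModules: Map<Entrypoint, Module[]> derived from the
//   // configured entries; its exact construction lives in Compilation.
//   buildChunkGraph(compilation, inputEntrypointsAndModules);
//
// Afterwards compilation.chunkGraph records which module ended up in which chunk.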