1"use strict";
2
3var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
4exports.__esModule = true;
5exports.guessSafeChunkSize = guessSafeChunkSize;
6exports.readFromCache = readFromCache;
7exports.writeToCache = writeToCache;
8var _path = _interopRequireDefault(require("path"));
9var _os = _interopRequireDefault(require("os"));
10var _v = _interopRequireDefault(require("v8"));
11var _fsExtra = require("fs-extra");
12var _glob = require("glob");
13var _gatsbyCoreUtils = require("gatsby-core-utils");
14var _reporter = _interopRequireDefault(require("gatsby-cli/lib/reporter"));
15const getReduxCacheFolder = () =>
16// This is a function for the case that somebody does a process.chdir (#19800)
17_path.default.join(process.cwd(), `.cache/redux`);
18const getWorkerSlicesFolder = () =>
19// This is a function for the case that somebody does a process.chdir (#19800)
20_path.default.join(process.cwd(), `.cache/worker`);
21function reduxSharedFile(dir) {
22 return _path.default.join(dir, `redux.rest.state`);
23}
24function reduxChunkedNodesFilePrefix(dir) {
25 return _path.default.join(dir, `redux.node.state_`);
26}
27function reduxChunkedPagesFilePrefix(dir) {
28 return _path.default.join(dir, `redux.page.state_`);
29}
30function reduxWorkerSlicesPrefix(dir) {
31 return _path.default.join(dir, `redux.worker.slices_`);
32}
function readFromCache(slices, optionalPrefix = ``) {
  // The cache is stored in two steps: the nodes and pages in chunks, and the rest
  // in one file. First we revive the rest, then we inject the nodes and pages into
  // that obj (if any). Each chunk is stored in its own file; this circumvents max
  // buffer lengths for sites with a _lot_ of content. Since all nodes / pages go
  // into a Map, the order of reading them is not relevant.

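  // For illustration only (hypothetical listing), the on-disk layout looks like:
  //   .cache/redux/redux.rest.state    <- everything except nodes and pages
  //   .cache/redux/redux.node.state_0  <- first chunk of [id, node] entries
  //   .cache/redux/redux.node.state_1
  //   .cache/redux/redux.page.state_0  <- first chunk of [path, page] entries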
  let cacheFolder = getReduxCacheFolder();
  if (slices) {
    cacheFolder = getWorkerSlicesFolder();
    return _v.default.deserialize((0, _fsExtra.readFileSync)(reduxWorkerSlicesPrefix(cacheFolder) + `${optionalPrefix}_` + (0, _gatsbyCoreUtils.createContentDigest)(slices)));
  }
  const obj = _v.default.deserialize((0, _fsExtra.readFileSync)(reduxSharedFile(cacheFolder)));

  // Note: at 1M nodes, this will be 1M/chunkSize chunks (ie. 1m/10k=100)
  const nodesChunks = (0, _glob.sync)(reduxChunkedNodesFilePrefix(cacheFolder) + `*`).map(file => _v.default.deserialize((0, _fsExtra.readFileSync)(file)));
  const nodes = [].concat(...nodesChunks);
  if (!nodesChunks.length) {
    _reporter.default.info(`Cache exists but contains no nodes. There should be at least some nodes available so it seems the cache was corrupted. Disregarding the cache and proceeding as if there was none.`);
    return {};
  }
  obj.nodes = new Map(nodes);

  // Note: at 1M pages, this will be 1M/chunkSize chunks (ie. 1m/10k=100)
  const pagesChunks = (0, _glob.sync)(reduxChunkedPagesFilePrefix(cacheFolder) + `*`).map(file => _v.default.deserialize((0, _fsExtra.readFileSync)(file)));
  const pages = [].concat(...pagesChunks);
  obj.pages = new Map(pages);
  return obj;
}
function guessSafeChunkSize(values, showMaxSizeWarning = false) {
  // Pick a few evenly spaced elements and measure their size, then pick a chunk
  // size ceiling based on the worst case. Each test takes time so there's a
  // trade-off. This attempts to prevent small sites with very large pages from
  // OOMing. This heuristic could still fail if it happens to only grab the
  // smallest nodes.
  // TODO: test a few nodes per each type instead of from all nodes

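  // Worked example (illustrative numbers): with 1,000,000 values the stride is
  // Math.ceil(1000000 / 11) = 90910, so the loop below samples indexes 0, 90910,
  // 181820, ... for 11 measurements in total.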
  const nodesToTest = 11; // Very arbitrary number
  const valueCount = values.length;
  const step = Math.max(1, Math.ceil(valueCount / nodesToTest));
  let maxSize = 0;
  for (let i = 0; i < valueCount; i += step) {
    const size = _v.default.serialize(values[i]).length;
    maxSize = Math.max(size, maxSize);
  }

  // Sends a warning once if the largest sampled value exceeds the approx 500kb limit
  if (showMaxSizeWarning && maxSize > 500000) {
    _reporter.default.warn(`The size of at least one page context chunk exceeded 500kb, which could lead to degraded performance. Consider putting less data in the page context.`);
  }

  // Max size of a Buffer is 2gb (yeah, we're assuming 64bit system)
  // https://stackoverflow.com/questions/8974375/whats-the-maximum-size-of-a-node-js-buffer
  // Use 1.5gb as the target ceiling, allowing for some margin of error
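  // e.g. if the largest sampled value serializes to ~150,000 bytes, this
  // returns Math.floor(1610612736 / 150000) = 10737 entries per chunk
  // (illustrative numbers only)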
  return Math.floor(1.5 * 1024 * 1024 * 1024 / maxSize);
}
function prepareCacheFolder(targetDir, contents) {
  // Temporarily save the nodes and pages and remove them from the main redux store
  // This prevents an OOM when the page nodes collectively contain too much data
  const nodesMap = contents.nodes;
  contents.nodes = undefined;
  const pagesMap = contents.pages;
  contents.pages = undefined;
  (0, _fsExtra.writeFileSync)(reduxSharedFile(targetDir), _v.default.serialize(contents));

  // Now restore them on the redux store
  contents.nodes = nodesMap;
  contents.pages = pagesMap;
  if (nodesMap) {
    if (nodesMap.size === 0) {
      // Nodes are actually stored in LMDB.
      // But we need at least one node in redux state to work around the warning above:
104 // "Cache exists but contains no nodes..." (when loading cache).
105 const dummyNode = {
106 id: `dummy-node-id`,
107 parent: ``,
108 children: [],
109 internal: {
110 type: `DummyNode`,
111 contentDigest: `dummy-node`,
112 counter: 0,
113 owner: ``
114 },
115 fields: []
116 };
117 nodesMap.set(dummyNode.id, dummyNode);
118 }
119 // Now store the nodes separately, chunk size determined by a heuristic
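    // e.g. with 25,000 entries and a chunkSize of 10,000 this writes three
    // files, redux.node.state_0 through redux.node.state_2, the last one only
    // partially filled (illustrative numbers)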
    const values = [...nodesMap.entries()];
    const chunkSize = guessSafeChunkSize(values);
    const chunks = Math.ceil(values.length / chunkSize);
    for (let i = 0; i < chunks; ++i) {
      (0, _fsExtra.writeFileSync)(reduxChunkedNodesFilePrefix(targetDir) + i, _v.default.serialize(values.slice(i * chunkSize, i * chunkSize + chunkSize)));
    }
  }
  if (pagesMap) {
    // Now store the pages separately, chunk size determined by a heuristic
    const values = [...pagesMap.entries()];
    const chunkSize = guessSafeChunkSize(values, true);
    const chunks = Math.ceil(values.length / chunkSize);
    for (let i = 0; i < chunks; ++i) {
      (0, _fsExtra.writeFileSync)(reduxChunkedPagesFilePrefix(targetDir) + i, _v.default.serialize(values.slice(i * chunkSize, i * chunkSize + chunkSize)));
    }
  }
}
function safelyRenameToBak(cacheFolder) {
  // Basically try to work around the potential of previous renamed caches
  // not being removed for whatever reason. _That_ should not be a blocker.
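  // e.g. `.cache/redux` becomes `.cache/redux.bak`, or `.cache/redux.bak1`,
  // `.cache/redux.bak2`, ... if leftovers from earlier runs still exist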
  const tmpSuffix = `.bak`;
  let suffixCounter = 0;
  let bakName = cacheFolder + tmpSuffix; // Start without number

  while ((0, _fsExtra.existsSync)(bakName)) {
    ++suffixCounter;
    bakName = cacheFolder + tmpSuffix + suffixCounter;
  }
  (0, _fsExtra.moveSync)(cacheFolder, bakName);
  return bakName;
}
function writeToCache(contents, slices, optionalPrefix = ``) {
  // Writing the "slices" into the shared "redux" folder introduces subtle bugs:
  // e.g. when the whole folder gets replaced, some "slices" are lost.
  // Thus they get written to a dedicated "worker" folder instead.
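  // e.g. a call with optionalPrefix `123` (hypothetical value) ends up at
  // .cache/worker/redux.worker.slices_123_<createContentDigest(slices)>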
  if (slices) {
    const cacheFolder = getWorkerSlicesFolder();
    (0, _fsExtra.outputFileSync)(reduxWorkerSlicesPrefix(cacheFolder) + `${optionalPrefix}_` + (0, _gatsbyCoreUtils.createContentDigest)(slices), _v.default.serialize(contents));
    return;
  }

  // Note: this should be a transactional operation. So work in a tmp dir and
  // make sure the cache cannot be left in a corrupted state due to errors.

  const tmpDir = (0, _fsExtra.mkdtempSync)(_path.default.join(_os.default.tmpdir(), `reduxcache`)); // linux / windows

  prepareCacheFolder(tmpDir, contents);

  // Replace old cache folder with new. If the first rename fails, the cache
  // is just stale. If the second rename fails, the cache is empty. In either
  // case the cache is not left in a corrupt state.
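  //
  // Sketch of the swap (paths are illustrative):
  //   1. <os tmpdir>/reduxcacheXXXXXX  <- new cache fully written here first
  //   2. .cache/redux -> .cache/redux.bak               (old cache parked)
  //   3. <os tmpdir>/reduxcacheXXXXXX -> .cache/redux   (new cache swapped in)
  //   4. remove .cache/redux.bak                        (best effort, may fail)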

  const reduxCacheFolder = getReduxCacheFolder();
  let bakName = ``;
  if ((0, _fsExtra.existsSync)(reduxCacheFolder)) {
    // Don't drop until after swapping over (renaming is less likely to fail)
    bakName = safelyRenameToBak(reduxCacheFolder);
  }

  // The redux cache folder should now not exist so we can rename our tmp to it
  (0, _fsExtra.moveSync)(tmpDir, reduxCacheFolder);

  // Now try to yolorimraf the old cache folder
  try {
    if (bakName !== ``) {
      (0, _fsExtra.removeSync)(bakName);
    }
  } catch (e) {
    _reporter.default.warn(`Non-fatal: Deleting the old cache folder failed, left behind in \`${bakName}\`. Rimraf reported this error: ${e}`);
  }
}
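
// Hedged usage sketch (not part of this module): roughly how the redux store
// is expected to consume these helpers. `store`, `sliceState` and the slice
// names are hypothetical placeholders.
//
//   const { readFromCache, writeToCache } = require(`./persist`);
//   writeToCache(store.getState());          // full cache -> .cache/redux
//   const state = readFromCache();           // `{}` signals a corrupted cache
//   writeToCache(sliceState, [`html`], `1`); // slice cache -> .cache/worker
//   const slice = readFromCache([`html`], `1`);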
//# sourceMappingURL=persist.js.map