1 | import { __assign } from "tslib";
|
2 | import { equal } from "@wry/equality";
|
3 | import { DeepMerger } from "../utilities/index.js";
|
4 | import { mergeIncrementalData } from "../utilities/index.js";
|
5 | import { reobserveCacheFirst } from "./ObservableQuery.js";
|
6 | import { isNonEmptyArray, graphQLResultHasError, canUseWeakMap, } from "../utilities/index.js";
|
7 | import { NetworkStatus, isNetworkRequestInFlight } from "./networkStatus.js";
|
// Per-cache counter of "destructive" method calls (evict/modify/reset; see
// the constructor below, which wraps those three methods). shouldWrite
// consults this count to detect destructive changes since the last write.
// Keyed weakly when WeakMap is available so caches remain collectable.
var destructiveMethodCounts = new (canUseWeakMap ? WeakMap : Map)();
|
// Replace cache[methodName] with a wrapper that bumps the per-cache
// destructive-call counter before delegating to the original method.
// No-op when the named property is not a function.
function wrapDestructiveCacheMethod(cache, methodName) {
    var original = cache[methodName];
    if (typeof original !== "function")
        return;
    // @ts-expect-error this is just too generic to be typed correctly
    cache[methodName] = function () {
        // The %1e15 allows the count to wrap around to 0 safely every
        // quadrillion evictions, so there's no risk of overflow. To be
        // clear, this is more of a pedantic principle than something
        // that matters in any conceivable practical scenario.
        var bumped = (destructiveMethodCounts.get(cache) + 1) % 1e15;
        destructiveMethodCounts.set(cache, bumped);
        // @ts-expect-error this is just too generic to be typed correctly
        return original.apply(this, arguments);
    };
}
|
// Clear any pending notify timer scheduled by setDiff, so a queued
// QueryInfo.notify() call is dropped.
function cancelNotifyTimeout(info) {
    var pending = info["notifyTimeout"];
    if (pending) {
        clearTimeout(pending);
        info["notifyTimeout"] = void 0;
    }
}
|
31 | // A QueryInfo object represents a single query managed by the
|
32 | // QueryManager, which tracks all QueryInfo objects by queryId in its
|
33 | // this.queries Map. QueryInfo objects store the latest results and errors
|
34 | // for the given query, and are responsible for reporting those results to
|
35 | // the corresponding ObservableQuery, via the QueryInfo.notify method.
|
36 | // Results are reported asynchronously whenever setDiff marks the
|
37 | // QueryInfo object as dirty, though a call to the QueryManager's
|
38 | // broadcastQueries method may trigger the notification before it happens
|
39 | // automatically. This class used to be a simple interface type without
|
40 | // any field privacy or meaningful methods, which is why it still has so
|
41 | // many public fields. The effort to lock down and simplify the QueryInfo
|
42 | // interface is ongoing, and further improvements are welcome.
|
var QueryInfo = /** @class */ (function () {
    // Constructor: one QueryInfo per active query tracked by the
    // QueryManager. `queryId` defaults to a fresh id from the manager.
    function QueryInfo(queryManager, queryId) {
        if (queryId === void 0) { queryId = queryManager.generateQueryId(); }
        this.queryId = queryId;
        // Callbacks invoked by notify() whenever this query becomes dirty.
        this.listeners = new Set();
        this.document = null;
        this.lastRequestId = 1;
        this.stopped = false;
        this.dirty = false;
        this.observableQuery = null;
        var cache = (this.cache = queryManager.cache);
        // Track how often cache.evict is called, since we want eviction to
        // override the feud-stopping logic in the markResult method, by
        // causing shouldWrite to return true. Wrapping the cache.evict method
        // is a bit of a hack, but it saves us from having to make eviction
        // counting an official part of the ApolloCache API.
        if (!destructiveMethodCounts.has(cache)) {
            destructiveMethodCounts.set(cache, 0);
            wrapDestructiveCacheMethod(cache, "evict");
            wrapDestructiveCacheMethod(cache, "modify");
            wrapDestructiveCacheMethod(cache, "reset");
        }
    }
|
// (Re)initialize this QueryInfo from the given query descriptor, updating
// document, variables, error state, and network status. Returns `this`
// for chaining.
QueryInfo.prototype.init = function (query) {
    var sameVariables = equal(query.variables, this.variables);
    var status = query.networkStatus || NetworkStatus.loading;
    // A variables change on an already-initialized query that is not
    // currently loading is reported as setVariables.
    if (this.variables &&
        !sameVariables &&
        this.networkStatus !== NetworkStatus.loading) {
        status = NetworkStatus.setVariables;
    }
    if (!sameVariables) {
        // The memoized diff was computed for different variables; drop it.
        this.lastDiff = void 0;
    }
    this.document = query.document;
    this.variables = query.variables;
    this.networkError = null;
    this.graphQLErrors = this.graphQLErrors || [];
    this.networkStatus = status;
    if (query.observableQuery) {
        this.setObservableQuery(query.observableQuery);
    }
    if (query.lastRequestId) {
        this.lastRequestId = query.lastRequestId;
    }
    return this;
};
|
// Cancel any scheduled notify() and mark this query as clean.
QueryInfo.prototype.reset = function () {
    this.dirty = false;
    cancelNotifyTimeout(this);
};
|
// Forget the memoized diff so the next getDiff() re-reads the cache.
QueryInfo.prototype.resetDiff = function () {
    this.lastDiff = undefined;
};
|
// Return the latest cache diff for this query, reusing the memoized
// lastDiff when the diff options are unchanged. Queries with a "no-cache"
// fetch policy never read the cache, so they always report incomplete.
QueryInfo.prototype.getDiff = function () {
    var options = this.getDiffOptions();
    var memo = this.lastDiff;
    if (memo && equal(options, memo.options)) {
        return memo.diff;
    }
    this.updateWatch(this.variables);
    var oq = this.observableQuery;
    if (oq && oq.options.fetchPolicy === "no-cache") {
        return { complete: false };
    }
    var freshDiff = this.cache.diff(options);
    this.updateLastDiff(freshDiff, options);
    return freshDiff;
};
|
// Memoize `diff` (together with the options used to compute it) on
// this.lastDiff, or clear the memo entirely when `diff` is null/undefined.
QueryInfo.prototype.updateLastDiff = function (diff, options) {
    if (diff) {
        this.lastDiff = {
            diff: diff,
            options: options || this.getDiffOptions(),
        };
    }
    else {
        this.lastDiff = void 0;
    }
};
|
// Build the options object passed to cache.diff/cache.watch for this
// query, defaulting to the current variables. canonizeResults follows the
// attached ObservableQuery's options, when one is attached.
QueryInfo.prototype.getDiffOptions = function (variables) {
    if (variables === void 0) { variables = this.variables; }
    var oq = this.observableQuery;
    return {
        query: this.document,
        variables: variables,
        returnPartialData: true,
        optimistic: true,
        canonizeResults: oq == null ? void 0 : oq.options.canonizeResults,
    };
};
|
// Record a new cache diff; when its result differs from the previous one,
// mark this query dirty and schedule an asynchronous notify() (at most one
// timer pending at a time).
QueryInfo.prototype.setDiff = function (diff) {
    var self = this;
    var previous = this.lastDiff ? this.lastDiff.diff : void 0;
    this.updateLastDiff(diff);
    if (!this.dirty &&
        !equal(previous && previous.result, diff && diff.result)) {
        this.dirty = true;
        if (!this.notifyTimeout) {
            this.notifyTimeout = setTimeout(function () {
                return self.notify();
            }, 0);
        }
    }
};
|
// Attach (or detach, when `oq` is null) the ObservableQuery associated
// with this QueryInfo, replacing any previously registered oqListener with
// a new listener that delivers fresh diffs to the ObservableQuery whenever
// notify() fires.
QueryInfo.prototype.setObservableQuery = function (oq) {
    var _this = this;
    if (oq === this.observableQuery)
        return;
    // Unregister the listener created for the previous ObservableQuery.
    if (this.oqListener) {
        this.listeners.delete(this.oqListener);
    }
    this.observableQuery = oq;
    if (oq) {
        // Give the ObservableQuery a back-reference to this QueryInfo.
        oq["queryInfo"] = this;
        this.listeners.add((this.oqListener = function () {
            var diff = _this.getDiff();
            if (diff.fromOptimisticTransaction) {
                // If this diff came from an optimistic transaction, deliver the
                // current cache data to the ObservableQuery, but don't perform a
                // reobservation, since oq.reobserveCacheFirst might make a network
                // request, and we never want to trigger network requests in the
                // middle of optimistic updates.
                oq["observe"]();
            }
            else {
                // Otherwise, make the ObservableQuery "reobserve" the latest data
                // using a temporary fetch policy of "cache-first", so complete cache
                // results have a chance to be delivered without triggering additional
                // network requests, even when options.fetchPolicy is "network-only"
                // or "cache-and-network". All other fetch policies are preserved by
                // this method, and are handled by calling oq.reobserve(). If this
                // reobservation is spurious, isDifferentFromLastResult still has a
                // chance to catch it before delivery to ObservableQuery subscribers.
                reobserveCacheFirst(oq);
            }
        }));
    }
    else {
        delete this.oqListener;
    }
};
|
// Deliver this QueryInfo to every registered listener (when shouldNotify
// permits), then mark the query clean. Any pending notify timer is
// cancelled first so the broadcast happens at most once.
QueryInfo.prototype.notify = function () {
    var self = this;
    cancelNotifyTimeout(this);
    if (this.shouldNotify()) {
        this.listeners.forEach(function (listener) {
            listener(self);
        });
    }
    this.dirty = false;
};
|
// Decide whether notify() should actually broadcast: the query must be
// dirty and have listeners, and while a network request is in flight for
// an attached ObservableQuery we only notify fetch policies that deliver
// cache data anyway ("cache-only" / "cache-and-network").
QueryInfo.prototype.shouldNotify = function () {
    if (!this.dirty || this.listeners.size === 0) {
        return false;
    }
    if (this.observableQuery &&
        isNetworkRequestInFlight(this.networkStatus)) {
        var fetchPolicy = this.observableQuery.options.fetchPolicy;
        return fetchPolicy === "cache-only" || fetchPolicy === "cache-and-network";
    }
    return true;
};
|
// Permanently shut this QueryInfo down: cancel any pending notification
// and the active cache watch, restore the inert prototype cancel, and stop
// polling on the attached ObservableQuery (if any). Idempotent.
QueryInfo.prototype.stop = function () {
    if (this.stopped)
        return;
    this.stopped = true;
    // Cancel the pending notify timeout (reset also clears the dirty flag).
    this.reset();
    this.cancel();
    // Revert back to the no-op version of cancel inherited from
    // QueryInfo.prototype.
    this.cancel = QueryInfo.prototype.cancel;
    var oq = this.observableQuery;
    if (oq) {
        oq.stopPolling();
    }
};
|
// This method is a no-op by default, until/unless overridden by the
// updateWatch method (which replaces it with the function returned by
// cache.watch, and stop() restores this inert version afterwards).
QueryInfo.prototype.cancel = function () { };
|
// Begin (or refresh) the cache watch for this query. Skipped entirely for
// "no-cache" queries. When the computed watch options differ from the last
// registered ones, the previous watch is cancelled and a new one created;
// cache.watch's return value becomes the new cancel function.
QueryInfo.prototype.updateWatch = function (variables) {
    var self = this;
    if (variables === void 0) { variables = this.variables; }
    var oq = this.observableQuery;
    if (oq && oq.options.fetchPolicy === "no-cache") {
        return;
    }
    var watchOptions = __assign(__assign({}, this.getDiffOptions(variables)), {
        watcher: this,
        callback: function (diff) { return self.setDiff(diff); },
    });
    if (!this.lastWatch || !equal(watchOptions, this.lastWatch)) {
        this.cancel();
        this.lastWatch = watchOptions;
        this.cancel = this.cache.watch(watchOptions);
    }
};
|
// Forget the last network result written to the cache, so the next
// markResult writes unconditionally (shouldWrite will return true).
QueryInfo.prototype.resetLastWrite = function () {
    this.lastWrite = undefined;
};
|
// Return true unless this exact result (same data, same variables) was
// already written to the cache and no destructive cache method has run
// since — in which case the rewrite is skipped to stop broadcast feuds
// (see the long explanation inside markResult).
QueryInfo.prototype.shouldWrite = function (result, variables) {
    var lastWrite = this.lastWrite;
    if (!lastWrite) {
        return true;
    }
    // If cache.evict has been called since the last time we wrote this
    // data into the cache, there's a chance writing this result into
    // the cache will repair what was evicted.
    if (lastWrite.dmCount !== destructiveMethodCounts.get(this.cache)) {
        return true;
    }
    return !(equal(variables, lastWrite.variables) &&
        equal(result.data, lastWrite.result.data));
};
|
/**
 * Incorporate a network result for this query: merge incremental/deferred
 * chunks into the last known data, record GraphQL errors, and (policy
 * permitting) write the result into the cache inside a transaction,
 * re-reading it back so result.data reflects the cache's view.
 *
 * @param result network result (shallow-copied before any mutation here)
 * @param document the query document used for cache.writeQuery
 * @param options carries fetchPolicy, variables, and errorPolicy
 * @param cacheWriteBehavior 0 = FORBID (skip cache), 1 = OVERWRITE
 * @returns the (shallow-copied, possibly rewritten) result object
 */
QueryInfo.prototype.markResult = function (result, document, options, cacheWriteBehavior) {
    var _this = this;
    // Shallow copy so the caller's result object is never mutated below.
    result = __assign({}, result);
    var merger = new DeepMerger();
    var graphQLErrors = isNonEmptyArray(result.errors) ? result.errors.slice(0) : [];
    // Cancel the pending notify timeout (if it exists) to prevent extraneous network
    // requests. To allow future notify timeouts, diff and dirty are reset as well.
    this.reset();
    if ("incremental" in result && isNonEmptyArray(result.incremental)) {
        var mergedData = mergeIncrementalData(this.getDiff().result, result);
        result.data = mergedData;
        // Detect the first chunk of a deferred query and merge it with existing
        // cache data. This ensures a `cache-first` fetch policy that returns
        // partial cache data or a `cache-and-network` fetch policy that already
        // has full data in the cache does not complain when trying to merge the
        // initial deferred server data with existing cache data.
    }
    else if ("hasNext" in result && result.hasNext) {
        var diff = this.getDiff();
        result.data = merger.merge(diff.result, result.data);
    }
    this.graphQLErrors = graphQLErrors;
    if (options.fetchPolicy === "no-cache") {
        // no-cache results are never written, but we still memoize the diff
        // so getDiff can serve it back.
        this.updateLastDiff({ result: result.data, complete: true }, this.getDiffOptions(options.variables));
    }
    else if (cacheWriteBehavior !== 0 /* CacheWriteBehavior.FORBID */) {
        if (shouldWriteResult(result, options.errorPolicy)) {
            // Using a transaction here so we have a chance to read the result
            // back from the cache before the watch callback fires as a result
            // of writeQuery, so we can store the new diff quietly and ignore
            // it when we receive it redundantly from the watch callback.
            this.cache.performTransaction(function (cache) {
                if (_this.shouldWrite(result, options.variables)) {
                    cache.writeQuery({
                        query: document,
                        data: result.data,
                        variables: options.variables,
                        overwrite: cacheWriteBehavior === 1 /* CacheWriteBehavior.OVERWRITE */,
                    });
                    _this.lastWrite = {
                        // Make a shallow defensive copy of the result object, in case we
                        // later modify result.data in place, since we don't want
                        // that mutation affecting the saved lastWrite.result.data.
                        result: __assign({}, result),
                        variables: options.variables,
                        dmCount: destructiveMethodCounts.get(_this.cache),
                    };
                }
                else {
                    // If result is the same as the last result we received from
                    // the network (and the variables match too), avoid writing
                    // result into the cache again. The wisdom of skipping this
                    // cache write is far from obvious, since any cache write
                    // could be the one that puts the cache back into a desired
                    // state, fixing corruption or missing data. However, if we
                    // always write every network result into the cache, we enable
                    // feuds between queries competing to update the same data in
                    // incompatible ways, which can lead to an endless cycle of
                    // cache broadcasts and useless network requests. As with any
                    // feud, eventually one side must step back from the brink,
                    // letting the other side(s) have the last word(s). There may
                    // be other points where we could break this cycle, such as
                    // silencing the broadcast for cache.writeQuery (not a good
                    // idea, since it just delays the feud a bit) or somehow
                    // avoiding the network request that just happened (also bad,
                    // because the server could return useful new data). All
                    // options considered, skipping this cache write seems to be
                    // the least damaging place to break the cycle, because it
                    // reflects the intuition that we recently wrote this exact
                    // result into the cache, so the cache *should* already/still
                    // contain this data. If some other query has clobbered that
                    // data in the meantime, that's too bad, but there will be no
                    // winners if every query blindly reverts to its own version
                    // of the data. This approach also gives the network a chance
                    // to return new data, which will be written into the cache as
                    // usual, notifying only those queries that are directly
                    // affected by the cache updates, as usual. In the future, an
                    // even more sophisticated cache could perhaps prevent or
                    // mitigate the clobbering somehow, but that would make this
                    // particular cache write even less important, and thus
                    // skipping it would be even safer than it is today.
                    if (_this.lastDiff && _this.lastDiff.diff.complete) {
                        // Reuse data from the last good (complete) diff that we
                        // received, when possible.
                        result.data = _this.lastDiff.diff.result;
                        return;
                    }
                    // If the previous this.diff was incomplete, fall through to
                    // re-reading the latest data with cache.diff, below.
                }
                var diffOptions = _this.getDiffOptions(options.variables);
                var diff = cache.diff(diffOptions);
                // In case the QueryManager stops this QueryInfo before its
                // results are delivered, it's important to avoid restarting the
                // cache watch when markResult is called. We also avoid updating
                // the watch if we are writing a result that doesn't match the current
                // variables to avoid race conditions from broadcasting the wrong
                // result.
                if (!_this.stopped && equal(_this.variables, options.variables)) {
                    // Any time we're about to update this.diff, we need to make
                    // sure we've started watching the cache.
                    _this.updateWatch(options.variables);
                }
                // If we're allowed to write to the cache, update result.data to be
                // the result as re-read from the cache, rather than the raw network
                // result. Set without setDiff to avoid triggering a notify call,
                // since we have other ways of notifying for this result.
                _this.updateLastDiff(diff, diffOptions);
                result.data = diff.result;
            });
        }
        else {
            this.lastWrite = void 0;
        }
    }
    return result;
};
|
// Transition this query to NetworkStatus.ready, clearing any previous
// network error. Returns the new network status.
QueryInfo.prototype.markReady = function () {
    this.networkError = null;
    this.networkStatus = NetworkStatus.ready;
    return this.networkStatus;
};
|
// Record an error on this query: set error status, forget the last cache
// write, cancel pending notifications via reset(), and copy over whichever
// of graphQLErrors/networkError the error object carries. Returns the
// error unchanged so callers can rethrow or forward it.
QueryInfo.prototype.markError = function (error) {
    this.networkStatus = NetworkStatus.error;
    this.lastWrite = void 0;
    this.reset();
    var graphQLErrors = error.graphQLErrors;
    if (graphQLErrors) {
        this.graphQLErrors = graphQLErrors;
    }
    var networkError = error.networkError;
    if (networkError) {
        this.networkError = networkError;
    }
    return error;
};
|
    return QueryInfo;
}()); // end of QueryInfo IIFE
export { QueryInfo };
|
// Decide whether a network result should be written to the cache: always
// when it has no GraphQL errors, and, under the error-tolerant policies
// ("ignore"/"all"), also when it carries data despite errors. The policy
// defaults to "none".
export function shouldWriteResult(result, errorPolicy) {
    if (errorPolicy === void 0) { errorPolicy = "none"; }
    if (!graphQLResultHasError(result)) {
        return true;
    }
    var tolerant = errorPolicy === "ignore" || errorPolicy === "all";
    return Boolean(tolerant && result.data);
}
|
388 | //# sourceMappingURL=QueryInfo.js.map |
\ | No newline at end of file |