1import { _getProvider, getApp as t, _removeServiceInstance as e, _registerComponent as n, registerVersion as s, SDK_VERSION as i } from "@firebase/app";
2
3import { Component as r } from "@firebase/component";
4
5import { Logger as o, LogLevel as u } from "@firebase/logger";
6
7import { FirebaseError as c, getUA as a, isIndexedDBAvailable as h, base64 as l, isSafari as f, createMockUserToken as d, getModularInstance as _, deepEqual as w, getDefaultEmulatorHostnameAndPort as m } from "@firebase/util";
8
9import { XhrIo as g, EventType as y, ErrorCode as p, createWebChannelTransport as I, getStatEventTarget as T, FetchXmlHttpFactory as E, WebChannel as A, Event as R, Stat as b } from "@firebase/webchannel-wrapper";
10
11const P = "@firebase/firestore";
12
13/**
14 * @license
15 * Copyright 2017 Google LLC
16 *
17 * Licensed under the Apache License, Version 2.0 (the "License");
18 * you may not use this file except in compliance with the License.
19 * You may obtain a copy of the License at
20 *
21 * http://www.apache.org/licenses/LICENSE-2.0
22 *
23 * Unless required by applicable law or agreed to in writing, software
24 * distributed under the License is distributed on an "AS IS" BASIS,
25 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26 * See the License for the specific language governing permissions and
27 * limitations under the License.
28 */
29/**
30 * Simple wrapper around a nullable UID. Mostly exists to make code more
31 * readable.
32 */
33class v {
34 constructor(t) {
35 this.uid = t;
36 }
37 isAuthenticated() {
38 return null != this.uid;
39 }
40 /**
41 * Returns a key representing this user, suitable for inclusion in a
42 * dictionary.
43 */ toKey() {
44 return this.isAuthenticated() ? "uid:" + this.uid : "anonymous-user";
45 }
46 isEqual(t) {
47 return t.uid === this.uid;
48 }
49}
50
51/** A user with a null UID. */ v.UNAUTHENTICATED = new v(null),
52// TODO(mikelehen): Look into getting a proper uid-equivalent for
53// non-FirebaseAuth providers.
54v.GOOGLE_CREDENTIALS = new v("google-credentials-uid"), v.FIRST_PARTY = new v("first-party-uid"),
55v.MOCK_USER = new v("mock-user");
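/*
 * Illustrative usage sketch (not part of the original bundle): `v` is the
 * minified `User` wrapper defined above. Only the methods shown above are used.
 *
 * @example
 * const alice = new v("alice-uid");
 * alice.isAuthenticated();              // true
 * alice.toKey();                        // "uid:alice-uid"
 * v.UNAUTHENTICATED.toKey();            // "anonymous-user"
 * alice.isEqual(new v("alice-uid"));    // true
 */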
56
57/**
58 * @license
59 * Copyright 2017 Google LLC
60 *
61 * Licensed under the Apache License, Version 2.0 (the "License");
62 * you may not use this file except in compliance with the License.
63 * You may obtain a copy of the License at
64 *
65 * http://www.apache.org/licenses/LICENSE-2.0
66 *
67 * Unless required by applicable law or agreed to in writing, software
68 * distributed under the License is distributed on an "AS IS" BASIS,
69 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
70 * See the License for the specific language governing permissions and
71 * limitations under the License.
72 */
73let V = "9.14.0";
74
75/**
76 * @license
77 * Copyright 2017 Google LLC
78 *
79 * Licensed under the Apache License, Version 2.0 (the "License");
80 * you may not use this file except in compliance with the License.
81 * You may obtain a copy of the License at
82 *
83 * http://www.apache.org/licenses/LICENSE-2.0
84 *
85 * Unless required by applicable law or agreed to in writing, software
86 * distributed under the License is distributed on an "AS IS" BASIS,
87 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
88 * See the License for the specific language governing permissions and
89 * limitations under the License.
90 */
91const S = new o("@firebase/firestore");
92
93// Helper methods are needed because variables can't be exported as read/write
94function D() {
95 return S.logLevel;
96}
97
98/**
99 * Sets the verbosity of Cloud Firestore logs (debug, error, or silent).
100 *
101 * @param logLevel - The verbosity you set for activity and error logging. Can
102 * be any of the following values:
103 *
104 * <ul>
105 * <li>`debug` for the most verbose logging level, primarily for
106 * debugging.</li>
107 * <li>`error` to log errors only.</li>
108 * <li>`silent` to turn off logging.</li>
109 * </ul>
110 */ function C(t) {
111 S.setLogLevel(t);
112}
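/*
 * Usage sketch: `C` is the minified form of the public `setLogLevel` export and
 * `D` reads the current level back. Applications normally call the readable
 * export from "firebase/firestore"; the local names only exist inside this
 * bundle.
 *
 * @example
 * // import { setLogLevel } from "firebase/firestore";
 * // setLogLevel("debug");   // most verbose
 * // setLogLevel("error");   // errors only
 * // setLogLevel("silent");  // turn logging off
 * C("debug");                // same effect via the minified local name
 */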
113
114function x(t, ...e) {
115 if (S.logLevel <= u.DEBUG) {
116 const n = e.map(O);
117 S.debug(`Firestore (${V}): ${t}`, ...n);
118 }
119}
120
121function N(t, ...e) {
122 if (S.logLevel <= u.ERROR) {
123 const n = e.map(O);
124 S.error(`Firestore (${V}): ${t}`, ...n);
125 }
126}
127
128/**
129 * @internal
130 */ function k(t, ...e) {
131 if (S.logLevel <= u.WARN) {
132 const n = e.map(O);
133 S.warn(`Firestore (${V}): ${t}`, ...n);
134 }
135}
136
137/**
138 * Converts an additional log parameter to a string representation.
139 */ function O(t) {
140 if ("string" == typeof t) return t;
141 try {
142 return e = t, JSON.stringify(e);
143 } catch (e) {
144 // Converting to JSON failed, just log the object directly
145 return t;
146 }
147 /**
148 * @license
149 * Copyright 2020 Google LLC
150 *
151 * Licensed under the Apache License, Version 2.0 (the "License");
152 * you may not use this file except in compliance with the License.
153 * You may obtain a copy of the License at
154 *
155 * http://www.apache.org/licenses/LICENSE-2.0
156 *
157 * Unless required by applicable law or agreed to in writing, software
158 * distributed under the License is distributed on an "AS IS" BASIS,
159 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
160 * See the License for the specific language governing permissions and
161 * limitations under the License.
162 */
163 /** Formats an object as a JSON string, suitable for logging. */
164 var e;
165}
166
167/**
168 * @license
169 * Copyright 2017 Google LLC
170 *
171 * Licensed under the Apache License, Version 2.0 (the "License");
172 * you may not use this file except in compliance with the License.
173 * You may obtain a copy of the License at
174 *
175 * http://www.apache.org/licenses/LICENSE-2.0
176 *
177 * Unless required by applicable law or agreed to in writing, software
178 * distributed under the License is distributed on an "AS IS" BASIS,
179 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
180 * See the License for the specific language governing permissions and
181 * limitations under the License.
182 */
183/**
184 * Unconditionally fails, throwing an Error with the given message.
185 * Messages are stripped in production builds.
186 *
187 * Returns `never` and can be used in expressions:
188 * @example
189 * let futureVar = fail('not implemented yet');
190 */ function M(t = "Unexpected state") {
191 // Log the failure in addition to throwing an exception, just in case the
192 // exception is swallowed.
193 const e = `FIRESTORE (${V}) INTERNAL ASSERTION FAILED: ` + t;
194 // NOTE: We don't use FirestoreError here because these are internal failures
195 // that cannot be handled by the user. (Also it would create a circular
196 // dependency between the error and assert modules which doesn't work.)
197 throw N(e), new Error(e);
198}
199
200/**
201 * Fails if the given assertion condition is false, throwing an Error with the
202 * given message.
203 *
204 * Messages are stripped in production builds.
205 */ function F(t, e) {
206 t || M();
207}
208
209/**
210 * Fails if the given assertion condition is false, throwing an Error with the
211 * given message.
212 *
213 * The code of callsites invoking this function is stripped out in production
214 * builds. Any side-effects of code within the debugAssert() invocation will not
215 * happen in this case.
216 *
217 * @internal
218 */ function $(t, e) {
219 t || M();
220}
221
222/**
223 * Casts `obj` to `T`. In non-production builds, verifies that `obj` is an
224 * instance of `T` before casting.
225 */ function B(t,
226// eslint-disable-next-line @typescript-eslint/no-explicit-any
227e) {
228 return t;
229}
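/*
 * Usage sketch for the assertion helpers above: `M` is the minified `fail()`,
 * `F` and `$` are the hard/debug asserts, and `B` is the unchecked cast. The
 * identifiers `value` and `name` below are placeholders, not bundle symbols.
 *
 * @example
 * F("string" == typeof name);             // throws an internal assertion error if false
 * const key = B(value);                   // cast; no runtime check in this build
 * // let futureVar = M("not implemented yet");  // unconditional failure
 */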
230
231/**
232 * @license
233 * Copyright 2017 Google LLC
234 *
235 * Licensed under the Apache License, Version 2.0 (the "License");
236 * you may not use this file except in compliance with the License.
237 * You may obtain a copy of the License at
238 *
239 * http://www.apache.org/licenses/LICENSE-2.0
240 *
241 * Unless required by applicable law or agreed to in writing, software
242 * distributed under the License is distributed on an "AS IS" BASIS,
243 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
244 * See the License for the specific language governing permissions and
245 * limitations under the License.
246 */ const L = {
247 // Causes are copied from:
248 // https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
249 /** Not an error; returned on success. */
250 OK: "ok",
251 /** The operation was cancelled (typically by the caller). */
252 CANCELLED: "cancelled",
253 /** Unknown error or an error from a different error domain. */
254 UNKNOWN: "unknown",
255 /**
256 * Client specified an invalid argument. Note that this differs from
257 * FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments that are
258 * problematic regardless of the state of the system (e.g., a malformed file
259 * name).
260 */
261 INVALID_ARGUMENT: "invalid-argument",
262 /**
263 * Deadline expired before operation could complete. For operations that
264 * change the state of the system, this error may be returned even if the
265 * operation has completed successfully. For example, a successful response
266 * from a server could have been delayed long enough for the deadline to
267 * expire.
268 */
269 DEADLINE_EXCEEDED: "deadline-exceeded",
270 /** Some requested entity (e.g., file or directory) was not found. */
271 NOT_FOUND: "not-found",
272 /**
273 * Some entity that we attempted to create (e.g., file or directory) already
274 * exists.
275 */
276 ALREADY_EXISTS: "already-exists",
277 /**
278 * The caller does not have permission to execute the specified operation.
279 * PERMISSION_DENIED must not be used for rejections caused by exhausting
280 * some resource (use RESOURCE_EXHAUSTED instead for those errors).
281 * PERMISSION_DENIED must not be used if the caller can not be identified
282 * (use UNAUTHENTICATED instead for those errors).
283 */
284 PERMISSION_DENIED: "permission-denied",
285 /**
286 * The request does not have valid authentication credentials for the
287 * operation.
288 */
289 UNAUTHENTICATED: "unauthenticated",
290 /**
291 * Some resource has been exhausted, perhaps a per-user quota, or perhaps the
292 * entire file system is out of space.
293 */
294 RESOURCE_EXHAUSTED: "resource-exhausted",
295 /**
296 * Operation was rejected because the system is not in a state required for
297 * the operation's execution. For example, the directory to be deleted may be
298 * non-empty, an rmdir operation is applied to a non-directory, etc.
299 *
300 * A litmus test that may help a service implementor in deciding
301 * between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
302 * (a) Use UNAVAILABLE if the client can retry just the failing call.
303 * (b) Use ABORTED if the client should retry at a higher-level
304 * (e.g., restarting a read-modify-write sequence).
305 * (c) Use FAILED_PRECONDITION if the client should not retry until
306 * the system state has been explicitly fixed. E.g., if an "rmdir"
307 * fails because the directory is non-empty, FAILED_PRECONDITION
308 * should be returned since the client should not retry unless
309 * they have first fixed up the directory by deleting files from it.
310 * (d) Use FAILED_PRECONDITION if the client performs conditional
311 * REST Get/Update/Delete on a resource and the resource on the
312 * server does not match the condition. E.g., conflicting
313 * read-modify-write on the same resource.
314 */
315 FAILED_PRECONDITION: "failed-precondition",
316 /**
317 * The operation was aborted, typically due to a concurrency issue like
318 * sequencer check failures, transaction aborts, etc.
319 *
320 * See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
321 * and UNAVAILABLE.
322 */
323 ABORTED: "aborted",
324 /**
325 * Operation was attempted past the valid range. E.g., seeking or reading
326 * past end of file.
327 *
328 * Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed
329 * if the system state changes. For example, a 32-bit file system will
330 * generate INVALID_ARGUMENT if asked to read at an offset that is not in the
331 * range [0,2^32-1], but it will generate OUT_OF_RANGE if asked to read from
332 * an offset past the current file size.
333 *
334 * There is a fair bit of overlap between FAILED_PRECONDITION and
335 * OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific error)
336 * when it applies so that callers who are iterating through a space can
337 * easily look for an OUT_OF_RANGE error to detect when they are done.
338 */
339 OUT_OF_RANGE: "out-of-range",
340 /** Operation is not implemented or not supported/enabled in this service. */
341 UNIMPLEMENTED: "unimplemented",
342 /**
343 * Internal errors. Means some invariant expected by the underlying system has
344 * been broken. If you see one of these errors, something is very broken.
345 */
346 INTERNAL: "internal",
347 /**
348 * The service is currently unavailable. This is most likely a transient
349 * condition and may be corrected by retrying with a backoff.
350 *
351 * See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
352 * and UNAVAILABLE.
353 */
354 UNAVAILABLE: "unavailable",
355 /** Unrecoverable data loss or corruption. */
356 DATA_LOSS: "data-loss"
357};
358
359/** An error returned by a Firestore operation. */ class U extends c {
360 /** @hideconstructor */
361 constructor(
362 /**
363 * The backend error code associated with this error.
364 */
365 t,
366 /**
367 * A custom error description.
368 */
369 e) {
370 super(t, e), this.code = t, this.message = e,
371 // HACK: We write a toString property directly because Error is not a real
372 // class and so inheritance does not work correctly. We could alternatively
373 // do the same "back-door inheritance" trick that FirebaseError does.
374 this.toString = () => `${this.name}: [code=${this.code}]: ${this.message}`;
375 }
376}
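/*
 * Usage sketch: `U` is the minified `FirestoreError` and `L` the map of
 * canonical error codes above. The `FirebaseError` base class supplies the
 * "FirebaseError" name seen in the string form.
 *
 * @example
 * const err = new U(L.NOT_FOUND, "No document to update");
 * err.code;        // "not-found"
 * err.toString();  // "FirebaseError: [code=not-found]: No document to update"
 */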
377
378/**
379 * @license
380 * Copyright 2017 Google LLC
381 *
382 * Licensed under the Apache License, Version 2.0 (the "License");
383 * you may not use this file except in compliance with the License.
384 * You may obtain a copy of the License at
385 *
386 * http://www.apache.org/licenses/LICENSE-2.0
387 *
388 * Unless required by applicable law or agreed to in writing, software
389 * distributed under the License is distributed on an "AS IS" BASIS,
390 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
391 * See the License for the specific language governing permissions and
392 * limitations under the License.
393 */ class q {
394 constructor() {
395 this.promise = new Promise(((t, e) => {
396 this.resolve = t, this.reject = e;
397 }));
398 }
399}
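/*
 * Usage sketch: `q` is the minified `Deferred` helper, a Promise whose resolve
 * and reject functions are exposed as properties so other code can settle it
 * later. `someAsyncSetup` below is a placeholder callback-style API.
 *
 * @example
 * const ready = new q();
 * someAsyncSetup(() => ready.resolve());
 * await ready.promise;   // resumes once resolve() has been called
 */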
400
401/**
402 * @license
403 * Copyright 2017 Google LLC
404 *
405 * Licensed under the Apache License, Version 2.0 (the "License");
406 * you may not use this file except in compliance with the License.
407 * You may obtain a copy of the License at
408 *
409 * http://www.apache.org/licenses/LICENSE-2.0
410 *
411 * Unless required by applicable law or agreed to in writing, software
412 * distributed under the License is distributed on an "AS IS" BASIS,
413 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
414 * See the License for the specific language governing permissions and
415 * limitations under the License.
416 */ class K {
417 constructor(t, e) {
418 this.user = e, this.type = "OAuth", this.headers = new Map, this.headers.set("Authorization", `Bearer ${t}`);
419 }
420}
421
422/**
423 * A CredentialsProvider that always yields an empty token.
424 * @internal
425 */ class G {
426 getToken() {
427 return Promise.resolve(null);
428 }
429 invalidateToken() {}
430 start(t, e) {
431 // Fire with initial user.
432 t.enqueueRetryable((() => e(v.UNAUTHENTICATED)));
433 }
434 shutdown() {}
435}
436
437/**
438 * A CredentialsProvider that always returns a constant token. Used for
439 * emulator token mocking.
440 */ class Q {
441 constructor(t) {
442 this.token = t,
443 /**
444 * Stores the listener registered with setChangeListener()
445 * This isn't actually necessary since the UID never changes, but we use this
446 * to verify the listen contract is adhered to in tests.
447 */
448 this.changeListener = null;
449 }
450 getToken() {
451 return Promise.resolve(this.token);
452 }
453 invalidateToken() {}
454 start(t, e) {
455 this.changeListener = e,
456 // Fire with initial user.
457 t.enqueueRetryable((() => e(this.token.user)));
458 }
459 shutdown() {
460 this.changeListener = null;
461 }
462}
463
464class j {
465 constructor(t) {
466 this.t = t,
467 /** Tracks the current User. */
468 this.currentUser = v.UNAUTHENTICATED,
469 /**
470 * Counter used to detect if the token changed while a getToken request was
471 * outstanding.
472 */
473 this.i = 0, this.forceRefresh = !1, this.auth = null;
474 }
475 start(t, e) {
476 let n = this.i;
477 // A change listener that prevents double-firing for the same token change.
478 const s = t => this.i !== n ? (n = this.i, e(t)) : Promise.resolve();
479 // A promise that can be waited on to block on the next token change.
480 // This promise is re-created after each change.
481 let i = new q;
482 this.o = () => {
483 this.i++, this.currentUser = this.u(), i.resolve(), i = new q, t.enqueueRetryable((() => s(this.currentUser)));
484 };
485 const r = () => {
486 const e = i;
487 t.enqueueRetryable((async () => {
488 await e.promise, await s(this.currentUser);
489 }));
490 }, o = t => {
491 x("FirebaseAuthCredentialsProvider", "Auth detected"), this.auth = t, this.auth.addAuthTokenListener(this.o),
492 r();
493 };
494 this.t.onInit((t => o(t))),
495 // Our users can initialize Auth right after Firestore, so we give it
496 // a chance to register itself with the component framework before we
497 // determine whether to start up in unauthenticated mode.
498 setTimeout((() => {
499 if (!this.auth) {
500 const t = this.t.getImmediate({
501 optional: !0
502 });
503 t ? o(t) : (
504 // If auth is still not available, proceed with `null` user
505 x("FirebaseAuthCredentialsProvider", "Auth not yet detected"), i.resolve(), i = new q);
506 }
507 }), 0), r();
508 }
509 getToken() {
510 // Take note of the current value of the tokenCounter so that this method
511 // can fail (with an ABORTED error) if there is a token change while the
512 // request is outstanding.
513 const t = this.i, e = this.forceRefresh;
514 return this.forceRefresh = !1, this.auth ? this.auth.getToken(e).then((e =>
515 // Cancel the request since the token changed while the request was
516 // outstanding so the response is potentially for a previous user (which
517 // user, we can't be sure).
518 this.i !== t ? (x("FirebaseAuthCredentialsProvider", "getToken aborted due to token change."),
519 this.getToken()) : e ? (F("string" == typeof e.accessToken), new K(e.accessToken, this.currentUser)) : null)) : Promise.resolve(null);
520 }
521 invalidateToken() {
522 this.forceRefresh = !0;
523 }
524 shutdown() {
525 this.auth && this.auth.removeAuthTokenListener(this.o);
526 }
527 // Auth.getUid() can return null even with a user logged in. It is because
528 // getUid() is synchronous, but the auth code populating Uid is asynchronous.
529 // This method should only be called in the AuthTokenListener callback
530 // to guarantee to get the actual user.
531 u() {
532 const t = this.auth && this.auth.getUid();
533 return F(null === t || "string" == typeof t), new v(t);
534 }
535}
536
537/*
538 * FirstPartyToken provides a fresh token each time its value
539 * is requested, because if the token is too old, requests will be rejected.
540 * Technically this may no longer be necessary since the SDK should gracefully
541 * recover from unauthenticated errors (see b/33147818 for context), but it's
542 * safer to keep the implementation as-is.
543 */ class W {
544 constructor(t, e, n, s) {
545 this.h = t, this.l = e, this.m = n, this.g = s, this.type = "FirstParty", this.user = v.FIRST_PARTY,
546 this.p = new Map;
547 }
548 /** Gets an authorization token, using a provided factory function, or falling back to First Party GAPI. */ I() {
549 return this.g ? this.g() : (
550 // Make sure this really is a Gapi client.
551 F(!("object" != typeof this.h || null === this.h || !this.h.auth || !this.h.auth.getAuthHeaderValueForFirstParty)),
552 this.h.auth.getAuthHeaderValueForFirstParty([]));
553 }
554 get headers() {
555 this.p.set("X-Goog-AuthUser", this.l);
556 // Use array notation to prevent minification
557 const t = this.I();
558 return t && this.p.set("Authorization", t), this.m && this.p.set("X-Goog-Iam-Authorization-Token", this.m),
559 this.p;
560 }
561}
562
563/*
564 * Provides user credentials required for the Firestore JavaScript SDK
565 * to authenticate the user, using technique that is only available
566 * to applications hosted by Google.
567 */ class z {
568 constructor(t, e, n, s) {
569 this.h = t, this.l = e, this.m = n, this.g = s;
570 }
571 getToken() {
572 return Promise.resolve(new W(this.h, this.l, this.m, this.g));
573 }
574 start(t, e) {
575 // Fire with initial uid.
576 t.enqueueRetryable((() => e(v.FIRST_PARTY)));
577 }
578 shutdown() {}
579 invalidateToken() {}
580}
581
582class H {
583 constructor(t) {
584 this.value = t, this.type = "AppCheck", this.headers = new Map, t && t.length > 0 && this.headers.set("x-firebase-appcheck", this.value);
585 }
586}
587
588class J {
589 constructor(t) {
590 this.T = t, this.forceRefresh = !1, this.appCheck = null, this.A = null;
591 }
592 start(t, e) {
593 const n = t => {
594 null != t.error && x("FirebaseAppCheckTokenProvider", `Error getting App Check token; using placeholder token instead. Error: ${t.error.message}`);
595 const n = t.token !== this.A;
596 return this.A = t.token, x("FirebaseAppCheckTokenProvider", `Received ${n ? "new" : "existing"} token.`),
597 n ? e(t.token) : Promise.resolve();
598 };
599 this.o = e => {
600 t.enqueueRetryable((() => n(e)));
601 };
602 const s = t => {
603 x("FirebaseAppCheckTokenProvider", "AppCheck detected"), this.appCheck = t, this.appCheck.addTokenListener(this.o);
604 };
605 this.T.onInit((t => s(t))),
606 // Our users can initialize AppCheck after Firestore, so we give it
607 // a chance to register itself with the component framework.
608 setTimeout((() => {
609 if (!this.appCheck) {
610 const t = this.T.getImmediate({
611 optional: !0
612 });
613 t ? s(t) :
614 // If AppCheck is still not available, proceed without it.
615 x("FirebaseAppCheckTokenProvider", "AppCheck not yet detected");
616 }
617 }), 0);
618 }
619 getToken() {
620 const t = this.forceRefresh;
621 return this.forceRefresh = !1, this.appCheck ? this.appCheck.getToken(t).then((t => t ? (F("string" == typeof t.token),
622 this.A = t.token, new H(t.token)) : null)) : Promise.resolve(null);
623 }
624 invalidateToken() {
625 this.forceRefresh = !0;
626 }
627 shutdown() {
628 this.appCheck && this.appCheck.removeTokenListener(this.o);
629 }
630}
631
632/**
633 * An AppCheck token provider that always yields an empty token.
634 * @internal
635 */ class Y {
636 getToken() {
637 return Promise.resolve(new H(""));
638 }
639 invalidateToken() {}
640 start(t, e) {}
641 shutdown() {}
642}
643
644/**
645 * Builds a CredentialsProvider depending on the type of
646 * the credentials passed in.
647 */
648/**
649 * @license
650 * Copyright 2020 Google LLC
651 *
652 * Licensed under the Apache License, Version 2.0 (the "License");
653 * you may not use this file except in compliance with the License.
654 * You may obtain a copy of the License at
655 *
656 * http://www.apache.org/licenses/LICENSE-2.0
657 *
658 * Unless required by applicable law or agreed to in writing, software
659 * distributed under the License is distributed on an "AS IS" BASIS,
660 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
661 * See the License for the specific language governing permissions and
662 * limitations under the License.
663 */
664/**
665 * Generates `nBytes` of random bytes.
666 *
667 * If `nBytes < 0`, an error will be thrown.
668 */
669function X(t) {
670 // Polyfills for IE and WebWorker by using `self` and `msCrypto` when `crypto` is not available.
671 const e =
672 // eslint-disable-next-line @typescript-eslint/no-explicit-any
673 "undefined" != typeof self && (self.crypto || self.msCrypto), n = new Uint8Array(t);
674 if (e && "function" == typeof e.getRandomValues) e.getRandomValues(n); else
675 // Falls back to Math.random
676 for (let e = 0; e < t; e++) n[e] = Math.floor(256 * Math.random());
677 return n;
678}
679
680/**
681 * @license
682 * Copyright 2017 Google LLC
683 *
684 * Licensed under the Apache License, Version 2.0 (the "License");
685 * you may not use this file except in compliance with the License.
686 * You may obtain a copy of the License at
687 *
688 * http://www.apache.org/licenses/LICENSE-2.0
689 *
690 * Unless required by applicable law or agreed to in writing, software
691 * distributed under the License is distributed on an "AS IS" BASIS,
692 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
693 * See the License for the specific language governing permissions and
694 * limitations under the License.
695 */ class Z {
696 static R() {
697 // Alphanumeric characters
698 const t = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", e = Math.floor(256 / t.length) * t.length;
699 // The largest byte value that is a multiple of `char.length`.
700 let n = "";
701 for (;n.length < 20; ) {
702 const s = X(40);
703 for (let i = 0; i < s.length; ++i)
704 // Only accept values that are [0, maxMultiple), this ensures they can
705 // be evenly mapped to indices of `chars` via a modulo operation.
706 n.length < 20 && s[i] < e && (n += t.charAt(s[i] % t.length));
707 }
708 return n;
709 }
710}
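/*
 * Worked example of the rejection sampling in `Z.R()` (minified `AutoId`):
 * with 62 alphanumeric characters the largest usable byte value is
 * Math.floor(256 / 62) * 62 = 248, so bytes in [248, 255] are skipped rather
 * than mapped, keeping `byte % 62` uniformly distributed over the alphabet.
 *
 * @example
 * const id = Z.R();
 * id.length;                       // 20
 * /^[A-Za-z0-9]{20}$/.test(id);    // true
 */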
711
712function tt(t, e) {
713 return t < e ? -1 : t > e ? 1 : 0;
714}
715
716/** Helper to compare arrays using isEqual(). */ function et(t, e, n) {
717 return t.length === e.length && t.every(((t, s) => n(t, e[s])));
718}
719
720/**
721 * Returns the immediate lexicographically-following string. This is useful to
722 * construct an inclusive range for indexeddb iterators.
723 */ function nt(t) {
724 // Return the input string, with an additional NUL byte appended.
725 return t + "\0";
726}
727
728/**
729 * @license
730 * Copyright 2017 Google LLC
731 *
732 * Licensed under the Apache License, Version 2.0 (the "License");
733 * you may not use this file except in compliance with the License.
734 * You may obtain a copy of the License at
735 *
736 * http://www.apache.org/licenses/LICENSE-2.0
737 *
738 * Unless required by applicable law or agreed to in writing, software
739 * distributed under the License is distributed on an "AS IS" BASIS,
740 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
741 * See the License for the specific language governing permissions and
742 * limitations under the License.
743 */
744// The earliest date supported by Firestore timestamps (0001-01-01T00:00:00Z).
745/**
746 * A `Timestamp` represents a point in time independent of any time zone or
747 * calendar, represented as seconds and fractions of seconds at nanosecond
748 * resolution in UTC Epoch time.
749 *
750 * It is encoded using the Proleptic Gregorian Calendar which extends the
751 * Gregorian calendar backwards to year one. It is encoded assuming all minutes
752 * are 60 seconds long, i.e. leap seconds are "smeared" so that no leap second
753 * table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to
754 * 9999-12-31T23:59:59.999999999Z.
755 *
756 * For examples and further specifications, refer to the
757 * {@link https://github.com/google/protobuf/blob/master/src/google/protobuf/timestamp.proto | Timestamp definition}.
758 */
759class st {
760 /**
761 * Creates a new timestamp.
762 *
763 * @param seconds - The number of seconds of UTC time since Unix epoch
764 * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
765 * 9999-12-31T23:59:59Z inclusive.
766 * @param nanoseconds - The non-negative fractions of a second at nanosecond
767 * resolution. Negative second values with fractions must still have
768 * non-negative nanoseconds values that count forward in time. Must be
769 * from 0 to 999,999,999 inclusive.
770 */
771 constructor(
772 /**
773 * The number of seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z.
774 */
775 t,
776 /**
777 * The fractions of a second at nanosecond resolution.
778 */
779 e) {
780 if (this.seconds = t, this.nanoseconds = e, e < 0) throw new U(L.INVALID_ARGUMENT, "Timestamp nanoseconds out of range: " + e);
781 if (e >= 1e9) throw new U(L.INVALID_ARGUMENT, "Timestamp nanoseconds out of range: " + e);
782 if (t < -62135596800) throw new U(L.INVALID_ARGUMENT, "Timestamp seconds out of range: " + t);
783 // This will break in the year 10,000.
784 if (t >= 253402300800) throw new U(L.INVALID_ARGUMENT, "Timestamp seconds out of range: " + t);
785 }
786 /**
787 * Creates a new timestamp with the current date, with millisecond precision.
788 *
789 * @returns a new timestamp representing the current date.
790 */ static now() {
791 return st.fromMillis(Date.now());
792 }
793 /**
794 * Creates a new timestamp from the given date.
795 *
796 * @param date - The date to initialize the `Timestamp` from.
797 * @returns A new `Timestamp` representing the same point in time as the given
798 * date.
799 */ static fromDate(t) {
800 return st.fromMillis(t.getTime());
801 }
802 /**
803 * Creates a new timestamp from the given number of milliseconds.
804 *
805 * @param milliseconds - Number of milliseconds since Unix epoch
806 * 1970-01-01T00:00:00Z.
807 * @returns A new `Timestamp` representing the same point in time as the given
808 * number of milliseconds.
809 */ static fromMillis(t) {
810 const e = Math.floor(t / 1e3), n = Math.floor(1e6 * (t - 1e3 * e));
811 return new st(e, n);
812 }
813 /**
814 * Converts a `Timestamp` to a JavaScript `Date` object. This conversion
815 * causes a loss of precision since `Date` objects only support millisecond
816 * precision.
817 *
818 * @returns JavaScript `Date` object representing the same point in time as
819 * this `Timestamp`, with millisecond precision.
820 */ toDate() {
821 return new Date(this.toMillis());
822 }
823 /**
824 * Converts a `Timestamp` to a numeric timestamp (in milliseconds since
825 * epoch). This operation causes a loss of precision.
826 *
827 * @returns The point in time corresponding to this timestamp, represented as
828 * the number of milliseconds since Unix epoch 1970-01-01T00:00:00Z.
829 */ toMillis() {
830 return 1e3 * this.seconds + this.nanoseconds / 1e6;
831 }
832 _compareTo(t) {
833 return this.seconds === t.seconds ? tt(this.nanoseconds, t.nanoseconds) : tt(this.seconds, t.seconds);
834 }
835 /**
836 * Returns true if this `Timestamp` is equal to the provided one.
837 *
838 * @param other - The `Timestamp` to compare against.
839 * @returns true if this `Timestamp` is equal to the provided one.
840 */ isEqual(t) {
841 return t.seconds === this.seconds && t.nanoseconds === this.nanoseconds;
842 }
843 /** Returns a textual representation of this `Timestamp`. */ toString() {
844 return "Timestamp(seconds=" + this.seconds + ", nanoseconds=" + this.nanoseconds + ")";
845 }
846 /** Returns a JSON-serializable representation of this `Timestamp`. */ toJSON() {
847 return {
848 seconds: this.seconds,
849 nanoseconds: this.nanoseconds
850 };
851 }
852 /**
853 * Converts this object to a primitive string, which allows `Timestamp` objects
854 * to be compared using the `<`, `<=`, `>=` and `>` operators.
855 */ valueOf() {
856 // This method returns a string of the form <seconds>.<nanoseconds> where
857 // <seconds> is translated to have a non-negative value and both <seconds>
858 // and <nanoseconds> are left-padded with zeroes to be a consistent length.
859 // Strings with this format then have a lexicographical ordering that matches
860 // the expected ordering. The <seconds> translation is done to avoid having
861 // a leading negative sign (i.e. a leading '-' character) in its string
862 // representation, which would affect its lexicographical ordering.
863 const t = this.seconds - -62135596800;
864 // Note: Up to 12 decimal digits are required to represent all valid
865 // 'seconds' values.
866 return String(t).padStart(12, "0") + "." + String(this.nanoseconds).padStart(9, "0");
867 }
868}
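/*
 * Usage sketch: `st` is the minified public `Timestamp` class. The example
 * shows the millisecond round-trip and the zero-padded `valueOf()` string used
 * for lexicographic ordering (seconds are shifted by +62135596800 to stay
 * non-negative).
 *
 * @example
 * const ts = st.fromMillis(1500);
 * ts.seconds;       // 1
 * ts.nanoseconds;   // 500000000
 * ts.toMillis();    // 1500
 * ts.valueOf();     // "062135596801.500000000"
 */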
869
870/**
871 * @license
872 * Copyright 2017 Google LLC
873 *
874 * Licensed under the Apache License, Version 2.0 (the "License");
875 * you may not use this file except in compliance with the License.
876 * You may obtain a copy of the License at
877 *
878 * http://www.apache.org/licenses/LICENSE-2.0
879 *
880 * Unless required by applicable law or agreed to in writing, software
881 * distributed under the License is distributed on an "AS IS" BASIS,
882 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
883 * See the License for the specific language governing permissions and
884 * limitations under the License.
885 */
886/**
887 * A version of a document in Firestore. This corresponds to the version
888 * timestamp, such as update_time or read_time.
889 */ class it {
890 constructor(t) {
891 this.timestamp = t;
892 }
893 static fromTimestamp(t) {
894 return new it(t);
895 }
896 static min() {
897 return new it(new st(0, 0));
898 }
899 static max() {
900 return new it(new st(253402300799, 999999999));
901 }
902 compareTo(t) {
903 return this.timestamp._compareTo(t.timestamp);
904 }
905 isEqual(t) {
906 return this.timestamp.isEqual(t.timestamp);
907 }
908 /** Returns a number representation of the version for use in spec tests. */ toMicroseconds() {
909 // Convert to microseconds.
910 return 1e6 * this.timestamp.seconds + this.timestamp.nanoseconds / 1e3;
911 }
912 toString() {
913 return "SnapshotVersion(" + this.timestamp.toString() + ")";
914 }
915 toTimestamp() {
916 return this.timestamp;
917 }
918}
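/*
 * Usage sketch: `it` is the minified `SnapshotVersion`, a thin wrapper around a
 * `Timestamp` used for document read and update times.
 *
 * @example
 * const a = it.fromTimestamp(new st(10, 0));
 * const b = it.min();       // version at 1970-01-01T00:00:00Z
 * a.compareTo(b);           // 1 (a is later)
 * a.toMicroseconds();       // 10000000
 */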
919
920/**
921 * @license
922 * Copyright 2017 Google LLC
923 *
924 * Licensed under the Apache License, Version 2.0 (the "License");
925 * you may not use this file except in compliance with the License.
926 * You may obtain a copy of the License at
927 *
928 * http://www.apache.org/licenses/LICENSE-2.0
929 *
930 * Unless required by applicable law or agreed to in writing, software
931 * distributed under the License is distributed on an "AS IS" BASIS,
932 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
933 * See the License for the specific language governing permissions and
934 * limitations under the License.
935 */
936/**
937 * Path represents an ordered sequence of string segments.
938 */
939class rt {
940 constructor(t, e, n) {
941 void 0 === e ? e = 0 : e > t.length && M(), void 0 === n ? n = t.length - e : n > t.length - e && M(),
942 this.segments = t, this.offset = e, this.len = n;
943 }
944 get length() {
945 return this.len;
946 }
947 isEqual(t) {
948 return 0 === rt.comparator(this, t);
949 }
950 child(t) {
951 const e = this.segments.slice(this.offset, this.limit());
952 return t instanceof rt ? t.forEach((t => {
953 e.push(t);
954 })) : e.push(t), this.construct(e);
955 }
956 /** The index of one past the last segment of the path. */ limit() {
957 return this.offset + this.length;
958 }
959 popFirst(t) {
960 return t = void 0 === t ? 1 : t, this.construct(this.segments, this.offset + t, this.length - t);
961 }
962 popLast() {
963 return this.construct(this.segments, this.offset, this.length - 1);
964 }
965 firstSegment() {
966 return this.segments[this.offset];
967 }
968 lastSegment() {
969 return this.get(this.length - 1);
970 }
971 get(t) {
972 return this.segments[this.offset + t];
973 }
974 isEmpty() {
975 return 0 === this.length;
976 }
977 isPrefixOf(t) {
978 if (t.length < this.length) return !1;
979 for (let e = 0; e < this.length; e++) if (this.get(e) !== t.get(e)) return !1;
980 return !0;
981 }
982 isImmediateParentOf(t) {
983 if (this.length + 1 !== t.length) return !1;
984 for (let e = 0; e < this.length; e++) if (this.get(e) !== t.get(e)) return !1;
985 return !0;
986 }
987 forEach(t) {
988 for (let e = this.offset, n = this.limit(); e < n; e++) t(this.segments[e]);
989 }
990 toArray() {
991 return this.segments.slice(this.offset, this.limit());
992 }
993 static comparator(t, e) {
994 const n = Math.min(t.length, e.length);
995 for (let s = 0; s < n; s++) {
996 const n = t.get(s), i = e.get(s);
997 if (n < i) return -1;
998 if (n > i) return 1;
999 }
1000 return t.length < e.length ? -1 : t.length > e.length ? 1 : 0;
1001 }
1002}
1003
1004/**
1005 * A slash-separated path for navigating resources (documents and collections)
1006 * within Firestore.
1007 *
1008 * @internal
1009 */ class ot extends rt {
1010 construct(t, e, n) {
1011 return new ot(t, e, n);
1012 }
1013 canonicalString() {
1014 // NOTE: The client is ignorant of any path segments containing escape
1015 // sequences (e.g. __id123__) and just passes them through raw (they exist
1016 // for legacy reasons and should not be used frequently).
1017 return this.toArray().join("/");
1018 }
1019 toString() {
1020 return this.canonicalString();
1021 }
1022 /**
1023 * Creates a resource path from the given slash-delimited string. If multiple
1024 * arguments are provided, all components are combined. Leading and trailing
1025 * slashes from all components are ignored.
1026 */ static fromString(...t) {
1027 // NOTE: The client is ignorant of any path segments containing escape
1028 // sequences (e.g. __id123__) and just passes them through raw (they exist
1029 // for legacy reasons and should not be used frequently).
1030 const e = [];
1031 for (const n of t) {
1032 if (n.indexOf("//") >= 0) throw new U(L.INVALID_ARGUMENT, `Invalid segment (${n}). Paths must not contain // in them.`);
1033 // Strip leading and trailing slashes.
1034 e.push(...n.split("/").filter((t => t.length > 0)));
1035 }
1036 return new ot(e);
1037 }
1038 static emptyPath() {
1039 return new ot([]);
1040 }
1041}
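/*
 * Usage sketch: `ot` is the minified `ResourcePath` (slash-separated path).
 * Only the methods defined above are used; the paths themselves are made up.
 *
 * @example
 * const p = ot.fromString("rooms/eros", "messages");
 * p.canonicalString();                    // "rooms/eros/messages"
 * p.length;                               // 3
 * ot.fromString("rooms").isPrefixOf(p);   // true
 * p.popFirst(2).canonicalString();        // "messages"
 */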
1042
1043const ut = /^[_a-zA-Z][_a-zA-Z0-9]*$/;
1044
1045/**
1046 * A dot-separated path for navigating sub-objects within a document.
1047 * @internal
1048 */ class ct extends rt {
1049 construct(t, e, n) {
1050 return new ct(t, e, n);
1051 }
1052 /**
1053 * Returns true if the string could be used as a segment in a field path
1054 * without escaping.
1055 */ static isValidIdentifier(t) {
1056 return ut.test(t);
1057 }
1058 canonicalString() {
1059 return this.toArray().map((t => (t = t.replace(/\\/g, "\\\\").replace(/`/g, "\\`"),
1060 ct.isValidIdentifier(t) || (t = "`" + t + "`"), t))).join(".");
1061 }
1062 toString() {
1063 return this.canonicalString();
1064 }
1065 /**
1066 * Returns true if this field references the key of a document.
1067 */ isKeyField() {
1068 return 1 === this.length && "__name__" === this.get(0);
1069 }
1070 /**
1071 * The field designating the key of a document.
1072 */ static keyField() {
1073 return new ct([ "__name__" ]);
1074 }
1075 /**
1076 * Parses a field string from the given server-formatted string.
1077 *
1078 * - Splitting the empty string is not allowed (for now at least).
1079 * - Empty segments within the string (e.g. if there are two consecutive
1080 * separators) are not allowed.
1081 *
1082 * TODO(b/37244157): we should make this more strict. Right now, it allows
1083 * non-identifier path components, even if they aren't escaped.
1084 */ static fromServerFormat(t) {
1085 const e = [];
1086 let n = "", s = 0;
1087 const i = () => {
1088 if (0 === n.length) throw new U(L.INVALID_ARGUMENT, `Invalid field path (${t}). Paths must not be empty, begin with '.', end with '.', or contain '..'`);
1089 e.push(n), n = "";
1090 };
1091 let r = !1;
1092 for (;s < t.length; ) {
1093 const e = t[s];
1094 if ("\\" === e) {
1095 if (s + 1 === t.length) throw new U(L.INVALID_ARGUMENT, "Path has trailing escape character: " + t);
1096 const e = t[s + 1];
1097 if ("\\" !== e && "." !== e && "`" !== e) throw new U(L.INVALID_ARGUMENT, "Path has invalid escape sequence: " + t);
1098 n += e, s += 2;
1099 } else "`" === e ? (r = !r, s++) : "." !== e || r ? (n += e, s++) : (i(), s++);
1100 }
1101 if (i(), r) throw new U(L.INVALID_ARGUMENT, "Unterminated ` in path: " + t);
1102 return new ct(e);
1103 }
1104 static emptyPath() {
1105 return new ct([]);
1106 }
1107}
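/*
 * Usage sketch: `ct` is the minified internal `FieldPath` (dot-separated).
 * `fromServerFormat` honors the backtick quoting and backslash escapes
 * described above, and `canonicalString` re-quotes any segment that is not a
 * plain identifier.
 *
 * @example
 * const f = ct.fromServerFormat("address.`zip code`");
 * f.toArray();                 // ["address", "zip code"]
 * f.canonicalString();         // "address.`zip code`"
 * ct.keyField().isKeyField();  // true
 */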
1108
1109/**
1110 * @license
1111 * Copyright 2017 Google LLC
1112 *
1113 * Licensed under the Apache License, Version 2.0 (the "License");
1114 * you may not use this file except in compliance with the License.
1115 * You may obtain a copy of the License at
1116 *
1117 * http://www.apache.org/licenses/LICENSE-2.0
1118 *
1119 * Unless required by applicable law or agreed to in writing, software
1120 * distributed under the License is distributed on an "AS IS" BASIS,
1121 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1122 * See the License for the specific language governing permissions and
1123 * limitations under the License.
1124 */
1125/**
1126 * @internal
1127 */ class at {
1128 constructor(t) {
1129 this.path = t;
1130 }
1131 static fromPath(t) {
1132 return new at(ot.fromString(t));
1133 }
1134 static fromName(t) {
1135 return new at(ot.fromString(t).popFirst(5));
1136 }
1137 static empty() {
1138 return new at(ot.emptyPath());
1139 }
1140 get collectionGroup() {
1141 return this.path.popLast().lastSegment();
1142 }
1143 /** Returns true if the document is in the specified collectionId. */ hasCollectionId(t) {
1144 return this.path.length >= 2 && this.path.get(this.path.length - 2) === t;
1145 }
1146 /** Returns the collection group (i.e. the name of the parent collection) for this key. */ getCollectionGroup() {
1147 return this.path.get(this.path.length - 2);
1148 }
1149 /** Returns the fully qualified path to the parent collection. */ getCollectionPath() {
1150 return this.path.popLast();
1151 }
1152 isEqual(t) {
1153 return null !== t && 0 === ot.comparator(this.path, t.path);
1154 }
1155 toString() {
1156 return this.path.toString();
1157 }
1158 static comparator(t, e) {
1159 return ot.comparator(t.path, e.path);
1160 }
1161 static isDocumentKey(t) {
1162 return t.length % 2 == 0;
1163 }
1164 /**
1165 * Creates and returns a new document key with the given segments.
1166 *
1167 * @param segments - The segments of the path to the document
1168 * @returns A new instance of DocumentKey
1169 */ static fromSegments(t) {
1170 return new at(new ot(t.slice()));
1171 }
1172}
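/*
 * Usage sketch: `at` is the minified `DocumentKey`. Document paths always have
 * an even number of segments (alternating collection and document IDs), which
 * is what `isDocumentKey` checks. The path below is illustrative.
 *
 * @example
 * const key = at.fromPath("rooms/eros/messages/1");
 * key.getCollectionGroup();             // "messages"
 * key.getCollectionPath().toString();   // "rooms/eros/messages"
 * at.isDocumentKey(key.path);           // true (4 segments)
 */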
1173
1174/**
1175 * @license
1176 * Copyright 2021 Google LLC
1177 *
1178 * Licensed under the Apache License, Version 2.0 (the "License");
1179 * you may not use this file except in compliance with the License.
1180 * You may obtain a copy of the License at
1181 *
1182 * http://www.apache.org/licenses/LICENSE-2.0
1183 *
1184 * Unless required by applicable law or agreed to in writing, software
1185 * distributed under the License is distributed on an "AS IS" BASIS,
1186 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1187 * See the License for the specific language governing permissions and
1188 * limitations under the License.
1189 */
1190/**
1191 * The initial mutation batch id for each index. Gets updated during index
1192 * backfill.
1193 */
1194/**
1195 * An index definition for field indexes in Firestore.
1196 *
1197 * Every index is associated with a collection. The definition contains a list
1198 * of fields and their index kind (which can be `ASCENDING`, `DESCENDING` or
1199 * `CONTAINS` for ArrayContains/ArrayContainsAny queries).
1200 *
1201 * Unlike the backend, the SDK does not differentiate between collection or
1202 * collection group-scoped indices. Every index can be used for both single
1203 * collection and collection group queries.
1204 */
1205class ht {
1206 constructor(
1207 /**
1208 * The index ID. Returns -1 if the index ID is not available (e.g. the index
1209 * has not yet been persisted).
1210 */
1211 t,
1212 /** The collection ID this index applies to. */
1213 e,
1214 /** The field segments for this index. */
1215 n,
1216 /** Shows how up-to-date the index is for the current user. */
1217 s) {
1218 this.indexId = t, this.collectionGroup = e, this.fields = n, this.indexState = s;
1219 }
1220}
1221
1222/** An ID for an index that has not yet been added to persistence. */
1223/** Returns the ArrayContains/ArrayContainsAny segment for this index. */
1224function lt(t) {
1225 return t.fields.find((t => 2 /* CONTAINS */ === t.kind));
1226}
1227
1228/** Returns all directional (ascending/descending) segments for this index. */ function ft(t) {
1229 return t.fields.filter((t => 2 /* CONTAINS */ !== t.kind));
1230}
1231
1232/**
1233 * Returns the order of the document key component for the given index.
1234 *
1235 * PORTING NOTE: This is only used in the Web IndexedDb implementation.
1236 */
1237/**
1238 * Compares indexes by collection group and segments. Ignores update time and
1239 * index ID.
1240 */
1241function dt(t, e) {
1242 let n = tt(t.collectionGroup, e.collectionGroup);
1243 if (0 !== n) return n;
1244 for (let s = 0; s < Math.min(t.fields.length, e.fields.length); ++s) if (n = wt(t.fields[s], e.fields[s]),
1245 0 !== n) return n;
1246 return tt(t.fields.length, e.fields.length);
1247}
1248
1249/** Returns a debug representation of the field index */ ht.UNKNOWN_ID = -1;
1250
1251/** An index component consisting of field path and index type. */
1252class _t {
1253 constructor(
1254 /** The field path of the component. */
1255 t,
1256 /** The fields sorting order. */
1257 e) {
1258 this.fieldPath = t, this.kind = e;
1259 }
1260}
1261
1262function wt(t, e) {
1263 const n = ct.comparator(t.fieldPath, e.fieldPath);
1264 return 0 !== n ? n : tt(t.kind, e.kind);
1265}
1266
1267/**
1268 * Stores the "high water mark" that indicates how updated the Index is for the
1269 * current user.
1270 */ class mt {
1271 constructor(
1272 /**
1273 * Indicates when the index was last updated (relative to other indexes).
1274 */
1275 t,
1276 /** The latest indexed read time, document and batch id. */
1277 e) {
1278 this.sequenceNumber = t, this.offset = e;
1279 }
1280 /** The state of an index that has not yet been backfilled. */ static empty() {
1281 return new mt(0, pt.min());
1282 }
1283}
1284
1285/**
1286 * Creates an offset that matches all documents with a read time higher than
1287 * `readTime`.
1288 */ function gt(t, e) {
1289 // We want to create an offset that matches all documents with a read time
1290 // greater than the provided read time. To do so, we technically need to
1291 // create an offset for `(readTime, MAX_DOCUMENT_KEY)`. While we could use
1292 // Unicode codepoints to generate MAX_DOCUMENT_KEY, it is much easier to use
1293 // `(readTime + 1, DocumentKey.empty())` since `> DocumentKey.empty()` matches
1294 // all valid document IDs.
1295 const n = t.toTimestamp().seconds, s = t.toTimestamp().nanoseconds + 1, i = it.fromTimestamp(1e9 === s ? new st(n + 1, 0) : new st(n, s));
1296 return new pt(i, at.empty(), e);
1297}
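/*
 * Worked example for `gt` (the read-time successor offset above): one
 * nanosecond is added to the read time, rolling over into the next second when
 * the nanosecond field would reach 1e9, so the offset matches every document
 * read strictly after `readTime`.
 *
 * @example
 * const offset = gt(it.fromTimestamp(new st(5, 999999999)), -1); // -1 = no batch id
 * offset.readTime.toTimestamp().seconds;       // 6
 * offset.readTime.toTimestamp().nanoseconds;   // 0
 */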
1298
1299/** Creates a new offset based on the provided document. */ function yt(t) {
1300 return new pt(t.readTime, t.key, -1);
1301}
1302
1303/**
1304 * Stores the latest read time, document and batch ID that were processed for an
1305 * index.
1306 */ class pt {
1307 constructor(
1308 /**
1309 * The latest read time version that has been indexed by Firestore for this
1310 * field index.
1311 */
1312 t,
1313 /**
1314 * The key of the last document that was indexed for this query. Use
1315 * `DocumentKey.empty()` if no document has been indexed.
1316 */
1317 e,
1318 /*
1319 * The largest mutation batch id that's been processed by Firestore.
1320 */
1321 n) {
1322 this.readTime = t, this.documentKey = e, this.largestBatchId = n;
1323 }
1324 /** Returns an offset that sorts before all regular offsets. */ static min() {
1325 return new pt(it.min(), at.empty(), -1);
1326 }
1327 /** Returns an offset that sorts after all regular offsets. */ static max() {
1328 return new pt(it.max(), at.empty(), -1);
1329 }
1330}
1331
1332function It(t, e) {
1333 let n = t.readTime.compareTo(e.readTime);
1334 return 0 !== n ? n : (n = at.comparator(t.documentKey, e.documentKey), 0 !== n ? n : tt(t.largestBatchId, e.largestBatchId));
1335}
1336
1337/**
1338 * @license
1339 * Copyright 2020 Google LLC
1340 *
1341 * Licensed under the Apache License, Version 2.0 (the "License");
1342 * you may not use this file except in compliance with the License.
1343 * You may obtain a copy of the License at
1344 *
1345 * http://www.apache.org/licenses/LICENSE-2.0
1346 *
1347 * Unless required by applicable law or agreed to in writing, software
1348 * distributed under the License is distributed on an "AS IS" BASIS,
1349 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1350 * See the License for the specific language governing permissions and
1351 * limitations under the License.
1352 */ const Tt = "The current tab is not in the required state to perform this operation. It might be necessary to refresh the browser tab.";
1353
1354/**
1355 * A base class representing a persistence transaction, encapsulating both the
1356 * transaction's sequence numbers as well as a list of onCommitted listeners.
1357 *
1358 * When you call Persistence.runTransaction(), it will create a transaction and
1359 * pass it to your callback. You then pass it to any method that operates
1360 * on persistence.
1361 */ class Et {
1362 constructor() {
1363 this.onCommittedListeners = [];
1364 }
1365 addOnCommittedListener(t) {
1366 this.onCommittedListeners.push(t);
1367 }
1368 raiseOnCommittedEvent() {
1369 this.onCommittedListeners.forEach((t => t()));
1370 }
1371}
1372
1373/**
1374 * @license
1375 * Copyright 2017 Google LLC
1376 *
1377 * Licensed under the Apache License, Version 2.0 (the "License");
1378 * you may not use this file except in compliance with the License.
1379 * You may obtain a copy of the License at
1380 *
1381 * http://www.apache.org/licenses/LICENSE-2.0
1382 *
1383 * Unless required by applicable law or agreed to in writing, software
1384 * distributed under the License is distributed on an "AS IS" BASIS,
1385 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1386 * See the License for the specific language governing permissions and
1387 * limitations under the License.
1388 */
1389/**
1390 * Verifies the error thrown by a LocalStore operation. If a LocalStore
1391 * operation fails because the primary lease has been taken by another client,
1392 * we ignore the error (the persistence layer will immediately call
1393 * `applyPrimaryLease` to propagate the primary state change). All other errors
1394 * are re-thrown.
1395 *
1396 * @param err - An error returned by a LocalStore operation.
1397 * @returns A Promise that resolves after we recovered, or the original error.
1398 */ async function At(t) {
1399 if (t.code !== L.FAILED_PRECONDITION || t.message !== Tt) throw t;
1400 x("LocalStore", "Unexpectedly lost primary lease");
1401}
1402
1403/**
1404 * @license
1405 * Copyright 2017 Google LLC
1406 *
1407 * Licensed under the Apache License, Version 2.0 (the "License");
1408 * you may not use this file except in compliance with the License.
1409 * You may obtain a copy of the License at
1410 *
1411 * http://www.apache.org/licenses/LICENSE-2.0
1412 *
1413 * Unless required by applicable law or agreed to in writing, software
1414 * distributed under the License is distributed on an "AS IS" BASIS,
1415 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1416 * See the License for the specific language governing permissions and
1417 * limitations under the License.
1418 */
1419/**
1420 * PersistencePromise is essentially a re-implementation of Promise except
1421 * it has a .next() method instead of .then(), and .next() and .catch() callbacks
1422 * are executed synchronously when a PersistencePromise resolves rather than
1423 * asynchronously (Promise implementations use setImmediate() or similar).
1424 *
1425 * This is necessary to interoperate with IndexedDB which will automatically
1426 * commit transactions if control is returned to the event loop without
1427 * synchronously initiating another operation on the transaction.
1428 *
1429 * NOTE: .then() and .catch() only allow a single consumer, unlike normal
1430 * Promises.
1431 */ class Rt {
1432 constructor(t) {
1433 // NOTE: next/catchCallback will always point to our own wrapper functions,
1434 // not the user's raw next() or catch() callbacks.
1435 this.nextCallback = null, this.catchCallback = null,
1436 // When the operation resolves, we'll set result or error and mark isDone.
1437 this.result = void 0, this.error = void 0, this.isDone = !1,
1438 // Set to true when .then() or .catch() are called and prevents additional
1439 // chaining.
1440 this.callbackAttached = !1, t((t => {
1441 this.isDone = !0, this.result = t, this.nextCallback &&
1442 // value should be defined unless T is Void, but we can't express
1443 // that in the type system.
1444 this.nextCallback(t);
1445 }), (t => {
1446 this.isDone = !0, this.error = t, this.catchCallback && this.catchCallback(t);
1447 }));
1448 }
1449 catch(t) {
1450 return this.next(void 0, t);
1451 }
1452 next(t, e) {
1453 return this.callbackAttached && M(), this.callbackAttached = !0, this.isDone ? this.error ? this.wrapFailure(e, this.error) : this.wrapSuccess(t, this.result) : new Rt(((n, s) => {
1454 this.nextCallback = e => {
1455 this.wrapSuccess(t, e).next(n, s);
1456 }, this.catchCallback = t => {
1457 this.wrapFailure(e, t).next(n, s);
1458 };
1459 }));
1460 }
1461 toPromise() {
1462 return new Promise(((t, e) => {
1463 this.next(t, e);
1464 }));
1465 }
1466 wrapUserFunction(t) {
1467 try {
1468 const e = t();
1469 return e instanceof Rt ? e : Rt.resolve(e);
1470 } catch (t) {
1471 return Rt.reject(t);
1472 }
1473 }
1474 wrapSuccess(t, e) {
1475 return t ? this.wrapUserFunction((() => t(e))) : Rt.resolve(e);
1476 }
1477 wrapFailure(t, e) {
1478 return t ? this.wrapUserFunction((() => t(e))) : Rt.reject(e);
1479 }
1480 static resolve(t) {
1481 return new Rt(((e, n) => {
1482 e(t);
1483 }));
1484 }
1485 static reject(t) {
1486 return new Rt(((e, n) => {
1487 n(t);
1488 }));
1489 }
1490 static waitFor(
1491 // Accept all Promise types in waitFor().
1492 // eslint-disable-next-line @typescript-eslint/no-explicit-any
1493 t) {
1494 return new Rt(((e, n) => {
1495 let s = 0, i = 0, r = !1;
1496 t.forEach((t => {
1497 ++s, t.next((() => {
1498 ++i, r && i === s && e();
1499 }), (t => n(t)));
1500 })), r = !0, i === s && e();
1501 }));
1502 }
1503 /**
1504 * Given an array of predicate functions that asynchronously evaluate to a
1505 * boolean, implements a short-circuiting `or` between the results. Predicates
1506 * will be evaluated until one of them returns `true`, then stop. The final
1507 * result will be whether any of them returned `true`.
1508 */ static or(t) {
1509 let e = Rt.resolve(!1);
1510 for (const n of t) e = e.next((t => t ? Rt.resolve(t) : n()));
1511 return e;
1512 }
1513 static forEach(t, e) {
1514 const n = [];
1515 return t.forEach(((t, s) => {
1516 n.push(e.call(this, t, s));
1517 })), this.waitFor(n);
1518 }
1519 /**
1520 * Concurrently map all array elements through asynchronous function.
1521 */ static mapArray(t, e) {
1522 return new Rt(((n, s) => {
1523 const i = t.length, r = new Array(i);
1524 let o = 0;
1525 for (let u = 0; u < i; u++) {
1526 const c = u;
1527 e(t[c]).next((t => {
1528 r[c] = t, ++o, o === i && n(r);
1529 }), (t => s(t)));
1530 }
1531 }));
1532 }
1533 /**
1534 * An alternative to recursive PersistencePromise calls, that avoids
1535 * potential memory problems from unbounded chains of promises.
1536 *
1537 * The `action` will be called repeatedly while `condition` is true.
1538 */ static doWhile(t, e) {
1539 return new Rt(((n, s) => {
1540 const i = () => {
1541 !0 === t() ? e().next((() => {
1542 i();
1543 }), s) : n();
1544 };
1545 i();
1546 }));
1547 }
1548}
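/*
 * Usage sketch: `Rt` is the minified `PersistencePromise`. Unlike a native
 * Promise, `.next()` callbacks run synchronously when the value is already
 * available, which is what keeps an IndexedDB transaction alive across chained
 * operations. A minimal chain using only the members defined above:
 *
 * @example
 * Rt.resolve(1)
 *   .next(n => Rt.resolve(n + 1))
 *   .next(n => { console.log(n); })        // logs 2, synchronously
 *   .catch(err => { console.error(err); });
 * // Bridge into async code only at the boundary:
 * // await Rt.waitFor([op1, op2]).toPromise();
 */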
1549
1550/**
1551 * @license
1552 * Copyright 2017 Google LLC
1553 *
1554 * Licensed under the Apache License, Version 2.0 (the "License");
1555 * you may not use this file except in compliance with the License.
1556 * You may obtain a copy of the License at
1557 *
1558 * http://www.apache.org/licenses/LICENSE-2.0
1559 *
1560 * Unless required by applicable law or agreed to in writing, software
1561 * distributed under the License is distributed on an "AS IS" BASIS,
1562 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1563 * See the License for the specific language governing permissions and
1564 * limitations under the License.
1565 */
1566// References to `window` are guarded by SimpleDb.isAvailable()
1567/* eslint-disable no-restricted-globals */
1568/**
1569 * Wraps an IDBTransaction and exposes a store() method to get a handle to a
1570 * specific object store.
1571 */
1572class bt {
1573 constructor(t, e) {
1574 this.action = t, this.transaction = e, this.aborted = !1,
1575 /**
1576 * A `Promise` that resolves with the result of the IndexedDb transaction.
1577 */
1578 this.P = new q, this.transaction.oncomplete = () => {
1579 this.P.resolve();
1580 }, this.transaction.onabort = () => {
1581 e.error ? this.P.reject(new Vt(t, e.error)) : this.P.resolve();
1582 }, this.transaction.onerror = e => {
1583 const n = Nt(e.target.error);
1584 this.P.reject(new Vt(t, n));
1585 };
1586 }
1587 static open(t, e, n, s) {
1588 try {
1589 return new bt(e, t.transaction(s, n));
1590 } catch (t) {
1591 throw new Vt(e, t);
1592 }
1593 }
1594 get v() {
1595 return this.P.promise;
1596 }
1597 abort(t) {
1598 t && this.P.reject(t), this.aborted || (x("SimpleDb", "Aborting transaction:", t ? t.message : "Client-initiated abort"),
1599 this.aborted = !0, this.transaction.abort());
1600 }
1601 V() {
1602 // If the browser supports V3 IndexedDB, we invoke commit() explicitly to
1603 // speed up IndexedDB processing if the event loop remains blocked.
1604 // eslint-disable-next-line @typescript-eslint/no-explicit-any
1605 const t = this.transaction;
1606 this.aborted || "function" != typeof t.commit || t.commit();
1607 }
1608 /**
1609 * Returns a SimpleDbStore<KeyType, ValueType> for the specified store. All
1610 * operations performed on the SimpleDbStore happen within the context of this
1611 * transaction and it cannot be used anymore once the transaction is
1612 * completed.
1613 *
1614 * Note that we can't actually enforce that the KeyType and ValueType are
1615 * correct, but they allow type safety through the rest of the consuming code.
1616 */ store(t) {
1617 const e = this.transaction.objectStore(t);
1618 return new Dt(e);
1619 }
1620}
1621
1622/**
1623 * Provides a wrapper around IndexedDb with a simplified interface that uses
1624 * Promise-like return values to chain operations. Real promises cannot be used
1625 * since .then() continuations are executed asynchronously (e.g. via
1626 * .setImmediate), which would cause IndexedDB to end the transaction.
1627 * See PersistencePromise for more details.
1628 */ class Pt {
1629 /*
1630 * Creates a new SimpleDb wrapper for IndexedDb database `name`.
1631 *
1632 * Note that `version` must not be a downgrade. IndexedDB does not support
1633 * downgrading the schema version. We currently do not support any way to do
1634 * versioning outside of IndexedDB's versioning mechanism, as only
1635 * version-upgrade transactions are allowed to do things like create
1636 * object stores.
1637 */
1638 constructor(t, e, n) {
1639 this.name = t, this.version = e, this.S = n;
1640 // NOTE: According to https://bugs.webkit.org/show_bug.cgi?id=197050, the
1641 // bug we're checking for should exist in iOS >= 12.2 and < 13, but for
1642 // whatever reason it's much harder to hit after 12.2 so we only proactively
1643 // log on 12.2.
1644 12.2 === Pt.D(a()) && N("Firestore persistence suffers from a bug in iOS 12.2 Safari that may cause your app to stop working. See https://stackoverflow.com/q/56496296/110915 for details and a potential workaround.");
1645 }
1646 /** Deletes the specified database. */ static delete(t) {
1647 return x("SimpleDb", "Removing database:", t), Ct(window.indexedDB.deleteDatabase(t)).toPromise();
1648 }
1649 /** Returns true if IndexedDB is available in the current environment. */ static C() {
1650 if (!h()) return !1;
1651 if (Pt.N()) return !0;
1652 // We extensively use indexed array values and compound keys,
1653 // which IE and Edge do not support. However, they still have indexedDB
1654 // defined on the window, so we need to check for them here and make sure
1655 // to return that persistence is not enabled for those browsers.
1656 // For tracking support of this feature, see here:
1657 // https://developer.microsoft.com/en-us/microsoft-edge/platform/status/indexeddbarraysandmultientrysupport/
1658 // Check the UA string to find out the browser.
1659 const t = a(), e = Pt.D(t), n = 0 < e && e < 10, s = Pt.k(t), i = 0 < s && s < 4.5;
1660 // IE 10
1661 // ua = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)';
1662 // IE 11
1663 // ua = 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko';
1664 // Edge
1665 // ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,
1666 // like Gecko) Chrome/39.0.2171.71 Safari/537.36 Edge/12.0';
1667 // iOS Safari: Disable for users running iOS version < 10.
1668 return !(t.indexOf("MSIE ") > 0 || t.indexOf("Trident/") > 0 || t.indexOf("Edge/") > 0 || n || i);
1669 }
1670 /**
1671 * Returns true if the backing IndexedDB store is the Node IndexedDBShim
1672 * (see https://github.com/axemclion/IndexedDBShim).
1673 */ static N() {
1674 var t;
1675 return "undefined" != typeof process && "YES" === (null === (t = process.env) || void 0 === t ? void 0 : t.O);
1676 }
1677 /** Helper to get a typed SimpleDbStore from a transaction. */ static M(t, e) {
1678 return t.store(e);
1679 }
1680 // visible for testing
1681 /** Parse User Agent to determine iOS version. Returns -1 if not found. */
1682 static D(t) {
1683 const e = t.match(/i(?:phone|pad|pod) os ([\d_]+)/i), n = e ? e[1].split("_").slice(0, 2).join(".") : "-1";
1684 return Number(n);
1685 }
1686 // visible for testing
1687 /** Parse User Agent to determine Android version. Returns -1 if not found. */
1688 static k(t) {
1689 const e = t.match(/Android ([\d.]+)/i), n = e ? e[1].split(".").slice(0, 2).join(".") : "-1";
1690 return Number(n);
1691 }
1692 /**
1693 * Opens the specified database, creating or upgrading it if necessary.
1694 */ async F(t) {
1695 return this.db || (x("SimpleDb", "Opening database:", this.name), this.db = await new Promise(((e, n) => {
1696 // TODO(mikelehen): Investigate browser compatibility.
1697 // https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API/Using_IndexedDB
1698 // suggests IE9 and older WebKit browsers handle upgrade
1699 // differently. They expect setVersion, as described here:
1700 // https://developer.mozilla.org/en-US/docs/Web/API/IDBVersionChangeRequest/setVersion
1701 const s = indexedDB.open(this.name, this.version);
1702 s.onsuccess = t => {
1703 const n = t.target.result;
1704 e(n);
1705 }, s.onblocked = () => {
1706 n(new Vt(t, "Cannot upgrade IndexedDB schema while another tab is open. Close all tabs that access Firestore and reload this page to proceed."));
1707 }, s.onerror = e => {
1708 const s = e.target.error;
1709 "VersionError" === s.name ? n(new U(L.FAILED_PRECONDITION, "A newer version of the Firestore SDK was previously used and so the persisted data is not compatible with the version of the SDK you are now using. The SDK will operate with persistence disabled. If you need persistence, please re-upgrade to a newer version of the SDK or else clear the persisted IndexedDB data for your app to start fresh.")) : "InvalidStateError" === s.name ? n(new U(L.FAILED_PRECONDITION, "Unable to open an IndexedDB connection. This could be due to running in a private browsing session on a browser whose private browsing sessions do not support IndexedDB: " + s)) : n(new Vt(t, s));
1710 }, s.onupgradeneeded = t => {
1711 x("SimpleDb", 'Database "' + this.name + '" requires upgrade from version:', t.oldVersion);
1712 const e = t.target.result;
1713 this.S.$(e, s.transaction, t.oldVersion, this.version).next((() => {
1714 x("SimpleDb", "Database upgrade to version " + this.version + " complete");
1715 }));
1716 };
1717 }))), this.B && (this.db.onversionchange = t => this.B(t)), this.db;
1718 }
1719 L(t) {
1720 this.B = t, this.db && (this.db.onversionchange = e => t(e));
1721 }
1722 async runTransaction(t, e, n, s) {
1723 const i = "readonly" === e;
1724 let r = 0;
1725 for (;;) {
1726 ++r;
1727 try {
1728 this.db = await this.F(t);
1729 const e = bt.open(this.db, t, i ? "readonly" : "readwrite", n), r = s(e).next((t => (e.V(),
1730 t))).catch((t => (
1731 // Abort the transaction if there was an error.
1732 e.abort(t), Rt.reject(t)))).toPromise();
1733 // As noted above, errors are propagated by aborting the transaction. So
1734 // we swallow any error here to avoid the browser logging it as unhandled.
1735 return r.catch((() => {})),
1736 // Wait for the transaction to complete (i.e. IndexedDb's onsuccess event to
1737 // fire), but still return the original transactionFnResult back to the
1738 // caller.
1739 await e.v, r;
1740 } catch (t) {
1741 const e = t, n = "FirebaseError" !== e.name && r < 3;
1742 // TODO(schmidt-sebastian): We could probably be smarter about this and
1743 // not retry exceptions that are likely unrecoverable (such as quota
1744 // exceeded errors).
1745 // Note: We cannot use an instanceof check for FirestoreException, since the
1746 // exception is wrapped in a generic error by our async/await handling.
1747 if (x("SimpleDb", "Transaction failed with error:", e.message, "Retrying:", n),
1748 this.close(), !n) return Promise.reject(e);
1749 }
1750 }
1751 }
1752 close() {
1753 this.db && this.db.close(), this.db = void 0;
1754 }
1755}
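
/**
 * Illustrative sketch, not from the upstream source: opening the minified
 * SimpleDb wrapper `Pt` and running a transaction that chains through the
 * PersistencePromise class `Rt`. Assumes a browser environment with
 * IndexedDB; the database name, store name, key and value are made up, and
 * the helper is never invoked.
 */
function _exampleSimpleDbRoundTrip() {
    // The schema "converter" runs inside the version-change transaction via
    // its `$` method and must return a PersistencePromise.
    const schema = {
        $: (db /*, txn, fromVersion, toVersion */) => {
            db.createObjectStore("example-docs");
            return Rt.resolve();
        }
    };
    const simpleDb = new Pt("example-db", 1, schema);
    // runTransaction(action, mode, storeNames, fn): `fn` receives a wrapped
    // transaction (`bt`) and must return a PersistencePromise.
    return simpleDb.runTransaction("example-write", "readwrite", [ "example-docs" ], (txn => {
        const docs = txn.store("example-docs");
        // Dt.put(key, value) and Dt.get(key) both return PersistencePromises.
        return docs.put("doc-1", {
            title: "hello"
        }).next((() => docs.get("doc-1")));
    }));
}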
1756
1757/**
1758 * A controller for iterating over a key range or index. It allows an iterate
1759 * callback to delete the currently-referenced object, or jump to a new key
1760 * within the key range or index.
1761 */ class vt {
1762 constructor(t) {
1763 this.U = t, this.q = !1, this.K = null;
1764 }
1765 get isDone() {
1766 return this.q;
1767 }
1768 get G() {
1769 return this.K;
1770 }
1771 set cursor(t) {
1772 this.U = t;
1773 }
1774 /**
1775 * This function can be called to stop iteration at any point.
1776 */ done() {
1777 this.q = !0;
1778 }
1779 /**
1780 * This function can be called to skip to that next key, which could be
1781 * an index or a primary key.
1782 */ j(t) {
1783 this.K = t;
1784 }
1785 /**
1786 * Delete the current cursor value from the object store.
1787 *
1788 * NOTE: You CANNOT do this with a keysOnly query.
1789 */ delete() {
1790 return Ct(this.U.delete());
1791 }
1792}
1793
1794/** An error that wraps exceptions thrown during IndexedDB execution. */ class Vt extends U {
1795 constructor(t, e) {
1796 super(L.UNAVAILABLE, `IndexedDB transaction '${t}' failed: ${e}`), this.name = "IndexedDbTransactionError";
1797 }
1798}
1799
1800/** Verifies whether `e` is an IndexedDbTransactionError. */ function St(t) {
1801 // Use name equality, as instanceof checks on errors don't work with errors
1802 // that wrap other errors.
1803 return "IndexedDbTransactionError" === t.name;
1804}
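
/**
 * Illustrative sketch, not from the upstream source: `St` detects the wrapper
 * error `Vt` by its `name` rather than with `instanceof`, so the check still
 * works for errors that crossed an async/await boundary. The transaction
 * label and message below are made up; the helper is never invoked.
 */
function _exampleIsIndexedDbTransactionError() {
    const wrapped = new Vt("example-txn", new Error("device storage full"));
    return St(wrapped) && // true: name is "IndexedDbTransactionError"
    !St(new Error("unrelated failure")); // false for ordinary errors
}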
1805
1806/**
1807 * A wrapper around an IDBObjectStore providing an API that:
1808 *
1809 * 1) Has generic KeyType / ValueType parameters to provide strongly-typed
1810 * methods for acting against the object store.
1811 * 2) Deals with IndexedDB's onsuccess / onerror event callbacks, making every
1812 * method return a PersistencePromise instead.
1813 * 3) Provides a higher-level API to avoid needing to do excessive wrapping of
1814 * intermediate IndexedDB types (IDBCursorWithValue, etc.)
1815 */ class Dt {
1816 constructor(t) {
1817 this.store = t;
1818 }
1819 put(t, e) {
1820 let n;
1821 return void 0 !== e ? (x("SimpleDb", "PUT", this.store.name, t, e), n = this.store.put(e, t)) : (x("SimpleDb", "PUT", this.store.name, "<auto-key>", t),
1822 n = this.store.put(t)), Ct(n);
1823 }
1824 /**
1825 * Adds a new value into an Object Store and returns the new key. Similar to
1826 * IndexedDb's `add()`, this method will fail on primary key collisions.
1827 *
1828 * @param value - The object to write.
1829 * @returns The key of the value to add.
1830 */ add(t) {
1831 x("SimpleDb", "ADD", this.store.name, t, t);
1832 return Ct(this.store.add(t));
1833 }
1834 /**
1835 * Gets the object with the specified key from the specified store, or null
1836 * if no object exists with the specified key.
1837 *
1838 * @param key - The key of the object to get.
1839 * @returns The object with the specified key or null if no object exists.
1840 */ get(t) {
1841 // We're doing an unsafe cast to ValueType.
1842 // eslint-disable-next-line @typescript-eslint/no-explicit-any
1843 return Ct(this.store.get(t)).next((e => (
1844 // Normalize nonexistence to null.
1845 void 0 === e && (e = null), x("SimpleDb", "GET", this.store.name, t, e), e)));
1846 }
1847 delete(t) {
1848 x("SimpleDb", "DELETE", this.store.name, t);
1849 return Ct(this.store.delete(t));
1850 }
1851 /**
1852 * If we ever need more of the count variants, we can add overloads. For now,
1853 * all we need is to count everything in a store.
1854 *
1855 * Returns the number of rows in the store.
1856 */ count() {
1857 x("SimpleDb", "COUNT", this.store.name);
1858 return Ct(this.store.count());
1859 }
1860 W(t, e) {
1861 const n = this.options(t, e);
1862 // Use `getAll()` if the browser supports IndexedDB v3, as it is roughly
1863 // 20% faster. Unfortunately, getAll() does not support custom indices.
1864 if (n.index || "function" != typeof this.store.getAll) {
1865 const t = this.cursor(n), e = [];
1866 return this.H(t, ((t, n) => {
1867 e.push(n);
1868 })).next((() => e));
1869 }
1870 {
1871 const t = this.store.getAll(n.range);
1872 return new Rt(((e, n) => {
1873 t.onerror = t => {
1874 n(t.target.error);
1875 }, t.onsuccess = t => {
1876 e(t.target.result);
1877 };
1878 }));
1879 }
1880 }
1881 /**
1882 * Loads the first `count` elements from the provided index range. Loads all
1883 * elements if no limit is provided.
1884 */ J(t, e) {
1885 const n = this.store.getAll(t, null === e ? void 0 : e);
1886 return new Rt(((t, e) => {
1887 n.onerror = t => {
1888 e(t.target.error);
1889 }, n.onsuccess = e => {
1890 t(e.target.result);
1891 };
1892 }));
1893 }
1894 Y(t, e) {
1895 x("SimpleDb", "DELETE ALL", this.store.name);
1896 const n = this.options(t, e);
1897 n.X = !1;
1898 const s = this.cursor(n);
1899 return this.H(s, ((t, e, n) => n.delete()));
1900 }
1901 Z(t, e) {
1902 let n;
1903 e ? n = t : (n = {}, e = t);
1904 const s = this.cursor(n);
1905 return this.H(s, e);
1906 }
1907 /**
1908 * Iterates over a store, but waits for the given callback to complete for
1909 * each entry before iterating the next entry. This allows the callback to do
1910 * asynchronous work to determine if this iteration should continue.
1911 *
1912 * The provided callback should return `true` to continue iteration, and
1913 * `false` otherwise.
1914 */ tt(t) {
1915 const e = this.cursor({});
1916 return new Rt(((n, s) => {
1917 e.onerror = t => {
1918 const e = Nt(t.target.error);
1919 s(e);
1920 }, e.onsuccess = e => {
1921 const s = e.target.result;
1922 s ? t(s.primaryKey, s.value).next((t => {
1923 t ? s.continue() : n();
1924 })) : n();
1925 };
1926 }));
1927 }
1928 H(t, e) {
1929 const n = [];
1930 return new Rt(((s, i) => {
1931 t.onerror = t => {
1932 i(t.target.error);
1933 }, t.onsuccess = t => {
1934 const i = t.target.result;
1935 if (!i) return void s();
1936 const r = new vt(i), o = e(i.primaryKey, i.value, r);
1937 if (o instanceof Rt) {
1938 const t = o.catch((t => (r.done(), Rt.reject(t))));
1939 n.push(t);
1940 }
1941 r.isDone ? s() : null === r.G ? i.continue() : i.continue(r.G);
1942 };
1943 })).next((() => Rt.waitFor(n)));
1944 }
1945 options(t, e) {
1946 let n;
1947 return void 0 !== t && ("string" == typeof t ? n = t : e = t), {
1948 index: n,
1949 range: e
1950 };
1951 }
1952 cursor(t) {
1953 let e = "next";
1954 if (t.reverse && (e = "prev"), t.index) {
1955 const n = this.store.index(t.index);
1956 return t.X ? n.openKeyCursor(t.range, e) : n.openCursor(t.range, e);
1957 }
1958 return this.store.openCursor(t.range, e);
1959 }
1960}
1961
1962/**
1963 * Wraps an IDBRequest in a PersistencePromise, using the onsuccess / onerror
1964 * handlers to resolve / reject the PersistencePromise as appropriate.
1965 */ function Ct(t) {
1966 return new Rt(((e, n) => {
1967 t.onsuccess = t => {
1968 const n = t.target.result;
1969 e(n);
1970 }, t.onerror = t => {
1971 const e = Nt(t.target.error);
1972 n(e);
1973 };
1974 }));
1975}
1976
1977// Guard so we only report the error once.
1978let xt = !1;
1979
1980function Nt(t) {
1981 const e = Pt.D(a());
1982 if (e >= 12.2 && e < 13) {
1983 const e = "An internal error was encountered in the Indexed Database server";
1984 if (t.message.indexOf(e) >= 0) {
1985 // Wrap error in a more descriptive one.
1986 const t = new U("internal", `IOS_INDEXEDDB_BUG1: IndexedDb has thrown '${e}'. This is likely due to an unavoidable bug in iOS. See https://stackoverflow.com/q/56496296/110915 for details and a potential workaround.`);
1987 return xt || (xt = !0,
1988 // Throw a global exception outside of this promise chain, for the user to
1989 // potentially catch.
1990 setTimeout((() => {
1991 throw t;
1992 }), 0)), t;
1993 }
1994 }
1995 return t;
1996}
1997
1998/** This class is responsible for the scheduling of Index Backfiller. */
1999class kt {
2000 constructor(t, e) {
2001 this.asyncQueue = t, this.et = e, this.task = null;
2002 }
2003 start() {
2004 this.nt(15e3);
2005 }
2006 stop() {
2007 this.task && (this.task.cancel(), this.task = null);
2008 }
2009 get started() {
2010 return null !== this.task;
2011 }
2012 nt(t) {
2013 x("IndexBackiller", `Scheduled in ${t}ms`), this.task = this.asyncQueue.enqueueAfterDelay("index_backfill" /* IndexBackfill */ , t, (async () => {
2014 this.task = null;
2015 try {
2016 x("IndexBackiller", `Documents written: ${await this.et.st()}`);
2017 } catch (t) {
2018 St(t) ? x("IndexBackiller", "Ignoring IndexedDB error during index backfill: ", t) : await At(t);
2019 }
2020 await this.nt(6e4);
2021 }));
2022 }
2023}
2024
2025/** Implements the steps for backfilling indexes. */ class Ot {
2026 constructor(
2027 /**
2028 * LocalStore provides access to IndexManager and LocalDocumentView.
2029 * These properties will update when the user changes. Consequently,
2030 * making a local copy of IndexManager and LocalDocumentView will require
2031 * updates over time. The simpler solution is to rely on LocalStore to have
2032 * up-to-date references to IndexManager and LocalDocumentStore.
2033 */
2034 t, e) {
2035 this.localStore = t, this.persistence = e;
2036 }
2037 async st(t = 50) {
2038 return this.persistence.runTransaction("Backfill Indexes", "readwrite-primary", (e => this.it(e, t)));
2039 }
2040 /** Writes index entries until the cap is reached. Returns the number of documents processed. */ it(t, e) {
2041 const n = new Set;
2042 let s = e, i = !0;
2043 return Rt.doWhile((() => !0 === i && s > 0), (() => this.localStore.indexManager.getNextCollectionGroupToUpdate(t).next((e => {
2044 if (null !== e && !n.has(e)) return x("IndexBackfiller", `Processing collection: ${e}`),
2045 this.rt(t, e, s).next((t => {
2046 s -= t, n.add(e);
2047 }));
2048 i = !1;
2049 })))).next((() => e - s));
2050 }
2051 /**
2052 * Writes entries for the provided collection group. Returns the number of documents processed.
2053 */ rt(t, e, n) {
2054 // Use the earliest offset of all field indexes to query the local cache.
2055 return this.localStore.indexManager.getMinOffsetFromCollectionGroup(t, e).next((s => this.localStore.localDocuments.getNextDocuments(t, e, s, n).next((n => {
2056 const i = n.changes;
2057 return this.localStore.indexManager.updateIndexEntries(t, i).next((() => this.ot(s, n))).next((n => (x("IndexBackfiller", `Updating offset: ${n}`),
2058 this.localStore.indexManager.updateCollectionGroup(t, e, n)))).next((() => i.size));
2059 }))));
2060 }
2061 /** Returns the next offset based on the provided documents. */ ot(t, e) {
2062 let n = t;
2063 return e.changes.forEach(((t, e) => {
2064 const s = yt(e);
2065 It(s, n) > 0 && (n = s);
2066 })), new pt(n.readTime, n.documentKey, Math.max(e.batchId, t.largestBatchId));
2067 }
2068}
2069
2070/**
2071 * @license
2072 * Copyright 2018 Google LLC
2073 *
2074 * Licensed under the Apache License, Version 2.0 (the "License");
2075 * you may not use this file except in compliance with the License.
2076 * You may obtain a copy of the License at
2077 *
2078 * http://www.apache.org/licenses/LICENSE-2.0
2079 *
2080 * Unless required by applicable law or agreed to in writing, software
2081 * distributed under the License is distributed on an "AS IS" BASIS,
2082 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2083 * See the License for the specific language governing permissions and
2084 * limitations under the License.
2085 */
2086/**
2087 * `ListenSequence` is a monotonic sequence. It is initialized with a minimum value to
2088 * exceed. All subsequent calls to next will return increasing values. If provided with a
2089 * `SequenceNumberSyncer`, it will additionally bump its next value when told of a new value, as
2090 * well as write out sequence numbers that it produces via `next()`.
2091 */ class Mt {
2092 constructor(t, e) {
2093 this.previousValue = t, e && (e.sequenceNumberHandler = t => this.ut(t), this.ct = t => e.writeSequenceNumber(t));
2094 }
2095 ut(t) {
2096 return this.previousValue = Math.max(t, this.previousValue), this.previousValue;
2097 }
2098 next() {
2099 const t = ++this.previousValue;
2100 return this.ct && this.ct(t), t;
2101 }
2102}
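
/**
 * Illustrative sketch, not from the upstream source: the minified
 * ListenSequence `Mt` hands out strictly increasing values above the minimum
 * it was constructed with, and `ut()` bumps it when a larger value is
 * reported externally. The helper name is made up and never invoked.
 */
function _exampleListenSequence() {
    const seq = new Mt(0); // resume above a persisted minimum of 0
    const first = seq.next(); // 1
    const second = seq.next(); // 2
    seq.ut(10); // another client reported 10, so skip ahead
    return second > first && 11 === seq.next();
}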
2103
2104/**
2105 * @license
2106 * Copyright 2017 Google LLC
2107 *
2108 * Licensed under the Apache License, Version 2.0 (the "License");
2109 * you may not use this file except in compliance with the License.
2110 * You may obtain a copy of the License at
2111 *
2112 * http://www.apache.org/licenses/LICENSE-2.0
2113 *
2114 * Unless required by applicable law or agreed to in writing, software
2115 * distributed under the License is distributed on an "AS IS" BASIS,
2116 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2117 * See the License for the specific language governing permissions and
2118 * limitations under the License.
2119 */
2120function Ft(t) {
2121 let e = 0;
2122 for (const n in t) Object.prototype.hasOwnProperty.call(t, n) && e++;
2123 return e;
2124}
2125
2126function $t(t, e) {
2127 for (const n in t) Object.prototype.hasOwnProperty.call(t, n) && e(n, t[n]);
2128}
2129
2130function Bt(t) {
2131 for (const e in t) if (Object.prototype.hasOwnProperty.call(t, e)) return !1;
2132 return !0;
2133}
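
/**
 * Illustrative sketch, not from the upstream source: the three own-property
 * helpers above — `Ft` counts keys, `$t` iterates key/value pairs and `Bt`
 * tests for emptiness. The sample object is made up; the helper is never
 * invoked.
 */
function _exampleObjectHelpers() {
    const counts = {
        apples: 2,
        pears: 3
    };
    const pairs = [];
    $t(counts, ((key, value) => pairs.push(`${key}:${value}`)));
    return 2 === Ft(counts) && "apples:2,pears:3" === pairs.join(",") && Bt({}) && !Bt(counts);
}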
2134
2135/**
2136 * @license
2137 * Copyright 2017 Google LLC
2138 *
2139 * Licensed under the Apache License, Version 2.0 (the "License");
2140 * you may not use this file except in compliance with the License.
2141 * You may obtain a copy of the License at
2142 *
2143 * http://www.apache.org/licenses/LICENSE-2.0
2144 *
2145 * Unless required by applicable law or agreed to in writing, software
2146 * distributed under the License is distributed on an "AS IS" BASIS,
2147 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2148 * See the License for the specific language governing permissions and
2149 * limitations under the License.
2150 */
2151// An immutable sorted map implementation, based on a Left-leaning Red-Black
2152// tree.
2153Mt.at = -1;
2154
2155class Lt {
2156 constructor(t, e) {
2157 this.comparator = t, this.root = e || qt.EMPTY;
2158 }
2159 // Returns a copy of the map, with the specified key/value added or replaced.
2160 insert(t, e) {
2161 return new Lt(this.comparator, this.root.insert(t, e, this.comparator).copy(null, null, qt.BLACK, null, null));
2162 }
2163 // Returns a copy of the map, with the specified key removed.
2164 remove(t) {
2165 return new Lt(this.comparator, this.root.remove(t, this.comparator).copy(null, null, qt.BLACK, null, null));
2166 }
2167 // Returns the value of the node with the given key, or null.
2168 get(t) {
2169 let e = this.root;
2170 for (;!e.isEmpty(); ) {
2171 const n = this.comparator(t, e.key);
2172 if (0 === n) return e.value;
2173 n < 0 ? e = e.left : n > 0 && (e = e.right);
2174 }
2175 return null;
2176 }
2177 // Returns the index of the element in this sorted map, or -1 if it doesn't
2178 // exist.
2179 indexOf(t) {
2180 // Number of nodes that were pruned when descending right
2181 let e = 0, n = this.root;
2182 for (;!n.isEmpty(); ) {
2183 const s = this.comparator(t, n.key);
2184 if (0 === s) return e + n.left.size;
2185 s < 0 ? n = n.left : (
2186 // Count all nodes left of the node plus the node itself
2187 e += n.left.size + 1, n = n.right);
2188 }
2189 // Node not found
2190 return -1;
2191 }
2192 isEmpty() {
2193 return this.root.isEmpty();
2194 }
2195 // Returns the total number of nodes in the map.
2196 get size() {
2197 return this.root.size;
2198 }
2199 // Returns the minimum key in the map.
2200 minKey() {
2201 return this.root.minKey();
2202 }
2203 // Returns the maximum key in the map.
2204 maxKey() {
2205 return this.root.maxKey();
2206 }
2207 // Traverses the map in key order and calls the specified action function
2208 // for each key/value pair. If action returns true, traversal is aborted.
2209 // Returns the first truthy value returned by action, or the last falsey
2210 // value returned by action.
2211 inorderTraversal(t) {
2212 return this.root.inorderTraversal(t);
2213 }
2214 forEach(t) {
2215 this.inorderTraversal(((e, n) => (t(e, n), !1)));
2216 }
2217 toString() {
2218 const t = [];
2219 return this.inorderTraversal(((e, n) => (t.push(`${e}:${n}`), !1))), `{${t.join(", ")}}`;
2220 }
2221 // Traverses the map in reverse key order and calls the specified action
2222 // function for each key/value pair. If action returns true, traversal is
2223 // aborted.
2224 // Returns the first truthy value returned by action, or the last falsey
2225 // value returned by action.
2226 reverseTraversal(t) {
2227 return this.root.reverseTraversal(t);
2228 }
2229 // Returns an iterator over the SortedMap.
2230 getIterator() {
2231 return new Ut(this.root, null, this.comparator, !1);
2232 }
2233 getIteratorFrom(t) {
2234 return new Ut(this.root, t, this.comparator, !1);
2235 }
2236 getReverseIterator() {
2237 return new Ut(this.root, null, this.comparator, !0);
2238 }
2239 getReverseIteratorFrom(t) {
2240 return new Ut(this.root, t, this.comparator, !0);
2241 }
2242}
2243
2244 // end SortedMap
2245// An iterator over an LLRBNode.
2246class Ut {
2247 constructor(t, e, n, s) {
2248 this.isReverse = s, this.nodeStack = [];
2249 let i = 1;
2250 for (;!t.isEmpty(); ) if (i = e ? n(t.key, e) : 1,
2251 // flip the comparison if we're going in reverse
2252 e && s && (i *= -1), i < 0)
2253 // This node is less than our start key. ignore it
2254 t = this.isReverse ? t.left : t.right; else {
2255 if (0 === i) {
2256 // This node is exactly equal to our start key. Push it on the stack,
2257 // but stop iterating;
2258 this.nodeStack.push(t);
2259 break;
2260 }
2261 // This node is greater than our start key, add it to the stack and move
2262 // to the next one
2263 this.nodeStack.push(t), t = this.isReverse ? t.right : t.left;
2264 }
2265 }
2266 getNext() {
2267 let t = this.nodeStack.pop();
2268 const e = {
2269 key: t.key,
2270 value: t.value
2271 };
2272 if (this.isReverse) for (t = t.left; !t.isEmpty(); ) this.nodeStack.push(t), t = t.right; else for (t = t.right; !t.isEmpty(); ) this.nodeStack.push(t),
2273 t = t.left;
2274 return e;
2275 }
2276 hasNext() {
2277 return this.nodeStack.length > 0;
2278 }
2279 peek() {
2280 if (0 === this.nodeStack.length) return null;
2281 const t = this.nodeStack[this.nodeStack.length - 1];
2282 return {
2283 key: t.key,
2284 value: t.value
2285 };
2286 }
2287}
2288
2289 // end SortedMapIterator
2290// Represents a node in a Left-leaning Red-Black tree.
2291class qt {
2292 constructor(t, e, n, s, i) {
2293 this.key = t, this.value = e, this.color = null != n ? n : qt.RED, this.left = null != s ? s : qt.EMPTY,
2294 this.right = null != i ? i : qt.EMPTY, this.size = this.left.size + 1 + this.right.size;
2295 }
2296 // Returns a copy of the current node, optionally replacing pieces of it.
2297 copy(t, e, n, s, i) {
2298 return new qt(null != t ? t : this.key, null != e ? e : this.value, null != n ? n : this.color, null != s ? s : this.left, null != i ? i : this.right);
2299 }
2300 isEmpty() {
2301 return !1;
2302 }
2303 // Traverses the tree in key order and calls the specified action function
2304 // for each node. If action returns true, traversal is aborted.
2305 // Returns the first truthy value returned by action, or the last falsey
2306 // value returned by action.
2307 inorderTraversal(t) {
2308 return this.left.inorderTraversal(t) || t(this.key, this.value) || this.right.inorderTraversal(t);
2309 }
2310 // Traverses the tree in reverse key order and calls the specified action
2311 // function for each node. If action returns true, traversal is aborted.
2312 // Returns the first truthy value returned by action, or the last falsey
2313 // value returned by action.
2314 reverseTraversal(t) {
2315 return this.right.reverseTraversal(t) || t(this.key, this.value) || this.left.reverseTraversal(t);
2316 }
2317 // Returns the minimum node in the tree.
2318 min() {
2319 return this.left.isEmpty() ? this : this.left.min();
2320 }
2321 // Returns the minimum key in the tree.
2322 minKey() {
2323 return this.min().key;
2324 }
2325 // Returns the maximum key in the tree.
2326 maxKey() {
2327 return this.right.isEmpty() ? this.key : this.right.maxKey();
2328 }
2329 // Returns new tree, with the key/value added.
2330 insert(t, e, n) {
2331 let s = this;
2332 const i = n(t, s.key);
2333 return s = i < 0 ? s.copy(null, null, null, s.left.insert(t, e, n), null) : 0 === i ? s.copy(null, e, null, null, null) : s.copy(null, null, null, null, s.right.insert(t, e, n)),
2334 s.fixUp();
2335 }
2336 removeMin() {
2337 if (this.left.isEmpty()) return qt.EMPTY;
2338 let t = this;
2339 return t.left.isRed() || t.left.left.isRed() || (t = t.moveRedLeft()), t = t.copy(null, null, null, t.left.removeMin(), null),
2340 t.fixUp();
2341 }
2342 // Returns new tree, with the specified item removed.
2343 remove(t, e) {
2344 let n, s = this;
2345 if (e(t, s.key) < 0) s.left.isEmpty() || s.left.isRed() || s.left.left.isRed() || (s = s.moveRedLeft()),
2346 s = s.copy(null, null, null, s.left.remove(t, e), null); else {
2347 if (s.left.isRed() && (s = s.rotateRight()), s.right.isEmpty() || s.right.isRed() || s.right.left.isRed() || (s = s.moveRedRight()),
2348 0 === e(t, s.key)) {
2349 if (s.right.isEmpty()) return qt.EMPTY;
2350 n = s.right.min(), s = s.copy(n.key, n.value, null, null, s.right.removeMin());
2351 }
2352 s = s.copy(null, null, null, null, s.right.remove(t, e));
2353 }
2354 return s.fixUp();
2355 }
2356 isRed() {
2357 return this.color;
2358 }
2359 // Returns new tree after performing any needed rotations.
2360 fixUp() {
2361 let t = this;
2362 return t.right.isRed() && !t.left.isRed() && (t = t.rotateLeft()), t.left.isRed() && t.left.left.isRed() && (t = t.rotateRight()),
2363 t.left.isRed() && t.right.isRed() && (t = t.colorFlip()), t;
2364 }
2365 moveRedLeft() {
2366 let t = this.colorFlip();
2367 return t.right.left.isRed() && (t = t.copy(null, null, null, null, t.right.rotateRight()),
2368 t = t.rotateLeft(), t = t.colorFlip()), t;
2369 }
2370 moveRedRight() {
2371 let t = this.colorFlip();
2372 return t.left.left.isRed() && (t = t.rotateRight(), t = t.colorFlip()), t;
2373 }
2374 rotateLeft() {
2375 const t = this.copy(null, null, qt.RED, null, this.right.left);
2376 return this.right.copy(null, null, this.color, t, null);
2377 }
2378 rotateRight() {
2379 const t = this.copy(null, null, qt.RED, this.left.right, null);
2380 return this.left.copy(null, null, this.color, null, t);
2381 }
2382 colorFlip() {
2383 const t = this.left.copy(null, null, !this.left.color, null, null), e = this.right.copy(null, null, !this.right.color, null, null);
2384 return this.copy(null, null, !this.color, t, e);
2385 }
2386 // For testing.
2387 checkMaxDepth() {
2388 const t = this.check();
2389 return Math.pow(2, t) <= this.size + 1;
2390 }
2391 // In a balanced RB tree, the black-depth (number of black nodes) from root to
2392 // leaves is equal on both sides. This function verifies that or asserts.
2393 check() {
2394 if (this.isRed() && this.left.isRed()) throw M();
2395 if (this.right.isRed()) throw M();
2396 const t = this.left.check();
2397 if (t !== this.right.check()) throw M();
2398 return t + (this.isRed() ? 0 : 1);
2399 }
2400}
2401
2402 // end LLRBNode
2403// Empty node is shared between all LLRB trees.
2404// eslint-disable-next-line @typescript-eslint/no-explicit-any
2405qt.EMPTY = null, qt.RED = !0, qt.BLACK = !1;
2406
2407// end LLRBEmptyNode
2408qt.EMPTY = new
2409// Represents an empty node (a leaf node in the Red-Black Tree).
2410class {
2411 constructor() {
2412 this.size = 0;
2413 }
2414 get key() {
2415 throw M();
2416 }
2417 get value() {
2418 throw M();
2419 }
2420 get color() {
2421 throw M();
2422 }
2423 get left() {
2424 throw M();
2425 }
2426 get right() {
2427 throw M();
2428 }
2429 // Returns a copy of the current node.
2430 copy(t, e, n, s, i) {
2431 return this;
2432 }
2433 // Returns a copy of the tree, with the specified key/value added.
2434 insert(t, e, n) {
2435 return new qt(t, e);
2436 }
2437 // Returns a copy of the tree, with the specified key removed.
2438 remove(t, e) {
2439 return this;
2440 }
2441 isEmpty() {
2442 return !0;
2443 }
2444 inorderTraversal(t) {
2445 return !1;
2446 }
2447 reverseTraversal(t) {
2448 return !1;
2449 }
2450 minKey() {
2451 return null;
2452 }
2453 maxKey() {
2454 return null;
2455 }
2456 isRed() {
2457 return !1;
2458 }
2459 // For testing.
2460 checkMaxDepth() {
2461 return !0;
2462 }
2463 check() {
2464 return 0;
2465 }
2466};
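
/**
 * Illustrative sketch, not from the upstream source: basic use of the
 * immutable SortedMap `Lt`, which is backed by the LLRB nodes above. The
 * comparator defines key order and every insert/remove returns a new map.
 * The keys, values and helper name are made up; the helper is never invoked.
 */
function _exampleSortedMap() {
    const byNumber = (a, b) => a - b;
    let map = new Lt(byNumber);
    map = map.insert(2, "two").insert(1, "one").insert(3, "three");
    const entries = [];
    map.forEach(((key, value) => entries.push(`${key}=${value}`)));
    // In-order traversal yields ["1=one", "2=two", "3=three"].
    return "two" === map.get(2) && 3 === map.size && 1 === map.minKey() && "1=one" === entries[0];
}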
2467
2468/**
2469 * @license
2470 * Copyright 2017 Google LLC
2471 *
2472 * Licensed under the Apache License, Version 2.0 (the "License");
2473 * you may not use this file except in compliance with the License.
2474 * You may obtain a copy of the License at
2475 *
2476 * http://www.apache.org/licenses/LICENSE-2.0
2477 *
2478 * Unless required by applicable law or agreed to in writing, software
2479 * distributed under the License is distributed on an "AS IS" BASIS,
2480 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2481 * See the License for the specific language governing permissions and
2482 * limitations under the License.
2483 */
2484/**
2485 * SortedSet is an immutable (copy-on-write) collection that holds elements
2486 * in order specified by the provided comparator.
2487 *
2488 * NOTE: if the provided comparator returns 0 for two elements, we consider them to
2489 * be equal!
2490 */
2491class Kt {
2492 constructor(t) {
2493 this.comparator = t, this.data = new Lt(this.comparator);
2494 }
2495 has(t) {
2496 return null !== this.data.get(t);
2497 }
2498 first() {
2499 return this.data.minKey();
2500 }
2501 last() {
2502 return this.data.maxKey();
2503 }
2504 get size() {
2505 return this.data.size;
2506 }
2507 indexOf(t) {
2508 return this.data.indexOf(t);
2509 }
2510 /** Iterates elements in order defined by "comparator" */ forEach(t) {
2511 this.data.inorderTraversal(((e, n) => (t(e), !1)));
2512 }
2513 /** Iterates over `elem`s such that: range[0] <= elem < range[1]. */ forEachInRange(t, e) {
2514 const n = this.data.getIteratorFrom(t[0]);
2515 for (;n.hasNext(); ) {
2516 const s = n.getNext();
2517 if (this.comparator(s.key, t[1]) >= 0) return;
2518 e(s.key);
2519 }
2520 }
2521 /**
2522 * Iterates over `elem`s such that: start <= elem until false is returned.
2523 */ forEachWhile(t, e) {
2524 let n;
2525 for (n = void 0 !== e ? this.data.getIteratorFrom(e) : this.data.getIterator(); n.hasNext(); ) {
2526 if (!t(n.getNext().key)) return;
2527 }
2528 }
2529 /** Finds the least element greater than or equal to `elem`. */ firstAfterOrEqual(t) {
2530 const e = this.data.getIteratorFrom(t);
2531 return e.hasNext() ? e.getNext().key : null;
2532 }
2533 getIterator() {
2534 return new Gt(this.data.getIterator());
2535 }
2536 getIteratorFrom(t) {
2537 return new Gt(this.data.getIteratorFrom(t));
2538 }
2539 /** Inserts or updates an element */ add(t) {
2540 return this.copy(this.data.remove(t).insert(t, !0));
2541 }
2542 /** Deletes an element */ delete(t) {
2543 return this.has(t) ? this.copy(this.data.remove(t)) : this;
2544 }
2545 isEmpty() {
2546 return this.data.isEmpty();
2547 }
2548 unionWith(t) {
2549 let e = this;
2550 // Make sure `result` always refers to the larger one of the two sets.
2551 return e.size < t.size && (e = t, t = this), t.forEach((t => {
2552 e = e.add(t);
2553 })), e;
2554 }
2555 isEqual(t) {
2556 if (!(t instanceof Kt)) return !1;
2557 if (this.size !== t.size) return !1;
2558 const e = this.data.getIterator(), n = t.data.getIterator();
2559 for (;e.hasNext(); ) {
2560 const t = e.getNext().key, s = n.getNext().key;
2561 if (0 !== this.comparator(t, s)) return !1;
2562 }
2563 return !0;
2564 }
2565 toArray() {
2566 const t = [];
2567 return this.forEach((e => {
2568 t.push(e);
2569 })), t;
2570 }
2571 toString() {
2572 const t = [];
2573 return this.forEach((e => t.push(e))), "SortedSet(" + t.toString() + ")";
2574 }
2575 copy(t) {
2576 const e = new Kt(this.comparator);
2577 return e.data = t, e;
2578 }
2579}
2580
2581class Gt {
2582 constructor(t) {
2583 this.iter = t;
2584 }
2585 getNext() {
2586 return this.iter.getNext().key;
2587 }
2588 hasNext() {
2589 return this.iter.hasNext();
2590 }
2591}
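
/**
 * Illustrative sketch, not from the upstream source: the copy-on-write
 * SortedSet `Kt` keeps elements in comparator order; `add`/`delete` return
 * new sets and leave the original untouched. The element values and helper
 * name are made up; the helper is never invoked.
 */
function _exampleSortedSet() {
    const byString = (a, b) => a < b ? -1 : a > b ? 1 : 0;
    const set = new Kt(byString).add("b").add("a").add("c");
    return set.has("a") && "a" === set.first() && "c" === set.last() && "a,b,c" === set.toArray().join(",") && "a,c" === set.delete("b").toArray().join(",");
}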
2592
2593/**
2594 * Compares two sorted sets for equality using their natural ordering. The
2595 * method computes the intersection and invokes `onAdd` for every element that
2596 * is in `after` but not `before`. `onRemove` is invoked for every element in
2597 * `before` but missing from `after`.
2598 *
2599 * The method creates a copy of both `before` and `after` and runs in O(n log
2600 * n), where n is the size of the two lists.
2601 *
2602 * @param before - The elements that exist in the original set.
2603 * @param after - The elements to diff against the original set.
2604 * @param comparator - The comparator for the elements in before and after.
2605 * @param onAdd - A function to invoke for every element that is part of `
2606 * after` but not `before`.
2607 * @param onRemove - A function to invoke for every element that is part of
2608 * `before` but not `after`.
2609 */
2610/**
2611 * Returns the next element from the iterator or `undefined` if none available.
2612 */
2613function Qt(t) {
2614 return t.hasNext() ? t.getNext() : void 0;
2615}
2616
2617/**
2618 * @license
2619 * Copyright 2020 Google LLC
2620 *
2621 * Licensed under the Apache License, Version 2.0 (the "License");
2622 * you may not use this file except in compliance with the License.
2623 * You may obtain a copy of the License at
2624 *
2625 * http://www.apache.org/licenses/LICENSE-2.0
2626 *
2627 * Unless required by applicable law or agreed to in writing, software
2628 * distributed under the License is distributed on an "AS IS" BASIS,
2629 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2630 * See the License for the specific language governing permissions and
2631 * limitations under the License.
2632 */
2633/**
2634 * Provides a set of fields that can be used to partially patch a document.
2635 * FieldMask is used in conjunction with ObjectValue.
2636 * Examples:
2637 * foo - Overwrites foo entirely with the provided value. If foo is not
2638 * present in the companion ObjectValue, the field is deleted.
2639 * foo.bar - Overwrites only the field bar of the object foo.
2640 * If foo is not an object, foo is replaced with an object
2641 * containing bar.
2642 */ class jt {
2643 constructor(t) {
2644 this.fields = t,
2645 // TODO(dimond): validation of FieldMask
2646 // Sort the field mask to support `FieldMask.isEqual()` and assert below.
2647 t.sort(ct.comparator);
2648 }
2649 static empty() {
2650 return new jt([]);
2651 }
2652 /**
2653 * Returns a new FieldMask object that is the result of adding all the given
2654 * fields paths to this field mask.
2655 */ unionWith(t) {
2656 let e = new Kt(ct.comparator);
2657 for (const t of this.fields) e = e.add(t);
2658 for (const n of t) e = e.add(n);
2659 return new jt(e.toArray());
2660 }
2661 /**
2662 * Verifies that `fieldPath` is included by at least one field in this field
2663 * mask.
2664 *
2665 * This is an O(n) operation, where `n` is the size of the field mask.
2666 */ covers(t) {
2667 for (const e of this.fields) if (e.isPrefixOf(t)) return !0;
2668 return !1;
2669 }
2670 isEqual(t) {
2671 return et(this.fields, t.fields, ((t, e) => t.isEqual(e)));
2672 }
2673}
2674
2675/**
2676 * @license
2677 * Copyright 2020 Google LLC
2678 *
2679 * Licensed under the Apache License, Version 2.0 (the "License");
2680 * you may not use this file except in compliance with the License.
2681 * You may obtain a copy of the License at
2682 *
2683 * http://www.apache.org/licenses/LICENSE-2.0
2684 *
2685 * Unless required by applicable law or agreed to in writing, software
2686 * distributed under the License is distributed on an "AS IS" BASIS,
2687 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2688 * See the License for the specific language governing permissions and
2689 * limitations under the License.
2690 */
2691// WebSafe uses a different URL-encoding safe alphabet that doesn't match
2692// the encoding used on the backend.
2693/** Converts a Base64 encoded string to a binary string. */
2694function Wt(t) {
2695 return String.fromCharCode.apply(null,
2696 // We use `decodeStringToByteArray()` instead of `decodeString()` since
2697 // `decodeString()` returns Unicode strings, which doesn't match the values
2698 // returned by `atob()`'s Latin1 representation.
2699 l.decodeStringToByteArray(t, false));
2700}
2701
2702/** Converts a binary string to a Base64 encoded string. */
2703/** True if and only if the Base64 conversion functions are available. */
2704function zt() {
2705 return !0;
2706}
2707
2708/**
2709 * @license
2710 * Copyright 2020 Google LLC
2711 *
2712 * Licensed under the Apache License, Version 2.0 (the "License");
2713 * you may not use this file except in compliance with the License.
2714 * You may obtain a copy of the License at
2715 *
2716 * http://www.apache.org/licenses/LICENSE-2.0
2717 *
2718 * Unless required by applicable law or agreed to in writing, software
2719 * distributed under the License is distributed on an "AS IS" BASIS,
2720 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2721 * See the License for the specific language governing permissions and
2722 * limitations under the License.
2723 */
2724/**
2725 * Immutable class that represents a "proto" byte string.
2726 *
2727 * Proto byte strings can either be Base64-encoded strings or Uint8Arrays when
2728 * sent on the wire. This class abstracts away this differentiation by holding
2729 * the proto byte string in a common class that must be converted into a string
2730 * before being sent as a proto.
2731 * @internal
2732 */ class Ht {
2733 constructor(t) {
2734 this.binaryString = t;
2735 }
2736 static fromBase64String(t) {
2737 const e = Wt(t);
2738 return new Ht(e);
2739 }
2740 static fromUint8Array(t) {
2741 // TODO(indexing); Remove the copy of the byte string here as this method
2742 // is frequently called during indexing.
2743 const e =
2744 /**
2745 * Helper function to convert an Uint8array to a binary string.
2746 */
2747 function(t) {
2748 let e = "";
2749 for (let n = 0; n < t.length; ++n) e += String.fromCharCode(t[n]);
2750 return e;
2751 }
2752 /**
2753 * Helper function to convert a binary string to an Uint8Array.
2754 */ (t);
2755 return new Ht(e);
2756 }
2757 [Symbol.iterator]() {
2758 let t = 0;
2759 return {
2760 next: () => t < this.binaryString.length ? {
2761 value: this.binaryString.charCodeAt(t++),
2762 done: !1
2763 } : {
2764 value: void 0,
2765 done: !0
2766 }
2767 };
2768 }
2769 toBase64() {
2770 return function(t) {
2771 const e = [];
2772 for (let n = 0; n < t.length; n++) e[n] = t.charCodeAt(n);
2773 return l.encodeByteArray(e, !1);
2774 }(this.binaryString);
2775 }
2776 toUint8Array() {
2777 return function(t) {
2778 const e = new Uint8Array(t.length);
2779 for (let n = 0; n < t.length; n++) e[n] = t.charCodeAt(n);
2780 return e;
2781 }
2782 /**
2783 * @license
2784 * Copyright 2020 Google LLC
2785 *
2786 * Licensed under the Apache License, Version 2.0 (the "License");
2787 * you may not use this file except in compliance with the License.
2788 * You may obtain a copy of the License at
2789 *
2790 * http://www.apache.org/licenses/LICENSE-2.0
2791 *
2792 * Unless required by applicable law or agreed to in writing, software
2793 * distributed under the License is distributed on an "AS IS" BASIS,
2794 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2795 * See the License for the specific language governing permissions and
2796 * limitations under the License.
2797 */
2798 // A RegExp matching ISO 8601 UTC timestamps with optional fraction.
2799 (this.binaryString);
2800 }
2801 approximateByteSize() {
2802 return 2 * this.binaryString.length;
2803 }
2804 compareTo(t) {
2805 return tt(this.binaryString, t.binaryString);
2806 }
2807 isEqual(t) {
2808 return this.binaryString === t.binaryString;
2809 }
2810}
2811
2812Ht.EMPTY_BYTE_STRING = new Ht("");
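
/**
 * Illustrative sketch, not from the upstream source: round-tripping the
 * immutable proto byte-string wrapper `Ht` between its Uint8Array and Base64
 * forms. The sample bytes and helper name are made up; the helper is never
 * invoked.
 */
function _exampleByteString() {
    const bytes = Ht.fromUint8Array(new Uint8Array([ 104, 105 ])); // "hi"
    const roundTripped = Ht.fromBase64String(bytes.toBase64());
    return bytes.isEqual(roundTripped) && 4 === bytes.approximateByteSize() && 0 === Ht.EMPTY_BYTE_STRING.approximateByteSize();
}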
2813
2814const Jt = new RegExp(/^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.(\d+))?Z$/);
2815
2816/**
2817 * Converts the possible Proto values for a timestamp value into a "seconds and
2818 * nanos" representation.
2819 */ function Yt(t) {
2820 // The json interface (for the browser) will return an iso timestamp string,
2821 // while the proto js library (for node) will return a
2822 // google.protobuf.Timestamp instance.
2823 if (F(!!t), "string" == typeof t) {
2824 // The date string can have higher precision (nanos) than the Date class
2825 // (millis), so we do some custom parsing here.
2826 // Parse the nanos right out of the string.
2827 let e = 0;
2828 const n = Jt.exec(t);
2829 if (F(!!n), n[1]) {
2830 // Pad the fraction out to 9 digits (nanos).
2831 let t = n[1];
2832 t = (t + "000000000").substr(0, 9), e = Number(t);
2833 }
2834 // Parse the date to get the seconds.
2835 const s = new Date(t);
2836 return {
2837 seconds: Math.floor(s.getTime() / 1e3),
2838 nanos: e
2839 };
2840 }
2841 return {
2842 seconds: Xt(t.seconds),
2843 nanos: Xt(t.nanos)
2844 };
2845}
2846
2847/**
2848 * Converts the possible Proto types for numbers into a JavaScript number.
2849 * Returns 0 if the value is not numeric.
2850 */ function Xt(t) {
2851 // TODO(bjornick): Handle int64 greater than 53 bits.
2852 return "number" == typeof t ? t : "string" == typeof t ? Number(t) : 0;
2853}
2854
2855/** Converts the possible Proto types for Blobs into a ByteString. */ function Zt(t) {
2856 return "string" == typeof t ? Ht.fromBase64String(t) : Ht.fromUint8Array(t);
2857}
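
/**
 * Illustrative sketch, not from the upstream source: the normalizers above
 * accept both wire formats. `Yt` parses either an ISO 8601 string (padding
 * the fraction out to nanoseconds) or a {seconds, nanos} proto object via
 * `Xt`, and `Zt` turns a Base64 string or Uint8Array into a ByteString
 * (`Ht`). The sample values and helper name are made up; the helper is never
 * invoked.
 */
function _exampleProtoNormalization() {
    const fromIso = Yt("1970-01-01T00:00:01.500Z"); // { seconds: 1, nanos: 500000000 }
    const fromProto = Yt({
        seconds: "1",
        nanos: 5e8
    });
    return fromIso.seconds === fromProto.seconds && fromIso.nanos === fromProto.nanos && 42 === Xt("42") && Zt(new Uint8Array([ 104, 105 ])) instanceof Ht;
}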
2858
2859/**
2860 * @license
2861 * Copyright 2020 Google LLC
2862 *
2863 * Licensed under the Apache License, Version 2.0 (the "License");
2864 * you may not use this file except in compliance with the License.
2865 * You may obtain a copy of the License at
2866 *
2867 * http://www.apache.org/licenses/LICENSE-2.0
2868 *
2869 * Unless required by applicable law or agreed to in writing, software
2870 * distributed under the License is distributed on an "AS IS" BASIS,
2871 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2872 * See the License for the specific language governing permissions and
2873 * limitations under the License.
2874 */
2875/**
2876 * Represents a locally-applied ServerTimestamp.
2877 *
2878 * Server Timestamps are backed by MapValues that contain an internal field
2879 * `__type__` with a value of `server_timestamp`. The previous value and local
2880 * write time are stored in its `__previous_value__` and `__local_write_time__`
2881 * fields respectively.
2882 *
2883 * Notes:
2884 * - ServerTimestampValue instances are created as the result of applying a
2885 * transform. They can only exist in the local view of a document. Therefore
2886 * they do not need to be parsed or serialized.
2887 * - When evaluated locally (e.g. for snapshot.data()), they by default
2888 * evaluate to `null`. This behavior can be configured by passing custom
2889 * FieldValueOptions to value().
2890 * - With respect to other ServerTimestampValues, they sort by their
2891 * localWriteTime.
2892 */ function te(t) {
2893 var e, n;
2894 return "server_timestamp" === (null === (n = ((null === (e = null == t ? void 0 : t.mapValue) || void 0 === e ? void 0 : e.fields) || {}).__type__) || void 0 === n ? void 0 : n.stringValue);
2895}
2896
2897/**
2898 * Creates a new ServerTimestamp proto value (using the internal format).
2899 */
2900/**
2901 * Returns the value of the field before this ServerTimestamp was set.
2902 *
2903 * Preserving the previous values allows the user to display the last resolved
2904 * value until the backend responds with the timestamp.
2905 */
2906function ee(t) {
2907 const e = t.mapValue.fields.__previous_value__;
2908 return te(e) ? ee(e) : e;
2909}
2910
2911/**
2912 * Returns the local time at which this timestamp was first set.
2913 */ function ne(t) {
2914 const e = Yt(t.mapValue.fields.__local_write_time__.timestampValue);
2915 return new st(e.seconds, e.nanos);
2916}
2917
2918/**
2919 * @license
2920 * Copyright 2017 Google LLC
2921 *
2922 * Licensed under the Apache License, Version 2.0 (the "License");
2923 * you may not use this file except in compliance with the License.
2924 * You may obtain a copy of the License at
2925 *
2926 * http://www.apache.org/licenses/LICENSE-2.0
2927 *
2928 * Unless required by applicable law or agreed to in writing, software
2929 * distributed under the License is distributed on an "AS IS" BASIS,
2930 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
2931 * See the License for the specific language governing permissions and
2932 * limitations under the License.
2933 */ class se {
2934 /**
2935 * Constructs a DatabaseInfo using the provided host, databaseId and
2936 * persistenceKey.
2937 *
2938 * @param databaseId - The database to use.
2939 * @param appId - The Firebase App Id.
2940 * @param persistenceKey - A unique identifier for this Firestore's local
2941 * storage (used in conjunction with the databaseId).
2942 * @param host - The Firestore backend host to connect to.
2943 * @param ssl - Whether to use SSL when connecting.
2944 * @param forceLongPolling - Whether to use the forceLongPolling option
2945 * when using WebChannel as the network transport.
2946 * @param autoDetectLongPolling - Whether to use the detectBufferingProxy
2947 * option when using WebChannel as the network transport.
2948 * @param useFetchStreams - Whether to use the Fetch API instead of
2949 * XMLHttpRequest.
2950 */
2951 constructor(t, e, n, s, i, r, o, u) {
2952 this.databaseId = t, this.appId = e, this.persistenceKey = n, this.host = s, this.ssl = i,
2953 this.forceLongPolling = r, this.autoDetectLongPolling = o, this.useFetchStreams = u;
2954 }
2955}
2956
2957/** The default database name for a project. */
2958/**
2959 * Represents the database ID a Firestore client is associated with.
2960 * @internal
2961 */
2962class ie {
2963 constructor(t, e) {
2964 this.projectId = t, this.database = e || "(default)";
2965 }
2966 static empty() {
2967 return new ie("", "");
2968 }
2969 get isDefaultDatabase() {
2970 return "(default)" === this.database;
2971 }
2972 isEqual(t) {
2973 return t instanceof ie && t.projectId === this.projectId && t.database === this.database;
2974 }
2975}
2976
2977/**
2978 * Returns whether a variable is either undefined or null.
2979 */
2980function re(t) {
2981 return null == t;
2982}
2983
2984/** Returns whether the value represents -0. */ function oe(t) {
2985 // Detect if the value is -0.0. Based on polyfill from
2986 // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is
2987 return 0 === t && 1 / t == -1 / 0;
2988}
2989
2990/**
2991 * Returns whether a value is an integer and in the safe integer range
2992 * @param value - The value to test for being an integer and in the safe range
2993 */ function ue(t) {
2994 return "number" == typeof t && Number.isInteger(t) && !oe(t) && t <= Number.MAX_SAFE_INTEGER && t >= Number.MIN_SAFE_INTEGER;
2995}
2996
2997/**
2998 * @license
2999 * Copyright 2020 Google LLC
3000 *
3001 * Licensed under the Apache License, Version 2.0 (the "License");
3002 * you may not use this file except in compliance with the License.
3003 * You may obtain a copy of the License at
3004 *
3005 * http://www.apache.org/licenses/LICENSE-2.0
3006 *
3007 * Unless required by applicable law or agreed to in writing, software
3008 * distributed under the License is distributed on an "AS IS" BASIS,
3009 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3010 * See the License for the specific language governing permissions and
3011 * limitations under the License.
3012 */ const ce = {
3013 mapValue: {
3014 fields: {
3015 __type__: {
3016 stringValue: "__max__"
3017 }
3018 }
3019 }
3020}, ae = {
3021 nullValue: "NULL_VALUE"
3022};
3023
3024/** Extracts the backend's type order for the provided value. */
3025function he(t) {
3026 return "nullValue" in t ? 0 /* NullValue */ : "booleanValue" in t ? 1 /* BooleanValue */ : "integerValue" in t || "doubleValue" in t ? 2 /* NumberValue */ : "timestampValue" in t ? 3 /* TimestampValue */ : "stringValue" in t ? 5 /* StringValue */ : "bytesValue" in t ? 6 /* BlobValue */ : "referenceValue" in t ? 7 /* RefValue */ : "geoPointValue" in t ? 8 /* GeoPointValue */ : "arrayValue" in t ? 9 /* ArrayValue */ : "mapValue" in t ? te(t) ? 4 /* ServerTimestampValue */ : Re(t) ? 9007199254740991 /* MaxValue */ : 10 /* ObjectValue */ : M();
3027}
3028
3029/** Tests `left` and `right` for equality based on the backend semantics. */ function le(t, e) {
3030 if (t === e) return !0;
3031 const n = he(t);
3032 if (n !== he(e)) return !1;
3033 switch (n) {
3034 case 0 /* NullValue */ :
3035 case 9007199254740991 /* MaxValue */ :
3036 return !0;
3037
3038 case 1 /* BooleanValue */ :
3039 return t.booleanValue === e.booleanValue;
3040
3041 case 4 /* ServerTimestampValue */ :
3042 return ne(t).isEqual(ne(e));
3043
3044 case 3 /* TimestampValue */ :
3045 return function(t, e) {
3046 if ("string" == typeof t.timestampValue && "string" == typeof e.timestampValue && t.timestampValue.length === e.timestampValue.length)
3047 // Use string equality for ISO 8601 timestamps
3048 return t.timestampValue === e.timestampValue;
3049 const n = Yt(t.timestampValue), s = Yt(e.timestampValue);
3050 return n.seconds === s.seconds && n.nanos === s.nanos;
3051 }(t, e);
3052
3053 case 5 /* StringValue */ :
3054 return t.stringValue === e.stringValue;
3055
3056 case 6 /* BlobValue */ :
3057 return function(t, e) {
3058 return Zt(t.bytesValue).isEqual(Zt(e.bytesValue));
3059 }(t, e);
3060
3061 case 7 /* RefValue */ :
3062 return t.referenceValue === e.referenceValue;
3063
3064 case 8 /* GeoPointValue */ :
3065 return function(t, e) {
3066 return Xt(t.geoPointValue.latitude) === Xt(e.geoPointValue.latitude) && Xt(t.geoPointValue.longitude) === Xt(e.geoPointValue.longitude);
3067 }(t, e);
3068
3069 case 2 /* NumberValue */ :
3070 return function(t, e) {
3071 if ("integerValue" in t && "integerValue" in e) return Xt(t.integerValue) === Xt(e.integerValue);
3072 if ("doubleValue" in t && "doubleValue" in e) {
3073 const n = Xt(t.doubleValue), s = Xt(e.doubleValue);
3074 return n === s ? oe(n) === oe(s) : isNaN(n) && isNaN(s);
3075 }
3076 return !1;
3077 }(t, e);
3078
3079 case 9 /* ArrayValue */ :
3080 return et(t.arrayValue.values || [], e.arrayValue.values || [], le);
3081
3082 case 10 /* ObjectValue */ :
3083 return function(t, e) {
3084 const n = t.mapValue.fields || {}, s = e.mapValue.fields || {};
3085 if (Ft(n) !== Ft(s)) return !1;
3086 for (const t in n) if (n.hasOwnProperty(t) && (void 0 === s[t] || !le(n[t], s[t]))) return !1;
3087 return !0;
3088 }
3089 /** Returns true if the ArrayValue contains the specified element. */ (t, e);
3090
3091 default:
3092 return M();
3093 }
3094}
3095
3096function fe(t, e) {
3097 return void 0 !== (t.values || []).find((t => le(t, e)));
3098}
3099
3100function de(t, e) {
3101 if (t === e) return 0;
3102 const n = he(t), s = he(e);
3103 if (n !== s) return tt(n, s);
3104 switch (n) {
3105 case 0 /* NullValue */ :
3106 case 9007199254740991 /* MaxValue */ :
3107 return 0;
3108
3109 case 1 /* BooleanValue */ :
3110 return tt(t.booleanValue, e.booleanValue);
3111
3112 case 2 /* NumberValue */ :
3113 return function(t, e) {
3114 const n = Xt(t.integerValue || t.doubleValue), s = Xt(e.integerValue || e.doubleValue);
3115 return n < s ? -1 : n > s ? 1 : n === s ? 0 :
3116 // one or both are NaN.
3117 isNaN(n) ? isNaN(s) ? 0 : -1 : 1;
3118 }(t, e);
3119
3120 case 3 /* TimestampValue */ :
3121 return _e(t.timestampValue, e.timestampValue);
3122
3123 case 4 /* ServerTimestampValue */ :
3124 return _e(ne(t), ne(e));
3125
3126 case 5 /* StringValue */ :
3127 return tt(t.stringValue, e.stringValue);
3128
3129 case 6 /* BlobValue */ :
3130 return function(t, e) {
3131 const n = Zt(t), s = Zt(e);
3132 return n.compareTo(s);
3133 }(t.bytesValue, e.bytesValue);
3134
3135 case 7 /* RefValue */ :
3136 return function(t, e) {
3137 const n = t.split("/"), s = e.split("/");
3138 for (let t = 0; t < n.length && t < s.length; t++) {
3139 const e = tt(n[t], s[t]);
3140 if (0 !== e) return e;
3141 }
3142 return tt(n.length, s.length);
3143 }(t.referenceValue, e.referenceValue);
3144
3145 case 8 /* GeoPointValue */ :
3146 return function(t, e) {
3147 const n = tt(Xt(t.latitude), Xt(e.latitude));
3148 if (0 !== n) return n;
3149 return tt(Xt(t.longitude), Xt(e.longitude));
3150 }(t.geoPointValue, e.geoPointValue);
3151
3152 case 9 /* ArrayValue */ :
3153 return function(t, e) {
3154 const n = t.values || [], s = e.values || [];
3155 for (let t = 0; t < n.length && t < s.length; ++t) {
3156 const e = de(n[t], s[t]);
3157 if (e) return e;
3158 }
3159 return tt(n.length, s.length);
3160 }(t.arrayValue, e.arrayValue);
3161
3162 case 10 /* ObjectValue */ :
3163 return function(t, e) {
3164 if (t === ce.mapValue && e === ce.mapValue) return 0;
3165 if (t === ce.mapValue) return 1;
3166 if (e === ce.mapValue) return -1;
3167 const n = t.fields || {}, s = Object.keys(n), i = e.fields || {}, r = Object.keys(i);
3168 // Even though MapValues are likely sorted correctly based on their insertion
3169 // order (e.g. when received from the backend), local modifications can bring
3170 // elements out of order. We need to re-sort the elements to ensure that
3171 // canonical IDs are independent of insertion order.
3172 s.sort(), r.sort();
3173 for (let t = 0; t < s.length && t < r.length; ++t) {
3174 const e = tt(s[t], r[t]);
3175 if (0 !== e) return e;
3176 const o = de(n[s[t]], i[r[t]]);
3177 if (0 !== o) return o;
3178 }
3179 return tt(s.length, r.length);
3180 }
3181 /**
3182 * Generates the canonical ID for the provided field value (as used in Target
3183 * serialization).
3184 */ (t.mapValue, e.mapValue);
3185
3186 default:
3187 throw M();
3188 }
3189}
3190
3191function _e(t, e) {
3192 if ("string" == typeof t && "string" == typeof e && t.length === e.length) return tt(t, e);
3193 const n = Yt(t), s = Yt(e), i = tt(n.seconds, s.seconds);
3194 return 0 !== i ? i : tt(n.nanos, s.nanos);
3195}
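
// Illustrative sketch (not part of the SDK): the total order that `de` imposes on Values.
// Values of different type orders compare by type; numbers compare numerically, with NaN
// sorting first. `exampleValueCompare` is a hypothetical, uncalled helper.
function exampleValueCompare() {
  const byType = de({ booleanValue: true }, { stringValue: "a" });  // -1 (booleans sort before strings)
  const numeric = de({ integerValue: "2" }, { doubleValue: 1.5 });  // 1  (2 > 1.5; integers and doubles mix)
  const nanFirst = de({ doubleValue: NaN }, { integerValue: "0" }); // -1 (NaN sorts before every number)
  return [byType, numeric, nanFirst];
}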
3196
3197function we(t) {
3198 return me(t);
3199}
3200
3201function me(t) {
3202 return "nullValue" in t ? "null" : "booleanValue" in t ? "" + t.booleanValue : "integerValue" in t ? "" + t.integerValue : "doubleValue" in t ? "" + t.doubleValue : "timestampValue" in t ? function(t) {
3203 const e = Yt(t);
3204 return `time(${e.seconds},${e.nanos})`;
3205 }(t.timestampValue) : "stringValue" in t ? t.stringValue : "bytesValue" in t ? Zt(t.bytesValue).toBase64() : "referenceValue" in t ? (n = t.referenceValue,
3206 at.fromName(n).toString()) : "geoPointValue" in t ? `geo(${(e = t.geoPointValue).latitude},${e.longitude})` : "arrayValue" in t ? function(t) {
3207 let e = "[", n = !0;
3208 for (const s of t.values || []) n ? n = !1 : e += ",", e += me(s);
3209 return e + "]";
3210 }
3211 /** Returns a reference value for the provided database and key. */ (t.arrayValue) : "mapValue" in t ? function(t) {
3212 // Iteration order in JavaScript is not guaranteed. To ensure that we generate
3213 // matching canonical IDs for identical maps, we need to sort the keys.
3214 const e = Object.keys(t.fields || {}).sort();
3215 let n = "{", s = !0;
3216 for (const i of e) s ? s = !1 : n += ",", n += `${i}:${me(t.fields[i])}`;
3217 return n + "}";
3218 }(t.mapValue) : M();
3219 var e, n;
3220}
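
// Illustrative sketch (not part of the SDK): canonical IDs produced by `we`. Map keys are
// sorted, so identical maps yield identical IDs regardless of insertion order.
// `exampleCanonicalIds` is a hypothetical, uncalled helper.
function exampleCanonicalIds() {
  const mapId = we({ mapValue: { fields: { b: { integerValue: "1" }, a: { stringValue: "x" } } } });
  // "{a:x,b:1}"
  const arrayId = we({ arrayValue: { values: [{ booleanValue: true }, { nullValue: "NULL_VALUE" }] } });
  // "[true,null]"
  return [mapId, arrayId];
}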
3221
3222function ge(t, e) {
3223 return {
3224 referenceValue: `projects/${t.projectId}/databases/${t.database}/documents/${e.path.canonicalString()}`
3225 };
3226}
3227
3228 /** Returns true if `value` is an IntegerValue. */ function ye(t) {
3229 return !!t && "integerValue" in t;
3230}
3231
3232 /** Returns true if `value` is a DoubleValue. (The corresponding predicate was inlined elsewhere in this bundle.) */
3233 /** Returns true if `value` is an ArrayValue. */
3234function pe(t) {
3235 return !!t && "arrayValue" in t;
3236}
3237
3238/** Returns true if `value` is a NullValue. */ function Ie(t) {
3239 return !!t && "nullValue" in t;
3240}
3241
3242/** Returns true if `value` is NaN. */ function Te(t) {
3243 return !!t && "doubleValue" in t && isNaN(Number(t.doubleValue));
3244}
3245
3246/** Returns true if `value` is a MapValue. */ function Ee(t) {
3247 return !!t && "mapValue" in t;
3248}
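
// Illustrative sketch (not part of the SDK): the small type predicates above operate on
// raw Value protos. `exampleTypePredicates` is a hypothetical, uncalled helper.
function exampleTypePredicates() {
  return [
    ye({ integerValue: "3" }),          // true (IntegerValue)
    pe({ arrayValue: { values: [] } }), // true (ArrayValue)
    Ie({ nullValue: "NULL_VALUE" }),    // true (NullValue)
    Te({ doubleValue: "NaN" }),         // true (NaN; the string form used with proto3 JSON)
    Ee({ mapValue: {} })                // true (MapValue)
  ];
}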
3249
3250/** Creates a deep copy of `source`. */ function Ae(t) {
3251 if (t.geoPointValue) return {
3252 geoPointValue: Object.assign({}, t.geoPointValue)
3253 };
3254 if (t.timestampValue && "object" == typeof t.timestampValue) return {
3255 timestampValue: Object.assign({}, t.timestampValue)
3256 };
3257 if (t.mapValue) {
3258 const e = {
3259 mapValue: {
3260 fields: {}
3261 }
3262 };
3263 return $t(t.mapValue.fields, ((t, n) => e.mapValue.fields[t] = Ae(n))), e;
3264 }
3265 if (t.arrayValue) {
3266 const e = {
3267 arrayValue: {
3268 values: []
3269 }
3270 };
3271 for (let n = 0; n < (t.arrayValue.values || []).length; ++n) e.arrayValue.values[n] = Ae(t.arrayValue.values[n]);
3272 return e;
3273 }
3274 return Object.assign({}, t);
3275}
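
// Illustrative sketch (not part of the SDK): `Ae` deep-copies a Value, so mutating the
// copy leaves the source untouched. `exampleDeepCopy` is a hypothetical, uncalled helper;
// the literal Value is an assumed sample input.
function exampleDeepCopy() {
  const source = { mapValue: { fields: { tags: { arrayValue: { values: [{ stringValue: "a" }] } } } } };
  const copy = Ae(source);
  copy.mapValue.fields.tags.arrayValue.values.push({ stringValue: "b" });
  return source.mapValue.fields.tags.arrayValue.values.length; // still 1
}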
3276
3277 /** Returns true if the Value represents the canonical {@link #MAX_VALUE}. */ function Re(t) {
3278 return "__max__" === (((t.mapValue || {}).fields || {}).__type__ || {}).stringValue;
3279}
3280
3281/** Returns the lowest value for the given value type (inclusive). */ function be(t) {
3282 return "nullValue" in t ? ae : "booleanValue" in t ? {
3283 booleanValue: !1
3284 } : "integerValue" in t || "doubleValue" in t ? {
3285 doubleValue: NaN
3286 } : "timestampValue" in t ? {
3287 timestampValue: {
3288 seconds: Number.MIN_SAFE_INTEGER
3289 }
3290 } : "stringValue" in t ? {
3291 stringValue: ""
3292 } : "bytesValue" in t ? {
3293 bytesValue: ""
3294 } : "referenceValue" in t ? ge(ie.empty(), at.empty()) : "geoPointValue" in t ? {
3295 geoPointValue: {
3296 latitude: -90,
3297 longitude: -180
3298 }
3299 } : "arrayValue" in t ? {
3300 arrayValue: {}
3301 } : "mapValue" in t ? {
3302 mapValue: {}
3303 } : M();
3304}
3305
3306/** Returns the largest value for the given value type (exclusive). */ function Pe(t) {
3307 return "nullValue" in t ? {
3308 booleanValue: !1
3309 } : "booleanValue" in t ? {
3310 doubleValue: NaN
3311 } : "integerValue" in t || "doubleValue" in t ? {
3312 timestampValue: {
3313 seconds: Number.MIN_SAFE_INTEGER
3314 }
3315 } : "timestampValue" in t ? {
3316 stringValue: ""
3317 } : "stringValue" in t ? {
3318 bytesValue: ""
3319 } : "bytesValue" in t ? ge(ie.empty(), at.empty()) : "referenceValue" in t ? {
3320 geoPointValue: {
3321 latitude: -90,
3322 longitude: -180
3323 }
3324 } : "geoPointValue" in t ? {
3325 arrayValue: {}
3326 } : "arrayValue" in t ? {
3327 mapValue: {}
3328 } : "mapValue" in t ? ce : M();
3329}
3330
3331function ve(t, e) {
3332 const n = de(t.value, e.value);
3333 return 0 !== n ? n : t.inclusive && !e.inclusive ? -1 : !t.inclusive && e.inclusive ? 1 : 0;
3334}
3335
3336function Ve(t, e) {
3337 const n = de(t.value, e.value);
3338 return 0 !== n ? n : t.inclusive && !e.inclusive ? 1 : !t.inclusive && e.inclusive ? -1 : 0;
3339}
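
// Illustrative sketch (not part of the SDK): `be`/`Pe` give the inclusive lower and
// exclusive upper bound for a value's type, and `ve` orders candidate lower bounds.
// `exampleTypeBounds` is a hypothetical, uncalled helper.
function exampleTypeBounds() {
  const lower = be({ stringValue: "zzz" }); // { stringValue: "" } - smallest string
  const upper = Pe({ stringValue: "zzz" }); // { bytesValue: "" }  - smallest value of the next type
  const cmp = ve(
    { value: ae, inclusive: true },         // the null (MIN_VALUE) bound
    { value: { stringValue: "" }, inclusive: true }
  );                                        // -1 - the null bound orders below the empty-string bound
  return [lower, upper, cmp];
}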
3340
3341/**
3342 * @license
3343 * Copyright 2017 Google LLC
3344 *
3345 * Licensed under the Apache License, Version 2.0 (the "License");
3346 * you may not use this file except in compliance with the License.
3347 * You may obtain a copy of the License at
3348 *
3349 * http://www.apache.org/licenses/LICENSE-2.0
3350 *
3351 * Unless required by applicable law or agreed to in writing, software
3352 * distributed under the License is distributed on an "AS IS" BASIS,
3353 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3354 * See the License for the specific language governing permissions and
3355 * limitations under the License.
3356 */
3357/**
3358 * An ObjectValue represents a MapValue in the Firestore Proto and offers the
3359 * ability to add and remove fields (via the ObjectValueBuilder).
3360 */ class Se {
3361 constructor(t) {
3362 this.value = t;
3363 }
3364 static empty() {
3365 return new Se({
3366 mapValue: {}
3367 });
3368 }
3369 /**
3370 * Returns the value at the given path or null.
3371 *
3372 * @param path - the path to search
3373 * @returns The value at the path or null if the path is not set.
3374 */ field(t) {
3375 if (t.isEmpty()) return this.value;
3376 {
3377 let e = this.value;
3378 for (let n = 0; n < t.length - 1; ++n) if (e = (e.mapValue.fields || {})[t.get(n)],
3379 !Ee(e)) return null;
3380 return e = (e.mapValue.fields || {})[t.lastSegment()], e || null;
3381 }
3382 }
3383 /**
3384 * Sets the field to the provided value.
3385 *
3386 * @param path - The field path to set.
3387 * @param value - The value to set.
3388 */ set(t, e) {
3389 this.getFieldsMap(t.popLast())[t.lastSegment()] = Ae(e);
3390 }
3391 /**
3392 * Sets the provided fields to the provided values.
3393 *
3394 * @param data - A map of fields to values (or null for deletes).
3395 */ setAll(t) {
3396 let e = ct.emptyPath(), n = {}, s = [];
3397 t.forEach(((t, i) => {
3398 if (!e.isImmediateParentOf(i)) {
3399 // Insert the accumulated changes at this parent location
3400 const t = this.getFieldsMap(e);
3401 this.applyChanges(t, n, s), n = {}, s = [], e = i.popLast();
3402 }
3403 t ? n[i.lastSegment()] = Ae(t) : s.push(i.lastSegment());
3404 }));
3405 const i = this.getFieldsMap(e);
3406 this.applyChanges(i, n, s);
3407 }
3408 /**
3409 * Removes the field at the specified path. If there is no field at the
3410 * specified path, nothing is changed.
3411 *
3412 * @param path - The field path to remove.
3413 */ delete(t) {
3414 const e = this.field(t.popLast());
3415 Ee(e) && e.mapValue.fields && delete e.mapValue.fields[t.lastSegment()];
3416 }
3417 isEqual(t) {
3418 return le(this.value, t.value);
3419 }
3420 /**
3421 * Returns the map that contains the leaf element of `path`. If the parent
3422 * entry does not yet exist, or if it is not a map, a new map will be created.
3423 */ getFieldsMap(t) {
3424 let e = this.value;
3425 e.mapValue.fields || (e.mapValue = {
3426 fields: {}
3427 });
3428 for (let n = 0; n < t.length; ++n) {
3429 let s = e.mapValue.fields[t.get(n)];
3430 Ee(s) && s.mapValue.fields || (s = {
3431 mapValue: {
3432 fields: {}
3433 }
3434 }, e.mapValue.fields[t.get(n)] = s), e = s;
3435 }
3436 return e.mapValue.fields;
3437 }
3438 /**
3439 * Modifies `fieldsMap` by adding, replacing or deleting the specified
3440 * entries.
3441 */ applyChanges(t, e, n) {
3442 $t(e, ((e, n) => t[e] = n));
3443 for (const e of n) delete t[e];
3444 }
3445 clone() {
3446 return new Se(Ae(this.value));
3447 }
3448}
3449
3450/**
3451 * Returns a FieldMask built from all fields in a MapValue.
3452 */ function De(t) {
3453 const e = [];
3454 return $t(t.fields, ((t, n) => {
3455 const s = new ct([ t ]);
3456 if (Ee(n)) {
3457 const t = De(n.mapValue).fields;
3458 if (0 === t.length)
3459 // Preserve the empty map by adding it to the FieldMask.
3460 e.push(s); else
3461 // For nested and non-empty ObjectValues, add the FieldPath of the
3462 // leaf nodes.
3463 for (const n of t) e.push(s.child(n));
3464 } else
3465 // For nested and non-empty ObjectValues, add the FieldPath of the leaf
3466 // nodes.
3467 e.push(s);
3468 })), new jt(e);
3469}
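
// Illustrative sketch (not part of the SDK): building an ObjectValue (`Se`), reading a
// nested field, and extracting a FieldMask with `De`. `exampleObjectValue` is a
// hypothetical, uncalled helper; the field names are assumed sample data.
function exampleObjectValue() {
  const obj = Se.empty();
  obj.set(new ct(["user", "name"]), { stringValue: "Ada" }); // creates the nested `user` map
  obj.set(new ct(["active"]), { booleanValue: true });
  const name = obj.field(new ct(["user", "name"]));          // { stringValue: "Ada" }
  const mask = De(obj.value.mapValue);                       // FieldMask over `active` and `user.name`
  return [name, mask];
}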
3470
3471/**
3472 * @license
3473 * Copyright 2017 Google LLC
3474 *
3475 * Licensed under the Apache License, Version 2.0 (the "License");
3476 * you may not use this file except in compliance with the License.
3477 * You may obtain a copy of the License at
3478 *
3479 * http://www.apache.org/licenses/LICENSE-2.0
3480 *
3481 * Unless required by applicable law or agreed to in writing, software
3482 * distributed under the License is distributed on an "AS IS" BASIS,
3483 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3484 * See the License for the specific language governing permissions and
3485 * limitations under the License.
3486 */
3487/**
3488 * Represents a document in Firestore with a key, version, data and whether it
3489 * has local mutations applied to it.
3490 *
3491 * Documents can transition between states via `convertToFoundDocument()`,
3492 * `convertToNoDocument()` and `convertToUnknownDocument()`. If a document does
3493 * not transition to one of these states even after all mutations have been
3494 * applied, `isValidDocument()` returns false and the document should be removed
3495 * from all views.
3496 */ class Ce {
3497 constructor(t, e, n, s, i, r) {
3498 this.key = t, this.documentType = e, this.version = n, this.readTime = s, this.data = i,
3499 this.documentState = r;
3500 }
3501 /**
3502 * Creates a document with no known version or data, but which can serve as
3503 * base document for mutations.
3504 */ static newInvalidDocument(t) {
3505 return new Ce(t, 0 /* INVALID */ , it.min(), it.min(), Se.empty(), 0 /* SYNCED */);
3506 }
3507 /**
3508 * Creates a new document that is known to exist with the given data at the
3509 * given version.
3510 */ static newFoundDocument(t, e, n) {
3511 return new Ce(t, 1 /* FOUND_DOCUMENT */ , e, it.min(), n, 0 /* SYNCED */);
3512 }
3513 /** Creates a new document that is known to not exist at the given version. */ static newNoDocument(t, e) {
3514 return new Ce(t, 2 /* NO_DOCUMENT */ , e, it.min(), Se.empty(), 0 /* SYNCED */);
3515 }
3516 /**
3517 * Creates a new document that is known to exist at the given version but
3518 * whose data is not known (e.g. a document that was updated without a known
3519 * base document).
3520 */ static newUnknownDocument(t, e) {
3521 return new Ce(t, 3 /* UNKNOWN_DOCUMENT */ , e, it.min(), Se.empty(), 2 /* HAS_COMMITTED_MUTATIONS */);
3522 }
3523 /**
3524 * Changes the document type to indicate that it exists and that its version
3525 * and data are known.
3526 */ convertToFoundDocument(t, e) {
3527 return this.version = t, this.documentType = 1 /* FOUND_DOCUMENT */ , this.data = e,
3528 this.documentState = 0 /* SYNCED */ , this;
3529 }
3530 /**
3531 * Changes the document type to indicate that it doesn't exist at the given
3532 * version.
3533 */ convertToNoDocument(t) {
3534 return this.version = t, this.documentType = 2 /* NO_DOCUMENT */ , this.data = Se.empty(),
3535 this.documentState = 0 /* SYNCED */ , this;
3536 }
3537 /**
3538 * Changes the document type to indicate that it exists at a given version but
3539 * that its data is not known (e.g. a document that was updated without a known
3540 * base document).
3541 */ convertToUnknownDocument(t) {
3542 return this.version = t, this.documentType = 3 /* UNKNOWN_DOCUMENT */ , this.data = Se.empty(),
3543 this.documentState = 2 /* HAS_COMMITTED_MUTATIONS */ , this;
3544 }
3545 setHasCommittedMutations() {
3546 return this.documentState = 2 /* HAS_COMMITTED_MUTATIONS */ , this;
3547 }
3548 setHasLocalMutations() {
3549 return this.documentState = 1 /* HAS_LOCAL_MUTATIONS */ , this.version = it.min(),
3550 this;
3551 }
3552 setReadTime(t) {
3553 return this.readTime = t, this;
3554 }
3555 get hasLocalMutations() {
3556 return 1 /* HAS_LOCAL_MUTATIONS */ === this.documentState;
3557 }
3558 get hasCommittedMutations() {
3559 return 2 /* HAS_COMMITTED_MUTATIONS */ === this.documentState;
3560 }
3561 get hasPendingWrites() {
3562 return this.hasLocalMutations || this.hasCommittedMutations;
3563 }
3564 isValidDocument() {
3565 return 0 /* INVALID */ !== this.documentType;
3566 }
3567 isFoundDocument() {
3568 return 1 /* FOUND_DOCUMENT */ === this.documentType;
3569 }
3570 isNoDocument() {
3571 return 2 /* NO_DOCUMENT */ === this.documentType;
3572 }
3573 isUnknownDocument() {
3574 return 3 /* UNKNOWN_DOCUMENT */ === this.documentType;
3575 }
3576 isEqual(t) {
3577 return t instanceof Ce && this.key.isEqual(t.key) && this.version.isEqual(t.version) && this.documentType === t.documentType && this.documentState === t.documentState && this.data.isEqual(t.data);
3578 }
3579 mutableCopy() {
3580 return new Ce(this.key, this.documentType, this.version, this.readTime, this.data.clone(), this.documentState);
3581 }
3582 toString() {
3583 return `Document(${this.key}, ${this.version}, ${JSON.stringify(this.data.value)}, {documentType: ${this.documentType}}, {documentState: ${this.documentState}})`;
3584 }
3585}
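
// Illustrative sketch (not part of the SDK): the MutableDocument (`Ce`) lifecycle
// described above. `exampleDocumentLifecycle` is a hypothetical, uncalled helper; the
// resource name "projects/p/databases/d/documents/users/alice" is made up.
function exampleDocumentLifecycle() {
  const key = at.fromName("projects/p/databases/d/documents/users/alice");
  const doc = Ce.newInvalidDocument(key);
  const before = doc.isValidDocument();             // false
  doc.convertToFoundDocument(it.min(), Se.empty()); // now a found document
  doc.setHasLocalMutations();                       // marks a pending local write
  return [before, doc.isFoundDocument(), doc.hasPendingWrites]; // [false, true, true]
}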
3586
3587/**
3588 * Compares the value for field `field` in the provided documents. Throws if
3589 * the field does not exist in both documents.
3590 */
3591/**
3592 * @license
3593 * Copyright 2019 Google LLC
3594 *
3595 * Licensed under the Apache License, Version 2.0 (the "License");
3596 * you may not use this file except in compliance with the License.
3597 * You may obtain a copy of the License at
3598 *
3599 * http://www.apache.org/licenses/LICENSE-2.0
3600 *
3601 * Unless required by applicable law or agreed to in writing, software
3602 * distributed under the License is distributed on an "AS IS" BASIS,
3603 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3604 * See the License for the specific language governing permissions and
3605 * limitations under the License.
3606 */
3607// Visible for testing
3608class xe {
3609 constructor(t, e = null, n = [], s = [], i = null, r = null, o = null) {
3610 this.path = t, this.collectionGroup = e, this.orderBy = n, this.filters = s, this.limit = i,
3611 this.startAt = r, this.endAt = o, this.ht = null;
3612 }
3613}
3614
3615/**
3616 * Initializes a Target with a path and optional additional query constraints.
3617 * Path must currently be empty if this is a collection group query.
3618 *
3619 * NOTE: you should always construct `Target` from `Query.toTarget` instead of
3620 * using this factory method, because `Query` provides an implicit `orderBy`
3621 * property.
3622 */ function Ne(t, e = null, n = [], s = [], i = null, r = null, o = null) {
3623 return new xe(t, e, n, s, i, r, o);
3624}
3625
3626function ke(t) {
3627 const e = B(t);
3628 if (null === e.ht) {
3629 let t = e.path.canonicalString();
3630 null !== e.collectionGroup && (t += "|cg:" + e.collectionGroup), t += "|f:", t += e.filters.map((t => {
3631 return (e = t).field.canonicalString() + e.op.toString() + we(e.value);
3632 var e;
3633 })).join(","), t += "|ob:", t += e.orderBy.map((t => function(t) {
3634 // TODO(b/29183165): Make this collision robust.
3635 return t.field.canonicalString() + t.dir;
3636 }(t))).join(","), re(e.limit) || (t += "|l:", t += e.limit), e.startAt && (t += "|lb:",
3637 t += e.startAt.inclusive ? "b:" : "a:", t += e.startAt.position.map((t => we(t))).join(",")),
3638 e.endAt && (t += "|ub:", t += e.endAt.inclusive ? "a:" : "b:", t += e.endAt.position.map((t => we(t))).join(",")),
3639 e.ht = t;
3640 }
3641 return e.ht;
3642}
3643
3644function Oe(t) {
3645 let e = t.path.canonicalString();
3646 return null !== t.collectionGroup && (e += " collectionGroup=" + t.collectionGroup),
3647 t.filters.length > 0 && (e += `, filters: [${t.filters.map((t => {
3648 return `${(e = t).field.canonicalString()} ${e.op} ${we(e.value)}`;
3649 /** Returns a debug description for `filter`. */
3650 var e;
3651 /** Filter that matches on key fields (i.e. '__name__'). */ })).join(", ")}]`),
3652 re(t.limit) || (e += ", limit: " + t.limit), t.orderBy.length > 0 && (e += `, orderBy: [${t.orderBy.map((t => function(t) {
3653 return `${t.field.canonicalString()} (${t.dir})`;
3654 }(t))).join(", ")}]`), t.startAt && (e += ", startAt: ", e += t.startAt.inclusive ? "b:" : "a:",
3655 e += t.startAt.position.map((t => we(t))).join(",")), t.endAt && (e += ", endAt: ",
3656 e += t.endAt.inclusive ? "a:" : "b:", e += t.endAt.position.map((t => we(t))).join(",")),
3657 `Target(${e})`;
3658}
3659
3660function Me(t, e) {
3661 if (t.limit !== e.limit) return !1;
3662 if (t.orderBy.length !== e.orderBy.length) return !1;
3663 for (let n = 0; n < t.orderBy.length; n++) if (!Xe(t.orderBy[n], e.orderBy[n])) return !1;
3664 if (t.filters.length !== e.filters.length) return !1;
3665 for (let i = 0; i < t.filters.length; i++) if (n = t.filters[i], s = e.filters[i],
3666 n.op !== s.op || !n.field.isEqual(s.field) || !le(n.value, s.value)) return !1;
3667 var n, s;
3668 return t.collectionGroup === e.collectionGroup && (!!t.path.isEqual(e.path) && (!!tn(t.startAt, e.startAt) && tn(t.endAt, e.endAt)));
3669}
3670
3671function Fe(t) {
3672 return at.isDocumentKey(t.path) && null === t.collectionGroup && 0 === t.filters.length;
3673}
3674
3675/** Returns the field filters that target the given field path. */ function $e(t, e) {
3676 return t.filters.filter((t => t instanceof Ue && t.field.isEqual(e)));
3677}
3678
3679/**
3680 * Returns the values that are used in ARRAY_CONTAINS or ARRAY_CONTAINS_ANY
3681 * filters. Returns `null` if there are no such filters.
3682 */
3683/**
3684 * Returns the value to use as the lower bound for an ascending index segment at
3685 * the provided `fieldPath` (or the upper bound for a descending segment).
3686 */
3687function Be(t, e, n) {
3688 let s = ae, i = !0;
3689 // Process all filters to find a value for the current field segment
3690 for (const n of $e(t, e)) {
3691 let t = ae, e = !0;
3692 switch (n.op) {
3693 case "<" /* LESS_THAN */ :
3694 case "<=" /* LESS_THAN_OR_EQUAL */ :
3695 t = be(n.value);
3696 break;
3697
3698 case "==" /* EQUAL */ :
3699 case "in" /* IN */ :
3700 case ">=" /* GREATER_THAN_OR_EQUAL */ :
3701 t = n.value;
3702 break;
3703
3704 case ">" /* GREATER_THAN */ :
3705 t = n.value, e = !1;
3706 break;
3707
3708 case "!=" /* NOT_EQUAL */ :
3709 case "not-in" /* NOT_IN */ :
3710 t = ae;
3711 // Remaining filters cannot be used as lower bounds.
3712 }
3713 ve({
3714 value: s,
3715 inclusive: i
3716 }, {
3717 value: t,
3718 inclusive: e
3719 }) < 0 && (s = t, i = e);
3720 }
3721 // If there is an additional bound, compare the values against the existing
3722 // range to see if we can narrow the scope.
3723 if (null !== n) for (let r = 0; r < t.orderBy.length; ++r) {
3724 if (t.orderBy[r].field.isEqual(e)) {
3725 const t = n.position[r];
3726 ve({
3727 value: s,
3728 inclusive: i
3729 }, {
3730 value: t,
3731 inclusive: n.inclusive
3732 }) < 0 && (s = t, i = n.inclusive);
3733 break;
3734 }
3735 }
3736 return {
3737 value: s,
3738 inclusive: i
3739 };
3740}
3741
3742/**
3743 * Returns the value to use as the upper bound for an ascending index segment at
3744 * the provided `fieldPath` (or the lower bound for a descending segment).
3745 */ function Le(t, e, n) {
3746 let s = ce, i = !0;
3747 // Process all filters to find a value for the current field segment
3748 for (const n of $e(t, e)) {
3749 let t = ce, e = !0;
3750 switch (n.op) {
3751 case ">=" /* GREATER_THAN_OR_EQUAL */ :
3752 case ">" /* GREATER_THAN */ :
3753 t = Pe(n.value), e = !1;
3754 break;
3755
3756 case "==" /* EQUAL */ :
3757 case "in" /* IN */ :
3758 case "<=" /* LESS_THAN_OR_EQUAL */ :
3759 t = n.value;
3760 break;
3761
3762 case "<" /* LESS_THAN */ :
3763 t = n.value, e = !1;
3764 break;
3765
3766 case "!=" /* NOT_EQUAL */ :
3767 case "not-in" /* NOT_IN */ :
3768 t = ce;
3769 // Remaining filters cannot be used as upper bounds.
3770 }
3771 Ve({
3772 value: s,
3773 inclusive: i
3774 }, {
3775 value: t,
3776 inclusive: e
3777 }) > 0 && (s = t, i = e);
3778 }
3779 // If there is an additional bound, compare the values against the existing
3780 // range to see if we can narrow the scope.
3781 if (null !== n) for (let r = 0; r < t.orderBy.length; ++r) {
3782 if (t.orderBy[r].field.isEqual(e)) {
3783 const t = n.position[r];
3784 Ve({
3785 value: s,
3786 inclusive: i
3787 }, {
3788 value: t,
3789 inclusive: n.inclusive
3790 }) > 0 && (s = t, i = n.inclusive);
3791 break;
3792 }
3793 }
3794 return {
3795 value: s,
3796 inclusive: i
3797 };
3798}
3799
3800 /** A field filter that compares a single document field against a value using the given operator. */ class Ue extends class {} {
3801 constructor(t, e, n) {
3802 super(), this.field = t, this.op = e, this.value = n;
3803 }
3804 /**
3805 * Creates a filter based on the provided arguments.
3806 */ static create(t, e, n) {
3807 return t.isKeyField() ? "in" /* IN */ === e || "not-in" /* NOT_IN */ === e ? this.lt(t, e, n) : new qe(t, e, n) : "array-contains" /* ARRAY_CONTAINS */ === e ? new je(t, n) : "in" /* IN */ === e ? new We(t, n) : "not-in" /* NOT_IN */ === e ? new ze(t, n) : "array-contains-any" /* ARRAY_CONTAINS_ANY */ === e ? new He(t, n) : new Ue(t, e, n);
3808 }
3809 static lt(t, e, n) {
3810 return "in" /* IN */ === e ? new Ke(t, n) : new Ge(t, n);
3811 }
3812 matches(t) {
3813 const e = t.data.field(this.field);
3814 // Types do not have to match in NOT_EQUAL filters.
3815 return "!=" /* NOT_EQUAL */ === this.op ? null !== e && this.ft(de(e, this.value)) : null !== e && he(this.value) === he(e) && this.ft(de(e, this.value));
3816 // Only compare types with matching backend order (such as double and int).
3817 }
3818 ft(t) {
3819 switch (this.op) {
3820 case "<" /* LESS_THAN */ :
3821 return t < 0;
3822
3823 case "<=" /* LESS_THAN_OR_EQUAL */ :
3824 return t <= 0;
3825
3826 case "==" /* EQUAL */ :
3827 return 0 === t;
3828
3829 case "!=" /* NOT_EQUAL */ :
3830 return 0 !== t;
3831
3832 case ">" /* GREATER_THAN */ :
3833 return t > 0;
3834
3835 case ">=" /* GREATER_THAN_OR_EQUAL */ :
3836 return t >= 0;
3837
3838 default:
3839 return M();
3840 }
3841 }
3842 dt() {
3843 return [ "<" /* LESS_THAN */ , "<=" /* LESS_THAN_OR_EQUAL */ , ">" /* GREATER_THAN */ , ">=" /* GREATER_THAN_OR_EQUAL */ , "!=" /* NOT_EQUAL */ , "not-in" /* NOT_IN */ ].indexOf(this.op) >= 0;
3844 }
3845}
3846
3847class qe extends Ue {
3848 constructor(t, e, n) {
3849 super(t, e, n), this.key = at.fromName(n.referenceValue);
3850 }
3851 matches(t) {
3852 const e = at.comparator(t.key, this.key);
3853 return this.ft(e);
3854 }
3855}
3856
3857/** Filter that matches on key fields within an array. */ class Ke extends Ue {
3858 constructor(t, e) {
3859 super(t, "in" /* IN */ , e), this.keys = Qe("in" /* IN */ , e);
3860 }
3861 matches(t) {
3862 return this.keys.some((e => e.isEqual(t.key)));
3863 }
3864}
3865
3866/** Filter that matches on key fields not present within an array. */ class Ge extends Ue {
3867 constructor(t, e) {
3868 super(t, "not-in" /* NOT_IN */ , e), this.keys = Qe("not-in" /* NOT_IN */ , e);
3869 }
3870 matches(t) {
3871 return !this.keys.some((e => e.isEqual(t.key)));
3872 }
3873}
3874
3875function Qe(t, e) {
3876 var n;
3877 return ((null === (n = e.arrayValue) || void 0 === n ? void 0 : n.values) || []).map((t => at.fromName(t.referenceValue)));
3878}
3879
3880/** A Filter that implements the array-contains operator. */ class je extends Ue {
3881 constructor(t, e) {
3882 super(t, "array-contains" /* ARRAY_CONTAINS */ , e);
3883 }
3884 matches(t) {
3885 const e = t.data.field(this.field);
3886 return pe(e) && fe(e.arrayValue, this.value);
3887 }
3888}
3889
3890/** A Filter that implements the IN operator. */ class We extends Ue {
3891 constructor(t, e) {
3892 super(t, "in" /* IN */ , e);
3893 }
3894 matches(t) {
3895 const e = t.data.field(this.field);
3896 return null !== e && fe(this.value.arrayValue, e);
3897 }
3898}
3899
3900/** A Filter that implements the not-in operator. */ class ze extends Ue {
3901 constructor(t, e) {
3902 super(t, "not-in" /* NOT_IN */ , e);
3903 }
3904 matches(t) {
3905 if (fe(this.value.arrayValue, {
3906 nullValue: "NULL_VALUE"
3907 })) return !1;
3908 const e = t.data.field(this.field);
3909 return null !== e && !fe(this.value.arrayValue, e);
3910 }
3911}
3912
3913/** A Filter that implements the array-contains-any operator. */ class He extends Ue {
3914 constructor(t, e) {
3915 super(t, "array-contains-any" /* ARRAY_CONTAINS_ANY */ , e);
3916 }
3917 matches(t) {
3918 const e = t.data.field(this.field);
3919 return !(!pe(e) || !e.arrayValue.values) && e.arrayValue.values.some((t => fe(this.value.arrayValue, t)));
3920 }
3921}
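
// Illustrative sketch (not part of the SDK): `Ue.create` picks the right filter subclass
// for an operator and `matches` evaluates it against a document. `exampleFieldFilter` is
// a hypothetical, uncalled helper; the resource name and field data are made up.
function exampleFieldFilter() {
  const data = Se.empty();
  data.set(new ct(["tags"]), { arrayValue: { values: [{ stringValue: "news" }] } });
  const doc = Ce.newFoundDocument(
    at.fromName("projects/p/databases/d/documents/posts/p1"), it.min(), data);
  const filter = Ue.create(new ct(["tags"]), "array-contains", { stringValue: "news" });
  return filter.matches(doc); // true
}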
3922
3923/**
3924 * Represents a bound of a query.
3925 *
3926 * The bound is specified with the given components representing a position and
3927 * whether it's just before or just after the position (relative to whatever the
3928 * query order is).
3929 *
3930 * The position represents a logical index position for a query. It's a prefix
3931 * of values for the (potentially implicit) order by clauses of a query.
3932 *
3933 * Bound provides a function to determine whether a document comes before or
3934 * after a bound. This is influenced by whether the position is just before or
3935 * just after the provided values.
3936 */ class Je {
3937 constructor(t, e) {
3938 this.position = t, this.inclusive = e;
3939 }
3940}
3941
3942/**
3943 * An ordering on a field, in some Direction. Direction defaults to ASCENDING.
3944 */ class Ye {
3945 constructor(t, e = "asc" /* ASCENDING */) {
3946 this.field = t, this.dir = e;
3947 }
3948}
3949
3950function Xe(t, e) {
3951 return t.dir === e.dir && t.field.isEqual(e.field);
3952}
3953
3954function Ze(t, e, n) {
3955 let s = 0;
3956 for (let i = 0; i < t.position.length; i++) {
3957 const r = e[i], o = t.position[i];
3958 if (r.field.isKeyField()) s = at.comparator(at.fromName(o.referenceValue), n.key); else {
3959 s = de(o, n.data.field(r.field));
3960 }
3961 if ("desc" /* DESCENDING */ === r.dir && (s *= -1), 0 !== s) break;
3962 }
3963 return s;
3964}
3965
3966/**
3967 * Returns true if a document sorts after a bound using the provided sort
3968 * order.
3969 */ function tn(t, e) {
3970 if (null === t) return null === e;
3971 if (null === e) return !1;
3972 if (t.inclusive !== e.inclusive || t.position.length !== e.position.length) return !1;
3973 for (let n = 0; n < t.position.length; n++) {
3974 if (!le(t.position[n], e.position[n])) return !1;
3975 }
3976 return !0;
3977}
3978
3979/**
3980 * @license
3981 * Copyright 2017 Google LLC
3982 *
3983 * Licensed under the Apache License, Version 2.0 (the "License");
3984 * you may not use this file except in compliance with the License.
3985 * You may obtain a copy of the License at
3986 *
3987 * http://www.apache.org/licenses/LICENSE-2.0
3988 *
3989 * Unless required by applicable law or agreed to in writing, software
3990 * distributed under the License is distributed on an "AS IS" BASIS,
3991 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
3992 * See the License for the specific language governing permissions and
3993 * limitations under the License.
3994 */
3995/**
3996 * Query encapsulates all the query attributes we support in the SDK. It can
3997 * be run against the LocalStore, as well as be converted to a `Target` to
3998 * query the RemoteStore results.
3999 *
4000 * Visible for testing.
4001 */ class en {
4002 /**
4003 * Initializes a Query with a path and optional additional query constraints.
4004 * Path must currently be empty if this is a collection group query.
4005 */
4006 constructor(t, e = null, n = [], s = [], i = null, r = "F" /* First */ , o = null, u = null) {
4007 this.path = t, this.collectionGroup = e, this.explicitOrderBy = n, this.filters = s,
4008 this.limit = i, this.limitType = r, this.startAt = o, this.endAt = u, this._t = null,
4009 // The corresponding `Target` of this `Query` instance.
4010 this.wt = null, this.startAt, this.endAt;
4011 }
4012}
4013
4014/** Creates a new Query instance with the options provided. */ function nn(t, e, n, s, i, r, o, u) {
4015 return new en(t, e, n, s, i, r, o, u);
4016}
4017
4018/** Creates a new Query for a query that matches all documents at `path` */ function sn(t) {
4019 return new en(t);
4020}
4021
4022/**
4023 * Helper to convert a collection group query into a collection query at a
4024 * specific path. This is used when executing collection group queries, since
4025 * we have to split the query into a set of collection queries at multiple
4026 * paths.
4027 */
4028/**
4029 * Returns true if this query does not specify any query constraints that
4030 * could remove results.
4031 */
4032function rn(t) {
4033 return 0 === t.filters.length && null === t.limit && null == t.startAt && null == t.endAt && (0 === t.explicitOrderBy.length || 1 === t.explicitOrderBy.length && t.explicitOrderBy[0].field.isKeyField());
4034}
4035
4036function on(t) {
4037 return t.explicitOrderBy.length > 0 ? t.explicitOrderBy[0].field : null;
4038}
4039
4040function un(t) {
4041 for (const e of t.filters) if (e.dt()) return e.field;
4042 return null;
4043}
4044
4045/**
4046 * Checks if any of the provided Operators are included in the query and
4047 * returns the first one that is, or null if none are.
4048 */
4049/**
4050 * Returns whether the query matches a collection group rather than a specific
4051 * collection.
4052 */
4053function cn(t) {
4054 return null !== t.collectionGroup;
4055}
4056
4057/**
4058 * Returns the implicit order by constraint that is used to execute the Query,
4059 * which can be different from the order by constraints the user provided (e.g.
4060 * the SDK and backend always order by `__name__`).
4061 */ function an(t) {
4062 const e = B(t);
4063 if (null === e._t) {
4064 e._t = [];
4065 const t = un(e), n = on(e);
4066 if (null !== t && null === n)
4067 // In order to implicitly add key ordering, we must also add the
4068 // inequality filter field for it to be a valid query.
4069 // Note that the default inequality field and key ordering is ascending.
4070 t.isKeyField() || e._t.push(new Ye(t)), e._t.push(new Ye(ct.keyField(), "asc" /* ASCENDING */)); else {
4071 let t = !1;
4072 for (const n of e.explicitOrderBy) e._t.push(n), n.field.isKeyField() && (t = !0);
4073 if (!t) {
4074 // The order of the implicit key ordering always matches the last
4075 // explicit order by
4076 const t = e.explicitOrderBy.length > 0 ? e.explicitOrderBy[e.explicitOrderBy.length - 1].dir : "asc" /* ASCENDING */;
4077 e._t.push(new Ye(ct.keyField(), t));
4078 }
4079 }
4080 }
4081 return e._t;
4082}
4083
4084/**
4085 * Converts this `Query` instance to it's corresponding `Target` representation.
4086 */ function hn(t) {
4087 const e = B(t);
4088 if (!e.wt) if ("F" /* First */ === e.limitType) e.wt = Ne(e.path, e.collectionGroup, an(e), e.filters, e.limit, e.startAt, e.endAt); else {
4089 // Flip the orderBy directions since we want the last results
4090 const t = [];
4091 for (const n of an(e)) {
4092 const e = "desc" /* DESCENDING */ === n.dir ? "asc" /* ASCENDING */ : "desc" /* DESCENDING */;
4093 t.push(new Ye(n.field, e));
4094 }
4095 // We need to swap the cursors to match the now-flipped query ordering.
4096 const n = e.endAt ? new Je(e.endAt.position, e.endAt.inclusive) : null, s = e.startAt ? new Je(e.startAt.position, e.startAt.inclusive) : null;
4097 // Now return as a LimitType.First query.
4098 e.wt = Ne(e.path, e.collectionGroup, t, e.filters, e.limit, n, s);
4099 }
4100 return e.wt;
4101}
4102
4103function ln(t, e, n) {
4104 return new en(t.path, t.collectionGroup, t.explicitOrderBy.slice(), t.filters.slice(), e, n, t.startAt, t.endAt);
4105}
4106
4107function fn(t, e) {
4108 return Me(hn(t), hn(e)) && t.limitType === e.limitType;
4109}
4110
4111// TODO(b/29183165): This is used to get a unique string from a query to, for
4112// example, use as a dictionary key, but the implementation is subject to
4113// collisions. Make it collision-free.
4114function dn(t) {
4115 return `${ke(hn(t))}|lt:${t.limitType}`;
4116}
4117
4118function _n(t) {
4119 return `Query(target=${Oe(hn(t))}; limitType=${t.limitType})`;
4120}
4121
4122/** Returns whether `doc` matches the constraints of `query`. */ function wn(t, e) {
4123 return e.isFoundDocument() && function(t, e) {
4124 const n = e.key.path;
4125 return null !== t.collectionGroup ? e.key.hasCollectionId(t.collectionGroup) && t.path.isPrefixOf(n) : at.isDocumentKey(t.path) ? t.path.isEqual(n) : t.path.isImmediateParentOf(n);
4126 }
4127 /**
4128 * A document must have a value for every ordering clause in order to show up
4129 * in the results.
4130 */ (t, e) && function(t, e) {
4131 for (const n of t.explicitOrderBy)
4132 // order by key always matches
4133 if (!n.field.isKeyField() && null === e.data.field(n.field)) return !1;
4134 return !0;
4135 }(t, e) && function(t, e) {
4136 for (const n of t.filters) if (!n.matches(e)) return !1;
4137 return !0;
4138 }
4139 /** Makes sure a document is within the bounds, if provided. */ (t, e) && function(t, e) {
4140 if (t.startAt && !
4141 /**
4142 * Returns true if a document sorts before a bound using the provided sort
4143 * order.
4144 */
4145 function(t, e, n) {
4146 const s = Ze(t, e, n);
4147 return t.inclusive ? s <= 0 : s < 0;
4148 }(t.startAt, an(t), e)) return !1;
4149 if (t.endAt && !function(t, e, n) {
4150 const s = Ze(t, e, n);
4151 return t.inclusive ? s >= 0 : s > 0;
4152 }(t.endAt, an(t), e)) return !1;
4153 return !0;
4154 }
4155 /**
4156 * Returns the collection group that this query targets.
4157 *
4158 * PORTING NOTE: This is only used in the Web SDK to facilitate multi-tab
4159 * synchronization for query results.
4160 */ (t, e);
4161}
4162
4163function mn(t) {
4164 return t.collectionGroup || (t.path.length % 2 == 1 ? t.path.lastSegment() : t.path.get(t.path.length - 2));
4165}
4166
4167/**
4168 * Returns a new comparator function that can be used to compare two documents
4169 * based on the Query's ordering constraint.
4170 */ function gn(t) {
4171 return (e, n) => {
4172 let s = !1;
4173 for (const i of an(t)) {
4174 const t = yn(i, e, n);
4175 if (0 !== t) return t;
4176 s = s || i.field.isKeyField();
4177 }
4178 return 0;
4179 };
4180}
4181
4182function yn(t, e, n) {
4183 const s = t.field.isKeyField() ? at.comparator(e.key, n.key) : function(t, e, n) {
4184 const s = e.data.field(t), i = n.data.field(t);
4185 return null !== s && null !== i ? de(s, i) : M();
4186 }(t.field, e, n);
4187 switch (t.dir) {
4188 case "asc" /* ASCENDING */ :
4189 return s;
4190
4191 case "desc" /* DESCENDING */ :
4192 return -1 * s;
4193
4194 default:
4195 return M();
4196 }
4197}
4198
4199/**
4200 * @license
4201 * Copyright 2020 Google LLC
4202 *
4203 * Licensed under the Apache License, Version 2.0 (the "License");
4204 * you may not use this file except in compliance with the License.
4205 * You may obtain a copy of the License at
4206 *
4207 * http://www.apache.org/licenses/LICENSE-2.0
4208 *
4209 * Unless required by applicable law or agreed to in writing, software
4210 * distributed under the License is distributed on an "AS IS" BASIS,
4211 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4212 * See the License for the specific language governing permissions and
4213 * limitations under the License.
4214 */
4215/**
4216 * Returns a DoubleValue for `value` that is encoded based on the serializer's
4217 * `useProto3Json` setting.
4218 */ function pn(t, e) {
4219 if (t.gt) {
4220 if (isNaN(e)) return {
4221 doubleValue: "NaN"
4222 };
4223 if (e === 1 / 0) return {
4224 doubleValue: "Infinity"
4225 };
4226 if (e === -1 / 0) return {
4227 doubleValue: "-Infinity"
4228 };
4229 }
4230 return {
4231 doubleValue: oe(e) ? "-0" : e
4232 };
4233}
4234
4235/**
4236 * Returns an IntegerValue for `value`.
4237 */ function In(t) {
4238 return {
4239 integerValue: "" + t
4240 };
4241}
4242
4243/**
4244 * Returns a value for a number that's appropriate to put into a proto.
4245 * The return value is an IntegerValue if it can safely represent the value,
4246 * otherwise a DoubleValue is returned.
4247 */ function Tn(t, e) {
4248 return ue(e) ? In(e) : pn(t, e);
4249}
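
// Illustrative sketch (not part of the SDK): `Tn` picks IntegerValue vs DoubleValue, and
// `pn` honors the serializer's proto3-JSON flag (the minified `gt` property).
// `exampleNumberEncoding` is a hypothetical, uncalled helper, and the plain `{ gt: ... }`
// objects stand in for a real serializer.
function exampleNumberEncoding() {
  return [
    Tn({ gt: true }, 42),  // { integerValue: "42" }
    Tn({ gt: true }, 0.5), // { doubleValue: 0.5 }
    Tn({ gt: true }, NaN), // { doubleValue: "NaN" } (proto3 JSON string form)
    Tn({ gt: false }, NaN) // { doubleValue: NaN }
  ];
}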
4250
4251/**
4252 * @license
4253 * Copyright 2018 Google LLC
4254 *
4255 * Licensed under the Apache License, Version 2.0 (the "License");
4256 * you may not use this file except in compliance with the License.
4257 * You may obtain a copy of the License at
4258 *
4259 * http://www.apache.org/licenses/LICENSE-2.0
4260 *
4261 * Unless required by applicable law or agreed to in writing, software
4262 * distributed under the License is distributed on an "AS IS" BASIS,
4263 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4264 * See the License for the specific language governing permissions and
4265 * limitations under the License.
4266 */
4267/** Used to represent a field transform on a mutation. */ class En {
4268 constructor() {
4269 // Make sure that the structural type of `TransformOperation` is unique.
4270 // See https://github.com/microsoft/TypeScript/issues/5451
4271 this._ = void 0;
4272 }
4273}
4274
4275/**
4276 * Computes the local transform result against the provided `previousValue`,
4277 * optionally using the provided localWriteTime.
4278 */ function An(t, e, n) {
4279 return t instanceof Pn ? function(t, e) {
4280 const n = {
4281 fields: {
4282 __type__: {
4283 stringValue: "server_timestamp"
4284 },
4285 __local_write_time__: {
4286 timestampValue: {
4287 seconds: t.seconds,
4288 nanos: t.nanoseconds
4289 }
4290 }
4291 }
4292 };
4293 return e && (n.fields.__previous_value__ = e), {
4294 mapValue: n
4295 };
4296 }(n, e) : t instanceof vn ? Vn(t, e) : t instanceof Sn ? Dn(t, e) : function(t, e) {
4297 // PORTING NOTE: Since JavaScript's integer arithmetic is limited to 53 bit
4298 // precision and resolves overflows by reducing precision, we do not
4299 // manually cap overflows at 2^63.
4300 const n = bn(t, e), s = xn(n) + xn(t.yt);
4301 return ye(n) && ye(t.yt) ? In(s) : pn(t.It, s);
4302 }(t, e);
4303}
4304
4305/**
4306 * Computes a final transform result after the transform has been acknowledged
4307 * by the server, potentially using the server-provided transformResult.
4308 */ function Rn(t, e, n) {
4309 // The server just sends null as the transform result for array operations,
4310 // so we have to calculate a result the same as we do for local
4311 // applications.
4312 return t instanceof vn ? Vn(t, e) : t instanceof Sn ? Dn(t, e) : n;
4313}
4314
4315/**
4316 * If this transform operation is not idempotent, returns the base value to
4317 * persist for this transform. If a base value is returned, the transform
4318 * operation is always applied to this base value, even if the document has
4319 * already been updated.
4320 *
4321 * Base values provide consistent behavior for non-idempotent transforms and
4322 * allow us to return the same latency-compensated value even if the backend
4323 * has already applied the transform operation. The base value is null for
4324 * idempotent transforms, as they can be re-played even if the backend has
4325 * already applied them.
4326 *
4327 * @returns a base value to store along with the mutation, or null for
4328 * idempotent transforms.
4329 */ function bn(t, e) {
4330 return t instanceof Cn ? ye(n = e) || function(t) {
4331 return !!t && "doubleValue" in t;
4332 }
4333 /** Returns true if `value` is either an IntegerValue or a DoubleValue. */ (n) ? e : {
4334 integerValue: 0
4335 } : null;
4336 var n;
4337}
4338
4339/** Transforms a value into a server-generated timestamp. */
4340class Pn extends En {}
4341
4342/** Transforms an array value via a union operation. */ class vn extends En {
4343 constructor(t) {
4344 super(), this.elements = t;
4345 }
4346}
4347
4348function Vn(t, e) {
4349 const n = Nn(e);
4350 for (const e of t.elements) n.some((t => le(t, e))) || n.push(e);
4351 return {
4352 arrayValue: {
4353 values: n
4354 }
4355 };
4356}
4357
4358/** Transforms an array value via a remove operation. */ class Sn extends En {
4359 constructor(t) {
4360 super(), this.elements = t;
4361 }
4362}
4363
4364function Dn(t, e) {
4365 let n = Nn(e);
4366 for (const e of t.elements) n = n.filter((t => !le(t, e)));
4367 return {
4368 arrayValue: {
4369 values: n
4370 }
4371 };
4372}
4373
4374/**
4375 * Implements the backend semantics for locally computed NUMERIC_ADD (increment)
4376 * transforms. Converts all field values to integers or doubles, but unlike the
4377 * backend does not cap integer values at 2^63. Instead, JavaScript number
4378 * arithmetic is used and precision loss can occur for values greater than 2^53.
4379 */ class Cn extends En {
4380 constructor(t, e) {
4381 super(), this.It = t, this.yt = e;
4382 }
4383}
4384
4385function xn(t) {
4386 return Xt(t.integerValue || t.doubleValue);
4387}
4388
4389function Nn(t) {
4390 return pe(t) && t.arrayValue.values ? t.arrayValue.values.slice() : [];
4391}
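
// Illustrative sketch (not part of the SDK): applying transform operations locally with
// `An` - array union (`vn`), array remove (`Sn`), and numeric increment (`Cn`).
// `exampleTransforms` is a hypothetical, uncalled helper; `{ gt: false }` stands in for
// the serializer (only consulted when the result is a double), and the last argument
// (local write time) is not used by these three transforms.
function exampleTransforms() {
  const union = An(new vn([{ stringValue: "a" }]),
    { arrayValue: { values: [{ stringValue: "a" }, { stringValue: "b" }] } }, null);
  // { arrayValue: { values: [{ stringValue: "a" }, { stringValue: "b" }] } } - "a" is not duplicated
  const removed = An(new Sn([{ stringValue: "b" }]),
    { arrayValue: { values: [{ stringValue: "a" }, { stringValue: "b" }] } }, null);
  // { arrayValue: { values: [{ stringValue: "a" }] } }
  const incremented = An(new Cn({ gt: false }, { integerValue: "1" }), { integerValue: "41" }, null);
  // { integerValue: "42" }
  return [union, removed, incremented];
}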
4392
4393/**
4394 * @license
4395 * Copyright 2017 Google LLC
4396 *
4397 * Licensed under the Apache License, Version 2.0 (the "License");
4398 * you may not use this file except in compliance with the License.
4399 * You may obtain a copy of the License at
4400 *
4401 * http://www.apache.org/licenses/LICENSE-2.0
4402 *
4403 * Unless required by applicable law or agreed to in writing, software
4404 * distributed under the License is distributed on an "AS IS" BASIS,
4405 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4406 * See the License for the specific language governing permissions and
4407 * limitations under the License.
4408 */
4409/** A field path and the TransformOperation to perform upon it. */ class kn {
4410 constructor(t, e) {
4411 this.field = t, this.transform = e;
4412 }
4413}
4414
4415function On(t, e) {
4416 return t.field.isEqual(e.field) && function(t, e) {
4417 return t instanceof vn && e instanceof vn || t instanceof Sn && e instanceof Sn ? et(t.elements, e.elements, le) : t instanceof Cn && e instanceof Cn ? le(t.yt, e.yt) : t instanceof Pn && e instanceof Pn;
4418 }(t.transform, e.transform);
4419}
4420
4421/** The result of successfully applying a mutation to the backend. */
4422class Mn {
4423 constructor(
4424 /**
4425 * The version at which the mutation was committed:
4426 *
4427 * - For most operations, this is the updateTime in the WriteResult.
4428 * - For deletes, the commitTime of the WriteResponse (because deletes are
4429 * not stored and have no updateTime).
4430 *
4431 * Note that these versions can be different: No-op writes will not change
4432 * the updateTime even though the commitTime advances.
4433 */
4434 t,
4435 /**
4436 * The resulting fields returned from the backend after a mutation
4437 * containing field transforms has been committed. Contains one FieldValue
4438 * for each FieldTransform that was in the mutation.
4439 *
4440 * Will be empty if the mutation did not contain any field transforms.
4441 */
4442 e) {
4443 this.version = t, this.transformResults = e;
4444 }
4445}
4446
4447/**
4448 * Encodes a precondition for a mutation. This follows the model that the
4449 * backend accepts with the special case of an explicit "empty" precondition
4450 * (meaning no precondition).
4451 */ class Fn {
4452 constructor(t, e) {
4453 this.updateTime = t, this.exists = e;
4454 }
4455 /** Creates a new empty Precondition. */ static none() {
4456 return new Fn;
4457 }
4458 /** Creates a new Precondition with an exists flag. */ static exists(t) {
4459 return new Fn(void 0, t);
4460 }
4461 /** Creates a new Precondition based on a version a document exists at. */ static updateTime(t) {
4462 return new Fn(t);
4463 }
4464 /** Returns whether this Precondition is empty. */ get isNone() {
4465 return void 0 === this.updateTime && void 0 === this.exists;
4466 }
4467 isEqual(t) {
4468 return this.exists === t.exists && (this.updateTime ? !!t.updateTime && this.updateTime.isEqual(t.updateTime) : !t.updateTime);
4469 }
4470}
4471
4472/** Returns true if the preconditions is valid for the given document. */ function $n(t, e) {
4473 return void 0 !== t.updateTime ? e.isFoundDocument() && e.version.isEqual(t.updateTime) : void 0 === t.exists || t.exists === e.isFoundDocument();
4474}
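
// Illustrative sketch (not part of the SDK): how `$n` validates a Precondition (`Fn`)
// against a document. `examplePrecondition` is a hypothetical, uncalled helper; the
// resource name is made up.
function examplePrecondition() {
  const key = at.fromName("projects/p/databases/d/documents/users/alice");
  const missing = Ce.newInvalidDocument(key);
  const found = Ce.newFoundDocument(key, it.min(), Se.empty());
  return [
    $n(Fn.none(), missing),       // true  - an empty precondition always passes
    $n(Fn.exists(true), missing), // false - the document does not exist
    $n(Fn.exists(true), found)    // true
  ];
}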
4475
4476/**
4477 * A mutation describes a self-contained change to a document. Mutations can
4478 * create, replace, delete, and update subsets of documents.
4479 *
4480 * Mutations not only act on the value of the document but also its version.
4481 *
4482 * For local mutations (mutations that haven't been committed yet), we preserve
4483 * the existing version for Set and Patch mutations. For Delete mutations, we
4484 * reset the version to 0.
4485 *
4486 * Here's the expected transition table.
4487 *
4488 * MUTATION APPLIED TO RESULTS IN
4489 *
4490 * SetMutation Document(v3) Document(v3)
4491 * SetMutation NoDocument(v3) Document(v0)
4492 * SetMutation InvalidDocument(v0) Document(v0)
4493 * PatchMutation Document(v3) Document(v3)
4494 * PatchMutation NoDocument(v3) NoDocument(v3)
4495 * PatchMutation InvalidDocument(v0) UnknownDocument(v3)
4496 * DeleteMutation Document(v3) NoDocument(v0)
4497 * DeleteMutation NoDocument(v3) NoDocument(v0)
4498 * DeleteMutation InvalidDocument(v0) NoDocument(v0)
4499 *
4500 * For acknowledged mutations, we use the updateTime of the WriteResponse as
4501 * the resulting version for Set and Patch mutations. As deletes have no
4502 * explicit update time, we use the commitTime of the WriteResponse for
4503 * Delete mutations.
4504 *
4505 * If a mutation is acknowledged by the backend but fails the precondition check
4506 * locally, we transition to an `UnknownDocument` and rely on Watch to send us
4507 * the updated version.
4508 *
4509 * Field transforms are used only with Patch and Set Mutations. We use the
4510 * `updateTransforms` message to store transforms, rather than the `transforms`
4511 * messages.
4512 *
4513 * ## Subclassing Notes
4514 *
4515 * Every type of mutation needs to implement its own applyToRemoteDocument() and
4516 * applyToLocalView() to implement the actual behavior of applying the mutation
4517 * to some source document (see `setMutationApplyToRemoteDocument()` for an
4518 * example).
4519 */ class Bn {}
4520
4521/**
4522 * A utility method to calculate a `Mutation` representing the overlay from the
4523 * final state of the document, and a `FieldMask` representing the fields that
4524 * are mutated by the local mutations.
4525 */ function Ln(t, e) {
4526 if (!t.hasLocalMutations || e && 0 === e.fields.length) return null;
4527 // mask is null when sets or deletes are applied to the current document.
4528 if (null === e) return t.isNoDocument() ? new Jn(t.key, Fn.none()) : new Qn(t.key, t.data, Fn.none());
4529 {
4530 const n = t.data, s = Se.empty();
4531 let i = new Kt(ct.comparator);
4532 for (let t of e.fields) if (!i.has(t)) {
4533 let e = n.field(t);
4534 // If we are deleting a nested field, we take the immediate parent as
4535 // the mask used to construct the resulting mutation.
4536 // Justification: Nested fields can create parent fields implicitly. If
4537 // only a leaf entry is deleted in later mutations, the parent field
4538 // should still remain, but we may have lost this information.
4539 // Consider mutation (foo.bar 1), then mutation (foo.bar delete()).
4540 // This leaves the final result (foo, {}). Despite the fact that `doc`
4541 // has the correct result, `foo` is not in `mask`, and the resulting
4542 // mutation would miss `foo`.
4543 null === e && t.length > 1 && (t = t.popLast(), e = n.field(t)), null === e ? s.delete(t) : s.set(t, e),
4544 i = i.add(t);
4545 }
4546 return new jn(t.key, s, new jt(i.toArray()), Fn.none());
4547 }
4548}
4549
4550/**
4551 * Applies this mutation to the given document for the purposes of computing a
4552 * new remote document. If the input document doesn't match the expected state
4553 * (e.g. it is invalid or outdated), the document type may transition to
4554 * unknown.
4555 *
4556 * @param mutation - The mutation to apply.
4557 * @param document - The document to mutate. The input document can be an
4558 * invalid document if the client has no knowledge of the pre-mutation state
4559 * of the document.
4560 * @param mutationResult - The result of applying the mutation from the backend.
4561 */ function Un(t, e, n) {
4562 t instanceof Qn ? function(t, e, n) {
4563 // Unlike setMutationApplyToLocalView, if we're applying a mutation to a
4564 // remote document the server has accepted the mutation so the precondition
4565 // must have held.
4566 const s = t.value.clone(), i = zn(t.fieldTransforms, e, n.transformResults);
4567 s.setAll(i), e.convertToFoundDocument(n.version, s).setHasCommittedMutations();
4568 }(t, e, n) : t instanceof jn ? function(t, e, n) {
4569 if (!$n(t.precondition, e))
4570 // Since the mutation was not rejected, we know that the precondition
4571 // matched on the backend. We therefore must not have the expected version
4572 // of the document in our cache and convert to an UnknownDocument with a
4573 // known updateTime.
4574 return void e.convertToUnknownDocument(n.version);
4575 const s = zn(t.fieldTransforms, e, n.transformResults), i = e.data;
4576 i.setAll(Wn(t)), i.setAll(s), e.convertToFoundDocument(n.version, i).setHasCommittedMutations();
4577 }(t, e, n) : function(t, e, n) {
4578 // Unlike applyToLocalView, if we're applying a mutation to a remote
4579 // document the server has accepted the mutation so the precondition must
4580 // have held.
4581 e.convertToNoDocument(n.version).setHasCommittedMutations();
4582 }(0, e, n);
4583}
4584
4585/**
4586 * Applies this mutation to the given document for the purposes of computing
4587 * the new local view of a document. If the input document doesn't match the
4588 * expected state, the document is not modified.
4589 *
4590 * @param mutation - The mutation to apply.
4591 * @param document - The document to mutate. The input document can be an
4592 * invalid document if the client has no knowledge of the pre-mutation state
4593 * of the document.
4594 * @param previousMask - The fields that have been updated before applying this mutation.
4595 * @param localWriteTime - A timestamp indicating the local write time of the
4596 * batch this mutation is a part of.
4597 * @returns A `FieldMask` representing the fields that are changed by applying this mutation.
4598 */ function qn(t, e, n, s) {
4599 return t instanceof Qn ? function(t, e, n, s) {
4600 if (!$n(t.precondition, e))
4601 // The mutation failed to apply (e.g. a document ID created with add()
4602 // caused a name collision).
4603 return n;
4604 const i = t.value.clone(), r = Hn(t.fieldTransforms, s, e);
4605 return i.setAll(r), e.convertToFoundDocument(e.version, i).setHasLocalMutations(),
4606 null;
4607 // SetMutation overwrites all fields.
4608 }
4609 /**
4610 * A mutation that modifies fields of the document at the given key with the
4611 * given values. The values are applied through a field mask:
4612 *
4613 * * When a field is in both the mask and the values, the corresponding field
4614 * is updated.
4615 * * When a field is in neither the mask nor the values, the corresponding
4616 * field is unmodified.
4617 * * When a field is in the mask but not in the values, the corresponding field
4618 * is deleted.
4619 * * When a field is not in the mask but is in the values, the values map is
4620 * ignored.
4621 */ (t, e, n, s) : t instanceof jn ? function(t, e, n, s) {
4622 if (!$n(t.precondition, e)) return n;
4623 const i = Hn(t.fieldTransforms, s, e), r = e.data;
4624 if (r.setAll(Wn(t)), r.setAll(i), e.convertToFoundDocument(e.version, r).setHasLocalMutations(),
4625 null === n) return null;
4626 return n.unionWith(t.fieldMask.fields).unionWith(t.fieldTransforms.map((t => t.field)));
4627 }
4628 /**
4629 * Returns a FieldPath/Value map with the content of the PatchMutation.
4630 */ (t, e, n, s) : function(t, e, n) {
4631 if ($n(t.precondition, e)) return e.convertToNoDocument(e.version).setHasLocalMutations(),
4632 null;
4633 return n;
4634 }
4635 /**
4636 * A mutation that verifies the existence of the document at the given key with
4637 * the provided precondition.
4638 *
4639 * The `verify` operation is only used in Transactions, and this class serves
4640 * primarily to facilitate serialization into protos.
4641 */ (t, e, n);
4642}
4643
4644/**
4645 * If this mutation is not idempotent, returns the base value to persist with
4646 * this mutation. If a base value is returned, the mutation is always applied
4647 * to this base value, even if the document has already been updated.
4648 *
4649 * The base value is a sparse object that consists of only the document
4650 * fields for which this mutation contains a non-idempotent transformation
4651 * (e.g. a numeric increment). The provided value guarantees consistent
4652 * behavior for non-idempotent transforms and allows us to return the same
4653 * latency-compensated value even if the backend has already applied the
4654 * mutation. The base value is null for idempotent mutations, as they can be
4655 * re-played even if the backend has already applied them.
4656 *
4657 * @returns a base value to store along with the mutation, or null for
4658 * idempotent mutations.
4659 */ function Kn(t, e) {
4660 let n = null;
4661 for (const s of t.fieldTransforms) {
4662 const t = e.data.field(s.field), i = bn(s.transform, t || null);
4663 null != i && (null === n && (n = Se.empty()), n.set(s.field, i));
4664 }
4665 return n || null;
4666}
4667
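/**
 * Illustration only (never invoked): why a base value is needed for
 * non-idempotent transforms such as a numeric increment. The pre-mutation
 * value is captured once and stored with the mutation, so recomputing the
 * latency-compensated view any number of times yields the same result instead
 * of compounding the increment. Plain-number sketch with a hypothetical name:
 */ function baseValueSketch(currentFieldValue, incrementBy) {
    const base = currentFieldValue; // captured once, persisted with the mutation
    const firstRecompute = base + incrementBy;
    const secondRecompute = base + incrementBy; // same result, no drift
    return firstRecompute === secondRecompute; // always true
}
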
4668function Gn(t, e) {
4669 return t.type === e.type && (!!t.key.isEqual(e.key) && (!!t.precondition.isEqual(e.precondition) && (!!function(t, e) {
4670 return void 0 === t && void 0 === e || !(!t || !e) && et(t, e, ((t, e) => On(t, e)));
4671 }(t.fieldTransforms, e.fieldTransforms) && (0 /* Set */ === t.type ? t.value.isEqual(e.value) : 1 /* Patch */ !== t.type || t.data.isEqual(e.data) && t.fieldMask.isEqual(e.fieldMask)))));
4672}
4673
4674/**
4675 * A mutation that creates or replaces the document at the given key with the
4676 * object value contents.
4677 */ class Qn extends Bn {
4678 constructor(t, e, n, s = []) {
4679 super(), this.key = t, this.value = e, this.precondition = n, this.fieldTransforms = s,
4680 this.type = 0 /* Set */;
4681 }
4682 getFieldMask() {
4683 return null;
4684 }
4685}
4686
4687class jn extends Bn {
4688 constructor(t, e, n, s, i = []) {
4689 super(), this.key = t, this.data = e, this.fieldMask = n, this.precondition = s,
4690 this.fieldTransforms = i, this.type = 1 /* Patch */;
4691 }
4692 getFieldMask() {
4693 return this.fieldMask;
4694 }
4695}
4696
4697function Wn(t) {
4698 const e = new Map;
4699 return t.fieldMask.fields.forEach((n => {
4700 if (!n.isEmpty()) {
4701 const s = t.data.field(n);
4702 e.set(n, s);
4703 }
4704 })), e;
4705}
4706
4707/**
4708 * Creates a list of "transform results" (a transform result is a field value
4709 * representing the result of applying a transform) for use after a mutation
4710 * containing transforms has been acknowledged by the server.
4711 *
4712 * @param fieldTransforms - The field transforms to apply the result to.
4713 * @param mutableDocument - The current state of the document after applying all
4714 * previous mutations.
4715 * @param serverTransformResults - The transform results received by the server.
4716 * @returns The transform results list.
4717 */ function zn(t, e, n) {
4718 const s = new Map;
4719 F(t.length === n.length);
4720 for (let i = 0; i < n.length; i++) {
4721 const r = t[i], o = r.transform, u = e.data.field(r.field);
4722 s.set(r.field, Rn(o, u, n[i]));
4723 }
4724 return s;
4725}
4726
4727/**
4728 * Creates a list of "transform results" (a transform result is a field value
4729 * representing the result of applying a transform) for use when applying a
4730 * transform locally.
4731 *
4732 * @param fieldTransforms - The field transforms to apply the result to.
4733 * @param localWriteTime - The local time of the mutation (used to
4734 * generate ServerTimestampValues).
4735 * @param mutableDocument - The document to apply transforms on.
4736 * @returns The transform results list.
4737 */ function Hn(t, e, n) {
4738 const s = new Map;
4739 for (const i of t) {
4740 const t = i.transform, r = n.data.field(i.field);
4741 s.set(i.field, An(t, r, e));
4742 }
4743 return s;
4744}
4745
4746/** A mutation that deletes the document at the given key. */ class Jn extends Bn {
4747 constructor(t, e) {
4748 super(), this.key = t, this.precondition = e, this.type = 2 /* Delete */ , this.fieldTransforms = [];
4749 }
4750 getFieldMask() {
4751 return null;
4752 }
4753}
4754
4755class Yn extends Bn {
4756 constructor(t, e) {
4757 super(), this.key = t, this.precondition = e, this.type = 3 /* Verify */ , this.fieldTransforms = [];
4758 }
4759 getFieldMask() {
4760 return null;
4761 }
4762}
4763
4764/**
4765 * @license
4766 * Copyright 2017 Google LLC
4767 *
4768 * Licensed under the Apache License, Version 2.0 (the "License");
4769 * you may not use this file except in compliance with the License.
4770 * You may obtain a copy of the License at
4771 *
4772 * http://www.apache.org/licenses/LICENSE-2.0
4773 *
4774 * Unless required by applicable law or agreed to in writing, software
4775 * distributed under the License is distributed on an "AS IS" BASIS,
4776 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4777 * See the License for the specific language governing permissions and
4778 * limitations under the License.
4779 */ class Xn {
4780 // TODO(b/33078163): just use simplest form of existence filter for now
4781 constructor(t) {
4782 this.count = t;
4783 }
4784}
4785
4786/**
4787 * @license
4788 * Copyright 2017 Google LLC
4789 *
4790 * Licensed under the Apache License, Version 2.0 (the "License");
4791 * you may not use this file except in compliance with the License.
4792 * You may obtain a copy of the License at
4793 *
4794 * http://www.apache.org/licenses/LICENSE-2.0
4795 *
4796 * Unless required by applicable law or agreed to in writing, software
4797 * distributed under the License is distributed on an "AS IS" BASIS,
4798 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4799 * See the License for the specific language governing permissions and
4800 * limitations under the License.
4801 */
4802/**
4803 * Error Codes describing the different ways GRPC can fail. These are copied
4804 * directly from GRPC's sources here:
4805 *
4806 * https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
4807 *
4808 * Important! The names of these identifiers matter because the string forms
4809 * are used for reverse lookups from the webchannel stream. Do NOT change the
4810 * names of these identifiers or change this into a const enum.
4811 */ var Zn, ts;
4812
4813/**
4814 * Determines whether an error code represents a permanent error when received
4815 * in response to a non-write operation.
4816 *
4817 * See isPermanentWriteError for classifying write errors.
4818 */
4819function es(t) {
4820 switch (t) {
4821 default:
4822 return M();
4823
4824 case L.CANCELLED:
4825 case L.UNKNOWN:
4826 case L.DEADLINE_EXCEEDED:
4827 case L.RESOURCE_EXHAUSTED:
4828 case L.INTERNAL:
4829 case L.UNAVAILABLE:
4830 // Unauthenticated means something went wrong with our token and we need
4831 // to retry with new credentials which will happen automatically.
4832 case L.UNAUTHENTICATED:
4833 return !1;
4834
4835 case L.INVALID_ARGUMENT:
4836 case L.NOT_FOUND:
4837 case L.ALREADY_EXISTS:
4838 case L.PERMISSION_DENIED:
4839 case L.FAILED_PRECONDITION:
4840 // Aborted might be retried in some scenarios, but that is dependent on
4841 // the context and should be handled individually by the calling code.
4842 // See https://cloud.google.com/apis/design/errors.
4843 case L.ABORTED:
4844 case L.OUT_OF_RANGE:
4845 case L.UNIMPLEMENTED:
4846 case L.DATA_LOSS:
4847 return !0;
4848 }
4849}
4850
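/*
 * For example, with the classification above:
 *   es(L.UNAVAILABLE)       === false  // transient: back off and retry
 *   es(L.UNAUTHENTICATED)   === false  // token is refreshed automatically
 *   es(L.PERMISSION_DENIED) === true   // permanent for non-write operations
 *   es(L.ABORTED)           === true   // permanent here; write streams treat it specially (see below)
 */
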
4851/**
4852 * Determines whether an error code represents a permanent error when received
4853 * in response to a write operation.
4854 *
4855 * Write operations must be handled specially because as of b/119437764, ABORTED
4856 * errors on the write stream should be retried too (even though ABORTED errors
4857 * are not generally retryable).
4858 *
4859 * Note that during the initial handshake on the write stream an ABORTED error
4860 * signals that we should discard our stream token (i.e. it is permanent). This
4861 * means a handshake error should be classified with isPermanentError, above.
4862 */
4863/**
4864 * Maps an error Code from a GRPC status code number, like 0, 1, or 14. These
4865 * are not the same as HTTP status codes.
4866 *
4867 * @returns The Code equivalent to the given GRPC status code. Fails if there
4868 * is no match.
4869 */
4870function ns(t) {
4871 if (void 0 === t)
4872 // This shouldn't normally happen, but in certain error cases (like trying
4873 // to send invalid proto messages) we may get an error with no GRPC code.
4874 return N("GRPC error has no .code"), L.UNKNOWN;
4875 switch (t) {
4876 case Zn.OK:
4877 return L.OK;
4878
4879 case Zn.CANCELLED:
4880 return L.CANCELLED;
4881
4882 case Zn.UNKNOWN:
4883 return L.UNKNOWN;
4884
4885 case Zn.DEADLINE_EXCEEDED:
4886 return L.DEADLINE_EXCEEDED;
4887
4888 case Zn.RESOURCE_EXHAUSTED:
4889 return L.RESOURCE_EXHAUSTED;
4890
4891 case Zn.INTERNAL:
4892 return L.INTERNAL;
4893
4894 case Zn.UNAVAILABLE:
4895 return L.UNAVAILABLE;
4896
4897 case Zn.UNAUTHENTICATED:
4898 return L.UNAUTHENTICATED;
4899
4900 case Zn.INVALID_ARGUMENT:
4901 return L.INVALID_ARGUMENT;
4902
4903 case Zn.NOT_FOUND:
4904 return L.NOT_FOUND;
4905
4906 case Zn.ALREADY_EXISTS:
4907 return L.ALREADY_EXISTS;
4908
4909 case Zn.PERMISSION_DENIED:
4910 return L.PERMISSION_DENIED;
4911
4912 case Zn.FAILED_PRECONDITION:
4913 return L.FAILED_PRECONDITION;
4914
4915 case Zn.ABORTED:
4916 return L.ABORTED;
4917
4918 case Zn.OUT_OF_RANGE:
4919 return L.OUT_OF_RANGE;
4920
4921 case Zn.UNIMPLEMENTED:
4922 return L.UNIMPLEMENTED;
4923
4924 case Zn.DATA_LOSS:
4925 return L.DATA_LOSS;
4926
4927 default:
4928 return M();
4929 }
4930}
4931
4932/**
4933 * Converts an HTTP response's error status to the equivalent error code.
4934 *
4935 * @param status - An HTTP error response status ("FAILED_PRECONDITION",
4936 * "UNKNOWN", etc.)
4937 * @returns The equivalent Code. Non-matching responses are mapped to
4938 * Code.UNKNOWN.
4939 */ (ts = Zn || (Zn = {}))[ts.OK = 0] = "OK", ts[ts.CANCELLED = 1] = "CANCELLED",
4940ts[ts.UNKNOWN = 2] = "UNKNOWN", ts[ts.INVALID_ARGUMENT = 3] = "INVALID_ARGUMENT",
4941ts[ts.DEADLINE_EXCEEDED = 4] = "DEADLINE_EXCEEDED", ts[ts.NOT_FOUND = 5] = "NOT_FOUND",
4942ts[ts.ALREADY_EXISTS = 6] = "ALREADY_EXISTS", ts[ts.PERMISSION_DENIED = 7] = "PERMISSION_DENIED",
4943ts[ts.UNAUTHENTICATED = 16] = "UNAUTHENTICATED", ts[ts.RESOURCE_EXHAUSTED = 8] = "RESOURCE_EXHAUSTED",
4944ts[ts.FAILED_PRECONDITION = 9] = "FAILED_PRECONDITION", ts[ts.ABORTED = 10] = "ABORTED",
4945ts[ts.OUT_OF_RANGE = 11] = "OUT_OF_RANGE", ts[ts.UNIMPLEMENTED = 12] = "UNIMPLEMENTED",
4946ts[ts.INTERNAL = 13] = "INTERNAL", ts[ts.UNAVAILABLE = 14] = "UNAVAILABLE", ts[ts.DATA_LOSS = 15] = "DATA_LOSS";
4947
4948/**
4949 * @license
4950 * Copyright 2017 Google LLC
4951 *
4952 * Licensed under the Apache License, Version 2.0 (the "License");
4953 * you may not use this file except in compliance with the License.
4954 * You may obtain a copy of the License at
4955 *
4956 * http://www.apache.org/licenses/LICENSE-2.0
4957 *
4958 * Unless required by applicable law or agreed to in writing, software
4959 * distributed under the License is distributed on an "AS IS" BASIS,
4960 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
4961 * See the License for the specific language governing permissions and
4962 * limitations under the License.
4963 */
4964/**
4965 * A map implementation that uses objects as keys. Objects must have an
4966 * associated equals function and must be immutable. Entries in the map are
4967 * stored together with the key being produced from the mapKeyFn. This map
4968 * automatically handles collisions of keys.
4969 */
4970class ss {
4971 constructor(t, e) {
4972 this.mapKeyFn = t, this.equalsFn = e,
4973 /**
4974 * The inner map for a key/value pair. Due to the possibility of collisions we
4975 * keep a list of entries that we do a linear search through to find an actual
4976 * match. Note that collisions should be rare, so we still expect near
4977 * constant time lookups in practice.
4978 */
4979 this.inner = {},
4980 /** The number of entries stored in the map */
4981 this.innerSize = 0;
4982 }
4983 /** Get a value for this key, or undefined if it does not exist. */ get(t) {
4984 const e = this.mapKeyFn(t), n = this.inner[e];
4985 if (void 0 !== n) for (const [e, s] of n) if (this.equalsFn(e, t)) return s;
4986 }
4987 has(t) {
4988 return void 0 !== this.get(t);
4989 }
4990 /** Put this key and value in the map. */ set(t, e) {
4991 const n = this.mapKeyFn(t), s = this.inner[n];
4992 if (void 0 === s) return this.inner[n] = [ [ t, e ] ], void this.innerSize++;
4993 for (let n = 0; n < s.length; n++) if (this.equalsFn(s[n][0], t))
4994 // This is updating an existing entry and does not increase `innerSize`.
4995 return void (s[n] = [ t, e ]);
4996 s.push([ t, e ]), this.innerSize++;
4997 }
4998 /**
4999 * Removes this key from the map. Returns true if anything was deleted.
5000 */ delete(t) {
5001 const e = this.mapKeyFn(t), n = this.inner[e];
5002 if (void 0 === n) return !1;
5003 for (let s = 0; s < n.length; s++) if (this.equalsFn(n[s][0], t)) return 1 === n.length ? delete this.inner[e] : n.splice(s, 1),
5004 this.innerSize--, !0;
5005 return !1;
5006 }
5007 forEach(t) {
5008 $t(this.inner, ((e, n) => {
5009 for (const [e, s] of n) t(e, s);
5010 }));
5011 }
5012 isEmpty() {
5013 return Bt(this.inner);
5014 }
5015 size() {
5016 return this.innerSize;
5017 }
5018}
5019
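/**
 * Illustration only (never invoked): how the ObjectMap above (`ss`) is used.
 * Keys are bucketed by `mapKeyFn`; `equalsFn` separates genuine matches from
 * collisions inside a bucket.
 */ function objectMapUsageSketch() {
    const map = new ss((k => k.id), ((a, b) => a.id === b.id));
    map.set({ id: "users/alice" }, 1);
    map.set({ id: "users/alice" }, 2); // same logical key: replaces, size stays 1
    map.set({ id: "users/bob" }, 3);
    return [ map.get({ id: "users/alice" }), map.size() ]; // [ 2, 2 ]
}
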
5020/**
5021 * @license
5022 * Copyright 2017 Google LLC
5023 *
5024 * Licensed under the Apache License, Version 2.0 (the "License");
5025 * you may not use this file except in compliance with the License.
5026 * You may obtain a copy of the License at
5027 *
5028 * http://www.apache.org/licenses/LICENSE-2.0
5029 *
5030 * Unless required by applicable law or agreed to in writing, software
5031 * distributed under the License is distributed on an "AS IS" BASIS,
5032 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
5033 * See the License for the specific language governing permissions and
5034 * limitations under the License.
5035 */ const is = new Lt(at.comparator);
5036
5037function rs() {
5038 return is;
5039}
5040
5041const os = new Lt(at.comparator);
5042
5043function us(...t) {
5044 let e = os;
5045 for (const n of t) e = e.insert(n.key, n);
5046 return e;
5047}
5048
5049function cs(t) {
5050 let e = os;
5051 return t.forEach(((t, n) => e = e.insert(t, n.overlayedDocument))), e;
5052}
5053
5054function as() {
5055 return ls();
5056}
5057
5058function hs() {
5059 return ls();
5060}
5061
5062function ls() {
5063 return new ss((t => t.toString()), ((t, e) => t.isEqual(e)));
5064}
5065
5066const fs = new Lt(at.comparator);
5067
5068const ds = new Kt(at.comparator);
5069
5070function _s(...t) {
5071 let e = ds;
5072 for (const n of t) e = e.add(n);
5073 return e;
5074}
5075
5076const ws = new Kt(tt);
5077
5078function ms() {
5079 return ws;
5080}
5081
5082/**
5083 * @license
5084 * Copyright 2017 Google LLC
5085 *
5086 * Licensed under the Apache License, Version 2.0 (the "License");
5087 * you may not use this file except in compliance with the License.
5088 * You may obtain a copy of the License at
5089 *
5090 * http://www.apache.org/licenses/LICENSE-2.0
5091 *
5092 * Unless required by applicable law or agreed to in writing, software
5093 * distributed under the License is distributed on an "AS IS" BASIS,
5094 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
5095 * See the License for the specific language governing permissions and
5096 * limitations under the License.
5097 */
5098/**
5099 * An event from the RemoteStore. It is split into targetChanges (changes to the
5100 * state or the set of documents in our watched targets) and documentUpdates
5101 * (changes to the actual documents).
5102 */ class gs {
5103 constructor(
5104 /**
5105 * The snapshot version this event brings us up to, or MIN if not set.
5106 */
5107 t,
5108 /**
5109 * A map from target to changes to the target. See TargetChange.
5110 */
5111 e,
5112 /**
5113 * A set of targets that is known to be inconsistent. Listens for these
5114 * targets should be re-established without resume tokens.
5115 */
5116 n,
5117 /**
5118 * A set of which documents have changed or been deleted, along with the
5119 * doc's new values (if not deleted).
5120 */
5121 s,
5122 /**
5123 * A set of which document updates are due only to limbo resolution targets.
5124 */
5125 i) {
5126 this.snapshotVersion = t, this.targetChanges = e, this.targetMismatches = n, this.documentUpdates = s,
5127 this.resolvedLimboDocuments = i;
5128 }
5129 /**
5130 * HACK: Views require RemoteEvents in order to determine whether the view is
5131 * CURRENT, but secondary tabs don't receive remote events. So this method is
5132 * used to create a synthesized RemoteEvent that can be used to apply a
5133 * CURRENT status change to a View, for queries executed in a different tab.
5134 */
5135 // PORTING NOTE: Multi-tab only
5136 static createSynthesizedRemoteEventForCurrentChange(t, e, n) {
5137 const s = new Map;
5138 return s.set(t, ys.createSynthesizedTargetChangeForCurrentChange(t, e, n)), new gs(it.min(), s, ms(), rs(), _s());
5139 }
5140}
5141
5142/**
5143 * A TargetChange specifies the set of changes for a specific target as part of
5144 * a RemoteEvent. These changes track which documents are added, modified or
5145 * removed, as well as the target's resume token and whether the target is
5146 * marked CURRENT.
5147 * The actual changes *to* documents are not part of the TargetChange since
5148 * documents may be part of multiple targets.
5149 */ class ys {
5150 constructor(
5151 /**
5152 * An opaque, server-assigned token that allows watching a query to be resumed
5153 * after disconnecting without retransmitting all the data that matches the
5154 * query. The resume token essentially identifies a point in time from which
5155 * the server should resume sending results.
5156 */
5157 t,
5158 /**
5159 * The "current" (synced) status of this target. Note that "current"
5160 * has special meaning in the RPC protocol that implies that a target is
5161 * both up-to-date and consistent with the rest of the watch stream.
5162 */
5163 e,
5164 /**
5165 * The set of documents that were newly assigned to this target as part of
5166 * this remote event.
5167 */
5168 n,
5169 /**
5170 * The set of documents that were already assigned to this target but received
5171 * an update during this remote event.
5172 */
5173 s,
5174 /**
5175 * The set of documents that were removed from this target as part of this
5176 * remote event.
5177 */
5178 i) {
5179 this.resumeToken = t, this.current = e, this.addedDocuments = n, this.modifiedDocuments = s,
5180 this.removedDocuments = i;
5181 }
5182 /**
5183 * This method is used to create a synthesized TargetChange that can be used to
5184 * apply a CURRENT status change to a View (for queries executed in a different
5185 * tab) or for new queries (to raise snapshots with correct CURRENT status).
5186 */ static createSynthesizedTargetChangeForCurrentChange(t, e, n) {
5187 return new ys(n, e, _s(), _s(), _s());
5188 }
5189}
5190
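/**
 * Illustration only (never invoked): a synthesized TargetChange carries no
 * document changes; it only conveys the CURRENT bit and a resume token, which
 * is all a secondary tab needs (see the RemoteEvent factory above).
 * `Ht.EMPTY_BYTE_STRING` is the same empty resume token that WatchTargetChange
 * defaults to below.
 */ function synthesizedTargetChangeSketch(targetId, isCurrent) {
    const change = ys.createSynthesizedTargetChangeForCurrentChange(targetId, isCurrent, Ht.EMPTY_BYTE_STRING);
    // addedDocuments, modifiedDocuments and removedDocuments are all empty.
    return change.current === isCurrent && change.addedDocuments.size === 0;
}
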
5191/**
5192 * @license
5193 * Copyright 2017 Google LLC
5194 *
5195 * Licensed under the Apache License, Version 2.0 (the "License");
5196 * you may not use this file except in compliance with the License.
5197 * You may obtain a copy of the License at
5198 *
5199 * http://www.apache.org/licenses/LICENSE-2.0
5200 *
5201 * Unless required by applicable law or agreed to in writing, software
5202 * distributed under the License is distributed on an "AS IS" BASIS,
5203 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
5204 * See the License for the specific language governing permissions and
5205 * limitations under the License.
5206 */
5207/**
5208 * Represents a changed document and a list of target ids to which this change
5209 * applies.
5210 *
5211 * If the document has been deleted, a NoDocument will be provided.
5212 */ class ps {
5213 constructor(
5214 /** The new document applies to all of these targets. */
5215 t,
5216 /** The new document is removed from all of these targets. */
5217 e,
5218 /** The key of the document for this change. */
5219 n,
5220 /**
5221 * The new document or NoDocument if it was deleted. Is null if the
5222 * document went out of view without the server sending a new document.
5223 */
5224 s) {
5225 this.Tt = t, this.removedTargetIds = e, this.key = n, this.Et = s;
5226 }
5227}
5228
5229class Is {
5230 constructor(t, e) {
5231 this.targetId = t, this.At = e;
5232 }
5233}
5234
5235class Ts {
5236 constructor(
5237 /** What kind of change occurred to the watch target. */
5238 t,
5239 /** The target IDs that were added/removed/set. */
5240 e,
5241 /**
5242 * An opaque, server-assigned token that allows watching a target to be
5243 * resumed after disconnecting without retransmitting all the data that
5244 * matches the target. The resume token essentially identifies a point in
5245 * time from which the server should resume sending results.
5246 */
5247 n = Ht.EMPTY_BYTE_STRING
5248 /** An RPC error indicating why the watch failed. */ , s = null) {
5249 this.state = t, this.targetIds = e, this.resumeToken = n, this.cause = s;
5250 }
5251}
5252
5253/** Tracks the internal state of a Watch target. */ class Es {
5254 constructor() {
5255 /**
5256 * The number of pending responses (adds or removes) that we are waiting on.
5257 * We only consider targets active that have no pending responses.
5258 */
5259 this.Rt = 0,
5260 /**
5261 * Keeps track of the document changes since the last raised snapshot.
5262 *
5263 * These changes are continuously updated as we receive document updates and
5264 * always reflect the current set of changes against the last issued snapshot.
5265 */
5266 this.bt = bs(),
5267 /** See public getters for explanations of these fields. */
5268 this.Pt = Ht.EMPTY_BYTE_STRING, this.vt = !1,
5269 /**
5270 * Whether this target state should be included in the next snapshot. We
5271 * initialize to true so that newly-added targets are included in the next
5272 * RemoteEvent.
5273 */
5274 this.Vt = !0;
5275 }
5276 /**
5277 * Whether this target has been marked 'current'.
5278 *
5279 * 'Current' has special meaning in the RPC protocol: It implies that the
5280 * Watch backend has sent us all changes up to the point at which the target
5281 * was added and that the target is consistent with the rest of the watch
5282 * stream.
5283 */ get current() {
5284 return this.vt;
5285 }
5286 /** The last resume token sent to us for this target. */ get resumeToken() {
5287 return this.Pt;
5288 }
5289 /** Whether this target has pending target adds or target removes. */ get St() {
5290 return 0 !== this.Rt;
5291 }
5292 /** Whether we have modified any state that should trigger a snapshot. */ get Dt() {
5293 return this.Vt;
5294 }
5295 /**
5296 * Applies the resume token to the TargetChange, but only when it has a new
5297 * value. Empty resumeTokens are discarded.
5298 */ Ct(t) {
5299 t.approximateByteSize() > 0 && (this.Vt = !0, this.Pt = t);
5300 }
5301 /**
5302 * Creates a target change from the current set of changes.
5303 *
5304 * To reset the document changes after raising this snapshot, call
5305 * `clearPendingChanges()`.
5306 */ xt() {
5307 let t = _s(), e = _s(), n = _s();
5308 return this.bt.forEach(((s, i) => {
5309 switch (i) {
5310 case 0 /* Added */ :
5311 t = t.add(s);
5312 break;
5313
5314 case 2 /* Modified */ :
5315 e = e.add(s);
5316 break;
5317
5318 case 1 /* Removed */ :
5319 n = n.add(s);
5320 break;
5321
5322 default:
5323 M();
5324 }
5325 })), new ys(this.Pt, this.vt, t, e, n);
5326 }
5327 /**
5328 * Resets the document changes and sets `hasPendingChanges` to false.
5329 */ Nt() {
5330 this.Vt = !1, this.bt = bs();
5331 }
5332 kt(t, e) {
5333 this.Vt = !0, this.bt = this.bt.insert(t, e);
5334 }
5335 Ot(t) {
5336 this.Vt = !0, this.bt = this.bt.remove(t);
5337 }
5338 Mt() {
5339 this.Rt += 1;
5340 }
5341 Ft() {
5342 this.Rt -= 1;
5343 }
5344 $t() {
5345 this.Vt = !0, this.vt = !0;
5346 }
5347}
5348
5349/**
5350 * A helper class to accumulate watch changes into a RemoteEvent.
5351 */
5352class As {
5353 constructor(t) {
5354 this.Bt = t,
5355 /** The internal state of all tracked targets. */
5356 this.Lt = new Map,
5357 /** Keeps track of the documents to update since the last raised snapshot. */
5358 this.Ut = rs(),
5359 /** A mapping of document keys to their set of target IDs. */
5360 this.qt = Rs(),
5361 /**
5362 * A list of targets with existence filter mismatches. These targets are
5363 * known to be inconsistent and their listens need to be re-established by
5364 * RemoteStore.
5365 */
5366 this.Kt = new Kt(tt);
5367 }
5368 /**
5369 * Processes and adds the DocumentWatchChange to the current set of changes.
5370 */ Gt(t) {
5371 for (const e of t.Tt) t.Et && t.Et.isFoundDocument() ? this.Qt(e, t.Et) : this.jt(e, t.key, t.Et);
5372 for (const e of t.removedTargetIds) this.jt(e, t.key, t.Et);
5373 }
5374 /** Processes and adds the WatchTargetChange to the current set of changes. */ Wt(t) {
5375 this.forEachTarget(t, (e => {
5376 const n = this.zt(e);
5377 switch (t.state) {
5378 case 0 /* NoChange */ :
5379 this.Ht(e) && n.Ct(t.resumeToken);
5380 break;
5381
5382 case 1 /* Added */ :
5383 // We need to decrement the number of pending acks needed from watch
5384 // for this targetId.
5385 n.Ft(), n.St ||
5386 // We have a freshly added target, so we need to reset any state
5387 // that we had previously. This can happen e.g. when removing and adding
5388 // back a target due to existence filter mismatches.
5389 n.Nt(), n.Ct(t.resumeToken);
5390 break;
5391
5392 case 2 /* Removed */ :
5393 // We need to keep track of removed targets so we can post-filter and
5394 // remove any target changes.
5395 // We need to decrement the number of pending acks needed from watch
5396 // for this targetId.
5397 n.Ft(), n.St || this.removeTarget(e);
5398 break;
5399
5400 case 3 /* Current */ :
5401 this.Ht(e) && (n.$t(), n.Ct(t.resumeToken));
5402 break;
5403
5404 case 4 /* Reset */ :
5405 this.Ht(e) && (
5406 // Reset the target and synthesize removes for all existing
5407 // documents. The backend will re-add any documents that still
5408 // match the target before it sends the next global snapshot.
5409 this.Jt(e), n.Ct(t.resumeToken));
5410 break;
5411
5412 default:
5413 M();
5414 }
5415 }));
5416 }
5417 /**
5418 * Iterates over all targetIds that the watch change applies to: either the
5419 * targetIds explicitly listed in the change or the targetIds of all currently
5420 * active targets.
5421 */ forEachTarget(t, e) {
5422 t.targetIds.length > 0 ? t.targetIds.forEach(e) : this.Lt.forEach(((t, n) => {
5423 this.Ht(n) && e(n);
5424 }));
5425 }
5426 /**
5427 * Handles existence filters and synthesizes deletes for filter mismatches.
5428 * Targets that are invalidated by filter mismatches are added to
5429 * `pendingTargetResets`.
5430 */ Yt(t) {
5431 const e = t.targetId, n = t.At.count, s = this.Xt(e);
5432 if (s) {
5433 const t = s.target;
5434 if (Fe(t)) if (0 === n) {
5435 // The existence filter told us the document does not exist. We deduce
5436 // that this document does not exist and apply a deleted document to
5437 // our updates. Without applying this deleted document there might be
5438 // another query that will raise this document as part of a snapshot
5439 // until it is resolved, essentially exposing inconsistency between
5440 // queries.
5441 const n = new at(t.path);
5442 this.jt(e, n, Ce.newNoDocument(n, it.min()));
5443 } else F(1 === n); else {
5444 this.Zt(e) !== n && (
5445 // Existence filter mismatch: We reset the mapping and raise a new
5446 // snapshot with `isFromCache:true`.
5447 this.Jt(e), this.Kt = this.Kt.add(e));
5448 }
5449 }
5450 }
5451 /**
5452 * Converts the currently accumulated state into a remote event at the
5453 * provided snapshot version. Resets the accumulated changes before returning.
5454 */ te(t) {
5455 const e = new Map;
5456 this.Lt.forEach(((n, s) => {
5457 const i = this.Xt(s);
5458 if (i) {
5459 if (n.current && Fe(i.target)) {
5460 // Document queries for documents that don't exist can produce an empty
5461 // result set. To update our local cache, we synthesize a document
5462 // delete if we have not previously received the document. This
5463 // resolves the limbo state of the document, removing it from
5464 // limboDocumentRefs.
5465 // TODO(dimond): Ideally we would have an explicit lookup target
5466 // instead, resulting in an explicit delete message, and we could
5467 // remove this special logic.
5468 const e = new at(i.target.path);
5469 null !== this.Ut.get(e) || this.ee(s, e) || this.jt(s, e, Ce.newNoDocument(e, t));
5470 }
5471 n.Dt && (e.set(s, n.xt()), n.Nt());
5472 }
5473 }));
5474 let n = _s();
5475 // We extract the set of limbo-only document updates as the GC logic
5476 // special-cases documents that do not appear in the target cache.
5477
5478 // TODO(gsoltis): Expand on this comment once GC is available in the JS
5479 // client.
5480 this.qt.forEach(((t, e) => {
5481 let s = !0;
5482 e.forEachWhile((t => {
5483 const e = this.Xt(t);
5484 return !e || 2 /* LimboResolution */ === e.purpose || (s = !1, !1);
5485 })), s && (n = n.add(t));
5486 })), this.Ut.forEach(((e, n) => n.setReadTime(t)));
5487 const s = new gs(t, e, this.Kt, this.Ut, n);
5488 return this.Ut = rs(), this.qt = Rs(), this.Kt = new Kt(tt), s;
5489 }
5490 /**
5491 * Adds the provided document to the internal list of document updates and
5492 * its document key to the given target's mapping.
5493 */
5494 // Visible for testing.
5495 Qt(t, e) {
5496 if (!this.Ht(t)) return;
5497 const n = this.ee(t, e.key) ? 2 /* Modified */ : 0 /* Added */;
5498 this.zt(t).kt(e.key, n), this.Ut = this.Ut.insert(e.key, e), this.qt = this.qt.insert(e.key, this.ne(e.key).add(t));
5499 }
5500 /**
5501 * Removes the provided document from the target mapping. If the
5502 * document no longer matches the target, but the document's state is still
5503 * known (e.g. we know that the document was deleted or we received the change
5504 * that caused the filter mismatch), the new document can be provided
5505 * to update the remote document cache.
5506 */
5507 // Visible for testing.
5508 jt(t, e, n) {
5509 if (!this.Ht(t)) return;
5510 const s = this.zt(t);
5511 this.ee(t, e) ? s.kt(e, 1 /* Removed */) :
5512 // The document may have entered and left the target before we raised a
5513 // snapshot, so we can just ignore the change.
5514 s.Ot(e), this.qt = this.qt.insert(e, this.ne(e).delete(t)), n && (this.Ut = this.Ut.insert(e, n));
5515 }
5516 removeTarget(t) {
5517 this.Lt.delete(t);
5518 }
5519 /**
5520 * Returns the current count of documents in the target. This includes both
5521 * the number of documents that the LocalStore considers to be part of the
5522 * target as well as any accumulated changes.
5523 */ Zt(t) {
5524 const e = this.zt(t).xt();
5525 return this.Bt.getRemoteKeysForTarget(t).size + e.addedDocuments.size - e.removedDocuments.size;
5526 }
5527 /**
5528 * Increment the number of acks needed from watch before we can consider the
5529 * server to be 'in-sync' with the client's active targets.
5530 */ Mt(t) {
5531 this.zt(t).Mt();
5532 }
5533 zt(t) {
5534 let e = this.Lt.get(t);
5535 return e || (e = new Es, this.Lt.set(t, e)), e;
5536 }
5537 ne(t) {
5538 let e = this.qt.get(t);
5539 return e || (e = new Kt(tt), this.qt = this.qt.insert(t, e)), e;
5540 }
5541 /**
5542 * Verifies that the user is still interested in this target (by calling
5543 * `getTargetDataForTarget()`) and that we are not waiting for pending ADDs
5544 * from watch.
5545 */ Ht(t) {
5546 const e = null !== this.Xt(t);
5547 return e || x("WatchChangeAggregator", "Detected inactive target", t), e;
5548 }
5549 /**
5550 * Returns the TargetData for an active target (i.e. a target that the user
5551 * is still interested in that has no outstanding target change requests).
5552 */ Xt(t) {
5553 const e = this.Lt.get(t);
5554 return e && e.St ? null : this.Bt.se(t);
5555 }
5556 /**
5557 * Resets the state of a Watch target to its initial state (e.g. sets
5558 * 'current' to false, clears the resume token and removes its target mapping
5559 * from all documents).
5560 */ Jt(t) {
5561 this.Lt.set(t, new Es);
5562 this.Bt.getRemoteKeysForTarget(t).forEach((e => {
5563 this.jt(t, e, /*updatedDocument=*/ null);
5564 }));
5565 }
5566 /**
5567 * Returns whether the LocalStore considers the document to be part of the
5568 * specified target.
5569 */ ee(t, e) {
5570 return this.Bt.getRemoteKeysForTarget(t).has(e);
5571 }
5572}
5573
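/**
 * Illustration only (never invoked): the existence-filter handling in `Yt`
 * above reduces to a count comparison. `Zt` reports local count + snapshot
 * adds - snapshot removes; if that disagrees with the server's filter count,
 * the target is reset and re-listened without a resume token. Plain-number
 * sketch with a hypothetical name:
 */ function existenceFilterMismatchSketch(localDocCount, addedInSnapshot, removedInSnapshot, serverFilterCount) {
    const expectedCount = localDocCount + addedInSnapshot - removedInSnapshot; // mirrors Zt()
    return expectedCount !== serverFilterCount; // true => mismatch: reset and mark inconsistent
}
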
5574function Rs() {
5575 return new Lt(at.comparator);
5576}
5577
5578function bs() {
5579 return new Lt(at.comparator);
5580}
5581
5582/**
5583 * @license
5584 * Copyright 2017 Google LLC
5585 *
5586 * Licensed under the Apache License, Version 2.0 (the "License");
5587 * you may not use this file except in compliance with the License.
5588 * You may obtain a copy of the License at
5589 *
5590 * http://www.apache.org/licenses/LICENSE-2.0
5591 *
5592 * Unless required by applicable law or agreed to in writing, software
5593 * distributed under the License is distributed on an "AS IS" BASIS,
5594 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
5595 * See the License for the specific language governing permissions and
5596 * limitations under the License.
5597 */ const Ps = (() => {
5598 const t = {
5599 asc: "ASCENDING",
5600 desc: "DESCENDING"
5601 };
5602 return t;
5603})(), vs = (() => {
5604 const t = {
5605 "<": "LESS_THAN",
5606 "<=": "LESS_THAN_OR_EQUAL",
5607 ">": "GREATER_THAN",
5608 ">=": "GREATER_THAN_OR_EQUAL",
5609 "==": "EQUAL",
5610 "!=": "NOT_EQUAL",
5611 "array-contains": "ARRAY_CONTAINS",
5612 in: "IN",
5613 "not-in": "NOT_IN",
5614 "array-contains-any": "ARRAY_CONTAINS_ANY"
5615 };
5616 return t;
5617})();
5618
5619/**
5620 * This class generates JsonObject values for the Datastore API suitable for
5621 * sending to either GRPC stub methods or via the JSON/HTTP REST API.
5622 *
5623 * The serializer supports both Protobuf.js and Proto3 JSON formats. By
5624 * setting `useProto3Json` to true, the serializer will use the Proto3 JSON
5625 * format.
5626 *
5627 * For a description of the Proto3 JSON format check
5628 * https://developers.google.com/protocol-buffers/docs/proto3#json
5629 *
5630 * TODO(klimt): We can remove the databaseId argument if we keep the full
5631 * resource name in documents.
5632 */
5633class Vs {
5634 constructor(t, e) {
5635 this.databaseId = t, this.gt = e;
5636 }
5637}
5638
5639/**
5640 * Returns a value for a Date that's appropriate to put into a proto.
5641 */
5642function Ss(t, e) {
5643 if (t.gt) {
5644 return `${new Date(1e3 * e.seconds).toISOString().replace(/\.\d*/, "").replace("Z", "")}.${("000000000" + e.nanoseconds).slice(-9)}Z`;
5645 }
5646 return {
5647 seconds: "" + e.seconds,
5648 nanos: e.nanoseconds
5649 };
5650}
5651
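/*
 * For illustration, the two wire shapes produced by `Ss` above for the
 * timestamp { seconds: 1000000000, nanoseconds: 5 } (an arbitrary value):
 *   Proto3 JSON (gt === true):   "2001-09-09T01:46:40.000000005Z"
 *   Protobuf.js (gt === false):  { seconds: "1000000000", nanos: 5 }
 */
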
5652/**
5653 * Returns a value for bytes that's appropriate to put in a proto.
5654 *
5655 * Visible for testing.
5656 */
5657function Ds(t, e) {
5658 return t.gt ? e.toBase64() : e.toUint8Array();
5659}
5660
5661/**
5662 * Returns a ByteString based on the proto string value.
5663 */ function Cs(t, e) {
5664 return Ss(t, e.toTimestamp());
5665}
5666
5667function xs(t) {
5668 return F(!!t), it.fromTimestamp(function(t) {
5669 const e = Yt(t);
5670 return new st(e.seconds, e.nanos);
5671 }(t));
5672}
5673
5674function Ns(t, e) {
5675 return function(t) {
5676 return new ot([ "projects", t.projectId, "databases", t.database ]);
5677 }(t).child("documents").child(e).canonicalString();
5678}
5679
5680function ks(t) {
5681 const e = ot.fromString(t);
5682 return F(oi(e)), e;
5683}
5684
5685function Os(t, e) {
5686 return Ns(t.databaseId, e.path);
5687}
5688
5689function Ms(t, e) {
5690 const n = ks(e);
5691 if (n.get(1) !== t.databaseId.projectId) throw new U(L.INVALID_ARGUMENT, "Tried to deserialize key from different project: " + n.get(1) + " vs " + t.databaseId.projectId);
5692 if (n.get(3) !== t.databaseId.database) throw new U(L.INVALID_ARGUMENT, "Tried to deserialize key from different database: " + n.get(3) + " vs " + t.databaseId.database);
5693 return new at(Ls(n));
5694}
5695
5696function Fs(t, e) {
5697 return Ns(t.databaseId, e);
5698}
5699
5700function $s(t) {
5701 const e = ks(t);
5702 // In v1beta1 queries for collections at the root did not have a trailing
5703 // "/documents". In v1 all resource paths contain "/documents". Preserve the
5704 // ability to read the v1beta1 form for compatibility with queries persisted
5705 // in the local target cache.
5706 return 4 === e.length ? ot.emptyPath() : Ls(e);
5707}
5708
5709function Bs(t) {
5710 return new ot([ "projects", t.databaseId.projectId, "databases", t.databaseId.database ]).canonicalString();
5711}
5712
5713function Ls(t) {
5714 return F(t.length > 4 && "documents" === t.get(4)), t.popFirst(5);
5715}
5716
5717/** Creates a Document proto from key and fields (but no create/update time) */ function Us(t, e, n) {
5718 return {
5719 name: Os(t, e),
5720 fields: n.value.mapValue.fields
5721 };
5722}
5723
5724function qs(t, e, n) {
5725 const s = Ms(t, e.name), i = xs(e.updateTime), r = new Se({
5726 mapValue: {
5727 fields: e.fields
5728 }
5729 }), o = Ce.newFoundDocument(s, i, r);
5730 return n && o.setHasCommittedMutations(), n ? o.setHasCommittedMutations() : o;
5731}
5732
5733function Ks(t, e) {
5734 return "found" in e ? function(t, e) {
5735 F(!!e.found), e.found.name, e.found.updateTime;
5736 const n = Ms(t, e.found.name), s = xs(e.found.updateTime), i = new Se({
5737 mapValue: {
5738 fields: e.found.fields
5739 }
5740 });
5741 return Ce.newFoundDocument(n, s, i);
5742 }(t, e) : "missing" in e ? function(t, e) {
5743 F(!!e.missing), F(!!e.readTime);
5744 const n = Ms(t, e.missing), s = xs(e.readTime);
5745 return Ce.newNoDocument(n, s);
5746 }(t, e) : M();
5747}
5748
5749function Gs(t, e) {
5750 let n;
5751 if ("targetChange" in e) {
5752 e.targetChange;
5753 // proto3 default value is unset in JSON (undefined), so use 'NO_CHANGE'
5754 // if unset
5755 const s = function(t) {
5756 return "NO_CHANGE" === t ? 0 /* NoChange */ : "ADD" === t ? 1 /* Added */ : "REMOVE" === t ? 2 /* Removed */ : "CURRENT" === t ? 3 /* Current */ : "RESET" === t ? 4 /* Reset */ : M();
5757 }(e.targetChange.targetChangeType || "NO_CHANGE"), i = e.targetChange.targetIds || [], r = function(t, e) {
5758 return t.gt ? (F(void 0 === e || "string" == typeof e), Ht.fromBase64String(e || "")) : (F(void 0 === e || e instanceof Uint8Array),
5759 Ht.fromUint8Array(e || new Uint8Array));
5760 }(t, e.targetChange.resumeToken), o = e.targetChange.cause, u = o && function(t) {
5761 const e = void 0 === t.code ? L.UNKNOWN : ns(t.code);
5762 return new U(e, t.message || "");
5763 }
5764 /**
5765 * Returns a value for a number (or null) that's appropriate to put into
5766 * a google.protobuf.Int32Value proto.
5767 * DO NOT USE THIS FOR ANYTHING ELSE.
5768 * This method cheats. It's typed as returning "number" because that's what
5769 * our generated proto interfaces say Int32Value must be. But GRPC actually
5770 * expects a { value: <number> } struct.
5771 */ (o);
5772 n = new Ts(s, i, r, u || null);
5773 } else if ("documentChange" in e) {
5774 e.documentChange;
5775 const s = e.documentChange;
5776 s.document, s.document.name, s.document.updateTime;
5777 const i = Ms(t, s.document.name), r = xs(s.document.updateTime), o = new Se({
5778 mapValue: {
5779 fields: s.document.fields
5780 }
5781 }), u = Ce.newFoundDocument(i, r, o), c = s.targetIds || [], a = s.removedTargetIds || [];
5782 n = new ps(c, a, u.key, u);
5783 } else if ("documentDelete" in e) {
5784 e.documentDelete;
5785 const s = e.documentDelete;
5786 s.document;
5787 const i = Ms(t, s.document), r = s.readTime ? xs(s.readTime) : it.min(), o = Ce.newNoDocument(i, r), u = s.removedTargetIds || [];
5788 n = new ps([], u, o.key, o);
5789 } else if ("documentRemove" in e) {
5790 e.documentRemove;
5791 const s = e.documentRemove;
5792 s.document;
5793 const i = Ms(t, s.document), r = s.removedTargetIds || [];
5794 n = new ps([], r, i, null);
5795 } else {
5796 if (!("filter" in e)) return M();
5797 {
5798 e.filter;
5799 const t = e.filter;
5800 t.targetId;
5801 const s = t.count || 0, i = new Xn(s), r = t.targetId;
5802 n = new Is(r, i);
5803 }
5804 }
5805 return n;
5806}
5807
5808function Qs(t, e) {
5809 let n;
5810 if (e instanceof Qn) n = {
5811 update: Us(t, e.key, e.value)
5812 }; else if (e instanceof Jn) n = {
5813 delete: Os(t, e.key)
5814 }; else if (e instanceof jn) n = {
5815 update: Us(t, e.key, e.data),
5816 updateMask: ri(e.fieldMask)
5817 }; else {
5818 if (!(e instanceof Yn)) return M();
5819 n = {
5820 verify: Os(t, e.key)
5821 };
5822 }
5823 return e.fieldTransforms.length > 0 && (n.updateTransforms = e.fieldTransforms.map((t => function(t, e) {
5824 const n = e.transform;
5825 if (n instanceof Pn) return {
5826 fieldPath: e.field.canonicalString(),
5827 setToServerValue: "REQUEST_TIME"
5828 };
5829 if (n instanceof vn) return {
5830 fieldPath: e.field.canonicalString(),
5831 appendMissingElements: {
5832 values: n.elements
5833 }
5834 };
5835 if (n instanceof Sn) return {
5836 fieldPath: e.field.canonicalString(),
5837 removeAllFromArray: {
5838 values: n.elements
5839 }
5840 };
5841 if (n instanceof Cn) return {
5842 fieldPath: e.field.canonicalString(),
5843 increment: n.yt
5844 };
5845 throw M();
5846 }(0, t)))), e.precondition.isNone || (n.currentDocument = function(t, e) {
5847 return void 0 !== e.updateTime ? {
5848 updateTime: Cs(t, e.updateTime)
5849 } : void 0 !== e.exists ? {
5850 exists: e.exists
5851 } : M();
5852 }(t, e.precondition)), n;
5853}
5854
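/*
 * For illustration, the Write proto shapes produced by `Qs` above (resource
 * names abbreviated; a full name is
 * "projects/<project>/databases/<database>/documents/<path>"):
 *   SetMutation    -> { update: { name: ..., fields: ... } }
 *   PatchMutation  -> { update: { name: ..., fields: ... }, updateMask: { fieldPaths: [ ... ] } }
 *   DeleteMutation -> { delete: "<resource name>" }
 *   VerifyMutation -> { verify: "<resource name>" }
 * Field transforms, when present, are attached as `updateTransforms`, and a
 * non-empty precondition becomes `currentDocument`.
 */
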
5855function js(t, e) {
5856 const n = e.currentDocument ? function(t) {
5857 return void 0 !== t.updateTime ? Fn.updateTime(xs(t.updateTime)) : void 0 !== t.exists ? Fn.exists(t.exists) : Fn.none();
5858 }(e.currentDocument) : Fn.none(), s = e.updateTransforms ? e.updateTransforms.map((e => function(t, e) {
5859 let n = null;
5860 if ("setToServerValue" in e) F("REQUEST_TIME" === e.setToServerValue), n = new Pn; else if ("appendMissingElements" in e) {
5861 const t = e.appendMissingElements.values || [];
5862 n = new vn(t);
5863 } else if ("removeAllFromArray" in e) {
5864 const t = e.removeAllFromArray.values || [];
5865 n = new Sn(t);
5866 } else "increment" in e ? n = new Cn(t, e.increment) : M();
5867 const s = ct.fromServerFormat(e.fieldPath);
5868 return new kn(s, n);
5869 }(t, e))) : [];
5870 if (e.update) {
5871 e.update.name;
5872 const i = Ms(t, e.update.name), r = new Se({
5873 mapValue: {
5874 fields: e.update.fields
5875 }
5876 });
5877 if (e.updateMask) {
5878 const t = function(t) {
5879 const e = t.fieldPaths || [];
5880 return new jt(e.map((t => ct.fromServerFormat(t))));
5881 }(e.updateMask);
5882 return new jn(i, r, t, n, s);
5883 }
5884 return new Qn(i, r, n, s);
5885 }
5886 if (e.delete) {
5887 const s = Ms(t, e.delete);
5888 return new Jn(s, n);
5889 }
5890 if (e.verify) {
5891 const s = Ms(t, e.verify);
5892 return new Yn(s, n);
5893 }
5894 return M();
5895}
5896
5897function Ws(t, e) {
5898 return t && t.length > 0 ? (F(void 0 !== e), t.map((t => function(t, e) {
5899 // NOTE: Deletes don't have an updateTime.
5900 let n = t.updateTime ? xs(t.updateTime) : xs(e);
5901 return n.isEqual(it.min()) && (
5902 // The Firestore Emulator currently returns an update time of 0 for
5903 // deletes of non-existing documents (rather than null). This breaks the
5904 // test "get deleted doc while offline with source=cache" as NoDocuments
5905 // with version 0 are filtered by IndexedDb's RemoteDocumentCache.
5906 // TODO(#2149): Remove this when Emulator is fixed
5907 n = xs(e)), new Mn(n, t.transformResults || []);
5908 }(t, e)))) : [];
5909}
5910
5911function zs(t, e) {
5912 return {
5913 documents: [ Fs(t, e.path) ]
5914 };
5915}
5916
5917function Hs(t, e) {
5918 // Dissect the path into parent, collectionId, and optional key filter.
5919 const n = {
5920 structuredQuery: {}
5921 }, s = e.path;
5922 null !== e.collectionGroup ? (n.parent = Fs(t, s), n.structuredQuery.from = [ {
5923 collectionId: e.collectionGroup,
5924 allDescendants: !0
5925 } ]) : (n.parent = Fs(t, s.popLast()), n.structuredQuery.from = [ {
5926 collectionId: s.lastSegment()
5927 } ]);
5928 const i = function(t) {
5929 if (0 === t.length) return;
5930 const e = t.map((t =>
5931 // visible for testing
5932 function(t) {
5933 if ("==" /* EQUAL */ === t.op) {
5934 if (Te(t.value)) return {
5935 unaryFilter: {
5936 field: ei(t.field),
5937 op: "IS_NAN"
5938 }
5939 };
5940 if (Ie(t.value)) return {
5941 unaryFilter: {
5942 field: ei(t.field),
5943 op: "IS_NULL"
5944 }
5945 };
5946 } else if ("!=" /* NOT_EQUAL */ === t.op) {
5947 if (Te(t.value)) return {
5948 unaryFilter: {
5949 field: ei(t.field),
5950 op: "IS_NOT_NAN"
5951 }
5952 };
5953 if (Ie(t.value)) return {
5954 unaryFilter: {
5955 field: ei(t.field),
5956 op: "IS_NOT_NULL"
5957 }
5958 };
5959 }
5960 return {
5961 fieldFilter: {
5962 field: ei(t.field),
5963 op: ti(t.op),
5964 value: t.value
5965 }
5966 };
5967 }(t)));
5968 if (1 === e.length) return e[0];
5969 return {
5970 compositeFilter: {
5971 op: "AND",
5972 filters: e
5973 }
5974 };
5975 }(e.filters);
5976 i && (n.structuredQuery.where = i);
5977 const r = function(t) {
5978 if (0 === t.length) return;
5979 return t.map((t =>
5980 // visible for testing
5981 function(t) {
5982 return {
5983 field: ei(t.field),
5984 direction: Zs(t.dir)
5985 };
5986 }(t)));
5987 }(e.orderBy);
5988 r && (n.structuredQuery.orderBy = r);
5989 const o = function(t, e) {
5990 return t.gt || re(e) ? e : {
5991 value: e
5992 };
5993 }
5994 /**
5995 * Returns a number (or null) from a google.protobuf.Int32Value proto.
5996 */ (t, e.limit);
5997 var u;
5998 return null !== o && (n.structuredQuery.limit = o), e.startAt && (n.structuredQuery.startAt = {
5999 before: (u = e.startAt).inclusive,
6000 values: u.position
6001 }), e.endAt && (n.structuredQuery.endAt = function(t) {
6002 return {
6003 before: !t.inclusive,
6004 values: t.position
6005 };
6006 }(e.endAt)), n;
6007}
6008
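/*
 * For illustration, the shape `Hs` above produces for a target on the
 * collection "rooms" with the single filter name == "cool" and no explicit
 * order-by, limit or bounds (project and database abbreviated):
 *   {
 *     parent: "projects/<p>/databases/<d>/documents",
 *     structuredQuery: {
 *       from: [ { collectionId: "rooms" } ],
 *       where: { fieldFilter: { field: { fieldPath: "name" }, op: "EQUAL", value: { stringValue: "cool" } } }
 *     }
 *   }
 * A collection-group query instead serializes its path unchanged as `parent`
 * and sets `allDescendants: true` in the `from` clause.
 */
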
6009function Js(t) {
6010 let e = $s(t.parent);
6011 const n = t.structuredQuery, s = n.from ? n.from.length : 0;
6012 let i = null;
6013 if (s > 0) {
6014 F(1 === s);
6015 const t = n.from[0];
6016 t.allDescendants ? i = t.collectionId : e = e.child(t.collectionId);
6017 }
6018 let r = [];
6019 n.where && (r = Xs(n.where));
6020 let o = [];
6021 n.orderBy && (o = n.orderBy.map((t => function(t) {
6022 return new Ye(ni(t.field),
6023 // visible for testing
6024 function(t) {
6025 switch (t) {
6026 case "ASCENDING":
6027 return "asc" /* ASCENDING */;
6028
6029 case "DESCENDING":
6030 return "desc" /* DESCENDING */;
6031
6032 default:
6033 return;
6034 }
6035 }
6036 // visible for testing
6037 (t.direction));
6038 }(t))));
6039 let u = null;
6040 n.limit && (u = function(t) {
6041 let e;
6042 return e = "object" == typeof t ? t.value : t, re(e) ? null : e;
6043 }(n.limit));
6044 let c = null;
6045 n.startAt && (c = function(t) {
6046 const e = !!t.before, n = t.values || [];
6047 return new Je(n, e);
6048 }(n.startAt));
6049 let a = null;
6050 return n.endAt && (a = function(t) {
6051 const e = !t.before, n = t.values || [];
6052 return new Je(n, e);
6053 }
6054 // visible for testing
6055 (n.endAt)), nn(e, i, o, r, u, "F" /* First */ , c, a);
6056}
6057
6058function Ys(t, e) {
6059 const n = function(t, e) {
6060 switch (e) {
6061 case 0 /* Listen */ :
6062 return null;
6063
6064 case 1 /* ExistenceFilterMismatch */ :
6065 return "existence-filter-mismatch";
6066
6067 case 2 /* LimboResolution */ :
6068 return "limbo-document";
6069
6070 default:
6071 return M();
6072 }
6073 }(0, e.purpose);
6074 return null == n ? null : {
6075 "goog-listen-tags": n
6076 };
6077}
6078
6079function Xs(t) {
6080 return t ? void 0 !== t.unaryFilter ? [ ii(t) ] : void 0 !== t.fieldFilter ? [ si(t) ] : void 0 !== t.compositeFilter ? t.compositeFilter.filters.map((t => Xs(t))).reduce(((t, e) => t.concat(e))) : M() : [];
6081}
6082
6083function Zs(t) {
6084 return Ps[t];
6085}
6086
6087function ti(t) {
6088 return vs[t];
6089}
6090
6091function ei(t) {
6092 return {
6093 fieldPath: t.canonicalString()
6094 };
6095}
6096
6097function ni(t) {
6098 return ct.fromServerFormat(t.fieldPath);
6099}
6100
6101function si(t) {
6102 return Ue.create(ni(t.fieldFilter.field), function(t) {
6103 switch (t) {
6104 case "EQUAL":
6105 return "==" /* EQUAL */;
6106
6107 case "NOT_EQUAL":
6108 return "!=" /* NOT_EQUAL */;
6109
6110 case "GREATER_THAN":
6111 return ">" /* GREATER_THAN */;
6112
6113 case "GREATER_THAN_OR_EQUAL":
6114 return ">=" /* GREATER_THAN_OR_EQUAL */;
6115
6116 case "LESS_THAN":
6117 return "<" /* LESS_THAN */;
6118
6119 case "LESS_THAN_OR_EQUAL":
6120 return "<=" /* LESS_THAN_OR_EQUAL */;
6121
6122 case "ARRAY_CONTAINS":
6123 return "array-contains" /* ARRAY_CONTAINS */;
6124
6125 case "IN":
6126 return "in" /* IN */;
6127
6128 case "NOT_IN":
6129 return "not-in" /* NOT_IN */;
6130
6131 case "ARRAY_CONTAINS_ANY":
6132 return "array-contains-any" /* ARRAY_CONTAINS_ANY */;
6133
6134 default:
6135 return M();
6136 }
6137 }(t.fieldFilter.op), t.fieldFilter.value);
6138}
6139
6140function ii(t) {
6141 switch (t.unaryFilter.op) {
6142 case "IS_NAN":
6143 const e = ni(t.unaryFilter.field);
6144 return Ue.create(e, "==" /* EQUAL */ , {
6145 doubleValue: NaN
6146 });
6147
6148 case "IS_NULL":
6149 const n = ni(t.unaryFilter.field);
6150 return Ue.create(n, "==" /* EQUAL */ , {
6151 nullValue: "NULL_VALUE"
6152 });
6153
6154 case "IS_NOT_NAN":
6155 const s = ni(t.unaryFilter.field);
6156 return Ue.create(s, "!=" /* NOT_EQUAL */ , {
6157 doubleValue: NaN
6158 });
6159
6160 case "IS_NOT_NULL":
6161 const i = ni(t.unaryFilter.field);
6162 return Ue.create(i, "!=" /* NOT_EQUAL */ , {
6163 nullValue: "NULL_VALUE"
6164 });
6165
6166 default:
6167 return M();
6168 }
6169}
6170
6171function ri(t) {
6172 const e = [];
6173 return t.fields.forEach((t => e.push(t.canonicalString()))), {
6174 fieldPaths: e
6175 };
6176}
6177
6178function oi(t) {
6179 // Resource names have at least 4 components (project ID, database ID)
6180 return t.length >= 4 && "projects" === t.get(0) && "databases" === t.get(2);
6181}
6182
6183/**
6184 * @license
6185 * Copyright 2017 Google LLC
6186 *
6187 * Licensed under the Apache License, Version 2.0 (the "License");
6188 * you may not use this file except in compliance with the License.
6189 * You may obtain a copy of the License at
6190 *
6191 * http://www.apache.org/licenses/LICENSE-2.0
6192 *
6193 * Unless required by applicable law or agreed to in writing, software
6194 * distributed under the License is distributed on an "AS IS" BASIS,
6195 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6196 * See the License for the specific language governing permissions and
6197 * limitations under the License.
6198 */
6199/**
6200 * Encodes a resource path into an IndexedDb-compatible string form.
6201 */
6202function ui(t) {
6203 let e = "";
6204 for (let n = 0; n < t.length; n++) e.length > 0 && (e = ai(e)), e = ci(t.get(n), e);
6205 return ai(e);
6206}
6207
6208/** Encodes a single segment of a resource path into the given result */ function ci(t, e) {
6209 let n = e;
6210 const s = t.length;
6211 for (let e = 0; e < s; e++) {
6212 const s = t.charAt(e);
6213 switch (s) {
6214 case "\0":
6215 n += "\u0001\u0010";
6216 break;
6217
6218 case "\u0001":
6219 n += "\u0001\u0011";
6220 break;
6221
6222 default:
6223 n += s;
6224 }
6225 }
6226 return n;
6227}
6228
6229/** Encodes a path separator into the given result */ function ai(t) {
6230 return t + "\u0001\u0001";
6231}
6232
6233/**
6234 * Decodes the given IndexedDb-compatible string form of a resource path into
6235 * a ResourcePath instance. Note that this method is not suitable for use with
6236 * decoding resource names from the server; those are One Platform format
6237 * strings.
6238 */ function hi(t) {
6239 // Even the empty path must encode as a path of at least length 2. A path
6240 // with exactly 2 characters must be the empty path.
6241 const e = t.length;
6242 if (F(e >= 2), 2 === e) return F("\u0001" === t.charAt(0) && "\u0001" === t.charAt(1)), ot.emptyPath();
6243 // Escape characters cannot exist past the second-to-last position in the
6244 // source value.
6245 const n = e - 2, s = [];
6246 let i = "";
6247 for (let r = 0; r < e; ) {
6248 // The last two characters of a valid encoded path must be a separator, so
6249 // there must be an end to this segment.
6250 const e = t.indexOf("\u0001", r);
6251 (e < 0 || e > n) && M();
6252 switch (t.charAt(e + 1)) {
6253 case "\u0001":
6254 const n = t.substring(r, e);
6255 let o;
6256 0 === i.length ?
6257 // Avoid copying for the common case of a segment that excludes \0
6258 // and \001
6259 o = n : (i += n, o = i, i = ""), s.push(o);
6260 break;
6261
6262 case "\u0010":
6263 i += t.substring(r, e), i += "\0";
6264 break;
6265
6266 case "\u0011":
6267 // The escape character can be used in the output to encode itself.
6268 i += t.substring(r, e + 1);
6269 break;
6270
6271 default:
6272 M();
6273 }
6274 r = e + 2;
6275 }
6276 return new ot(s);
6277}
6278
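/**
 * Illustration only (never invoked), assuming the \u0001 / \u0010 / \u0011
 * escape scheme shown in ci/ai/hi above: every segment, including the last,
 * is terminated by the escaped separator, so [ "users", "alice" ] encodes as
 * "users\u0001\u0001alice\u0001\u0001" and decodes back to the same segments.
 */ function encodedResourcePathRoundTripSketch() {
    const path = new ot([ "users", "alice" ]);
    const encoded = ui(path); // "users\u0001\u0001alice\u0001\u0001"
    return hi(encoded).canonicalString() === path.canonicalString(); // true
}
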
6279/**
6280 * @license
6281 * Copyright 2022 Google LLC
6282 *
6283 * Licensed under the Apache License, Version 2.0 (the "License");
6284 * you may not use this file except in compliance with the License.
6285 * You may obtain a copy of the License at
6286 *
6287 * http://www.apache.org/licenses/LICENSE-2.0
6288 *
6289 * Unless required by applicable law or agreed to in writing, software
6290 * distributed under the License is distributed on an "AS IS" BASIS,
6291 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6292 * See the License for the specific language governing permissions and
6293 * limitations under the License.
6294 */ const li = [ "userId", "batchId" ];
6295
6296/**
6297 * @license
6298 * Copyright 2022 Google LLC
6299 *
6300 * Licensed under the Apache License, Version 2.0 (the "License");
6301 * you may not use this file except in compliance with the License.
6302 * You may obtain a copy of the License at
6303 *
6304 * http://www.apache.org/licenses/LICENSE-2.0
6305 *
6306 * Unless required by applicable law or agreed to in writing, software
6307 * distributed under the License is distributed on an "AS IS" BASIS,
6308 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6309 * See the License for the specific language governing permissions and
6310 * limitations under the License.
6311 */
6312/**
6313 * Name of the IndexedDb object store.
6314 *
6315 * Note that the name 'owner' is chosen to ensure backwards compatibility with
6316 * older clients that only supported single locked access to the persistence
6317 * layer.
6318 */
6319/**
6320 * Creates a [userId, encodedPath] key for use in the DbDocumentMutations
6321 * index to iterate over all document mutations for a given path or lower.
6322 */
6323function fi(t, e) {
6324 return [ t, ui(e) ];
6325}
6326
6327/**
6328 * Creates a full index key of [userId, encodedPath, batchId] for inserting
6329 * and deleting into the DbDocumentMutations index.
6330 */ function di(t, e, n) {
6331 return [ t, ui(e), n ];
6332}
6333
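/*
 * For illustration, with a user id of "user1", a path users/alice and an
 * arbitrary batch id of 42:
 *   fi("user1", path)     -> [ "user1", <encoded path> ]
 *   di("user1", path, 42) -> [ "user1", <encoded path>, 42 ]
 * Scanning this index by key range is what the comment above means by
 * iterating over "all document mutations for a given path or lower".
 */
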
6334/**
6335 * Because we store all the useful information for this store in the key,
6336 * there is no useful information to store as the value. The raw (unencoded)
6337 * path cannot be stored because IndexedDb doesn't store prototype
6338 * information.
6339 */ const _i = {}, wi = [ "prefixPath", "collectionGroup", "readTime", "documentId" ], mi = [ "prefixPath", "collectionGroup", "documentId" ], gi = [ "collectionGroup", "readTime", "prefixPath", "documentId" ], yi = [ "canonicalId", "targetId" ], pi = [ "targetId", "path" ], Ii = [ "path", "targetId" ], Ti = [ "collectionId", "parent" ], Ei = [ "indexId", "uid" ], Ai = [ "uid", "sequenceNumber" ], Ri = [ "indexId", "uid", "arrayValue", "directionalValue", "orderedDocumentKey", "documentKey" ], bi = [ "indexId", "uid", "orderedDocumentKey" ], Pi = [ "userId", "collectionPath", "documentId" ], vi = [ "userId", "collectionPath", "largestBatchId" ], Vi = [ "userId", "collectionGroup", "largestBatchId" ], Si = [ ...[ ...[ ...[ ...[ "mutationQueues", "mutations", "documentMutations", "remoteDocuments", "targets", "owner", "targetGlobal", "targetDocuments" ], "clientMetadata" ], "remoteDocumentGlobal" ], "collectionParents" ], "bundles", "namedQueries" ], Di = [ ...Si, "documentOverlays" ], Ci = [ "mutationQueues", "mutations", "documentMutations", "remoteDocumentsV14", "targets", "owner", "targetGlobal", "targetDocuments", "clientMetadata", "remoteDocumentGlobal", "collectionParents", "bundles", "namedQueries", "documentOverlays" ], xi = Ci, Ni = [ ...xi, "indexConfiguration", "indexState", "indexEntries" ];
6340
6341/**
6342 * @license
6343 * Copyright 2020 Google LLC
6344 *
6345 * Licensed under the Apache License, Version 2.0 (the "License");
6346 * you may not use this file except in compliance with the License.
6347 * You may obtain a copy of the License at
6348 *
6349 * http://www.apache.org/licenses/LICENSE-2.0
6350 *
6351 * Unless required by applicable law or agreed to in writing, software
6352 * distributed under the License is distributed on an "AS IS" BASIS,
6353 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6354 * See the License for the specific language governing permissions and
6355 * limitations under the License.
6356 */
6357class ki extends Et {
6358 constructor(t, e) {
6359 super(), this.ie = t, this.currentSequenceNumber = e;
6360 }
6361}
6362
6363function Oi(t, e) {
6364 const n = B(t);
6365 return Pt.M(n.ie, e);
6366}
6367
6368/**
6369 * @license
6370 * Copyright 2017 Google LLC
6371 *
6372 * Licensed under the Apache License, Version 2.0 (the "License");
6373 * you may not use this file except in compliance with the License.
6374 * You may obtain a copy of the License at
6375 *
6376 * http://www.apache.org/licenses/LICENSE-2.0
6377 *
6378 * Unless required by applicable law or agreed to in writing, software
6379 * distributed under the License is distributed on an "AS IS" BASIS,
6380 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6381 * See the License for the specific language governing permissions and
6382 * limitations under the License.
6383 */
6384/**
6385 * A batch of mutations that will be sent as one unit to the backend.
6386 */ class Mi {
6387 /**
6388 * @param batchId - The unique ID of this mutation batch.
6389 * @param localWriteTime - The original write time of this mutation.
6390 * @param baseMutations - Mutations that are used to populate the base
6391 * values when this mutation is applied locally. This can be used to locally
6392 * overwrite values that are persisted in the remote document cache. Base
6393 * mutations are never sent to the backend.
6394 * @param mutations - The user-provided mutations in this mutation batch.
6395 * User-provided mutations are applied both locally and remotely on the
6396 * backend.
6397 */
6398 constructor(t, e, n, s) {
6399 this.batchId = t, this.localWriteTime = e, this.baseMutations = n, this.mutations = s;
6400 }
6401 /**
6402 * Applies all the mutations in this MutationBatch to the specified document
6403 * to compute the state of the remote document.
6404 *
6405 * @param document - The document to apply mutations to.
6406 * @param batchResult - The result of applying the MutationBatch to the
6407 * backend.
6408 */ applyToRemoteDocument(t, e) {
6409 const n = e.mutationResults;
6410 for (let e = 0; e < this.mutations.length; e++) {
6411 const s = this.mutations[e];
6412 if (s.key.isEqual(t.key)) {
6413 Un(s, t, n[e]);
6414 }
6415 }
6416 }
6417 /**
6418 * Computes the local view of a document given all the mutations in this
6419 * batch.
6420 *
6421 * @param document - The document to apply mutations to.
6422 * @param mutatedFields - Fields that have been updated before applying this mutation batch.
6423 * @returns A `FieldMask` representing all the fields that are mutated.
6424 */ applyToLocalView(t, e) {
6425 // First, apply the base state. This allows us to apply non-idempotent
6426 // transforms against a consistent set of values.
6427 for (const n of this.baseMutations) n.key.isEqual(t.key) && (e = qn(n, t, e, this.localWriteTime));
6428 // Second, apply all user-provided mutations.
6429 for (const n of this.mutations) n.key.isEqual(t.key) && (e = qn(n, t, e, this.localWriteTime));
6430 return e;
6431 }
6432 /**
6433 * Computes the local view for all provided documents given the mutations in
6434 * this batch. Returns a `DocumentKey` to `Mutation` map which can be used to
6435 * replace all the mutation applications.
6436 */ applyToLocalDocumentSet(t, e) {
6437 // TODO(mrschmidt): This implementation is O(n^2). If we apply the mutations
6438 // directly (as done in `applyToLocalView()`), we can reduce the complexity
6439 // to O(n).
6440 const n = hs();
6441 return this.mutations.forEach((s => {
6442 const i = t.get(s.key), r = i.overlayedDocument;
6443 // TODO(mutabledocuments): This method should take a MutableDocumentMap
6444 // and we should remove this cast.
6445 let o = this.applyToLocalView(r, i.mutatedFields);
6446 // Set mutatedFields to null if the document is only from local mutations.
6447 // This creates a Set or Delete mutation, instead of trying to create a
6448 // patch mutation as the overlay.
6449 o = e.has(s.key) ? null : o;
6450 const u = Ln(r, o);
6451 null !== u && n.set(s.key, u), r.isValidDocument() || r.convertToNoDocument(it.min());
6452 })), n;
6453 }
6454 keys() {
6455 return this.mutations.reduce(((t, e) => t.add(e.key)), _s());
6456 }
6457 isEqual(t) {
6458 return this.batchId === t.batchId && et(this.mutations, t.mutations, ((t, e) => Gn(t, e))) && et(this.baseMutations, t.baseMutations, ((t, e) => Gn(t, e)));
6459 }
6460}
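
// Minimal sketch of the applyToLocalView contract above (documents and
// mutations are assumed to already exist; only the ordering is the point):
//
//   let mutatedFields = null;
//   mutatedFields = batch.applyToLocalView(localDocument, mutatedFields);
//   // 1. baseMutations run first, pinning a consistent base value, so that
//   // 2. the user's non-idempotent transforms (e.g. increment) in
//   //    `mutations` are always applied against that same base.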
6461
6462/** The result of applying a mutation batch to the backend. */ class Fi {
6463 constructor(t, e, n,
6464 /**
6465 * A pre-computed mapping from each mutated document to the resulting
6466 * version.
6467 */
6468 s) {
6469 this.batch = t, this.commitVersion = e, this.mutationResults = n, this.docVersions = s;
6470 }
6471 /**
6472 * Creates a new MutationBatchResult for the given batch and results. There
6473 * must be one result for each mutation in the batch. This static factory
6474 * caches a document=>version mapping (docVersions).
6475 */ static from(t, e, n) {
6476 F(t.mutations.length === n.length);
6477 let s = fs;
6478 const i = t.mutations;
6479 for (let t = 0; t < i.length; t++) s = s.insert(i[t].key, n[t].version);
6480 return new Fi(t, e, n, s);
6481 }
6482}
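
// Sketch of the docVersions mapping built by Fi.from above (assumes, as the
// assertion requires, exactly one MutationResult per mutation):
//
//   const result = Fi.from(batch, commitVersion, mutationResults);
//   // result.docVersions.get(batch.mutations[i].key) equals
//   // mutationResults[i].version for every index i.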
6483
6484/**
6485 * @license
6486 * Copyright 2022 Google LLC
6487 *
6488 * Licensed under the Apache License, Version 2.0 (the "License");
6489 * you may not use this file except in compliance with the License.
6490 * You may obtain a copy of the License at
6491 *
6492 * http://www.apache.org/licenses/LICENSE-2.0
6493 *
6494 * Unless required by applicable law or agreed to in writing, software
6495 * distributed under the License is distributed on an "AS IS" BASIS,
6496 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6497 * See the License for the specific language governing permissions and
6498 * limitations under the License.
6499 */
6500/**
6501 * Representation of an overlay computed by Firestore.
6502 *
6503 * Holds information about a mutation and the largest batch id in Firestore when
6504 * the mutation was created.
6505 */ class $i {
6506 constructor(t, e) {
6507 this.largestBatchId = t, this.mutation = e;
6508 }
6509 getKey() {
6510 return this.mutation.key;
6511 }
6512 isEqual(t) {
6513 return null !== t && this.mutation === t.mutation;
6514 }
6515 toString() {
6516 return `Overlay{\n largestBatchId: ${this.largestBatchId},\n mutation: ${this.mutation.toString()}\n }`;
6517 }
6518}
6519
6520/**
6521 * @license
6522 * Copyright 2017 Google LLC
6523 *
6524 * Licensed under the Apache License, Version 2.0 (the "License");
6525 * you may not use this file except in compliance with the License.
6526 * You may obtain a copy of the License at
6527 *
6528 * http://www.apache.org/licenses/LICENSE-2.0
6529 *
6530 * Unless required by applicable law or agreed to in writing, software
6531 * distributed under the License is distributed on an "AS IS" BASIS,
6532 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6533 * See the License for the specific language governing permissions and
6534 * limitations under the License.
6535 */
6536/**
6537 * An immutable set of metadata that the local store tracks for each target.
6538 */ class Bi {
6539 constructor(
6540 /** The target being listened to. */
6541 t,
6542 /**
6543 * The target ID to which the target corresponds; Assigned by the
6544 * LocalStore for user listens and by the SyncEngine for limbo watches.
6545 */
6546 e,
6547 /** The purpose of the target. */
6548 n,
6549 /**
6550 * The sequence number of the last transaction during which this target data
6551 * was modified.
6552 */
6553 s,
6554 /** The latest snapshot version seen for this target. */
6555 i = it.min()
6556 /**
6557 * The maximum snapshot version at which the associated view
6558 * contained no limbo documents.
6559 */ , r = it.min()
6560 /**
6561 * An opaque, server-assigned token that allows watching a target to be
6562 * resumed after disconnecting without retransmitting all the data that
6563 * matches the target. The resume token essentially identifies a point in
6564 * time from which the server should resume sending results.
6565 */ , o = Ht.EMPTY_BYTE_STRING) {
6566 this.target = t, this.targetId = e, this.purpose = n, this.sequenceNumber = s, this.snapshotVersion = i,
6567 this.lastLimboFreeSnapshotVersion = r, this.resumeToken = o;
6568 }
6569 /** Creates a new target data instance with an updated sequence number. */ withSequenceNumber(t) {
6570 return new Bi(this.target, this.targetId, this.purpose, t, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken);
6571 }
6572 /**
6573 * Creates a new target data instance with an updated resume token and
6574 * snapshot version.
6575 */ withResumeToken(t, e) {
6576 return new Bi(this.target, this.targetId, this.purpose, this.sequenceNumber, e, this.lastLimboFreeSnapshotVersion, t);
6577 }
6578 /**
6579 * Creates a new target data instance with an updated last limbo free
6580 * snapshot version number.
6581 */ withLastLimboFreeSnapshotVersion(t) {
6582 return new Bi(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, t, this.resumeToken);
6583 }
6584}
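
// The with* helpers above never mutate in place; each returns a copy with a
// single field replaced. Hypothetical usage sketch:
//
//   let targetData = new Bi(target, targetId, purpose, sequenceNumber);
//   targetData = targetData.withResumeToken(resumeToken, snapshotVersion);
//   targetData = targetData.withSequenceNumber(sequenceNumber + 1);
//   // the original instance is left untouched at every step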
6585
6586/**
6587 * @license
6588 * Copyright 2017 Google LLC
6589 *
6590 * Licensed under the Apache License, Version 2.0 (the "License");
6591 * you may not use this file except in compliance with the License.
6592 * You may obtain a copy of the License at
6593 *
6594 * http://www.apache.org/licenses/LICENSE-2.0
6595 *
6596 * Unless required by applicable law or agreed to in writing, software
6597 * distributed under the License is distributed on an "AS IS" BASIS,
6598 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6599 * See the License for the specific language governing permissions and
6600 * limitations under the License.
6601 */
6602/** Serializer for values stored in the LocalStore. */ class Li {
6603 constructor(t) {
6604 this.re = t;
6605 }
6606}
6607
6608/** Decodes a remote document from local storage into a Document. */ function Ui(t, e) {
6609 let n;
6610 if (e.document) n = qs(t.re, e.document, !!e.hasCommittedMutations); else if (e.noDocument) {
6611 const t = at.fromSegments(e.noDocument.path), s = Qi(e.noDocument.readTime);
6612 n = Ce.newNoDocument(t, s), e.hasCommittedMutations && n.setHasCommittedMutations();
6613 } else {
6614 if (!e.unknownDocument) return M();
6615 {
6616 const t = at.fromSegments(e.unknownDocument.path), s = Qi(e.unknownDocument.version);
6617 n = Ce.newUnknownDocument(t, s);
6618 }
6619 }
6620 return e.readTime && n.setReadTime(function(t) {
6621 const e = new st(t[0], t[1]);
6622 return it.fromTimestamp(e);
6623 }(e.readTime)), n;
6624}
6625
6626/** Encodes a document for storage locally. */ function qi(t, e) {
6627 const n = e.key, s = {
6628 prefixPath: n.getCollectionPath().popLast().toArray(),
6629 collectionGroup: n.collectionGroup,
6630 documentId: n.path.lastSegment(),
6631 readTime: Ki(e.readTime),
6632 hasCommittedMutations: e.hasCommittedMutations
6633 };
6634 if (e.isFoundDocument()) s.document = function(t, e) {
6635 return {
6636 name: Os(t, e.key),
6637 fields: e.data.value.mapValue.fields,
6638 updateTime: Ss(t, e.version.toTimestamp())
6639 };
6640 }(t.re, e); else if (e.isNoDocument()) s.noDocument = {
6641 path: n.path.toArray(),
6642 readTime: Gi(e.version)
6643 }; else {
6644 if (!e.isUnknownDocument()) return M();
6645 s.unknownDocument = {
6646 path: n.path.toArray(),
6647 version: Gi(e.version)
6648 };
6649 }
6650 return s;
6651}
6652
6653function Ki(t) {
6654 const e = t.toTimestamp();
6655 return [ e.seconds, e.nanoseconds ];
6656}
6657
6658function Gi(t) {
6659 const e = t.toTimestamp();
6660 return {
6661 seconds: e.seconds,
6662 nanoseconds: e.nanoseconds
6663 };
6664}
6665
6666function Qi(t) {
6667 const e = new st(t.seconds, t.nanoseconds);
6668 return it.fromTimestamp(e);
6669}
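
// The three helpers above use two deliberately different encodings of a
// snapshot version: Ki produces the [seconds, nanoseconds] array form used
// inside IndexedDB keys, while Gi/Qi round-trip the object form stored in
// values. Illustrative sketch (timestamp values are made up):
//
//   Ki(version)   // => [ 1664982000, 123000000 ]
//   Gi(version)   // => { seconds: 1664982000, nanoseconds: 123000000 }
//   Qi(Gi(version)).isEqual(version)   // => true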
6670
6671/** Encodes a batch of mutations into a DbMutationBatch for local storage. */
6672/** Decodes a DbMutationBatch into a MutationBatch */
6673function ji(t, e) {
6674 const n = (e.baseMutations || []).map((e => js(t.re, e)));
6675 // Squash old transform mutations into existing patch or set mutations.
6676 // The replacement of representing `transforms` with `update_transforms`
6677 // on the SDK means that old `transform` mutations stored in IndexedDB need
6678 // to be updated to `update_transforms`.
6679 // TODO(b/174608374): Remove this code once we perform a schema migration.
6680 for (let t = 0; t < e.mutations.length - 1; ++t) {
6681 const n = e.mutations[t];
6682 if (t + 1 < e.mutations.length && void 0 !== e.mutations[t + 1].transform) {
6683 const s = e.mutations[t + 1];
6684 n.updateTransforms = s.transform.fieldTransforms, e.mutations.splice(t + 1, 1),
6685 ++t;
6686 }
6687 }
6688 const s = e.mutations.map((e => js(t.re, e))), i = st.fromMillis(e.localWriteTimeMs);
6689 return new Mi(e.batchId, i, n, s);
6690}
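
// Sketch of the squash performed above: a legacy [patch, transform] pair
// stored in a DbMutationBatch collapses into one mutation whose
// updateTransforms carries the transform's fieldTransforms (shapes are
// simplified):
//
//   before: mutations = [ { update: {...} }, { transform: { fieldTransforms: [...] } } ]
//   after:  mutations = [ { update: {...}, updateTransforms: [...] } ]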
6691
6692/** Decodes a DbTarget into TargetData */ function Wi(t) {
6693 const e = Qi(t.readTime), n = void 0 !== t.lastLimboFreeSnapshotVersion ? Qi(t.lastLimboFreeSnapshotVersion) : it.min();
6694 let s;
6695 var i;
6696 return void 0 !== t.query.documents ? (F(1 === (i = t.query).documents.length),
6697 s = hn(sn($s(i.documents[0])))) : s = function(t) {
6698 return hn(Js(t));
6699 }(t.query), new Bi(s, t.targetId, 0 /* Listen */ , t.lastListenSequenceNumber, e, n, Ht.fromBase64String(t.resumeToken));
6700}
6701
6702/** Encodes TargetData into a DbTarget for storage locally. */ function zi(t, e) {
6703 const n = Gi(e.snapshotVersion), s = Gi(e.lastLimboFreeSnapshotVersion);
6704 let i;
6705 i = Fe(e.target) ? zs(t.re, e.target) : Hs(t.re, e.target);
6706 // We can't store the resumeToken as a ByteString in IndexedDb, so we
6707 // convert it to a base64 string for storage.
6708 const r = e.resumeToken.toBase64();
6709 // lastListenSequenceNumber is always 0 until we do real GC.
6710 return {
6711 targetId: e.targetId,
6712 canonicalId: ke(e.target),
6713 readTime: n,
6714 resumeToken: r,
6715 lastListenSequenceNumber: e.sequenceNumber,
6716 lastLimboFreeSnapshotVersion: s,
6717 query: i
6718 };
6719}
6720
6721/**
6722 * A helper function for figuring out what kind of query has been stored.
6723 */
6724/**
6725 * Encodes a `BundledQuery` from bundle proto to a Query object.
6726 *
6727 * This reconstructs the original query used to build the bundle being loaded,
6728 * including features that exist only in SDKs (for example: limit-to-last).
6729 */
6730function Hi(t) {
6731 const e = Js({
6732 parent: t.parent,
6733 structuredQuery: t.structuredQuery
6734 });
6735 return "LAST" === t.limitType ? ln(e, e.limit, "L" /* Last */) : e;
6736}
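
// Sketch: a bundled query saved with limitType "LAST" is rebuilt as a
// limit-to-last query, which exists only client-side; any other bundled
// query is returned as decoded.
//
//   Hi({ parent, structuredQuery, limitType: "LAST" })   // limit-to-last
//   Hi({ parent, structuredQuery })                      // plain query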
6737
6738/** Encodes a NamedQuery proto object to a NamedQuery model object. */
6739/** Encodes a DbDocumentOverlay object to an Overlay model object. */
6740function Ji(t, e) {
6741 return new $i(e.largestBatchId, js(t.re, e.overlayMutation));
6742}
6743
6744/** Decodes an Overlay model object into a DbDocumentOverlay object. */
6745/**
6746 * Returns the DbDocumentOverlayKey corresponding to the given user and
6747 * document key.
6748 */
6749function Yi(t, e) {
6750 const n = e.path.lastSegment();
6751 return [ t, ui(e.path.popLast()), n ];
6752}
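
// Example of the key shape above for user "user1" and the document key
// rooms/eros (path segments are illustrative):
//
//   Yi("user1", documentKey)   // => [ "user1", ui(path("rooms")), "eros" ]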
6753
6754function Xi(t, e, n, s) {
6755 return {
6756 indexId: t,
6757 uid: e.uid || "",
6758 sequenceNumber: n,
6759 readTime: Gi(s.readTime),
6760 documentKey: ui(s.documentKey.path),
6761 largestBatchId: s.largestBatchId
6762 };
6763}
6764
6765/**
6766 * @license
6767 * Copyright 2020 Google LLC
6768 *
6769 * Licensed under the Apache License, Version 2.0 (the "License");
6770 * you may not use this file except in compliance with the License.
6771 * You may obtain a copy of the License at
6772 *
6773 * http://www.apache.org/licenses/LICENSE-2.0
6774 *
6775 * Unless required by applicable law or agreed to in writing, software
6776 * distributed under the License is distributed on an "AS IS" BASIS,
6777 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6778 * See the License for the specific language governing permissions and
6779 * limitations under the License.
6780 */ class Zi {
6781 getBundleMetadata(t, e) {
6782 return tr(t).get(e).next((t => {
6783 if (t) return {
6784 id: (e = t).bundleId,
6785 createTime: Qi(e.createTime),
6786 version: e.version
6787 };
6788 /** Encodes a DbBundle to a BundleMetadata object. */
6789 var e;
6790 /** Encodes a BundleMetadata to a DbBundle. */ }));
6791 }
6792 saveBundleMetadata(t, e) {
6793 return tr(t).put({
6794 bundleId: (n = e).id,
6795 createTime: Gi(xs(n.createTime)),
6796 version: n.version
6797 });
6798 var n;
6799 /** Encodes a DbNamedQuery to a NamedQuery. */ }
6800 getNamedQuery(t, e) {
6801 return er(t).get(e).next((t => {
6802 if (t) return {
6803 name: (e = t).name,
6804 query: Hi(e.bundledQuery),
6805 readTime: Qi(e.readTime)
6806 };
6807 var e;
6808 /** Encodes a NamedQuery from a bundle proto to a DbNamedQuery. */ }));
6809 }
6810 saveNamedQuery(t, e) {
6811 return er(t).put(function(t) {
6812 return {
6813 name: t.name,
6814 readTime: Gi(xs(t.readTime)),
6815 bundledQuery: t.bundledQuery
6816 };
6817 }(e));
6818 }
6819}
6820
6821/**
6822 * Helper to get a typed SimpleDbStore for the bundles object store.
6823 */ function tr(t) {
6824 return Oi(t, "bundles");
6825}
6826
6827/**
6828 * Helper to get a typed SimpleDbStore for the namedQueries object store.
6829 */ function er(t) {
6830 return Oi(t, "namedQueries");
6831}
6832
6833/**
6834 * @license
6835 * Copyright 2022 Google LLC
6836 *
6837 * Licensed under the Apache License, Version 2.0 (the "License");
6838 * you may not use this file except in compliance with the License.
6839 * You may obtain a copy of the License at
6840 *
6841 * http://www.apache.org/licenses/LICENSE-2.0
6842 *
6843 * Unless required by applicable law or agreed to in writing, software
6844 * distributed under the License is distributed on an "AS IS" BASIS,
6845 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6846 * See the License for the specific language governing permissions and
6847 * limitations under the License.
6848 */
6849/**
6850 * Implementation of DocumentOverlayCache using IndexedDb.
6851 */ class nr {
6852 /**
6853 * @param serializer - The document serializer.
6854 * @param userId - The userId for which we are accessing overlays.
6855 */
6856 constructor(t, e) {
6857 this.It = t, this.userId = e;
6858 }
6859 static oe(t, e) {
6860 const n = e.uid || "";
6861 return new nr(t, n);
6862 }
6863 getOverlay(t, e) {
6864 return sr(t).get(Yi(this.userId, e)).next((t => t ? Ji(this.It, t) : null));
6865 }
6866 getOverlays(t, e) {
6867 const n = as();
6868 return Rt.forEach(e, (e => this.getOverlay(t, e).next((t => {
6869 null !== t && n.set(e, t);
6870 })))).next((() => n));
6871 }
6872 saveOverlays(t, e, n) {
6873 const s = [];
6874 return n.forEach(((n, i) => {
6875 const r = new $i(e, i);
6876 s.push(this.ue(t, r));
6877 })), Rt.waitFor(s);
6878 }
6879 removeOverlaysForBatchId(t, e, n) {
6880 const s = new Set;
6881 // Get the set of unique collection paths.
6882 e.forEach((t => s.add(ui(t.getCollectionPath()))));
6883 const i = [];
6884 return s.forEach((e => {
6885 const s = IDBKeyRange.bound([ this.userId, e, n ], [ this.userId, e, n + 1 ],
6886 /*lowerOpen=*/ !1,
6887 /*upperOpen=*/ !0);
6888 i.push(sr(t).Y("collectionPathOverlayIndex", s));
6889 })), Rt.waitFor(i);
6890 }
6891 getOverlaysForCollection(t, e, n) {
6892 const s = as(), i = ui(e), r = IDBKeyRange.bound([ this.userId, i, n ], [ this.userId, i, Number.POSITIVE_INFINITY ],
6893 /*lowerOpen=*/ !0);
6894 return sr(t).W("collectionPathOverlayIndex", r).next((t => {
6895 for (const e of t) {
6896 const t = Ji(this.It, e);
6897 s.set(t.getKey(), t);
6898 }
6899 return s;
6900 }));
6901 }
6902 getOverlaysForCollectionGroup(t, e, n, s) {
6903 const i = as();
6904 let r;
6905 // We want batch IDs larger than `sinceBatchId`, and so the lower bound
6906 // is not inclusive.
6907 const o = IDBKeyRange.bound([ this.userId, e, n ], [ this.userId, e, Number.POSITIVE_INFINITY ],
6908 /*lowerOpen=*/ !0);
6909 return sr(t).Z({
6910 index: "collectionGroupOverlayIndex",
6911 range: o
6912 }, ((t, e, n) => {
6913 // We do not want to return partial batch overlays, even if the size
6914 // of the result set exceeds the given `count` argument. Therefore, we
6915 // continue to aggregate results even after the result size exceeds
6916 // `count` if there are more overlays from the `currentBatchId`.
6917 const o = Ji(this.It, e);
6918 i.size() < s || o.largestBatchId === r ? (i.set(o.getKey(), o), r = o.largestBatchId) : n.done();
6919 })).next((() => i));
6920 }
6921 ue(t, e) {
6922 return sr(t).put(function(t, e, n) {
6923 const [s, i, r] = Yi(e, n.mutation.key);
6924 return {
6925 userId: e,
6926 collectionPath: i,
6927 documentId: r,
6928 collectionGroup: n.mutation.key.getCollectionGroup(),
6929 largestBatchId: n.largestBatchId,
6930 overlayMutation: Qs(t.re, n.mutation)
6931 };
6932 }(this.It, this.userId, e));
6933 }
6934}
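
// Sketch of the half-open range used by removeOverlaysForBatchId above: for
// each distinct collection path, batch id n is isolated on the
// [userId, collectionPath, largestBatchId] index via
//
//   IDBKeyRange.bound([ uid, encodedPath, n ], [ uid, encodedPath, n + 1 ],
//                     /* lowerOpen= */ false, /* upperOpen= */ true)
//
// so only overlays written by exactly that batch are deleted.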
6935
6936/**
6937 * Helper to get a typed SimpleDbStore for the document overlay object store.
6938 */ function sr(t) {
6939 return Oi(t, "documentOverlays");
6940}
6941
6942/**
6943 * @license
6944 * Copyright 2021 Google LLC
6945 *
6946 * Licensed under the Apache License, Version 2.0 (the "License");
6947 * you may not use this file except in compliance with the License.
6948 * You may obtain a copy of the License at
6949 *
6950 * http://www.apache.org/licenses/LICENSE-2.0
6951 *
6952 * Unless required by applicable law or agreed to in writing, software
6953 * distributed under the License is distributed on an "AS IS" BASIS,
6954 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
6955 * See the License for the specific language governing permissions and
6956 * limitations under the License.
6957 */
6958// Note: This code is copied from the backend. Code that is not used by
6959// Firestore was removed.
6960/** Firestore index value writer. */
6961class ir {
6962 constructor() {}
6963 // The write methods below short-circuit writing terminators for values
6964 // containing a (terminating) truncated value.
6965 // As an example, consider the resulting encoding for:
6966 // ["bar", [2, "foo"]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TERM, TERM, TERM)
6967 // ["bar", [2, truncated("foo")]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TRUNC)
6968 // ["bar", truncated(["foo"])] -> (STRING, "bar", TERM, ARRAY. STRING, "foo", TERM, TRUNC)
6969 /** Writes an index value. */
6970 ce(t, e) {
6971 this.ae(t, e),
6972 // Write separator to split index values
6973 // (see go/firestore-storage-format#encodings).
6974 e.he();
6975 }
6976 ae(t, e) {
6977 if ("nullValue" in t) this.le(e, 5); else if ("booleanValue" in t) this.le(e, 10),
6978 e.fe(t.booleanValue ? 1 : 0); else if ("integerValue" in t) this.le(e, 15), e.fe(Xt(t.integerValue)); else if ("doubleValue" in t) {
6979 const n = Xt(t.doubleValue);
6980 isNaN(n) ? this.le(e, 13) : (this.le(e, 15), oe(n) ?
6981 // -0.0, 0 and 0.0 are all considered the same
6982 e.fe(0) : e.fe(n));
6983 } else if ("timestampValue" in t) {
6984 const n = t.timestampValue;
6985 this.le(e, 20), "string" == typeof n ? e.de(n) : (e.de(`${n.seconds || ""}`), e.fe(n.nanos || 0));
6986 } else if ("stringValue" in t) this._e(t.stringValue, e), this.we(e); else if ("bytesValue" in t) this.le(e, 30),
6987 e.me(Zt(t.bytesValue)), this.we(e); else if ("referenceValue" in t) this.ge(t.referenceValue, e); else if ("geoPointValue" in t) {
6988 const n = t.geoPointValue;
6989 this.le(e, 45), e.fe(n.latitude || 0), e.fe(n.longitude || 0);
6990 } else "mapValue" in t ? Re(t) ? this.le(e, Number.MAX_SAFE_INTEGER) : (this.ye(t.mapValue, e),
6991 this.we(e)) : "arrayValue" in t ? (this.pe(t.arrayValue, e), this.we(e)) : M();
6992 }
6993 _e(t, e) {
6994 this.le(e, 25), this.Ie(t, e);
6995 }
6996 Ie(t, e) {
6997 e.de(t);
6998 }
6999 ye(t, e) {
7000 const n = t.fields || {};
7001 this.le(e, 55);
7002 for (const t of Object.keys(n)) this._e(t, e), this.ae(n[t], e);
7003 }
7004 pe(t, e) {
7005 const n = t.values || [];
7006 this.le(e, 50);
7007 for (const t of n) this.ae(t, e);
7008 }
7009 ge(t, e) {
7010 this.le(e, 37);
7011 at.fromName(t).path.forEach((t => {
7012 this.le(e, 60), this.Ie(t, e);
7013 }));
7014 }
7015 le(t, e) {
7016 t.fe(e);
7017 }
7018 we(t) {
7019 // While the SDK does not implement truncation, the truncation marker is
7020 // used to terminate all variable length values (which are strings, bytes,
7021 // references, arrays and maps).
7022 t.fe(2);
7023 }
7024}
7025
7026ir.Te = new ir;
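
// Rough sketch of the encoding produced by ir.ce above: every value is
// written as a numeric type tag, its payload, a truncation/terminator marker
// for variable-length types, and finally the index-value separator. Usage
// goes through a directional encoder (hr, defined further below):
//
//   const encoder = new hr();
//   ir.Te.ce({ stringValue: "foo" }, encoder.qe(0 /* ASCENDING */));
//   const bytes = encoder.$e();   // tag 25, utf-8 "foo", terminator, separator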
7027
7028/**
7029 * Counts the number of zeros in a byte.
7030 *
7031 * Visible for testing.
7032 */
7033function rr(t) {
7034 if (0 === t) return 8;
7035 let e = 0;
7036 return t >> 4 == 0 && (
7037 // Test if the first four bits are zero.
7038 e += 4, t <<= 4), t >> 6 == 0 && (
7039 // Test if the first two (or next two) bits are zero.
7040 e += 2, t <<= 2), t >> 7 == 0 && (
7041 // Test if the remaining bit is zero.
7042 e += 1), e;
7043}
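
// Worked examples for the leading-zero count above:
//   rr(0x00) === 8, rr(0x01) === 7, rr(0x0f) === 4, rr(0x80) === 0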
7044
7045/** Counts the number of leading zeros in the given byte array. */
7046/**
7047 * Returns the number of bytes required to store "value". Leading zero bytes
7048 * are skipped.
7049 */
7050function or(t) {
7051 // This is just the number of bytes for the unsigned representation of the number.
7052 const e = 64 - function(t) {
7053 let e = 0;
7054 for (let n = 0; n < 8; ++n) {
7055 const s = rr(255 & t[n]);
7056 if (e += s, 8 !== s) break;
7057 }
7058 return e;
7059 }(t);
7060 return Math.ceil(e / 8);
7061}
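
// Worked examples, assuming the 8-byte big-endian representation produced by
// Ce below: a value of 0 needs 0 bytes, 255 needs 1 byte, 256 needs 2 bytes.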
7062
7063/**
7064 * OrderedCodeWriter is a minimal-allocation implementation of the writing
7065 * behavior defined by the backend.
7066 *
7067 * The code is ported from its Java counterpart.
7068 */ class ur {
7069 constructor() {
7070 this.buffer = new Uint8Array(1024), this.position = 0;
7071 }
7072 Ee(t) {
7073 const e = t[Symbol.iterator]();
7074 let n = e.next();
7075 for (;!n.done; ) this.Ae(n.value), n = e.next();
7076 this.Re();
7077 }
7078 be(t) {
7079 const e = t[Symbol.iterator]();
7080 let n = e.next();
7081 for (;!n.done; ) this.Pe(n.value), n = e.next();
7082 this.ve();
7083 }
7084 /** Writes utf8 bytes into this byte sequence, ascending. */ Ve(t) {
7085 for (const e of t) {
7086 const t = e.charCodeAt(0);
7087 if (t < 128) this.Ae(t); else if (t < 2048) this.Ae(960 | t >>> 6), this.Ae(128 | 63 & t); else if (e < "\ud800" || "\udbff" < e) this.Ae(480 | t >>> 12),
7088 this.Ae(128 | 63 & t >>> 6), this.Ae(128 | 63 & t); else {
7089 const t = e.codePointAt(0);
7090 this.Ae(240 | t >>> 18), this.Ae(128 | 63 & t >>> 12), this.Ae(128 | 63 & t >>> 6),
7091 this.Ae(128 | 63 & t);
7092 }
7093 }
7094 this.Re();
7095 }
7096 /** Writes utf8 bytes into this byte sequence, descending */ Se(t) {
7097 for (const e of t) {
7098 const t = e.charCodeAt(0);
7099 if (t < 128) this.Pe(t); else if (t < 2048) this.Pe(960 | t >>> 6), this.Pe(128 | 63 & t); else if (e < "\ud800" || "\udbff" < e) this.Pe(480 | t >>> 12),
7100 this.Pe(128 | 63 & t >>> 6), this.Pe(128 | 63 & t); else {
7101 const t = e.codePointAt(0);
7102 this.Pe(240 | t >>> 18), this.Pe(128 | 63 & t >>> 12), this.Pe(128 | 63 & t >>> 6),
7103 this.Pe(128 | 63 & t);
7104 }
7105 }
7106 this.ve();
7107 }
7108 De(t) {
7109 // Values are encoded with a single byte length prefix, followed by the
7110 // actual value in big-endian format with leading 0 bytes dropped.
7111 const e = this.Ce(t), n = or(e);
7112 this.xe(1 + n), this.buffer[this.position++] = 255 & n;
7113 // Write the length
7114 for (let t = e.length - n; t < e.length; ++t) this.buffer[this.position++] = 255 & e[t];
7115 }
7116 Ne(t) {
7117 // Values are encoded with a single byte length prefix, followed by the
7118 // inverted value in big-endian format with leading 0 bytes dropped.
7119 const e = this.Ce(t), n = or(e);
7120 this.xe(1 + n), this.buffer[this.position++] = ~(255 & n);
7121 // Write the length
7122 for (let t = e.length - n; t < e.length; ++t) this.buffer[this.position++] = ~(255 & e[t]);
7123 }
7124 /**
7125 * Writes the "infinity" byte sequence that sorts after all other byte
7126 * sequences written in ascending order.
7127 */ ke() {
7128 this.Oe(255), this.Oe(255);
7129 }
7130 /**
7131 * Writes the "infinity" byte sequence that sorts before all other byte
7132 * sequences written in descending order.
7133 */ Me() {
7134 this.Fe(255), this.Fe(255);
7135 }
7136 /**
7137 * Resets the buffer such that it is the same as when it was newly
7138 * constructed.
7139 */ reset() {
7140 this.position = 0;
7141 }
7142 seed(t) {
7143 this.xe(t.length), this.buffer.set(t, this.position), this.position += t.length;
7144 }
7145 /** Makes a copy of the encoded bytes in this buffer. */ $e() {
7146 return this.buffer.slice(0, this.position);
7147 }
7148 /**
7149 * Encodes `val` into an encoding so that the order matches the IEEE 754
7150 * floating-point comparison results with the following exceptions:
7151 * -0.0 < 0.0
7152 * all non-NaN < NaN
7153 * NaN = NaN
7154 */ Ce(t) {
7155 const e =
7156 /** Converts a JavaScript number to a byte array (using big endian encoding). */
7157 function(t) {
7158 const e = new DataView(new ArrayBuffer(8));
7159 return e.setFloat64(0, t, /* littleEndian= */ !1), new Uint8Array(e.buffer);
7160 }(t), n = 0 != (128 & e[0]);
7161 // Check if the first bit is set. We use a bit mask since value[0] is
7162 // encoded as a number from 0 to 255.
7163 // Revert the two complement to get natural ordering
7164 e[0] ^= n ? 255 : 128;
7165 for (let t = 1; t < e.length; ++t) e[t] ^= n ? 255 : 0;
7166 return e;
7167 }
7168 /** Writes a single byte ascending to the buffer. */ Ae(t) {
7169 const e = 255 & t;
7170 0 === e ? (this.Oe(0), this.Oe(255)) : 255 === e ? (this.Oe(255), this.Oe(0)) : this.Oe(e);
7171 }
7172 /** Writes a single byte descending to the buffer. */ Pe(t) {
7173 const e = 255 & t;
7174 0 === e ? (this.Fe(0), this.Fe(255)) : 255 === e ? (this.Fe(255), this.Fe(0)) : this.Fe(t);
7175 }
7176 Re() {
7177 this.Oe(0), this.Oe(1);
7178 }
7179 ve() {
7180 this.Fe(0), this.Fe(1);
7181 }
7182 Oe(t) {
7183 this.xe(1), this.buffer[this.position++] = t;
7184 }
7185 Fe(t) {
7186 this.xe(1), this.buffer[this.position++] = ~t;
7187 }
7188 xe(t) {
7189 const e = t + this.position;
7190 if (e <= this.buffer.length) return;
7191 // Try doubling.
7192 let n = 2 * this.buffer.length;
7193 // Still not big enough? Just allocate the right size.
7194 n < e && (n = e);
7195 // Create the new buffer.
7196 const s = new Uint8Array(n);
7197 s.set(this.buffer), // copy old data
7198 this.buffer = s;
7199 }
7200}
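
// Sketch of the ascending escaping used above: a literal 0x00 byte is written
// as (0x00, 0xff), a literal 0xff as (0xff, 0x00), every value ends with the
// separator (0x00, 0x01), and ke() appends the "infinity" marker (0xff, 0xff).
// For example:
//
//   const w = new ur();
//   w.Ve("a");          // 0x61 followed by the separator 0x00 0x01
//   w.Ae(0);            // escaped as 0x00 0xff
//   const bytes = w.$e();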
7201
7202class cr {
7203 constructor(t) {
7204 this.Be = t;
7205 }
7206 me(t) {
7207 this.Be.Ee(t);
7208 }
7209 de(t) {
7210 this.Be.Ve(t);
7211 }
7212 fe(t) {
7213 this.Be.De(t);
7214 }
7215 he() {
7216 this.Be.ke();
7217 }
7218}
7219
7220class ar {
7221 constructor(t) {
7222 this.Be = t;
7223 }
7224 me(t) {
7225 this.Be.be(t);
7226 }
7227 de(t) {
7228 this.Be.Se(t);
7229 }
7230 fe(t) {
7231 this.Be.Ne(t);
7232 }
7233 he() {
7234 this.Be.Me();
7235 }
7236}
7237
7238/**
7239 * Implements `DirectionalIndexByteEncoder` using `OrderedCodeWriter` for the
7240 * actual encoding.
7241 */ class hr {
7242 constructor() {
7243 this.Be = new ur, this.Le = new cr(this.Be), this.Ue = new ar(this.Be);
7244 }
7245 seed(t) {
7246 this.Be.seed(t);
7247 }
7248 qe(t) {
7249 return 0 /* ASCENDING */ === t ? this.Le : this.Ue;
7250 }
7251 $e() {
7252 return this.Be.$e();
7253 }
7254 reset() {
7255 this.Be.reset();
7256 }
7257}
7258
7259/**
7260 * @license
7261 * Copyright 2022 Google LLC
7262 *
7263 * Licensed under the Apache License, Version 2.0 (the "License");
7264 * you may not use this file except in compliance with the License.
7265 * You may obtain a copy of the License at
7266 *
7267 * http://www.apache.org/licenses/LICENSE-2.0
7268 *
7269 * Unless required by applicable law or agreed to in writing, software
7270 * distributed under the License is distributed on an "AS IS" BASIS,
7271 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7272 * See the License for the specific language governing permissions and
7273 * limitations under the License.
7274 */
7275/** Represents an index entry saved by the SDK in persisted storage. */ class lr {
7276 constructor(t, e, n, s) {
7277 this.indexId = t, this.documentKey = e, this.arrayValue = n, this.directionalValue = s;
7278 }
7279 /**
7280 * Returns an IndexEntry entry that sorts immediately after the current
7281 * directional value.
7282 */ Ke() {
7283 const t = this.directionalValue.length, e = 0 === t || 255 === this.directionalValue[t - 1] ? t + 1 : t, n = new Uint8Array(e);
7284 return n.set(this.directionalValue, 0), e !== t ? n.set([ 0 ], this.directionalValue.length) : ++n[n.length - 1],
7285 new lr(this.indexId, this.documentKey, this.arrayValue, n);
7286 }
7287}
7288
7289function fr(t, e) {
7290 let n = t.indexId - e.indexId;
7291 return 0 !== n ? n : (n = dr(t.arrayValue, e.arrayValue), 0 !== n ? n : (n = dr(t.directionalValue, e.directionalValue),
7292 0 !== n ? n : at.comparator(t.documentKey, e.documentKey)));
7293}
7294
7295function dr(t, e) {
7296 for (let n = 0; n < t.length && n < e.length; ++n) {
7297 const s = t[n] - e[n];
7298 if (0 !== s) return s;
7299 }
7300 return t.length - e.length;
7301}
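
// Worked example of the successor computed by lr.Ke() above: a directional
// value ending in a byte other than 0xff has that byte incremented
// ([0x61] -> [0x62]); an empty value or one ending in 0xff gets a 0x00
// appended ([0xff] -> [0xff, 0x00]). Either way the result sorts strictly
// after the original entry under fr/dr, which the index range construction
// further below relies on.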
7302
7303/**
7304 * @license
7305 * Copyright 2022 Google LLC
7306 *
7307 * Licensed under the Apache License, Version 2.0 (the "License");
7308 * you may not use this file except in compliance with the License.
7309 * You may obtain a copy of the License at
7310 *
7311 * http://www.apache.org/licenses/LICENSE-2.0
7312 *
7313 * Unless required by applicable law or agreed to in writing, software
7314 * distributed under the License is distributed on an "AS IS" BASIS,
7315 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7316 * See the License for the specific language governing permissions and
7317 * limitations under the License.
7318 */
7319/**
7320 * A light query planner for Firestore.
7321 *
7322 * This class matches a `FieldIndex` against a Firestore Query `Target`. It
7323 * determines whether a given index can be used to serve the specified target.
7324 *
7325 * The following table showcases some possible index configurations:
7326 *
7327 * Query | Index
7328 * -----------------------------------------------------------------------------
7329 * where('a', '==', 'a').where('b', '==', 'b') | a ASC, b DESC
7330 * where('a', '==', 'a').where('b', '==', 'b') | a ASC
7331 * where('a', '==', 'a').where('b', '==', 'b') | b DESC
7332 * where('a', '>=', 'a').orderBy('a') | a ASC
7333 * where('a', '>=', 'a').orderBy('a', 'desc') | a DESC
7334 * where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC, b ASC
7335 * where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC
7336 * where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS, b ASCENDING
7337 * where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS
7338 */ class _r {
7339 constructor(t) {
7340 this.collectionId = null != t.collectionGroup ? t.collectionGroup : t.path.lastSegment(),
7341 this.Ge = t.orderBy, this.Qe = [];
7342 for (const e of t.filters) {
7343 const t = e;
7344 t.dt() ? this.je = t : this.Qe.push(t);
7345 }
7346 }
7347 /**
7348 * Returns whether the index can be used to serve the TargetIndexMatcher's
7349 * target.
7350 *
7351 * An index is considered capable of serving the target when:
7352 * - The target uses all index segments for its filters and orderBy clauses.
7353 * The target can have additional filter and orderBy clauses, but not
7354 * fewer.
7355 * - If an ArrayContains/ArrayContainsAny filter is used, the index must also
7356 * have a corresponding `CONTAINS` segment.
7357 * - All directional index segments can be mapped to the target as a series of
7358 * equality filters, a single inequality filter and a series of orderBy
7359 * clauses.
7360 * - The segments that represent the equality filters may appear out of order.
7361 * - The optional segment for the inequality filter must appear after all
7362 * equality segments.
7363 * - The segments that represent the orderBy clause of the target must appear
7364 * in order after all equality and inequality segments. Single orderBy
7365 * clauses cannot be skipped, but a continuous orderBy suffix may be
7366 * omitted.
7367 */ We(t) {
7368 // If there is an array element, find a matching filter.
7369 const e = lt(t);
7370 if (void 0 !== e && !this.ze(e)) return !1;
7371 const n = ft(t);
7372 let s = 0, i = 0;
7373 // Process all equalities first. Equalities can appear out of order.
7374 for (;s < n.length && this.ze(n[s]); ++s) ;
7375 // If we already have processed all segments, all segments are used to serve
7376 // the equality filters and we do not need to map any segments to the
7377 // target's inequality and orderBy clauses.
7378 if (s === n.length) return !0;
7379 // If there is an inequality filter, the next segment must match both the
7380 // filter and the first orderBy clause.
7381 if (void 0 !== this.je) {
7382 const t = n[s];
7383 if (!this.He(this.je, t) || !this.Je(this.Ge[i++], t)) return !1;
7384 ++s;
7385 }
7386 // All remaining segments need to represent the prefix of the target's
7387 // orderBy.
7388 for (;s < n.length; ++s) {
7389 const t = n[s];
7390 if (i >= this.Ge.length || !this.Je(this.Ge[i++], t)) return !1;
7391 }
7392 return !0;
7393 }
7394 ze(t) {
7395 for (const e of this.Qe) if (this.He(e, t)) return !0;
7396 return !1;
7397 }
7398 He(t, e) {
7399 if (void 0 === t || !t.field.isEqual(e.fieldPath)) return !1;
7400 const n = "array-contains" /* ARRAY_CONTAINS */ === t.op || "array-contains-any" /* ARRAY_CONTAINS_ANY */ === t.op;
7401 return 2 /* CONTAINS */ === e.kind === n;
7402 }
7403 Je(t, e) {
7404 return !!t.field.isEqual(e.fieldPath) && (0 /* ASCENDING */ === e.kind && "asc" /* ASCENDING */ === t.dir || 1 /* DESCENDING */ === e.kind && "desc" /* DESCENDING */ === t.dir);
7405 }
7406}
7407
7408/**
7409 * @license
7410 * Copyright 2019 Google LLC
7411 *
7412 * Licensed under the Apache License, Version 2.0 (the "License");
7413 * you may not use this file except in compliance with the License.
7414 * You may obtain a copy of the License at
7415 *
7416 * http://www.apache.org/licenses/LICENSE-2.0
7417 *
7418 * Unless required by applicable law or agreed to in writing, software
7419 * distributed under the License is distributed on an "AS IS" BASIS,
7420 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7421 * See the License for the specific language governing permissions and
7422 * limitations under the License.
7423 */
7424/**
7425 * An in-memory implementation of IndexManager.
7426 */ class wr {
7427 constructor() {
7428 this.Ye = new mr;
7429 }
7430 addToCollectionParentIndex(t, e) {
7431 return this.Ye.add(e), Rt.resolve();
7432 }
7433 getCollectionParents(t, e) {
7434 return Rt.resolve(this.Ye.getEntries(e));
7435 }
7436 addFieldIndex(t, e) {
7437 // Field indices are not supported with memory persistence.
7438 return Rt.resolve();
7439 }
7440 deleteFieldIndex(t, e) {
7441 // Field indices are not supported with memory persistence.
7442 return Rt.resolve();
7443 }
7444 getDocumentsMatchingTarget(t, e) {
7445 // Field indices are not supported with memory persistence.
7446 return Rt.resolve(null);
7447 }
7448 getIndexType(t, e) {
7449 // Field indices are not supported with memory persistence.
7450 return Rt.resolve(0 /* NONE */);
7451 }
7452 getFieldIndexes(t, e) {
7453 // Field indices are not supported with memory persistence.
7454 return Rt.resolve([]);
7455 }
7456 getNextCollectionGroupToUpdate(t) {
7457 // Field indices are not supported with memory persistence.
7458 return Rt.resolve(null);
7459 }
7460 getMinOffset(t, e) {
7461 return Rt.resolve(pt.min());
7462 }
7463 getMinOffsetFromCollectionGroup(t, e) {
7464 return Rt.resolve(pt.min());
7465 }
7466 updateCollectionGroup(t, e, n) {
7467 // Field indices are not supported with memory persistence.
7468 return Rt.resolve();
7469 }
7470 updateIndexEntries(t, e) {
7471 // Field indices are not supported with memory persistence.
7472 return Rt.resolve();
7473 }
7474}
7475
7476/**
7477 * Internal implementation of the collection-parent index exposed by MemoryIndexManager.
7478 * Also used for in-memory caching by IndexedDbIndexManager and initial index population
7479 * in indexeddb_schema.ts
7480 */ class mr {
7481 constructor() {
7482 this.index = {};
7483 }
7484 // Returns false if the entry already existed.
7485 add(t) {
7486 const e = t.lastSegment(), n = t.popLast(), s = this.index[e] || new Kt(ot.comparator), i = !s.has(n);
7487 return this.index[e] = s.add(n), i;
7488 }
7489 has(t) {
7490 const e = t.lastSegment(), n = t.popLast(), s = this.index[e];
7491 return s && s.has(n);
7492 }
7493 getEntries(t) {
7494 return (this.index[t] || new Kt(ot.comparator)).toArray();
7495 }
7496}
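
// Sketch of the in-memory collection-parent index above (paths shown as
// slash-joined strings for readability; `path` is a hypothetical helper):
//
//   index.add(path("rooms/eros/messages"));
//   index.getEntries("messages");             // => [ path("rooms/eros") ]
//   index.has(path("rooms/eros/messages"));   // => true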
7497
7498/**
7499 * @license
7500 * Copyright 2019 Google LLC
7501 *
7502 * Licensed under the Apache License, Version 2.0 (the "License");
7503 * you may not use this file except in compliance with the License.
7504 * You may obtain a copy of the License at
7505 *
7506 * http://www.apache.org/licenses/LICENSE-2.0
7507 *
7508 * Unless required by applicable law or agreed to in writing, software
7509 * distributed under the License is distributed on an "AS IS" BASIS,
7510 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
7511 * See the License for the specific language governing permissions and
7512 * limitations under the License.
7513 */ const gr = new Uint8Array(0);
7514
7515/**
7516 * A persisted implementation of IndexManager.
7517 *
7518 * PORTING NOTE: Unlike iOS and Android, the Web SDK does not memoize index
7519 * data as it supports multi-tab access.
7520 */
7521class yr {
7522 constructor(t, e) {
7523 this.user = t, this.databaseId = e,
7524 /**
7525 * An in-memory copy of the index entries we've already written since the SDK
7526 * launched. Used to avoid re-writing the same entry repeatedly.
7527 *
7528 * This is *NOT* a complete cache of what's in persistence and so can never be
7529 * used to satisfy reads.
7530 */
7531 this.Xe = new mr,
7532 /**
7533 * Maps from a target to its equivalent list of sub-targets. Each sub-target
7534 * contains only one term from the target's disjunctive normal form (DNF).
7535 */
7536 this.Ze = new ss((t => ke(t)), ((t, e) => Me(t, e))), this.uid = t.uid || "";
7537 }
7538 /**
7539 * Adds a new entry to the collection parent index.
7540 *
7541 * Repeated calls for the same collectionPath should be avoided within a
7542 * transaction as IndexedDbIndexManager only caches writes once a transaction
7543 * has been committed.
7544 */ addToCollectionParentIndex(t, e) {
7545 if (!this.Xe.has(e)) {
7546 const n = e.lastSegment(), s = e.popLast();
7547 t.addOnCommittedListener((() => {
7548 // Add the collection to the in memory cache only if the transaction was
7549 // successfully committed.
7550 this.Xe.add(e);
7551 }));
7552 const i = {
7553 collectionId: n,
7554 parent: ui(s)
7555 };
7556 return pr(t).put(i);
7557 }
7558 return Rt.resolve();
7559 }
7560 getCollectionParents(t, e) {
7561 const n = [], s = IDBKeyRange.bound([ e, "" ], [ nt(e), "" ],
7562 /*lowerOpen=*/ !1,
7563 /*upperOpen=*/ !0);
7564 return pr(t).W(s).next((t => {
7565 for (const s of t) {
7566 // This collectionId guard shouldn't be necessary (and isn't as long
7567 // as we're running in a real browser), but there's a bug in
7568 // indexeddbshim that breaks our range in our tests running in node:
7569 // https://github.com/axemclion/IndexedDBShim/issues/334
7570 if (s.collectionId !== e) break;
7571 n.push(hi(s.parent));
7572 }
7573 return n;
7574 }));
7575 }
7576 addFieldIndex(t, e) {
7577 // TODO(indexing): Verify that the auto-incrementing index ID works in
7578 // Safari & Firefox.
7579 const n = Tr(t), s = function(t) {
7580 return {
7581 indexId: t.indexId,
7582 collectionGroup: t.collectionGroup,
7583 fields: t.fields.map((t => [ t.fieldPath.canonicalString(), t.kind ]))
7584 };
7585 }(e);
7586 delete s.indexId;
7587 // `indexId` is auto-populated by IndexedDb
7588 const i = n.add(s);
7589 if (e.indexState) {
7590 const n = Er(t);
7591 return i.next((t => {
7592 n.put(Xi(t, this.user, e.indexState.sequenceNumber, e.indexState.offset));
7593 }));
7594 }
7595 return i.next();
7596 }
7597 deleteFieldIndex(t, e) {
7598 const n = Tr(t), s = Er(t), i = Ir(t);
7599 return n.delete(e.indexId).next((() => s.delete(IDBKeyRange.bound([ e.indexId ], [ e.indexId + 1 ],
7600 /*lowerOpen=*/ !1,
7601 /*upperOpen=*/ !0)))).next((() => i.delete(IDBKeyRange.bound([ e.indexId ], [ e.indexId + 1 ],
7602 /*lowerOpen=*/ !1,
7603 /*upperOpen=*/ !0))));
7604 }
7605 getDocumentsMatchingTarget(t, e) {
7606 const n = Ir(t);
7607 let s = !0;
7608 const i = new Map;
7609 return Rt.forEach(this.tn(e), (e => this.en(t, e).next((t => {
7610 s && (s = !!t), i.set(e, t);
7611 })))).next((() => {
7612 if (s) {
7613 let t = _s();
7614 const s = [];
7615 return Rt.forEach(i, ((i, r) => {
7616 var o;
7617 x("IndexedDbIndexManager", `Using index ${o = i, `id=${o.indexId}|cg=${o.collectionGroup}|f=${o.fields.map((t => `${t.fieldPath}:${t.kind}`)).join(",")}`} to execute ${ke(e)}`);
7618 const u = function(t, e) {
7619 const n = lt(e);
7620 if (void 0 === n) return null;
7621 for (const e of $e(t, n.fieldPath)) switch (e.op) {
7622 case "array-contains-any" /* ARRAY_CONTAINS_ANY */ :
7623 return e.value.arrayValue.values || [];
7624
7625 case "array-contains" /* ARRAY_CONTAINS */ :
7626 return [ e.value ];
7627 // Remaining filters are not array filters.
7628 }
7629 return null;
7630 }
7631 /**
7632 * Returns the list of values that are used in != or NOT_IN filters. Returns
7633 * `null` if there are no such filters.
7634 */ (r, i), c = function(t, e) {
7635 const n = new Map;
7636 for (const s of ft(e)) for (const e of $e(t, s.fieldPath)) switch (e.op) {
7637 case "==" /* EQUAL */ :
7638 case "in" /* IN */ :
7639 // Encode equality prefix, which is encoded in the index value before
7640 // the inequality (e.g. `a == 'a' && b != 'b'` is encoded to
7641 // `value != 'ab'`).
7642 n.set(s.fieldPath.canonicalString(), e.value);
7643 break;
7644
7645 case "not-in" /* NOT_IN */ :
7646 case "!=" /* NOT_EQUAL */ :
7647 // NotIn/NotEqual is always a suffix. There cannot be any remaining
7648 // segments and hence we can return early here.
7649 return n.set(s.fieldPath.canonicalString(), e.value), Array.from(n.values());
7650 // Remaining filters cannot be used as notIn bounds.
7651 }
7652 return null;
7653 }
7654 /**
7655 * Returns a lower bound of field values that can be used as a starting point to
7656 * scan the index defined by `fieldIndex`. Returns `MIN_VALUE` if no lower bound
7657 * exists.
7658 */ (r, i), a = function(t, e) {
7659 const n = [];
7660 let s = !0;
7661 // For each segment, retrieve a lower bound if there is a suitable filter or
7662 // startAt.
7663 for (const i of ft(e)) {
7664 const e = 0 /* ASCENDING */ === i.kind ? Be(t, i.fieldPath, t.startAt) : Le(t, i.fieldPath, t.startAt);
7665 n.push(e.value), s && (s = e.inclusive);
7666 }
7667 return new Je(n, s);
7668 }
7669 /**
7670 * Returns an upper bound of field values that can be used as an ending point
7671 * when scanning the index defined by `fieldIndex`. Returns `MAX_VALUE` if no
7672 * upper bound exists.
7673 */ (r, i), h = function(t, e) {
7674 const n = [];
7675 let s = !0;
7676 // For each segment, retrieve an upper bound if there is a suitable filter or
7677 // endAt.
7678 for (const i of ft(e)) {
7679 const e = 0 /* ASCENDING */ === i.kind ? Le(t, i.fieldPath, t.endAt) : Be(t, i.fieldPath, t.endAt);
7680 n.push(e.value), s && (s = e.inclusive);
7681 }
7682 return new Je(n, s);
7683 }(r, i), l = this.nn(i, r, a), f = this.nn(i, r, h), d = this.sn(i, r, c), _ = this.rn(i.indexId, u, l, a.inclusive, f, h.inclusive, d);
7684 return Rt.forEach(_, (i => n.J(i, e.limit).next((e => {
7685 e.forEach((e => {
7686 const n = at.fromSegments(e.documentKey);
7687 t.has(n) || (t = t.add(n), s.push(n));
7688 }));
7689 }))));
7690 })).next((() => s));
7691 }
7692 return Rt.resolve(null);
7693 }));
7694 }
7695 tn(t) {
7696 let e = this.Ze.get(t);
7697 return e || (
7698 // TODO(orquery): Implement DNF transform
7699 e = [ t ], this.Ze.set(t, e), e);
7700 }
7701 /**
7702 * Constructs a key range query on `DbIndexEntryStore` that unions all
7703 * bounds.
7704 */ rn(t, e, n, s, i, r, o) {
7705 // The number of total index scans we union together. This is similar to a
7706 // disjunctive normal form, but adapted for array values. We create a single
7707 // index range per value in an ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filter
7708 // combined with the values from the query bounds.
7709 const u = (null != e ? e.length : 1) * Math.max(n.length, i.length), c = u / (null != e ? e.length : 1), a = [];
7710 for (let h = 0; h < u; ++h) {
7711 const u = e ? this.on(e[h / c]) : gr, l = this.un(t, u, n[h % c], s), f = this.cn(t, u, i[h % c], r), d = o.map((e => this.un(t, u, e,
7712 /* inclusive= */ !0)));
7713 a.push(...this.createRange(l, f, d));
7714 }
7715 return a;
7716 }
7717 /** Generates the lower bound for `arrayValue` and `directionalValue`. */ un(t, e, n, s) {
7718 const i = new lr(t, at.empty(), e, n);
7719 return s ? i : i.Ke();
7720 }
7721 /** Generates the upper bound for `arrayValue` and `directionalValue`. */ cn(t, e, n, s) {
7722 const i = new lr(t, at.empty(), e, n);
7723 return s ? i.Ke() : i;
7724 }
7725 en(t, e) {
7726 const n = new _r(e), s = null != e.collectionGroup ? e.collectionGroup : e.path.lastSegment();
7727 return this.getFieldIndexes(t, s).next((t => {
7728 // Return the index with the most number of segments.
7729 let e = null;
7730 for (const s of t) {
7731 n.We(s) && (!e || s.fields.length > e.fields.length) && (e = s);
7732 }
7733 return e;
7734 }));
7735 }
7736 getIndexType(t, e) {
7737 let n = 2 /* FULL */;
7738 return Rt.forEach(this.tn(e), (e => this.en(t, e).next((t => {
7739 t ? 0 /* NONE */ !== n && t.fields.length < function(t) {
7740 let e = new Kt(ct.comparator), n = !1;
7741 for (const s of t.filters) {
7742 // TODO(orquery): Use the flattened filters here
7743 const t = s;
7744 // __name__ is not an explicit segment of any index, so we don't need to
7745 // count it.
7746 t.field.isKeyField() || (
7747 // ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filters must be counted separately.
7748 // For instance, it is possible to have an index for "a ARRAY a ASC". Even
7749 // though these are on the same field, they should be counted as two
7750 // separate segments in an index.
7751 "array-contains" /* ARRAY_CONTAINS */ === t.op || "array-contains-any" /* ARRAY_CONTAINS_ANY */ === t.op ? n = !0 : e = e.add(t.field));
7752 }
7753 for (const n of t.orderBy)
7754 // __name__ is not an explicit segment of any index, so we don't need to
7755 // count it.
7756 n.field.isKeyField() || (e = e.add(n.field));
7757 return e.size + (n ? 1 : 0);
7758 }(e) && (n = 1 /* PARTIAL */) : n = 0 /* NONE */;
7759 })))).next((() => n));
7760 }
7761 /**
7762 * Returns the byte encoded form of the directional values in the field index.
7763 * Returns `null` if the document does not have all fields specified in the
7764 * index.
7765 */ an(t, e) {
7766 const n = new hr;
7767 for (const s of ft(t)) {
7768 const t = e.data.field(s.fieldPath);
7769 if (null == t) return null;
7770 const i = n.qe(s.kind);
7771 ir.Te.ce(t, i);
7772 }
7773 return n.$e();
7774 }
7775 /** Encodes a single value to the ascending index format. */ on(t) {
7776 const e = new hr;
7777 return ir.Te.ce(t, e.qe(0 /* ASCENDING */)), e.$e();
7778 }
7779 /**
7780 * Returns an encoded form of the document key that sorts based on the key
7781 * ordering of the field index.
7782 */ hn(t, e) {
7783 const n = new hr;
7784 return ir.Te.ce(ge(this.databaseId, e), n.qe(function(t) {
7785 const e = ft(t);
7786 return 0 === e.length ? 0 /* ASCENDING */ : e[e.length - 1].kind;
7787 }(t))), n.$e();
7788 }
7789 /**
7790 * Encodes the given field values according to the specification in `target`.
7791 * For IN queries, a list of possible values is returned.
7792 */ sn(t, e, n) {
7793 if (null === n) return [];
7794 let s = [];
7795 s.push(new hr);
7796 let i = 0;
7797 for (const r of ft(t)) {
7798 const t = n[i++];
7799 for (const n of s) if (this.ln(e, r.fieldPath) && pe(t)) s = this.fn(s, r, t); else {
7800 const e = n.qe(r.kind);
7801 ir.Te.ce(t, e);
7802 }
7803 }
7804 return this.dn(s);
7805 }
7806 /**
7807 * Encodes the given bounds according to the specification in `target`. For IN
7808 * queries, a list of possible values is returned.
7809 */ nn(t, e, n) {
7810 return this.sn(t, e, n.position);
7811 }
7812 /** Returns the byte representation for the provided encoders. */ dn(t) {
7813 const e = [];
7814 for (let n = 0; n < t.length; ++n) e[n] = t[n].$e();
7815 return e;
7816 }
7817 /**
7818 * Creates a separate encoder for each element of an array.
7819 *
7820 * The method appends each value to all existing encoders (e.g. filter("a",
7821 * "==", "a1").filter("b", "in", ["b1", "b2"]) becomes ["a1,b1", "a1,b2"]). A
7822 * list of new encoders is returned.
7823 */ fn(t, e, n) {
7824 const s = [ ...t ], i = [];
7825 for (const t of n.arrayValue.values || []) for (const n of s) {
7826 const s = new hr;
7827 s.seed(n.$e()), ir.Te.ce(t, s.qe(e.kind)), i.push(s);
7828 }
7829 return i;
7830 }
7831 ln(t, e) {
7832 return !!t.filters.find((t => t instanceof Ue && t.field.isEqual(e) && ("in" /* IN */ === t.op || "not-in" /* NOT_IN */ === t.op)));
7833 }
7834 getFieldIndexes(t, e) {
7835 const n = Tr(t), s = Er(t);
7836 return (e ? n.W("collectionGroupIndex", IDBKeyRange.bound(e, e)) : n.W()).next((t => {
7837 const e = [];
7838 return Rt.forEach(t, (t => s.get([ t.indexId, this.uid ]).next((n => {
7839 e.push(function(t, e) {
7840 const n = e ? new mt(e.sequenceNumber, new pt(Qi(e.readTime), new at(hi(e.documentKey)), e.largestBatchId)) : mt.empty(), s = t.fields.map((([t, e]) => new _t(ct.fromServerFormat(t), e)));
7841 return new ht(t.indexId, t.collectionGroup, s, n);
7842 }(t, n));
7843 })))).next((() => e));
7844 }));
7845 }
7846 getNextCollectionGroupToUpdate(t) {
7847 return this.getFieldIndexes(t).next((t => 0 === t.length ? null : (t.sort(((t, e) => {
7848 const n = t.indexState.sequenceNumber - e.indexState.sequenceNumber;
7849 return 0 !== n ? n : tt(t.collectionGroup, e.collectionGroup);
7850 })), t[0].collectionGroup)));
7851 }
7852 updateCollectionGroup(t, e, n) {
7853 const s = Tr(t), i = Er(t);
7854 return this._n(t).next((t => s.W("collectionGroupIndex", IDBKeyRange.bound(e, e)).next((e => Rt.forEach(e, (e => i.put(Xi(e.indexId, this.user, t, n))))))));
7855 }
7856 updateIndexEntries(t, e) {
7857 // Porting Note: `getFieldIndexes()` on Web does not cache index lookups as
7858 // it could be used across different IndexedDB transactions. As any cached
7859 // data might be invalidated by other multi-tab clients, we can only trust
7860 // data within a single IndexedDB transaction. We therefore add a cache
7861 // here.
7862 const n = new Map;
7863 return Rt.forEach(e, ((e, s) => {
7864 const i = n.get(e.collectionGroup);
7865 return (i ? Rt.resolve(i) : this.getFieldIndexes(t, e.collectionGroup)).next((i => (n.set(e.collectionGroup, i),
7866 Rt.forEach(i, (n => this.wn(t, e, n).next((e => {
7867 const i = this.mn(s, n);
7868 return e.isEqual(i) ? Rt.resolve() : this.gn(t, s, n, e, i);
7869 })))))));
7870 }));
7871 }
7872 yn(t, e, n, s) {
7873 return Ir(t).put({
7874 indexId: s.indexId,
7875 uid: this.uid,
7876 arrayValue: s.arrayValue,
7877 directionalValue: s.directionalValue,
7878 orderedDocumentKey: this.hn(n, e.key),
7879 documentKey: e.key.path.toArray()
7880 });
7881 }
7882 pn(t, e, n, s) {
7883 return Ir(t).delete([ s.indexId, this.uid, s.arrayValue, s.directionalValue, this.hn(n, e.key), e.key.path.toArray() ]);
7884 }
7885 wn(t, e, n) {
7886 const s = Ir(t);
7887 let i = new Kt(fr);
7888 return s.Z({
7889 index: "documentKeyIndex",
7890 range: IDBKeyRange.only([ n.indexId, this.uid, this.hn(n, e) ])
7891 }, ((t, s) => {
7892 i = i.add(new lr(n.indexId, e, s.arrayValue, s.directionalValue));
7893 })).next((() => i));
7894 }
7895 /** Creates the index entries for the given document. */ mn(t, e) {
7896 let n = new Kt(fr);
7897 const s = this.an(e, t);
7898 if (null == s) return n;
7899 const i = lt(e);
7900 if (null != i) {
7901 const r = t.data.field(i.fieldPath);
7902 if (pe(r)) for (const i of r.arrayValue.values || []) n = n.add(new lr(e.indexId, t.key, this.on(i), s));
7903 } else n = n.add(new lr(e.indexId, t.key, gr, s));
7904 return n;
7905 }
7906 /**
7907 * Updates the index entries for the provided document by deleting entries
7908 * that are no longer referenced in `newEntries` and adding all newly added
7909 * entries.
7910 */ gn(t, e, n, s, i) {
7911 x("IndexedDbIndexManager", "Updating index entries for document '%s'", e.key);
7912 const r = [];
7913 return function(t, e, n, s, i) {
7914 const r = t.getIterator(), o = e.getIterator();
7915 let u = Qt(r), c = Qt(o);
7916 // Walk through the two sets at the same time, using the ordering defined by
7917 // `comparator`.
7918 for (;u || c; ) {
7919 let t = !1, e = !1;
7920 if (u && c) {
7921 const s = n(u, c);
7922 s < 0 ?
7923 // The element was removed if the next element in our ordered
7924 // walkthrough is only in `before`.
7925 e = !0 : s > 0 && (
7926 // The element was added if the next element in our ordered walkthrough
7927 // is only in `after`.
7928 t = !0);
7929 } else null != u ? e = !0 : t = !0;
7930 t ? (s(c), c = Qt(o)) : e ? (i(u), u = Qt(r)) : (u = Qt(r), c = Qt(o));
7931 }
7932 }(s, i, fr, (
7933 /* onAdd= */ s => {
7934 r.push(this.yn(t, e, n, s));
7935 }), (
7936 /* onRemove= */ s => {
7937 r.push(this.pn(t, e, n, s));
7938 })), Rt.waitFor(r);
7939 }
7940 _n(t) {
7941 let e = 1;
7942 return Er(t).Z({
7943 index: "sequenceNumberIndex",
7944 reverse: !0,
7945 range: IDBKeyRange.upperBound([ this.uid, Number.MAX_SAFE_INTEGER ])
7946 }, ((t, n, s) => {
7947 s.done(), e = n.sequenceNumber + 1;
7948 })).next((() => e));
7949 }
7950 /**
7951 * Returns a new set of IDB ranges that splits the existing range and excludes
7952 * any values that match the `notInValue` from these ranges. As an example,
7953 * `[foo > 2 && foo != 3]` becomes `[foo > 2 && foo < 3, foo > 3]`.
7954 */ createRange(t, e, n) {
7955 // The notIn values need to be sorted and unique so that we can return a
7956 // sorted set of non-overlapping ranges.
7957 n = n.sort(((t, e) => fr(t, e))).filter(((t, e, n) => !e || 0 !== fr(t, n[e - 1])));
7958 const s = [];
7959 s.push(t);
7960 for (const i of n) {
7961 const n = fr(i, t), r = fr(i, e);
7962 if (0 === n)
7963 // `notInValue` is the lower bound. We therefore need to raise the bound
7964 // to the next value.
7965 s[0] = t.Ke(); else if (n > 0 && r < 0)
7966 // `notInValue` is in the middle of the range
7967 s.push(i), s.push(i.Ke()); else if (r > 0)
7968 // `notInValue` (and all following values) are out of the range
7969 break;
7970 }
7971 s.push(e);
7972 const i = [];
7973 for (let t = 0; t < s.length; t += 2) i.push(IDBKeyRange.bound([ s[t].indexId, this.uid, s[t].arrayValue, s[t].directionalValue, gr, [] ], [ s[t + 1].indexId, this.uid, s[t + 1].arrayValue, s[t + 1].directionalValue, gr, [] ]));
7974 return i;
7975 }
7976 getMinOffsetFromCollectionGroup(t, e) {
7977 return this.getFieldIndexes(t, e).next(Ar);
7978 }
7979 getMinOffset(t, e) {
7980 return Rt.mapArray(this.tn(e), (e => this.en(t, e).next((t => t || M())))).next(Ar);
7981 }
7982}
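
/*
 * Illustrative sketch (not part of the SDK): the `gn` method above computes
 * index-entry updates by walking the existing entries and the newly computed
 * entries, both sorted by the same comparator, in lockstep and invoking
 * onAdd/onRemove callbacks for the differences. The standalone helper below
 * shows the same diff over plain sorted arrays; all names are hypothetical.
 */
function diffSortedArraysSketch(before, after, comparator, onAdd, onRemove) {
  let i = 0; // cursor into `before`
  let j = 0; // cursor into `after`
  while (i < before.length || j < after.length) {
    if (i >= before.length) {
      onAdd(after[j++]); // remaining `after` items were added
    } else if (j >= after.length) {
      onRemove(before[i++]); // remaining `before` items were removed
    } else {
      const cmp = comparator(before[i], after[j]);
      if (cmp < 0) onRemove(before[i++]); // only present in `before`
      else if (cmp > 0) onAdd(after[j++]); // only present in `after`
      else { i++; j++; } // present in both, unchanged
    }
  }
}
// Example: diffSortedArraysSketch([1, 2, 4], [2, 3, 4], (a, b) => a - b,
//   (x) => console.log("add", x), (x) => console.log("remove", x));
// logs "remove 1" followed by "add 3".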
7983
7984/**
7985 * Helper to get a typed SimpleDbStore for the collectionParents
7986 * document store.
7987 */ function pr(t) {
7988 return Oi(t, "collectionParents");
7989}
7990
7991/**
7992 * Helper to get a typed SimpleDbStore for the index entry object store.
7993 */ function Ir(t) {
7994 return Oi(t, "indexEntries");
7995}
7996
7997/**
7998 * Helper to get a typed SimpleDbStore for the index configuration object store.
7999 */ function Tr(t) {
8000 return Oi(t, "indexConfiguration");
8001}
8002
8003/**
8004 * Helper to get a typed SimpleDbStore for the index state object store.
8005 */ function Er(t) {
8006 return Oi(t, "indexState");
8007}
8008
8009function Ar(t) {
8010 F(0 !== t.length);
8011 let e = t[0].indexState.offset, n = e.largestBatchId;
8012 for (let s = 1; s < t.length; s++) {
8013 const i = t[s].indexState.offset;
8014 It(i, e) < 0 && (e = i), n < i.largestBatchId && (n = i.largestBatchId);
8015 }
8016 return new pt(e.readTime, e.documentKey, n);
8017}
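
/*
 * Illustrative sketch (hypothetical names, not the SDK's API): `Ar` above
 * reduces a list of field indexes to a single "minimum offset" by keeping the
 * earliest readTime/documentKey offset while still reporting the largest
 * batch id seen across all indexes. A plain-object version of the reduction,
 * assuming `compareOffsets` orders offsets by readTime and then documentKey:
 */
function minOffsetSketch(indexes, compareOffsets) {
  let earliest = indexes[0].offset;
  let largestBatchId = earliest.largestBatchId;
  for (const { offset } of indexes.slice(1)) {
    if (compareOffsets(offset, earliest) < 0) earliest = offset;
    if (offset.largestBatchId > largestBatchId) largestBatchId = offset.largestBatchId;
  }
  return {
    readTime: earliest.readTime,
    documentKey: earliest.documentKey,
    largestBatchId
  };
}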
8018
8019/**
8020 * @license
8021 * Copyright 2018 Google LLC
8022 *
8023 * Licensed under the Apache License, Version 2.0 (the "License");
8024 * you may not use this file except in compliance with the License.
8025 * You may obtain a copy of the License at
8026 *
8027 * http://www.apache.org/licenses/LICENSE-2.0
8028 *
8029 * Unless required by applicable law or agreed to in writing, software
8030 * distributed under the License is distributed on an "AS IS" BASIS,
8031 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8032 * See the License for the specific language governing permissions and
8033 * limitations under the License.
8034 */ const Rr = {
8035 didRun: !1,
8036 sequenceNumbersCollected: 0,
8037 targetsRemoved: 0,
8038 documentsRemoved: 0
8039};
8040
8041class br {
8042 constructor(
8043 // When we attempt to collect, we will only do so if the cache size is greater than this
8044 // threshold. Passing `COLLECTION_DISABLED` here will cause collection to always be skipped.
8045 t,
8046 // The percentage of sequence numbers that we will attempt to collect
8047 e,
8048 // A cap on the total number of sequence numbers that will be collected. This prevents
8049 // us from collecting a huge number of sequence numbers if the cache has grown very large.
8050 n) {
8051 this.cacheSizeCollectionThreshold = t, this.percentileToCollect = e, this.maximumSequenceNumbersToCollect = n;
8052 }
8053 static withCacheSize(t) {
8054 return new br(t, br.DEFAULT_COLLECTION_PERCENTILE, br.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
8055 }
8056}
8057
8058/**
8059 * @license
8060 * Copyright 2020 Google LLC
8061 *
8062 * Licensed under the Apache License, Version 2.0 (the "License");
8063 * you may not use this file except in compliance with the License.
8064 * You may obtain a copy of the License at
8065 *
8066 * http://www.apache.org/licenses/LICENSE-2.0
8067 *
8068 * Unless required by applicable law or agreed to in writing, software
8069 * distributed under the License is distributed on an "AS IS" BASIS,
8070 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8071 * See the License for the specific language governing permissions and
8072 * limitations under the License.
8073 */
8074/**
8075 * Delete a mutation batch and the associated document mutations.
8076 * @returns A PersistencePromise of the document mutations that were removed.
8077 */
8078function Pr(t, e, n) {
8079 const s = t.store("mutations"), i = t.store("documentMutations"), r = [], o = IDBKeyRange.only(n.batchId);
8080 let u = 0;
8081 const c = s.Z({
8082 range: o
8083 }, ((t, e, n) => (u++, n.delete())));
8084 r.push(c.next((() => {
8085 F(1 === u);
8086 })));
8087 const a = [];
8088 for (const t of n.mutations) {
8089 const s = di(e, t.key.path, n.batchId);
8090 r.push(i.delete(s)), a.push(t.key);
8091 }
8092 return Rt.waitFor(r).next((() => a));
8093}
8094
8095/**
8096 * Returns an approximate size for the given document.
8097 */ function vr(t) {
8098 if (!t) return 0;
8099 let e;
8100 if (t.document) e = t.document; else if (t.unknownDocument) e = t.unknownDocument; else {
8101 if (!t.noDocument) throw M();
8102 e = t.noDocument;
8103 }
8104 return JSON.stringify(e).length;
8105}
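
/*
 * Illustrative sketch only: `vr` above approximates a cached document's size
 * as the length of its JSON serialization, which is cheap and good enough for
 * cache-size accounting where exact byte counts are not required.
 */
function approximateSizeSketch(value) {
  return value == null ? 0 : JSON.stringify(value).length;
}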
8106
8107/**
8108 * @license
8109 * Copyright 2017 Google LLC
8110 *
8111 * Licensed under the Apache License, Version 2.0 (the "License");
8112 * you may not use this file except in compliance with the License.
8113 * You may obtain a copy of the License at
8114 *
8115 * http://www.apache.org/licenses/LICENSE-2.0
8116 *
8117 * Unless required by applicable law or agreed to in writing, software
8118 * distributed under the License is distributed on an "AS IS" BASIS,
8119 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8120 * See the License for the specific language governing permissions and
8121 * limitations under the License.
8122 */
8123/** A mutation queue for a specific user, backed by IndexedDB. */ br.DEFAULT_COLLECTION_PERCENTILE = 10,
8124br.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1e3, br.DEFAULT = new br(41943040, br.DEFAULT_COLLECTION_PERCENTILE, br.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT),
8125br.DISABLED = new br(-1, 0, 0);
8126
8127class Vr {
8128 constructor(
8129 /**
8130 * The normalized userId (e.g. null UID => "" userId) used to store /
8131 * retrieve mutations.
8132 */
8133 t, e, n, s) {
8134 this.userId = t, this.It = e, this.indexManager = n, this.referenceDelegate = s,
8135 /**
8136 * Caches the document keys for pending mutation batches. If the mutation
8137 * has been removed from IndexedDb, the cached value may continue to
8138 * be used to retrieve the batch's document keys. To remove a cached value
8139 * locally, `removeCachedMutationKeys()` should be invoked either directly
8140 * or through `removeMutationBatches()`.
8141 *
8142 * With multi-tab, when the primary client acknowledges or rejects a mutation,
8143 * this cache is used by secondary clients to invalidate the local
8144 * view of the documents that were previously affected by the mutation.
8145 */
8146 // PORTING NOTE: Multi-tab only.
8147 this.In = {};
8148 }
8149 /**
8150 * Creates a new mutation queue for the given user.
8151 * @param user - The user for which to create a mutation queue.
8152 * @param serializer - The serializer to use when persisting to IndexedDb.
8153 */ static oe(t, e, n, s) {
8154 // TODO(mcg): Figure out what constraints there are on userIDs
8155 // In particular, are there any reserved characters? are empty ids allowed?
8156 // For the moment store these together in the same mutations table assuming
8157 // that empty userIDs aren't allowed.
8158 F("" !== t.uid);
8159 const i = t.isAuthenticated() ? t.uid : "";
8160 return new Vr(i, e, n, s);
8161 }
8162 checkEmpty(t) {
8163 let e = !0;
8164 const n = IDBKeyRange.bound([ this.userId, Number.NEGATIVE_INFINITY ], [ this.userId, Number.POSITIVE_INFINITY ]);
8165 return Dr(t).Z({
8166 index: "userMutationsIndex",
8167 range: n
8168 }, ((t, n, s) => {
8169 e = !1, s.done();
8170 })).next((() => e));
8171 }
8172 addMutationBatch(t, e, n, s) {
8173 const i = Cr(t), r = Dr(t);
8174 // The IndexedDb implementation in Chrome (and Firefox) does not handle
8175 // compound indices that include auto-generated keys correctly. To ensure
8176 // that the index entry is added correctly in all browsers, we perform two
8177 // writes: The first write is used to retrieve the next auto-generated Batch
8178 // ID, and the second write populates the index and stores the actual
8179 // mutation batch.
8180 // See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972
8181 // We write an empty object to obtain the auto-generated key.
8182 // eslint-disable-next-line @typescript-eslint/no-explicit-any
8183 return r.add({}).next((o => {
8184 F("number" == typeof o);
8185 const u = new Mi(o, e, n, s), c = function(t, e, n) {
8186 const s = n.baseMutations.map((e => Qs(t.re, e))), i = n.mutations.map((e => Qs(t.re, e)));
8187 return {
8188 userId: e,
8189 batchId: n.batchId,
8190 localWriteTimeMs: n.localWriteTime.toMillis(),
8191 baseMutations: s,
8192 mutations: i
8193 };
8194 }(this.It, this.userId, u), a = [];
8195 let h = new Kt(((t, e) => tt(t.canonicalString(), e.canonicalString())));
8196 for (const t of s) {
8197 const e = di(this.userId, t.key.path, o);
8198 h = h.add(t.key.path.popLast()), a.push(r.put(c)), a.push(i.put(e, _i));
8199 }
8200 return h.forEach((e => {
8201 a.push(this.indexManager.addToCollectionParentIndex(t, e));
8202 })), t.addOnCommittedListener((() => {
8203 this.In[o] = u.keys();
8204 })), Rt.waitFor(a).next((() => u));
8205 }));
8206 }
8207 lookupMutationBatch(t, e) {
8208 return Dr(t).get(e).next((t => t ? (F(t.userId === this.userId), ji(this.It, t)) : null));
8209 }
8210 /**
8211 * Returns the document keys for the mutation batch with the given batchId.
8212 * For primary clients, this method returns `null` after
8213 * `removeMutationBatches()` has been called. Secondary clients return a
8214 * cached result until `removeCachedMutationKeys()` is invoked.
8215 */
8216 // PORTING NOTE: Multi-tab only.
8217 Tn(t, e) {
8218 return this.In[e] ? Rt.resolve(this.In[e]) : this.lookupMutationBatch(t, e).next((t => {
8219 if (t) {
8220 const n = t.keys();
8221 return this.In[e] = n, n;
8222 }
8223 return null;
8224 }));
8225 }
8226 getNextMutationBatchAfterBatchId(t, e) {
8227 const n = e + 1, s = IDBKeyRange.lowerBound([ this.userId, n ]);
8228 let i = null;
8229 return Dr(t).Z({
8230 index: "userMutationsIndex",
8231 range: s
8232 }, ((t, e, s) => {
8233 e.userId === this.userId && (F(e.batchId >= n), i = ji(this.It, e)), s.done();
8234 })).next((() => i));
8235 }
8236 getHighestUnacknowledgedBatchId(t) {
8237 const e = IDBKeyRange.upperBound([ this.userId, Number.POSITIVE_INFINITY ]);
8238 let n = -1;
8239 return Dr(t).Z({
8240 index: "userMutationsIndex",
8241 range: e,
8242 reverse: !0
8243 }, ((t, e, s) => {
8244 n = e.batchId, s.done();
8245 })).next((() => n));
8246 }
8247 getAllMutationBatches(t) {
8248 const e = IDBKeyRange.bound([ this.userId, -1 ], [ this.userId, Number.POSITIVE_INFINITY ]);
8249 return Dr(t).W("userMutationsIndex", e).next((t => t.map((t => ji(this.It, t)))));
8250 }
8251 getAllMutationBatchesAffectingDocumentKey(t, e) {
8252 // Scan the document-mutation index starting with a prefix starting with
8253 // the given documentKey.
8254 const n = fi(this.userId, e.path), s = IDBKeyRange.lowerBound(n), i = [];
8255 return Cr(t).Z({
8256 range: s
8257 }, ((n, s, r) => {
8258 const [o, u, c] = n, a = hi(u);
8259 // Only consider rows matching exactly the specific key of
8260 // interest. Note that because we order by path first, and we
8261 // order terminators before path separators, we'll encounter all
8262 // the index rows for documentKey contiguously. In particular, all
8263 // the rows for documentKey will occur before any rows for
8264 // documents nested in a subcollection beneath documentKey so we
8265 // can stop as soon as we hit any such row.
8266 if (o === this.userId && e.path.isEqual(a))
8267 // Look up the mutation batch in the store.
8268 return Dr(t).get(c).next((t => {
8269 if (!t) throw M();
8270 F(t.userId === this.userId), i.push(ji(this.It, t));
8271 }));
8272 r.done();
8273 })).next((() => i));
8274 }
8275 getAllMutationBatchesAffectingDocumentKeys(t, e) {
8276 let n = new Kt(tt);
8277 const s = [];
8278 return e.forEach((e => {
8279 const i = fi(this.userId, e.path), r = IDBKeyRange.lowerBound(i), o = Cr(t).Z({
8280 range: r
8281 }, ((t, s, i) => {
8282 const [r, o, u] = t, c = hi(o);
8283 // Only consider rows matching exactly the specific key of
8284 // interest. Note that because we order by path first, and we
8285 // order terminators before path separators, we'll encounter all
8286 // the index rows for documentKey contiguously. In particular, all
8287 // the rows for documentKey will occur before any rows for
8288 // documents nested in a subcollection beneath documentKey so we
8289 // can stop as soon as we hit any such row.
8290 r === this.userId && e.path.isEqual(c) ? n = n.add(u) : i.done();
8291 }));
8292 s.push(o);
8293 })), Rt.waitFor(s).next((() => this.En(t, n)));
8294 }
8295 getAllMutationBatchesAffectingQuery(t, e) {
8296 const n = e.path, s = n.length + 1, i = fi(this.userId, n), r = IDBKeyRange.lowerBound(i);
8297 // Collect up unique batchIDs encountered during a scan of the index. Use a
8298 // SortedSet to accumulate batch IDs so they can be traversed in order in a
8299 // scan of the main table.
8300 let o = new Kt(tt);
8301 return Cr(t).Z({
8302 range: r
8303 }, ((t, e, i) => {
8304 const [r, u, c] = t, a = hi(u);
8305 r === this.userId && n.isPrefixOf(a) ?
8306 // Rows with document keys more than one segment longer than the
8307 // query path can't be matches. For example, a query on 'rooms'
8308 // can't match the document /rooms/abc/messages/xyx.
8309 // TODO(mcg): we'll need a different scanner when we implement
8310 // ancestor queries.
8311 a.length === s && (o = o.add(c)) : i.done();
8312 })).next((() => this.En(t, o)));
8313 }
8314 En(t, e) {
8315 const n = [], s = [];
8316 // TODO(rockwood): Implement this using iterate.
8317 return e.forEach((e => {
8318 s.push(Dr(t).get(e).next((t => {
8319 if (null === t) throw M();
8320 F(t.userId === this.userId), n.push(ji(this.It, t));
8321 })));
8322 })), Rt.waitFor(s).next((() => n));
8323 }
8324 removeMutationBatch(t, e) {
8325 return Pr(t.ie, this.userId, e).next((n => (t.addOnCommittedListener((() => {
8326 this.An(e.batchId);
8327 })), Rt.forEach(n, (e => this.referenceDelegate.markPotentiallyOrphaned(t, e))))));
8328 }
8329 /**
8330 * Clears the cached keys for a mutation batch. This method should be
8331 * called by secondary clients after they process mutation updates.
8332 *
8333 * Note that this method does not have to be called from primary clients as
8334 * the corresponding cache entries are cleared when an acknowledged or
8335 * rejected batch is removed from the mutation queue.
8336 */
8337 // PORTING NOTE: Multi-tab only
8338 An(t) {
8339 delete this.In[t];
8340 }
8341 performConsistencyCheck(t) {
8342 return this.checkEmpty(t).next((e => {
8343 if (!e) return Rt.resolve();
8344 // Verify that there are no entries in the documentMutations index if
8345 // the queue is empty.
8346 const n = IDBKeyRange.lowerBound([ this.userId ]);
8347 const s = [];
8348 return Cr(t).Z({
8349 range: n
8350 }, ((t, e, n) => {
8351 if (t[0] === this.userId) {
8352 const e = hi(t[1]);
8353 s.push(e);
8354 } else n.done();
8355 })).next((() => {
8356 F(0 === s.length);
8357 }));
8358 }));
8359 }
8360 containsKey(t, e) {
8361 return Sr(t, this.userId, e);
8362 }
8363 // PORTING NOTE: Multi-tab only (state is held in memory in other clients).
8364 /** Returns the mutation queue's metadata from IndexedDb. */
8365 Rn(t) {
8366 return xr(t).get(this.userId).next((t => t || {
8367 userId: this.userId,
8368 lastAcknowledgedBatchId: -1,
8369 lastStreamToken: ""
8370 }));
8371 }
8372}
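
/*
 * Sketch of the pattern used in `addMutationBatch` above (not the SDK's
 * code): browsers that mishandle compound indices over auto-generated keys
 * are worked around with two writes — an empty `add()` to obtain the next
 * auto-generated batch ID, then a `put()` of the full record under that ID.
 * The vanilla-IndexedDB sketch below assumes an object store created with
 * `{ keyPath: "batchId", autoIncrement: true }`; the store and field names
 * are hypothetical.
 */
function idbRequestToPromiseSketch(request) {
  return new Promise((resolve, reject) => {
    request.onsuccess = () => resolve(request.result);
    request.onerror = () => reject(request.error);
  });
}

async function addBatchWithAutoKeySketch(db, userId, mutations) {
  const txn = db.transaction("mutations", "readwrite");
  const store = txn.objectStore("mutations");
  // First write: obtain the auto-generated key without the payload.
  const batchId = await idbRequestToPromiseSketch(store.add({}));
  // Second write: store the real record (including the key in its key path),
  // which also populates any indexes correctly.
  // (Awaiting between the two requests is fine in modern browsers: the
  // transaction stays active because the next request is issued from the
  // success handler's microtask, before auto-commit kicks in.)
  await idbRequestToPromiseSketch(store.put({ batchId, userId, mutations }));
  return batchId;
}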
8373
8374/**
8375 * @returns true if the mutation queue for the given user contains a pending
8376 * mutation for the given key.
8377 */ function Sr(t, e, n) {
8378 const s = fi(e, n.path), i = s[1], r = IDBKeyRange.lowerBound(s);
8379 let o = !1;
8380 return Cr(t).Z({
8381 range: r,
8382 X: !0
8383 }, ((t, n, s) => {
8384 const [r, u, /*batchID*/ c] = t;
8385 r === e && u === i && (o = !0), s.done();
8386 })).next((() => o));
8387}
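
/*
 * Vanilla-IndexedDB sketch of the check in `Sr` above (the store layout is a
 * simplified assumption with primitive key components): to test whether any
 * key with a given prefix exists, open a cursor at the lower bound of that
 * prefix and inspect only the first row it yields.
 */
function prefixExistsSketch(store, prefix /* e.g. [userId, encodedPath] */) {
  return new Promise((resolve, reject) => {
    const request = store.openCursor(IDBKeyRange.lowerBound(prefix));
    request.onerror = () => reject(request.error);
    request.onsuccess = () => {
      const cursor = request.result;
      if (!cursor) return resolve(false); // nothing at or after the prefix
      const [first, second] = cursor.key;
      // The first row at/after the bound is a match only if its leading key
      // components equal the prefix we asked about.
      resolve(first === prefix[0] && second === prefix[1]);
    };
  });
}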
8388
8389/** Returns true if any mutation queue contains the given document. */
8390/**
8391 * Helper to get a typed SimpleDbStore for the mutations object store.
8392 */
8393function Dr(t) {
8394 return Oi(t, "mutations");
8395}
8396
8397/**
8398 * Helper to get a typed SimpleDbStore for the documentMutations object store.
8399 */ function Cr(t) {
8400 return Oi(t, "documentMutations");
8401}
8402
8403/**
8404 * Helper to get a typed SimpleDbStore for the mutationQueues object store.
8405 */ function xr(t) {
8406 return Oi(t, "mutationQueues");
8407}
8408
8409/**
8410 * @license
8411 * Copyright 2017 Google LLC
8412 *
8413 * Licensed under the Apache License, Version 2.0 (the "License");
8414 * you may not use this file except in compliance with the License.
8415 * You may obtain a copy of the License at
8416 *
8417 * http://www.apache.org/licenses/LICENSE-2.0
8418 *
8419 * Unless required by applicable law or agreed to in writing, software
8420 * distributed under the License is distributed on an "AS IS" BASIS,
8421 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8422 * See the License for the specific language governing permissions and
8423 * limitations under the License.
8424 */
8425/** Offset to ensure non-overlapping target ids. */
8426/**
8427 * Generates monotonically increasing target IDs for sending targets to the
8428 * watch stream.
8429 *
8430 * The client constructs two generators, one for the target cache, and one
8431 * for the sync engine (to generate limbo document targets). These
8432 * generators produce non-overlapping IDs (by using even and odd IDs
8433 * respectively).
8434 *
8435 * By separating the target ID space, the query cache can generate target IDs
8436 * that persist across client restarts, while sync engine can independently
8437 * generate in-memory target IDs that are transient and can be reused after a
8438 * restart.
8439 */
8440class Nr {
8441 constructor(t) {
8442 this.bn = t;
8443 }
8444 next() {
8445 return this.bn += 2, this.bn;
8446 }
8447 static Pn() {
8448 // The target cache generator must return '2' in its first call to `next()`
8449 // as there is no differentiation in the protocol layer between an unset
8450 // number and the number '0'. If we were to send a target with target ID
8451 // '0', the backend would consider it unset and replace it with its own ID.
8452 return new Nr(0);
8453 }
8454 static vn() {
8455 // Sync engine assigns target IDs for limbo document detection.
8456 return new Nr(-1);
8457 }
8458}
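
/*
 * Illustrative sketch: the generator above hands out even IDs to the
 * persisted target cache (2, 4, 6, ...) and odd IDs to the sync engine's
 * transient limbo targets (1, 3, 5, ...), so the two ID spaces never collide.
 * A minimal standalone equivalent with descriptive names:
 */
class TargetIdGeneratorSketch {
  constructor(seed) {
    this.last = seed;
  }
  next() {
    this.last += 2;
    return this.last;
  }
  static forTargetCache() {
    return new TargetIdGeneratorSketch(0); // first call to next() yields 2
  }
  static forSyncEngine() {
    return new TargetIdGeneratorSketch(-1); // first call to next() yields 1
  }
}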
8459
8460/**
8461 * @license
8462 * Copyright 2017 Google LLC
8463 *
8464 * Licensed under the Apache License, Version 2.0 (the "License");
8465 * you may not use this file except in compliance with the License.
8466 * You may obtain a copy of the License at
8467 *
8468 * http://www.apache.org/licenses/LICENSE-2.0
8469 *
8470 * Unless required by applicable law or agreed to in writing, software
8471 * distributed under the License is distributed on an "AS IS" BASIS,
8472 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8473 * See the License for the specific language governing permissions and
8474 * limitations under the License.
8475 */ class kr {
8476 constructor(t, e) {
8477 this.referenceDelegate = t, this.It = e;
8478 }
8479 // PORTING NOTE: We don't cache global metadata for the target cache, since
8480 // some of it (in particular `highestTargetId`) can be modified by secondary
8481 // tabs. We could perhaps be more granular (and e.g. still cache
8482 // `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go
8483 // to IndexedDb whenever we need to read metadata. We can revisit if it turns
8484 // out to have a meaningful performance impact.
8485 allocateTargetId(t) {
8486 return this.Vn(t).next((e => {
8487 const n = new Nr(e.highestTargetId);
8488 return e.highestTargetId = n.next(), this.Sn(t, e).next((() => e.highestTargetId));
8489 }));
8490 }
8491 getLastRemoteSnapshotVersion(t) {
8492 return this.Vn(t).next((t => it.fromTimestamp(new st(t.lastRemoteSnapshotVersion.seconds, t.lastRemoteSnapshotVersion.nanoseconds))));
8493 }
8494 getHighestSequenceNumber(t) {
8495 return this.Vn(t).next((t => t.highestListenSequenceNumber));
8496 }
8497 setTargetsMetadata(t, e, n) {
8498 return this.Vn(t).next((s => (s.highestListenSequenceNumber = e, n && (s.lastRemoteSnapshotVersion = n.toTimestamp()),
8499 e > s.highestListenSequenceNumber && (s.highestListenSequenceNumber = e), this.Sn(t, s))));
8500 }
8501 addTargetData(t, e) {
8502 return this.Dn(t, e).next((() => this.Vn(t).next((n => (n.targetCount += 1, this.Cn(e, n),
8503 this.Sn(t, n))))));
8504 }
8505 updateTargetData(t, e) {
8506 return this.Dn(t, e);
8507 }
8508 removeTargetData(t, e) {
8509 return this.removeMatchingKeysForTargetId(t, e.targetId).next((() => Or(t).delete(e.targetId))).next((() => this.Vn(t))).next((e => (F(e.targetCount > 0),
8510 e.targetCount -= 1, this.Sn(t, e))));
8511 }
8512 /**
8513 * Drops any targets with sequence number less than or equal to the upper bound, excepting those
8514 * present in `activeTargetIds`. Document associations for the removed targets are also removed.
8515 * Returns the number of targets removed.
8516 */ removeTargets(t, e, n) {
8517 let s = 0;
8518 const i = [];
8519 return Or(t).Z(((r, o) => {
8520 const u = Wi(o);
8521 u.sequenceNumber <= e && null === n.get(u.targetId) && (s++, i.push(this.removeTargetData(t, u)));
8522 })).next((() => Rt.waitFor(i))).next((() => s));
8523 }
8524 /**
8525 * Call provided function with each `TargetData` that we have cached.
8526 */ forEachTarget(t, e) {
8527 return Or(t).Z(((t, n) => {
8528 const s = Wi(n);
8529 e(s);
8530 }));
8531 }
8532 Vn(t) {
8533 return Mr(t).get("targetGlobalKey").next((t => (F(null !== t), t)));
8534 }
8535 Sn(t, e) {
8536 return Mr(t).put("targetGlobalKey", e);
8537 }
8538 Dn(t, e) {
8539 return Or(t).put(zi(this.It, e));
8540 }
8541 /**
8542 * In-place updates the provided metadata to account for values in the given
8543 * TargetData. Saving is done separately. Returns true if there were any
8544 * changes to the metadata.
8545 */ Cn(t, e) {
8546 let n = !1;
8547 return t.targetId > e.highestTargetId && (e.highestTargetId = t.targetId, n = !0),
8548 t.sequenceNumber > e.highestListenSequenceNumber && (e.highestListenSequenceNumber = t.sequenceNumber,
8549 n = !0), n;
8550 }
8551 getTargetCount(t) {
8552 return this.Vn(t).next((t => t.targetCount));
8553 }
8554 getTargetData(t, e) {
8555 // Iterating by the canonicalId may yield more than one result because
8556 // canonicalId values are not required to be unique per target. This query
8557 // depends on the queryTargets index to be efficient.
8558 const n = ke(e), s = IDBKeyRange.bound([ n, Number.NEGATIVE_INFINITY ], [ n, Number.POSITIVE_INFINITY ]);
8559 let i = null;
8560 return Or(t).Z({
8561 range: s,
8562 index: "queryTargetsIndex"
8563 }, ((t, n, s) => {
8564 const r = Wi(n);
8565 // After finding a potential match, check that the target is
8566 // actually equal to the requested target.
8567 Me(e, r.target) && (i = r, s.done());
8568 })).next((() => i));
8569 }
8570 addMatchingKeys(t, e, n) {
8571 // PORTING NOTE: The reverse index (documentsTargets) is maintained by
8572 // IndexedDb.
8573 const s = [], i = Fr(t);
8574 return e.forEach((e => {
8575 const r = ui(e.path);
8576 s.push(i.put({
8577 targetId: n,
8578 path: r
8579 })), s.push(this.referenceDelegate.addReference(t, n, e));
8580 })), Rt.waitFor(s);
8581 }
8582 removeMatchingKeys(t, e, n) {
8583 // PORTING NOTE: The reverse index (documentsTargets) is maintained by
8584 // IndexedDb.
8585 const s = Fr(t);
8586 return Rt.forEach(e, (e => {
8587 const i = ui(e.path);
8588 return Rt.waitFor([ s.delete([ n, i ]), this.referenceDelegate.removeReference(t, n, e) ]);
8589 }));
8590 }
8591 removeMatchingKeysForTargetId(t, e) {
8592 const n = Fr(t), s = IDBKeyRange.bound([ e ], [ e + 1 ],
8593 /*lowerOpen=*/ !1,
8594 /*upperOpen=*/ !0);
8595 return n.delete(s);
8596 }
8597 getMatchingKeysForTargetId(t, e) {
8598 const n = IDBKeyRange.bound([ e ], [ e + 1 ],
8599 /*lowerOpen=*/ !1,
8600 /*upperOpen=*/ !0), s = Fr(t);
8601 let i = _s();
8602 return s.Z({
8603 range: n,
8604 X: !0
8605 }, ((t, e, n) => {
8606 const s = hi(t[1]), r = new at(s);
8607 i = i.add(r);
8608 })).next((() => i));
8609 }
8610 containsKey(t, e) {
8611 const n = ui(e.path), s = IDBKeyRange.bound([ n ], [ nt(n) ],
8612 /*lowerOpen=*/ !1,
8613 /*upperOpen=*/ !0);
8614 let i = 0;
8615 return Fr(t).Z({
8616 index: "documentTargetsIndex",
8617 X: !0,
8618 range: s
8619 }, (([t, e], n, s) => {
8620 // Having a sentinel row for a document does not count as containing that document;
8621 // For the target cache, containing the document means the document is part of some
8622 // target.
8623 0 !== t && (i++, s.done());
8624 })).next((() => i > 0));
8625 }
8626 /**
8627 * Looks up a TargetData entry by target ID.
8628 *
8629 * @param targetId - The target ID of the TargetData entry to look up.
8630 * @returns The cached TargetData entry, or null if the cache has no entry for
8631 * the target.
8632 */
8633 // PORTING NOTE: Multi-tab only.
8634 se(t, e) {
8635 return Or(t).get(e).next((t => t ? Wi(t) : null));
8636 }
8637}
8638
8639/**
8640 * Helper to get a typed SimpleDbStore for the queries object store.
8641 */ function Or(t) {
8642 return Oi(t, "targets");
8643}
8644
8645/**
8646 * Helper to get a typed SimpleDbStore for the target globals object store.
8647 */ function Mr(t) {
8648 return Oi(t, "targetGlobal");
8649}
8650
8651/**
8652 * Helper to get a typed SimpleDbStore for the document target object store.
8653 */ function Fr(t) {
8654 return Oi(t, "targetDocuments");
8655}
8656
8657/**
8658 * @license
8659 * Copyright 2020 Google LLC
8660 *
8661 * Licensed under the Apache License, Version 2.0 (the "License");
8662 * you may not use this file except in compliance with the License.
8663 * You may obtain a copy of the License at
8664 *
8665 * http://www.apache.org/licenses/LICENSE-2.0
8666 *
8667 * Unless required by applicable law or agreed to in writing, software
8668 * distributed under the License is distributed on an "AS IS" BASIS,
8669 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8670 * See the License for the specific language governing permissions and
8671 * limitations under the License.
8672 */ function $r([t, e], [n, s]) {
8673 const i = tt(t, n);
8674 return 0 === i ? tt(e, s) : i;
8675}
8676
8677/**
8678 * Used to calculate the nth sequence number. Keeps a rolling buffer of the
8679 * lowest n values passed to `addElement`, and finally reports the largest of
8680 * them in `maxValue`.
8681 */ class Br {
8682 constructor(t) {
8683 this.xn = t, this.buffer = new Kt($r), this.Nn = 0;
8684 }
8685 kn() {
8686 return ++this.Nn;
8687 }
8688 On(t) {
8689 const e = [ t, this.kn() ];
8690 if (this.buffer.size < this.xn) this.buffer = this.buffer.add(e); else {
8691 const t = this.buffer.last();
8692 $r(e, t) < 0 && (this.buffer = this.buffer.delete(t).add(e));
8693 }
8694 }
8695 get maxValue() {
8696 // Guaranteed to be non-empty. If we decide we are not collecting any
8697 // sequence numbers, nthSequenceNumber below short-circuits. If we have
8698 // decided that we are collecting n sequence numbers, it's because n is some
8699 // percentage of the existing sequence numbers. That means we should never
8700 // be in a situation where we are collecting sequence numbers but don't
8701 // actually have any.
8702 return this.buffer.last()[0];
8703 }
8704}
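
/*
 * Sketch of the idea behind the buffer above (not the SDK API): keep only the
 * `n` smallest sequence numbers seen so far; the largest value left in the
 * buffer is then the n-th lowest sequence number overall, which the LRU
 * collector uses as its deletion threshold. A simple array-based version:
 */
class NthLowestTrackerSketch {
  constructor(maxElements) {
    this.maxElements = maxElements;
    this.buffer = []; // kept sorted ascending, at most `maxElements` long
  }
  add(sequenceNumber) {
    const index = this.buffer.findIndex((value) => value > sequenceNumber);
    if (index === -1) this.buffer.push(sequenceNumber);
    else this.buffer.splice(index, 0, sequenceNumber);
    if (this.buffer.length > this.maxElements) this.buffer.pop(); // drop the largest
  }
  get maxValue() {
    // The largest retained value is the n-th lowest value seen so far.
    return this.buffer[this.buffer.length - 1];
  }
}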
8705
8706/**
8707 * This class is responsible for the scheduling of LRU garbage collection. It handles checking
8708 * whether or not GC is enabled, as well as which delay to use before the next run.
8709 */ class Lr {
8710 constructor(t, e, n) {
8711 this.garbageCollector = t, this.asyncQueue = e, this.localStore = n, this.Mn = null;
8712 }
8713 start() {
8714 -1 !== this.garbageCollector.params.cacheSizeCollectionThreshold && this.Fn(6e4);
8715 }
8716 stop() {
8717 this.Mn && (this.Mn.cancel(), this.Mn = null);
8718 }
8719 get started() {
8720 return null !== this.Mn;
8721 }
8722 Fn(t) {
8723 x("LruGarbageCollector", `Garbage collection scheduled in ${t}ms`), this.Mn = this.asyncQueue.enqueueAfterDelay("lru_garbage_collection" /* LruGarbageCollection */ , t, (async () => {
8724 this.Mn = null;
8725 try {
8726 await this.localStore.collectGarbage(this.garbageCollector);
8727 } catch (t) {
8728 St(t) ? x("LruGarbageCollector", "Ignoring IndexedDB error during garbage collection: ", t) : await At(t);
8729 }
8730 await this.Fn(3e5);
8731 }));
8732 }
8733}
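
/*
 * Simplified sketch of the scheduling above: the first garbage-collection
 * pass runs roughly one minute after startup and subsequent passes every five
 * minutes (mirroring the 6e4/3e5 delays used above); transient failures do
 * not break the schedule. `runGarbageCollection` is a placeholder callback.
 */
function startGcSchedulerSketch(runGarbageCollection) {
  const INITIAL_DELAY_MS = 60 * 1000;     // first pass ~1 minute after startup
  const REGULAR_DELAY_MS = 5 * 60 * 1000; // subsequent passes every ~5 minutes
  let timer = null;
  let stopped = false;
  const schedule = (delayMs) => {
    timer = setTimeout(async () => {
      try {
        await runGarbageCollection();
      } catch (error) {
        // Transient failures should not break the schedule; log and move on.
        console.warn("Garbage collection pass failed; retrying on the next cycle", error);
      }
      if (!stopped) schedule(REGULAR_DELAY_MS);
    }, delayMs);
  };
  schedule(INITIAL_DELAY_MS);
  return function stop() {
    stopped = true;
    if (timer !== null) clearTimeout(timer);
  };
}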
8734
8735/** Implements the steps for LRU garbage collection. */ class Ur {
8736 constructor(t, e) {
8737 this.$n = t, this.params = e;
8738 }
8739 calculateTargetCount(t, e) {
8740 return this.$n.Bn(t).next((t => Math.floor(e / 100 * t)));
8741 }
8742 nthSequenceNumber(t, e) {
8743 if (0 === e) return Rt.resolve(Mt.at);
8744 const n = new Br(e);
8745 return this.$n.forEachTarget(t, (t => n.On(t.sequenceNumber))).next((() => this.$n.Ln(t, (t => n.On(t))))).next((() => n.maxValue));
8746 }
8747 removeTargets(t, e, n) {
8748 return this.$n.removeTargets(t, e, n);
8749 }
8750 removeOrphanedDocuments(t, e) {
8751 return this.$n.removeOrphanedDocuments(t, e);
8752 }
8753 collect(t, e) {
8754 return -1 === this.params.cacheSizeCollectionThreshold ? (x("LruGarbageCollector", "Garbage collection skipped; disabled"),
8755 Rt.resolve(Rr)) : this.getCacheSize(t).next((n => n < this.params.cacheSizeCollectionThreshold ? (x("LruGarbageCollector", `Garbage collection skipped; Cache size ${n} is lower than threshold ${this.params.cacheSizeCollectionThreshold}`),
8756 Rr) : this.Un(t, e)));
8757 }
8758 getCacheSize(t) {
8759 return this.$n.getCacheSize(t);
8760 }
8761 Un(t, e) {
8762 let n, s, i, r, o, c, a;
8763 const h = Date.now();
8764 return this.calculateTargetCount(t, this.params.percentileToCollect).next((e => (
8765 // Cap at the configured max
8766 e > this.params.maximumSequenceNumbersToCollect ? (x("LruGarbageCollector", `Capping sequence numbers to collect down to the maximum of ${this.params.maximumSequenceNumbersToCollect} from ${e}`),
8767 s = this.params.maximumSequenceNumbersToCollect) : s = e, r = Date.now(), this.nthSequenceNumber(t, s)))).next((s => (n = s,
8768 o = Date.now(), this.removeTargets(t, n, e)))).next((e => (i = e, c = Date.now(),
8769 this.removeOrphanedDocuments(t, n)))).next((t => {
8770 if (a = Date.now(), D() <= u.DEBUG) {
8771 x("LruGarbageCollector", `LRU Garbage Collection\n\tCounted targets in ${r - h}ms\n\tDetermined least recently used ${s} in ` + (o - r) + "ms\n" + `\tRemoved ${i} targets in ` + (c - o) + "ms\n" + `\tRemoved ${t} documents in ` + (a - c) + "ms\n" + `Total Duration: ${a - h}ms`);
8772 }
8773 return Rt.resolve({
8774 didRun: !0,
8775 sequenceNumbersCollected: s,
8776 targetsRemoved: i,
8777 documentsRemoved: t
8778 });
8779 }));
8780 }
8781}
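
/*
 * High-level sketch of the flow in `Un` above (the `delegate` methods and
 * parameter names are hypothetical, not the SDK's API): an LRU pass
 * (1) counts existing sequence numbers, (2) takes the configured percentile
 * of that count, capped at a maximum, (3) finds the sequence number at that
 * rank, and (4) removes inactive targets and then orphaned documents at or
 * below that threshold.
 */
async function runLruPassSketch(delegate, params, activeTargetIds) {
  const totalCount = await delegate.getSequenceNumberCount();
  let toCollect = Math.floor((params.percentileToCollect / 100) * totalCount);
  if (toCollect > params.maximumSequenceNumbersToCollect) {
    toCollect = params.maximumSequenceNumbersToCollect; // cap the work per pass
  }
  if (toCollect === 0) {
    return { didRun: false, targetsRemoved: 0, documentsRemoved: 0 };
  }
  const upperBound = await delegate.nthSequenceNumber(toCollect);
  const targetsRemoved = await delegate.removeTargets(upperBound, activeTargetIds);
  const documentsRemoved = await delegate.removeOrphanedDocuments(upperBound);
  return { didRun: true, targetsRemoved, documentsRemoved };
}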
8782
8783/**
8784 * @license
8785 * Copyright 2020 Google LLC
8786 *
8787 * Licensed under the Apache License, Version 2.0 (the "License");
8788 * you may not use this file except in compliance with the License.
8789 * You may obtain a copy of the License at
8790 *
8791 * http://www.apache.org/licenses/LICENSE-2.0
8792 *
8793 * Unless required by applicable law or agreed to in writing, software
8794 * distributed under the License is distributed on an "AS IS" BASIS,
8795 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8796 * See the License for the specific language governing permissions and
8797 * limitations under the License.
8798 */
8799/** Provides LRU functionality for IndexedDB persistence. */
8800class qr {
8801 constructor(t, e) {
8802 this.db = t, this.garbageCollector = function(t, e) {
8803 return new Ur(t, e);
8804 }(this, e);
8805 }
8806 Bn(t) {
8807 const e = this.qn(t);
8808 return this.db.getTargetCache().getTargetCount(t).next((t => e.next((e => t + e))));
8809 }
8810 qn(t) {
8811 let e = 0;
8812 return this.Ln(t, (t => {
8813 e++;
8814 })).next((() => e));
8815 }
8816 forEachTarget(t, e) {
8817 return this.db.getTargetCache().forEachTarget(t, e);
8818 }
8819 Ln(t, e) {
8820 return this.Kn(t, ((t, n) => e(n)));
8821 }
8822 addReference(t, e, n) {
8823 return Kr(t, n);
8824 }
8825 removeReference(t, e, n) {
8826 return Kr(t, n);
8827 }
8828 removeTargets(t, e, n) {
8829 return this.db.getTargetCache().removeTargets(t, e, n);
8830 }
8831 markPotentiallyOrphaned(t, e) {
8832 return Kr(t, e);
8833 }
8834 /**
8835 * Returns true if anything would prevent this document from being garbage
8836 * collected, given that the document in question is not present in any
8837 * targets and has a sequence number less than or equal to the upper bound for
8838 * the collection run.
8839 */ Gn(t, e) {
8840 return function(t, e) {
8841 let n = !1;
8842 return xr(t).tt((s => Sr(t, s, e).next((t => (t && (n = !0), Rt.resolve(!t)))))).next((() => n));
8843 }(t, e);
8844 }
8845 removeOrphanedDocuments(t, e) {
8846 const n = this.db.getRemoteDocumentCache().newChangeBuffer(), s = [];
8847 let i = 0;
8848 return this.Kn(t, ((r, o) => {
8849 if (o <= e) {
8850 const e = this.Gn(t, r).next((e => {
8851 if (!e)
8852 // Our size accounting requires us to read all documents before
8853 // removing them.
8854 return i++, n.getEntry(t, r).next((() => (n.removeEntry(r, it.min()), Fr(t).delete([ 0, ui(r.path) ]))));
8855 }));
8856 s.push(e);
8857 }
8858 })).next((() => Rt.waitFor(s))).next((() => n.apply(t))).next((() => i));
8859 }
8860 removeTarget(t, e) {
8861 const n = e.withSequenceNumber(t.currentSequenceNumber);
8862 return this.db.getTargetCache().updateTargetData(t, n);
8863 }
8864 updateLimboDocument(t, e) {
8865 return Kr(t, e);
8866 }
8867 /**
8868 * Call provided function for each document in the cache that is 'orphaned'. Orphaned
8869 * means not a part of any target, so the only entry in the target-document index for
8870 * that document will be the sentinel row (targetId 0), which will also have the sequence
8871 * number for the last time the document was accessed.
8872 */ Kn(t, e) {
8873 const n = Fr(t);
8874 let s, i = Mt.at;
8875 return n.Z({
8876 index: "documentTargetsIndex"
8877 }, (([t, n], {path: r, sequenceNumber: o}) => {
8878 0 === t ? (
8879 // if nextToReport is valid, report it, this is a new key so the
8880 // last one must not be a member of any targets.
8881 i !== Mt.at && e(new at(hi(s)), i),
8882 // set nextToReport to be this sequence number. It's the next one we
8883 // might report, if we don't find any targets for this document.
8884 // Note that the sequence number must be defined when the targetId
8885 // is 0.
8886 i = o, s = r) :
8887 // set nextToReport to be invalid, we know we don't need to report
8888 // this one since we found a target for it.
8889 i = Mt.at;
8890 })).next((() => {
8891 // Since we report sequence numbers after getting to the next key, we
8892 // need to check if the last key we iterated over was an orphaned
8893 // document and report it.
8894 i !== Mt.at && e(new at(hi(s)), i);
8895 }));
8896 }
8897 getCacheSize(t) {
8898 return this.db.getRemoteDocumentCache().getSize(t);
8899 }
8900}
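
/*
 * Standalone sketch of the scan in `Kn` above: every cached document has a
 * sentinel row with targetId 0 holding its last-accessed sequence number, and
 * a document is "orphaned" when that sentinel is its only row. Assuming the
 * rows arrive sorted by [documentPath, targetId] (so the sentinel comes first
 * within each document's group), the orphans can be reported in one pass.
 * Names and row shapes are illustrative.
 */
function forEachOrphanedDocumentSketch(sortedRows, report) {
  let pendingPath = null; // sentinel seen, no real target row yet
  let pendingSequenceNumber = 0;
  for (const row of sortedRows) {
    if (row.targetId === 0) {
      // A new document group starts: the previous pending sentinel was never
      // followed by a target row, so that document is orphaned.
      if (pendingPath !== null) report(pendingPath, pendingSequenceNumber);
      pendingPath = row.path;
      pendingSequenceNumber = row.sequenceNumber;
    } else if (row.path === pendingPath) {
      // The document belongs to at least one target; it is not orphaned.
      pendingPath = null;
    }
  }
  if (pendingPath !== null) report(pendingPath, pendingSequenceNumber);
}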
8901
8902function Kr(t, e) {
8903 return Fr(t).put(
8904 /**
8905 * @returns A value suitable for writing a sentinel row in the target-document
8906 * store.
8907 */
8908 function(t, e) {
8909 return {
8910 targetId: 0,
8911 path: ui(t.path),
8912 sequenceNumber: e
8913 };
8914 }(e, t.currentSequenceNumber));
8915}
8916
8917/**
8918 * @license
8919 * Copyright 2017 Google LLC
8920 *
8921 * Licensed under the Apache License, Version 2.0 (the "License");
8922 * you may not use this file except in compliance with the License.
8923 * You may obtain a copy of the License at
8924 *
8925 * http://www.apache.org/licenses/LICENSE-2.0
8926 *
8927 * Unless required by applicable law or agreed to in writing, software
8928 * distributed under the License is distributed on an "AS IS" BASIS,
8929 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
8930 * See the License for the specific language governing permissions and
8931 * limitations under the License.
8932 */
8933/**
8934 * An in-memory buffer of entries to be written to a RemoteDocumentCache.
8935 * It can be used to batch up a set of changes to be written to the cache, but
8936 * additionally supports reading entries back with the `getEntry()` method,
8937 * falling back to the underlying RemoteDocumentCache if no entry is
8938 * buffered.
8939 *
8940 * Entries added to the cache *must* be read first. This is to facilitate
8941 * calculating the size delta of the pending changes.
8942 *
8943 * PORTING NOTE: This class was implemented then removed from other platforms.
8944 * If byte-counting ends up being needed on the other platforms, consider
8945 * porting this class as part of that implementation work.
8946 */ class Gr {
8947 constructor() {
8948 // A mapping of document key to the new cache entry that should be written.
8949 this.changes = new ss((t => t.toString()), ((t, e) => t.isEqual(e))), this.changesApplied = !1;
8950 }
8951 /**
8952 * Buffers a `RemoteDocumentCache.addEntry()` call.
8953 *
8954 * You can only modify documents that have already been retrieved via
8955 * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
8956 */ addEntry(t) {
8957 this.assertNotApplied(), this.changes.set(t.key, t);
8958 }
8959 /**
8960 * Buffers a `RemoteDocumentCache.removeEntry()` call.
8961 *
8962 * You can only remove documents that have already been retrieved via
8963 * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
8964 */ removeEntry(t, e) {
8965 this.assertNotApplied(), this.changes.set(t, Ce.newInvalidDocument(t).setReadTime(e));
8966 }
8967 /**
8968 * Looks up an entry in the cache. The buffered changes will first be checked,
8969 * and if no buffered change applies, this will forward to
8970 * `RemoteDocumentCache.getEntry()`.
8971 *
8972 * @param transaction - The transaction in which to perform any persistence
8973 * operations.
8974 * @param documentKey - The key of the entry to look up.
8975 * @returns The cached document or an invalid document if we have nothing
8976 * cached.
8977 */ getEntry(t, e) {
8978 this.assertNotApplied();
8979 const n = this.changes.get(e);
8980 return void 0 !== n ? Rt.resolve(n) : this.getFromCache(t, e);
8981 }
8982 /**
8983 * Looks up several entries in the cache, forwarding to
8984 * `RemoteDocumentCache.getEntry()`.
8985 *
8986 * @param transaction - The transaction in which to perform any persistence
8987 * operations.
8988 * @param documentKeys - The keys of the entries to look up.
8989 * @returns A map of cached documents, indexed by key. If an entry cannot be
8990 * found, the corresponding key will be mapped to an invalid document.
8991 */ getEntries(t, e) {
8992 return this.getAllFromCache(t, e);
8993 }
8994 /**
8995 * Applies buffered changes to the underlying RemoteDocumentCache, using
8996 * the provided transaction.
8997 */ apply(t) {
8998 return this.assertNotApplied(), this.changesApplied = !0, this.applyChanges(t);
8999 }
9000 /** Helper to assert this.changes is not null */ assertNotApplied() {}
9001}
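
/*
 * Minimal sketch of the buffering pattern above (the `underlyingCache`
 * interface is hypothetical): a change buffer collects pending writes in
 * memory, serves reads from that buffer when possible, and flushes everything
 * at once via `apply()`. Reading entries before overwriting them is what lets
 * the IndexedDB subclass below compute a size delta for the pending changes.
 */
class ChangeBufferSketch {
  constructor(underlyingCache) {
    this.underlyingCache = underlyingCache;
    this.changes = new Map(); // key (string) -> pending document value
    this.applied = false;
  }
  addEntry(key, document) {
    this.assertNotApplied();
    this.changes.set(key, document);
  }
  removeEntry(key) {
    this.assertNotApplied();
    this.changes.set(key, null); // null marks a pending delete
  }
  async getEntry(transaction, key) {
    this.assertNotApplied();
    // Pending changes win over whatever is currently persisted.
    if (this.changes.has(key)) return this.changes.get(key);
    return this.underlyingCache.getEntry(transaction, key);
  }
  async apply(transaction) {
    this.assertNotApplied();
    this.applied = true;
    return this.underlyingCache.applyChanges(transaction, this.changes);
  }
  assertNotApplied() {
    if (this.applied) throw new Error("Changes have already been applied");
  }
}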
9002
9003/**
9004 * @license
9005 * Copyright 2017 Google LLC
9006 *
9007 * Licensed under the Apache License, Version 2.0 (the "License");
9008 * you may not use this file except in compliance with the License.
9009 * You may obtain a copy of the License at
9010 *
9011 * http://www.apache.org/licenses/LICENSE-2.0
9012 *
9013 * Unless required by applicable law or agreed to in writing, software
9014 * distributed under the License is distributed on an "AS IS" BASIS,
9015 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9016 * See the License for the specific language governing permissions and
9017 * limitations under the License.
9018 */
9019/**
9020 * The RemoteDocumentCache for IndexedDb. To construct, invoke
9021 * `newIndexedDbRemoteDocumentCache()`.
9022 */ class Qr {
9023 constructor(t) {
9024 this.It = t;
9025 }
9026 setIndexManager(t) {
9027 this.indexManager = t;
9028 }
9029 /**
9030 * Adds the supplied entries to the cache.
9031 *
9032 * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
9033 * returned by `newChangeBuffer()` to ensure proper accounting of metadata.
9034 */ addEntry(t, e, n) {
9035 return Hr(t).put(n);
9036 }
9037 /**
9038 * Removes a document from the cache.
9039 *
9040 * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
9041 * returned by `newChangeBuffer()` to ensure proper accounting of metadata.
9042 */ removeEntry(t, e, n) {
9043 return Hr(t).delete(
9044 /**
9045 * Returns a key that can be used for document lookups via the primary key of
9046 * the DbRemoteDocument object store.
9047 */
9048 function(t, e) {
9049 const n = t.path.toArray();
9050 return [
9051 /* prefix path */ n.slice(0, n.length - 2),
9052 /* collection id */ n[n.length - 2], Ki(e),
9053 /* document id */ n[n.length - 1] ];
9054 }
9055 /**
9056 * Returns a key that can be used for document lookups on the
9057 * `DbRemoteDocumentDocumentCollectionGroupIndex` index.
9058 */ (e, n));
9059 }
9060 /**
9061 * Updates the current cache size.
9062 *
9063 * Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
9064 * cache's metadata.
9065 */ updateMetadata(t, e) {
9066 return this.getMetadata(t).next((n => (n.byteSize += e, this.Qn(t, n))));
9067 }
9068 getEntry(t, e) {
9069 let n = Ce.newInvalidDocument(e);
9070 return Hr(t).Z({
9071 index: "documentKeyIndex",
9072 range: IDBKeyRange.only(Jr(e))
9073 }, ((t, s) => {
9074 n = this.jn(e, s);
9075 })).next((() => n));
9076 }
9077 /**
9078 * Looks up an entry in the cache.
9079 *
9080 * @param documentKey - The key of the entry to look up.
9081 * @returns The cached document entry and its size.
9082 */ Wn(t, e) {
9083 let n = {
9084 size: 0,
9085 document: Ce.newInvalidDocument(e)
9086 };
9087 return Hr(t).Z({
9088 index: "documentKeyIndex",
9089 range: IDBKeyRange.only(Jr(e))
9090 }, ((t, s) => {
9091 n = {
9092 document: this.jn(e, s),
9093 size: vr(s)
9094 };
9095 })).next((() => n));
9096 }
9097 getEntries(t, e) {
9098 let n = rs();
9099 return this.zn(t, e, ((t, e) => {
9100 const s = this.jn(t, e);
9101 n = n.insert(t, s);
9102 })).next((() => n));
9103 }
9104 /**
9105 * Looks up several entries in the cache.
9106 *
9107 * @param documentKeys - The set of keys of the entries to look up.
9108 * @returns A map of documents indexed by key and a map of sizes indexed by
9109 * key (zero if the document does not exist).
9110 */ Hn(t, e) {
9111 let n = rs(), s = new Lt(at.comparator);
9112 return this.zn(t, e, ((t, e) => {
9113 const i = this.jn(t, e);
9114 n = n.insert(t, i), s = s.insert(t, vr(e));
9115 })).next((() => ({
9116 documents: n,
9117 Jn: s
9118 })));
9119 }
9120 zn(t, e, n) {
9121 if (e.isEmpty()) return Rt.resolve();
9122 let s = new Kt(Xr);
9123 e.forEach((t => s = s.add(t)));
9124 const i = IDBKeyRange.bound(Jr(s.first()), Jr(s.last())), r = s.getIterator();
9125 let o = r.getNext();
9126 return Hr(t).Z({
9127 index: "documentKeyIndex",
9128 range: i
9129 }, ((t, e, s) => {
9130 const i = at.fromSegments([ ...e.prefixPath, e.collectionGroup, e.documentId ]);
9131 // Go through keys not found in cache.
9132 for (;o && Xr(o, i) < 0; ) n(o, null), o = r.getNext();
9133 o && o.isEqual(i) && (
9134 // Key found in cache.
9135 n(o, e), o = r.hasNext() ? r.getNext() : null),
9136 // Skip to the next key (if there is one).
9137 o ? s.j(Jr(o)) : s.done();
9138 })).next((() => {
9139 // The rest of the keys are not in the cache. One case where `iterate`
9140 // above won't go through them is when the cache is empty.
9141 for (;o; ) n(o, null), o = r.hasNext() ? r.getNext() : null;
9142 }));
9143 }
9144 getAllFromCollection(t, e, n) {
9145 const s = [ e.popLast().toArray(), e.lastSegment(), Ki(n.readTime), n.documentKey.path.isEmpty() ? "" : n.documentKey.path.lastSegment() ], i = [ e.popLast().toArray(), e.lastSegment(), [ Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER ], "" ];
9146 return Hr(t).W(IDBKeyRange.bound(s, i, !0)).next((t => {
9147 let e = rs();
9148 for (const n of t) {
9149 const t = this.jn(at.fromSegments(n.prefixPath.concat(n.collectionGroup, n.documentId)), n);
9150 e = e.insert(t.key, t);
9151 }
9152 return e;
9153 }));
9154 }
9155 getAllFromCollectionGroup(t, e, n, s) {
9156 let i = rs();
9157 const r = Yr(e, n), o = Yr(e, pt.max());
9158 return Hr(t).Z({
9159 index: "collectionGroupIndex",
9160 range: IDBKeyRange.bound(r, o, !0)
9161 }, ((t, e, n) => {
9162 const r = this.jn(at.fromSegments(e.prefixPath.concat(e.collectionGroup, e.documentId)), e);
9163 i = i.insert(r.key, r), i.size === s && n.done();
9164 })).next((() => i));
9165 }
9166 newChangeBuffer(t) {
9167 return new Wr(this, !!t && t.trackRemovals);
9168 }
9169 getSize(t) {
9170 return this.getMetadata(t).next((t => t.byteSize));
9171 }
9172 getMetadata(t) {
9173 return zr(t).get("remoteDocumentGlobalKey").next((t => (F(!!t), t)));
9174 }
9175 Qn(t, e) {
9176 return zr(t).put("remoteDocumentGlobalKey", e);
9177 }
9178 /**
9179 * Decodes `dbRemoteDoc` and returns the document (or an invalid document if
9180 * the document corresponds to the format used for sentinel deletes).
9181 */ jn(t, e) {
9182 if (e) {
9183 const t = Ui(this.It, e);
9184 // Whether the document is a sentinel removal and should only be used in the
9185 // `getNewDocumentChanges()`
9186 if (!(t.isNoDocument() && t.version.isEqual(it.min()))) return t;
9187 }
9188 return Ce.newInvalidDocument(t);
9189 }
9190}
9191
9192/** Creates a new IndexedDbRemoteDocumentCache. */ function jr(t) {
9193 return new Qr(t);
9194}
9195
9196/**
9197 * Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
9198 *
9199 * Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
9200 * delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
9201 * when we apply the changes.
9202 */ class Wr extends Gr {
9203 /**
9204 * @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.
9205 * @param trackRemovals - Whether to create sentinel deletes that can be tracked by
9206 * `getNewDocumentChanges()`.
9207 */
9208 constructor(t, e) {
9209 super(), this.Yn = t, this.trackRemovals = e,
9210 // A map of document sizes and read times prior to applying the changes in
9211 // this buffer.
9212 this.Xn = new ss((t => t.toString()), ((t, e) => t.isEqual(e)));
9213 }
9214 applyChanges(t) {
9215 const e = [];
9216 let n = 0, s = new Kt(((t, e) => tt(t.canonicalString(), e.canonicalString())));
9217 return this.changes.forEach(((i, r) => {
9218 const o = this.Xn.get(i);
9219 if (e.push(this.Yn.removeEntry(t, i, o.readTime)), r.isValidDocument()) {
9220 const u = qi(this.Yn.It, r);
9221 s = s.add(i.path.popLast());
9222 const c = vr(u);
9223 n += c - o.size, e.push(this.Yn.addEntry(t, i, u));
9224 } else if (n -= o.size, this.trackRemovals) {
9225 // In order to track removals, we store a "sentinel delete" in the
9226 // RemoteDocumentCache. This entry is represented by a NoDocument
9227 // with a version of 0 and ignored by `maybeDecodeDocument()` but
9228 // preserved in `getNewDocumentChanges()`.
9229 const n = qi(this.Yn.It, r.convertToNoDocument(it.min()));
9230 e.push(this.Yn.addEntry(t, i, n));
9231 }
9232 })), s.forEach((n => {
9233 e.push(this.Yn.indexManager.addToCollectionParentIndex(t, n));
9234 })), e.push(this.Yn.updateMetadata(t, n)), Rt.waitFor(e);
9235 }
9236 getFromCache(t, e) {
9237 // Record the size of everything we load from the cache so we can compute a delta later.
9238 return this.Yn.Wn(t, e).next((t => (this.Xn.set(e, {
9239 size: t.size,
9240 readTime: t.document.readTime
9241 }), t.document)));
9242 }
9243 getAllFromCache(t, e) {
9244 // Record the size of everything we load from the cache so we can compute
9245 // a delta later.
9246 return this.Yn.Hn(t, e).next((({documents: t, Jn: e}) => (
9247 // Note: `getAllFromCache` returns two maps instead of a single map from
9248 // keys to `DocumentSizeEntry`s. This is to allow returning the
9249 // `MutableDocumentMap` directly, without a conversion.
9250 e.forEach(((e, n) => {
9251 this.Xn.set(e, {
9252 size: n,
9253 readTime: t.get(e).readTime
9254 });
9255 })), t)));
9256 }
9257}
9258
9259function zr(t) {
9260 return Oi(t, "remoteDocumentGlobal");
9261}
9262
9263/**
9264 * Helper to get a typed SimpleDbStore for the remoteDocuments object store.
9265 */ function Hr(t) {
9266 return Oi(t, "remoteDocumentsV14");
9267}
9268
9269/**
9270 * Returns a key that can be used for document lookups on the
9271 * `DbRemoteDocumentDocumentKeyIndex` index.
9272 */ function Jr(t) {
9273 const e = t.path.toArray();
9274 return [
9275 /* prefix path */ e.slice(0, e.length - 2),
9276 /* collection id */ e[e.length - 2],
9277 /* document id */ e[e.length - 1] ];
9278}
9279
9280function Yr(t, e) {
9281 const n = e.documentKey.path.toArray();
9282 return [
9283 /* collection id */ t, Ki(e.readTime),
9284 /* prefix path */ n.slice(0, n.length - 2),
9285 /* document id */ n.length > 0 ? n[n.length - 1] : "" ];
9286}
9287
9288/**
9289 * Comparator that compares document keys according to the primary key sorting
9290 * used by the `DbRemoteDocumentDocument` store (by prefix path, collection id
9291 * and then document ID).
9292 *
9293 * Visible for testing.
9294 */ function Xr(t, e) {
9295 const n = t.path.toArray(), s = e.path.toArray();
9296 // The ordering is based on https://chromium.googlesource.com/chromium/blink/+/fe5c21fef94dae71c1c3344775b8d8a7f7e6d9ec/Source/modules/indexeddb/IDBKey.cpp#74
9297 let i = 0;
9298 for (let t = 0; t < n.length - 2 && t < s.length - 2; ++t) if (i = tt(n[t], s[t]),
9299 i) return i;
9300 return i = tt(n.length, s.length), i || (i = tt(n[n.length - 2], s[s.length - 2]),
9301 i || tt(n[n.length - 1], s[s.length - 1]));
9302}
9303
9304/**
9305 * @license
9306 * Copyright 2017 Google LLC
9307 *
9308 * Licensed under the Apache License, Version 2.0 (the "License");
9309 * you may not use this file except in compliance with the License.
9310 * You may obtain a copy of the License at
9311 *
9312 * http://www.apache.org/licenses/LICENSE-2.0
9313 *
9314 * Unless required by applicable law or agreed to in writing, software
9315 * distributed under the License is distributed on an "AS IS" BASIS,
9316 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9317 * See the License for the specific language governing permissions and
9318 * limitations under the License.
9319 */
9320/**
9321 * Schema Version for the Web client:
9322 * 1. Initial version including Mutation Queue, Query Cache, and Remote
9323 * Document Cache
9324 * 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
9325 * longer required because migration 3 unconditionally clears it.
9326 * 3. Dropped and re-created Query Cache to deal with cache corruption related
9327 * to limbo resolution. Addresses
9328 * https://github.com/firebase/firebase-ios-sdk/issues/1548
9329 * 4. Multi-Tab Support.
9330 * 5. Removal of held write acks.
9331 * 6. Create document global for tracking document cache size.
9332 * 7. Ensure every cached document has a sentinel row with a sequence number.
9333 * 8. Add collection-parent index for Collection Group queries.
9334 * 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
9335 * an auto-incrementing ID. This is required for Index-Free queries.
9336 * 10. Rewrite the canonical IDs to the explicit Protobuf-based format.
9337 * 11. Add bundles and named_queries for bundle support.
9338 * 12. Add document overlays.
9339 * 13. Rewrite the keys of the remote document cache to allow for efficient
9340 * document lookup via `getAll()`.
9341 * 14. Add overlays.
9342 * 15. Add indexing support.
9343 */
9344/**
9345 * @license
9346 * Copyright 2022 Google LLC
9347 *
9348 * Licensed under the Apache License, Version 2.0 (the "License");
9349 * you may not use this file except in compliance with the License.
9350 * You may obtain a copy of the License at
9351 *
9352 * http://www.apache.org/licenses/LICENSE-2.0
9353 *
9354 * Unless required by applicable law or agreed to in writing, software
9355 * distributed under the License is distributed on an "AS IS" BASIS,
9356 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9357 * See the License for the specific language governing permissions and
9358 * limitations under the License.
9359 */
9360/**
9361 * Represents a local view (overlay) of a document, and the fields that are
9362 * locally mutated.
9363 */
9364class Zr {
9365 constructor(t,
9366 /**
9367 * The fields that are locally mutated by patch mutations. If the overlayed
9368 * document is from set or delete mutations, this returns null.
9369 */
9370 e) {
9371 this.overlayedDocument = t, this.mutatedFields = e;
9372 }
9373}
9374
9375/**
9376 * @license
9377 * Copyright 2017 Google LLC
9378 *
9379 * Licensed under the Apache License, Version 2.0 (the "License");
9380 * you may not use this file except in compliance with the License.
9381 * You may obtain a copy of the License at
9382 *
9383 * http://www.apache.org/licenses/LICENSE-2.0
9384 *
9385 * Unless required by applicable law or agreed to in writing, software
9386 * distributed under the License is distributed on an "AS IS" BASIS,
9387 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9388 * See the License for the specific language governing permissions and
9389 * limitations under the License.
9390 */
9391/**
9392 * A readonly view of the local state of all documents we're tracking (i.e. we
9393 * have a cached version in remoteDocumentCache or local mutations for the
9394 * document). The view is computed by applying the mutations in the
9395 * MutationQueue to the RemoteDocumentCache.
9396 */ class to {
9397 constructor(t, e, n, s) {
9398 this.remoteDocumentCache = t, this.mutationQueue = e, this.documentOverlayCache = n,
9399 this.indexManager = s;
9400 }
9401 /**
9402 * Get the local view of the document identified by `key`.
9403 *
9404 * @returns Local view of the document or null if we don't have any cached
9405 * state for it.
9406 */ getDocument(t, e) {
9407 let n = null;
9408 return this.documentOverlayCache.getOverlay(t, e).next((s => (n = s, this.getBaseDocument(t, e, n)))).next((t => (null !== n && qn(n.mutation, t, jt.empty(), st.now()),
9409 t)));
9410 }
9411 /**
9412 * Gets the local view of the documents identified by `keys`.
9413 *
9414 * If we don't have cached state for a document in `keys`, a NoDocument will
9415 * be stored for that key in the resulting set.
9416 */ getDocuments(t, e) {
9417 return this.remoteDocumentCache.getEntries(t, e).next((e => this.getLocalViewOfDocuments(t, e, _s()).next((() => e))));
9418 }
9419 /**
9420 * Similar to `getDocuments`, but creates the local view from the given
9421 * `baseDocs` without retrieving documents from the local store.
9422 *
9423 * @param transaction - The transaction this operation is scoped to.
9424 * @param docs - The documents to apply local mutations to get the local views.
9425 * @param existenceStateChanged - The set of document keys whose existence state
9426 * has changed. This is useful to determine whether some documents' overlays need
9427 * to be recalculated.
9428 */ getLocalViewOfDocuments(t, e, n = _s()) {
9429 const s = as();
9430 return this.populateOverlays(t, s, e).next((() => this.computeViews(t, e, s, n).next((t => {
9431 let e = us();
9432 return t.forEach(((t, n) => {
9433 e = e.insert(t, n.overlayedDocument);
9434 })), e;
9435 }))));
9436 }
9437 /**
9438 * Gets the overlayed documents for the given document map, which will include
9439 * the local view of those documents and a `FieldMask` indicating which fields
9440 * are mutated locally, or `null` if the overlay is a Set or Delete mutation.
9441 */ getOverlayedDocuments(t, e) {
9442 const n = as();
9443 return this.populateOverlays(t, n, e).next((() => this.computeViews(t, e, n, _s())));
9444 }
9445 /**
9446 * Fetches the overlays for {@code docs} and adds them to the provided overlay map
9447 * if the map does not already contain an entry for the given document key.
9448 */ populateOverlays(t, e, n) {
9449 const s = [];
9450 return n.forEach((t => {
9451 e.has(t) || s.push(t);
9452 })), this.documentOverlayCache.getOverlays(t, s).next((t => {
9453 t.forEach(((t, n) => {
9454 e.set(t, n);
9455 }));
9456 }));
9457 }
9458 /**
9459 * Computes the local view for the given documents.
9460 *
9461 * @param docs - The documents to compute views for. It also has the base
9462 * version of the documents.
9463 * @param overlays - The overlays that need to be applied to the given base
9464 * version of the documents.
9465 * @param existenceStateChanged - A set of documents whose existence states
9466 * might have changed. This is used to determine if we need to re-calculate
9467 * overlays from mutation queues.
9468 * @return A map representing the local documents view.
9469 */ computeViews(t, e, n, s) {
9470 let i = rs();
9471 const r = ls(), o = ls();
9472 return e.forEach(((t, e) => {
9473 const o = n.get(e.key);
9474 // Recalculate an overlay if the document's existence state changed due to
9475 // a remote event *and* the overlay is a PatchMutation. This is because
9476 // document existence state can change if some patch mutation's
9477 // preconditions are met.
9478 // NOTE: we recalculate when `overlay` is undefined as well, because there
9479 // might be a patch mutation whose precondition does not match before the
9480 // change (hence overlay is undefined), but would now match.
9481 s.has(e.key) && (void 0 === o || o.mutation instanceof jn) ? i = i.insert(e.key, e) : void 0 !== o && (r.set(e.key, o.mutation.getFieldMask()),
9482 qn(o.mutation, e, o.mutation.getFieldMask(), st.now()));
9483 })), this.recalculateAndSaveOverlays(t, i).next((t => (t.forEach(((t, e) => r.set(t, e))),
9484 e.forEach(((t, e) => {
9485 var n;
9486 return o.set(t, new Zr(e, null !== (n = r.get(t)) && void 0 !== n ? n : null));
9487 })), o)));
9488 }
9489 recalculateAndSaveOverlays(t, e) {
9490 const n = ls();
9491 // A reverse lookup map from batch id to the documents within that batch.
9492 let s = new Lt(((t, e) => t - e)), i = _s();
9493 return this.mutationQueue.getAllMutationBatchesAffectingDocumentKeys(t, e).next((t => {
9494 for (const i of t) i.keys().forEach((t => {
9495 const r = e.get(t);
9496 if (null === r) return;
9497 let o = n.get(t) || jt.empty();
9498 o = i.applyToLocalView(r, o), n.set(t, o);
9499 const u = (s.get(i.batchId) || _s()).add(t);
9500 s = s.insert(i.batchId, u);
9501 }));
9502 })).next((() => {
9503 const r = [], o = s.getReverseIterator();
9504 // Iterate in descending order of batch IDs, and skip documents that are
9505 // already saved.
9506 for (;o.hasNext(); ) {
9507 const s = o.getNext(), u = s.key, c = s.value, a = hs();
9508 c.forEach((t => {
9509 if (!i.has(t)) {
9510 const s = Ln(e.get(t), n.get(t));
9511 null !== s && a.set(t, s), i = i.add(t);
9512 }
9513 })), r.push(this.documentOverlayCache.saveOverlays(t, u, a));
9514 }
9515 return Rt.waitFor(r);
9516 })).next((() => n));
9517 }
9518 /**
9519 * Recalculates overlays by reading the documents from remote document cache
9520 * first, and saves them after they are calculated.
9521 */ recalculateAndSaveOverlaysForDocumentKeys(t, e) {
9522 return this.remoteDocumentCache.getEntries(t, e).next((e => this.recalculateAndSaveOverlays(t, e)));
9523 }
9524 /**
9525 * Performs a query against the local view of all documents.
9526 *
9527 * @param transaction - The persistence transaction.
9528 * @param query - The query to match documents against.
9529 * @param offset - Read time and key to start scanning by (exclusive).
9530 */ getDocumentsMatchingQuery(t, e, n) {
9531 /**
9532 * Returns whether the query matches a single document by path (rather than a
9533 * collection).
9534 */
9535 return function(t) {
9536 return at.isDocumentKey(t.path) && null === t.collectionGroup && 0 === t.filters.length;
9537 }(e) ? this.getDocumentsMatchingDocumentQuery(t, e.path) : cn(e) ? this.getDocumentsMatchingCollectionGroupQuery(t, e, n) : this.getDocumentsMatchingCollectionQuery(t, e, n);
9538 }
9539 /**
9540 * Given a collection group, returns the next documents that follow the provided offset, along
9541 * with an updated batch ID.
9542 *
9543 * <p>The documents returned by this method are ordered by remote version from the provided
9544 * offset. If there are no more remote documents after the provided offset, documents with
9545 * mutations in order of batch id from the offset are returned. Since all documents in a batch are
9546 * returned together, the total number of documents returned can exceed {@code count}.
9547 *
9548 * @param transaction
9549 * @param collectionGroup The collection group for the documents.
9550 * @param offset The offset to index into.
9551 * @param count The number of documents to return
9552 * @return A LocalWriteResult with the documents that follow the provided offset and the last processed batch id.
9553 */ getNextDocuments(t, e, n, s) {
9554 return this.remoteDocumentCache.getAllFromCollectionGroup(t, e, n, s).next((i => {
9555 const r = s - i.size > 0 ? this.documentOverlayCache.getOverlaysForCollectionGroup(t, e, n.largestBatchId, s - i.size) : Rt.resolve(as());
9556 // The callsite will use the largest batch ID together with the latest read time to create
9557 // a new index offset. Since we only process batch IDs if all remote documents have been read,
9558 // no overlay will increase the overall read time. This is why we only need to special case
9559 // the batch id.
9560 let o = -1, u = i;
9561 return r.next((e => Rt.forEach(e, ((e, n) => (o < n.largestBatchId && (o = n.largestBatchId),
9562 i.get(e) ? Rt.resolve() : this.getBaseDocument(t, e, n).next((t => {
9563 u = u.insert(e, t);
9564 }))))).next((() => this.populateOverlays(t, e, i))).next((() => this.computeViews(t, u, e, _s()))).next((t => ({
9565 batchId: o,
9566 changes: cs(t)
9567 })))));
9568 }));
9569 }
9570 getDocumentsMatchingDocumentQuery(t, e) {
9571 // Just do a simple document lookup.
9572 return this.getDocument(t, new at(e)).next((t => {
9573 let e = us();
9574 return t.isFoundDocument() && (e = e.insert(t.key, t)), e;
9575 }));
9576 }
9577 getDocumentsMatchingCollectionGroupQuery(t, e, n) {
9578 const s = e.collectionGroup;
9579 let i = us();
9580 return this.indexManager.getCollectionParents(t, s).next((r => Rt.forEach(r, (r => {
9581 const o = function(t, e) {
9582 return new en(e,
9583 /*collectionGroup=*/ null, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, t.startAt, t.endAt);
9584 }(e, r.child(s));
9585 return this.getDocumentsMatchingCollectionQuery(t, o, n).next((t => {
9586 t.forEach(((t, e) => {
9587 i = i.insert(t, e);
9588 }));
9589 }));
9590 })).next((() => i))));
9591 }
9592 getDocumentsMatchingCollectionQuery(t, e, n) {
9593 // Query the remote documents and overlay mutations.
9594 let s;
9595 return this.remoteDocumentCache.getAllFromCollection(t, e.path, n).next((i => (s = i,
9596 this.documentOverlayCache.getOverlaysForCollection(t, e.path, n.largestBatchId)))).next((t => {
9597 // As documents might match the query because of their overlay we need to
9598 // include documents for all overlays in the initial document set.
9599 t.forEach(((t, e) => {
9600 const n = e.getKey();
9601 null === s.get(n) && (s = s.insert(n, Ce.newInvalidDocument(n)));
9602 }));
9603 // Apply the overlays and match against the query.
9604 let n = us();
9605 return s.forEach(((s, i) => {
9606 const r = t.get(s);
9607 void 0 !== r && qn(r.mutation, i, jt.empty(), st.now()),
9608 // Finally, insert the documents that still match the query
9609 wn(e, i) && (n = n.insert(s, i));
9610 })), n;
9611 }));
9612 }
9613 /** Returns a base document that can be used to apply `overlay`. */ getBaseDocument(t, e, n) {
9614 return null === n || 1 /* Patch */ === n.mutation.type ? this.remoteDocumentCache.getEntry(t, e) : Rt.resolve(Ce.newInvalidDocument(e));
9615 }
9616}
9617
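// --- Illustrative sketch (not part of the SDK bundle) ---
// The core idea of the local documents view above, reduced to plain Maps:
// the local view of a document is its cached remote version with any pending
// local overlay laid on top. `computeLocalViewExample`, `baseDocs` and
// `overlays` are hypothetical names, and the shallow merge is a simplifying
// assumption (the real code applies typed mutations with field masks).
function computeLocalViewExample(baseDocs, overlays) {
    const localView = new Map();
    for (const [docId, baseData] of baseDocs) {
        const overlay = overlays.get(docId);
        // No pending local change: the cached server version is the local view.
        // Otherwise, lay the locally mutated fields over the cached version.
        localView.set(docId, overlay ? Object.assign({}, baseData, overlay) : baseData);
    }
    return localView;
}

// Example:
// computeLocalViewExample(
//     new Map([["users/a", { name: "Ada", city: "NY" }]]),
//     new Map([["users/a", { city: "SF" }]]));
// -> Map { "users/a" => { name: "Ada", city: "SF" } }
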
9618/**
9619 * @license
9620 * Copyright 2020 Google LLC
9621 *
9622 * Licensed under the Apache License, Version 2.0 (the "License");
9623 * you may not use this file except in compliance with the License.
9624 * You may obtain a copy of the License at
9625 *
9626 * http://www.apache.org/licenses/LICENSE-2.0
9627 *
9628 * Unless required by applicable law or agreed to in writing, software
9629 * distributed under the License is distributed on an "AS IS" BASIS,
9630 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9631 * See the License for the specific language governing permissions and
9632 * limitations under the License.
9633 */ class eo {
9634 constructor(t) {
9635 this.It = t, this.Zn = new Map, this.ts = new Map;
9636 }
9637 getBundleMetadata(t, e) {
9638 return Rt.resolve(this.Zn.get(e));
9639 }
9640 saveBundleMetadata(t, e) {
9641 /** Decodes a BundleMetadata proto into a BundleMetadata object. */
9642 var n;
9643 return this.Zn.set(e.id, {
9644 id: (n = e).id,
9645 version: n.version,
9646 createTime: xs(n.createTime)
9647 }), Rt.resolve();
9648 }
9649 getNamedQuery(t, e) {
9650 return Rt.resolve(this.ts.get(e));
9651 }
9652 saveNamedQuery(t, e) {
9653 return this.ts.set(e.name, function(t) {
9654 return {
9655 name: t.name,
9656 query: Hi(t.bundledQuery),
9657 readTime: xs(t.readTime)
9658 };
9659 }(e)), Rt.resolve();
9660 }
9661}
9662
9663/**
9664 * @license
9665 * Copyright 2022 Google LLC
9666 *
9667 * Licensed under the Apache License, Version 2.0 (the "License");
9668 * you may not use this file except in compliance with the License.
9669 * You may obtain a copy of the License at
9670 *
9671 * http://www.apache.org/licenses/LICENSE-2.0
9672 *
9673 * Unless required by applicable law or agreed to in writing, software
9674 * distributed under the License is distributed on an "AS IS" BASIS,
9675 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9676 * See the License for the specific language governing permissions and
9677 * limitations under the License.
9678 */
9679/**
9680 * An in-memory implementation of DocumentOverlayCache.
9681 */ class no {
9682 constructor() {
9683 // A map sorted by DocumentKey, whose value is a pair of the largest batch id
9684 // for the overlay and the overlay itself.
9685 this.overlays = new Lt(at.comparator), this.es = new Map;
9686 }
9687 getOverlay(t, e) {
9688 return Rt.resolve(this.overlays.get(e));
9689 }
9690 getOverlays(t, e) {
9691 const n = as();
9692 return Rt.forEach(e, (e => this.getOverlay(t, e).next((t => {
9693 null !== t && n.set(e, t);
9694 })))).next((() => n));
9695 }
9696 saveOverlays(t, e, n) {
9697 return n.forEach(((n, s) => {
9698 this.ue(t, e, s);
9699 })), Rt.resolve();
9700 }
9701 removeOverlaysForBatchId(t, e, n) {
9702 const s = this.es.get(n);
9703 return void 0 !== s && (s.forEach((t => this.overlays = this.overlays.remove(t))),
9704 this.es.delete(n)), Rt.resolve();
9705 }
9706 getOverlaysForCollection(t, e, n) {
9707 const s = as(), i = e.length + 1, r = new at(e.child("")), o = this.overlays.getIteratorFrom(r);
9708 for (;o.hasNext(); ) {
9709 const t = o.getNext().value, r = t.getKey();
9710 if (!e.isPrefixOf(r.path)) break;
9711 // Documents from sub-collections
9712 r.path.length === i && (t.largestBatchId > n && s.set(t.getKey(), t));
9713 }
9714 return Rt.resolve(s);
9715 }
9716 getOverlaysForCollectionGroup(t, e, n, s) {
9717 let i = new Lt(((t, e) => t - e));
9718 const r = this.overlays.getIterator();
9719 for (;r.hasNext(); ) {
9720 const t = r.getNext().value;
9721 if (t.getKey().getCollectionGroup() === e && t.largestBatchId > n) {
9722 let e = i.get(t.largestBatchId);
9723 null === e && (e = as(), i = i.insert(t.largestBatchId, e)), e.set(t.getKey(), t);
9724 }
9725 }
9726 const o = as(), u = i.getIterator();
9727 for (;u.hasNext(); ) {
9728 if (u.getNext().value.forEach(((t, e) => o.set(t, e))), o.size() >= s) break;
9729 }
9730 return Rt.resolve(o);
9731 }
9732 ue(t, e, n) {
9733 // Remove the association of the overlay to its batch id.
9734 const s = this.overlays.get(n.key);
9735 if (null !== s) {
9736 const t = this.es.get(s.largestBatchId).delete(n.key);
9737 this.es.set(s.largestBatchId, t);
9738 }
9739 this.overlays = this.overlays.insert(n.key, new $i(e, n));
9740 // Create the association of this overlay to the given largestBatchId.
9741 let i = this.es.get(e);
9742 void 0 === i && (i = _s(), this.es.set(e, i)), this.es.set(e, i.add(n.key));
9743 }
9744}
9745
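// --- Illustrative sketch (not part of the SDK bundle) ---
// The two-index bookkeeping performed by the in-memory overlay cache above,
// with plain Map/Set standing in for the SDK's sorted maps. The class name
// and method names below are hypothetical.
class ExampleOverlayCacheSketch {
    constructor() {
        this.overlaysByDoc = new Map(); // docKey -> { largestBatchId, mutation }
        this.docsByBatchId = new Map(); // batchId -> Set of docKeys
    }
    saveOverlay(batchId, docKey, mutation) {
        // Drop any older batch association for this document first.
        const previous = this.overlaysByDoc.get(docKey);
        if (previous && this.docsByBatchId.has(previous.largestBatchId)) {
            this.docsByBatchId.get(previous.largestBatchId).delete(docKey);
        }
        this.overlaysByDoc.set(docKey, { largestBatchId: batchId, mutation });
        if (!this.docsByBatchId.has(batchId)) this.docsByBatchId.set(batchId, new Set());
        this.docsByBatchId.get(batchId).add(docKey);
    }
    removeOverlaysForBatchId(batchId) {
        // The reverse index makes removal proportional to the batch size
        // rather than to the total number of overlays.
        const docKeys = this.docsByBatchId.get(batchId) || new Set();
        docKeys.forEach((docKey => this.overlaysByDoc.delete(docKey)));
        this.docsByBatchId.delete(batchId);
    }
}
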
9746/**
9747 * @license
9748 * Copyright 2017 Google LLC
9749 *
9750 * Licensed under the Apache License, Version 2.0 (the "License");
9751 * you may not use this file except in compliance with the License.
9752 * You may obtain a copy of the License at
9753 *
9754 * http://www.apache.org/licenses/LICENSE-2.0
9755 *
9756 * Unless required by applicable law or agreed to in writing, software
9757 * distributed under the License is distributed on an "AS IS" BASIS,
9758 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9759 * See the License for the specific language governing permissions and
9760 * limitations under the License.
9761 */
9762/**
9763 * A collection of references to a document from some kind of numbered entity
9764 * (either a target ID or batch ID). As references are added to or removed from
9765 * the set, corresponding events are emitted to a registered garbage collector.
9766 *
9767 * Each reference is represented by a DocumentReference object. Each of them
9768 * contains enough information to uniquely identify the reference. They are all
9769 * stored primarily in a set sorted by key. A document is considered garbage if
9770 * there are no references in that set (this can be efficiently checked thanks to
9771 * sorting by key).
9772 *
9773 * ReferenceSet also keeps a secondary set that contains references sorted by
9774 * IDs. This one is used to efficiently implement removal of all references by
9775 * some target ID.
9776 */ class so {
9777 constructor() {
9778 // A set of outstanding references to a document sorted by key.
9779 this.ns = new Kt(io.ss),
9780 // A set of outstanding references to a document sorted by target id.
9781 this.rs = new Kt(io.os);
9782 }
9783 /** Returns true if the reference set contains no references. */ isEmpty() {
9784 return this.ns.isEmpty();
9785 }
9786 /** Adds a reference to the given document key for the given ID. */ addReference(t, e) {
9787 const n = new io(t, e);
9788 this.ns = this.ns.add(n), this.rs = this.rs.add(n);
9789 }
9790 /** Add references to the given document keys for the given ID. */ us(t, e) {
9791 t.forEach((t => this.addReference(t, e)));
9792 }
9793 /**
9794 * Removes a reference to the given document key for the given
9795 * ID.
9796 */ removeReference(t, e) {
9797 this.cs(new io(t, e));
9798 }
9799 hs(t, e) {
9800 t.forEach((t => this.removeReference(t, e)));
9801 }
9802 /**
9803 * Clears all references with a given ID. Calls removeRef() for each key
9804 * removed.
9805 */ ls(t) {
9806 const e = new at(new ot([])), n = new io(e, t), s = new io(e, t + 1), i = [];
9807 return this.rs.forEachInRange([ n, s ], (t => {
9808 this.cs(t), i.push(t.key);
9809 })), i;
9810 }
9811 fs() {
9812 this.ns.forEach((t => this.cs(t)));
9813 }
9814 cs(t) {
9815 this.ns = this.ns.delete(t), this.rs = this.rs.delete(t);
9816 }
9817 ds(t) {
9818 const e = new at(new ot([])), n = new io(e, t), s = new io(e, t + 1);
9819 let i = _s();
9820 return this.rs.forEachInRange([ n, s ], (t => {
9821 i = i.add(t.key);
9822 })), i;
9823 }
9824 containsKey(t) {
9825 const e = new io(t, 0), n = this.ns.firstAfterOrEqual(e);
9826 return null !== n && t.isEqual(n.key);
9827 }
9828}
9829
9830class io {
9831 constructor(t, e) {
9832 this.key = t, this._s = e;
9833 }
9834 /** Compare by key then by ID */ static ss(t, e) {
9835 return at.comparator(t.key, e.key) || tt(t._s, e._s);
9836 }
9837 /** Compare by ID then by key */ static os(t, e) {
9838 return tt(t._s, e._s) || at.comparator(t.key, e.key);
9839 }
9840}
9841
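// --- Illustrative sketch (not part of the SDK bundle) ---
// Why the reference set above keeps the same (key, id) pairs in two sort
// orders. Plain arrays and comparators stand in for the SDK's immutable
// sorted sets; all names below are hypothetical.
function exampleReferenceSetOrderings() {
    const byKeyThenId = (a, b) => a.key.localeCompare(b.key) || a.id - b.id;
    const byIdThenKey = (a, b) => a.id - b.id || a.key.localeCompare(b.key);
    const refs = [
        { key: "rooms/a", id: 1 },
        { key: "rooms/b", id: 1 },
        { key: "rooms/a", id: 2 }
    ];
    // Ordered by key first: all references to one document are adjacent, so
    // "is rooms/a still referenced by anything?" is a single range probe.
    const keyOrdered = refs.slice().sort(byKeyThenId);
    // Ordered by id first: all references owned by one target/batch id are
    // adjacent, so clearing id 1 touches one contiguous range.
    const withoutTarget1 = refs.slice().sort(byIdThenKey).filter((r => 1 !== r.id));
    return { keyOrdered, withoutTarget1 };
}
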
9842/**
9843 * @license
9844 * Copyright 2017 Google LLC
9845 *
9846 * Licensed under the Apache License, Version 2.0 (the "License");
9847 * you may not use this file except in compliance with the License.
9848 * You may obtain a copy of the License at
9849 *
9850 * http://www.apache.org/licenses/LICENSE-2.0
9851 *
9852 * Unless required by applicable law or agreed to in writing, software
9853 * distributed under the License is distributed on an "AS IS" BASIS,
9854 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9855 * See the License for the specific language governing permissions and
9856 * limitations under the License.
9857 */ class ro {
9858 constructor(t, e) {
9859 this.indexManager = t, this.referenceDelegate = e,
9860 /**
9861 * The set of all mutations that have been sent but not yet applied to
9862 * the backend.
9863 */
9864 this.mutationQueue = [],
9865 /** Next value to use when assigning sequential IDs to each mutation batch. */
9866 this.ws = 1,
9867 /** An ordered mapping between documents and the mutations batch IDs. */
9868 this.gs = new Kt(io.ss);
9869 }
9870 checkEmpty(t) {
9871 return Rt.resolve(0 === this.mutationQueue.length);
9872 }
9873 addMutationBatch(t, e, n, s) {
9874 const i = this.ws;
9875 this.ws++, this.mutationQueue.length > 0 && this.mutationQueue[this.mutationQueue.length - 1];
9876 const r = new Mi(i, e, n, s);
9877 this.mutationQueue.push(r);
9878 // Track references by document key and index collection parents.
9879 for (const e of s) this.gs = this.gs.add(new io(e.key, i)), this.indexManager.addToCollectionParentIndex(t, e.key.path.popLast());
9880 return Rt.resolve(r);
9881 }
9882 lookupMutationBatch(t, e) {
9883 return Rt.resolve(this.ys(e));
9884 }
9885 getNextMutationBatchAfterBatchId(t, e) {
9886 const n = e + 1, s = this.ps(n), i = s < 0 ? 0 : s;
9887 // The requested batchId may still be out of range so normalize it to the
9888 // start of the queue.
9889 return Rt.resolve(this.mutationQueue.length > i ? this.mutationQueue[i] : null);
9890 }
9891 getHighestUnacknowledgedBatchId() {
9892 return Rt.resolve(0 === this.mutationQueue.length ? -1 : this.ws - 1);
9893 }
9894 getAllMutationBatches(t) {
9895 return Rt.resolve(this.mutationQueue.slice());
9896 }
9897 getAllMutationBatchesAffectingDocumentKey(t, e) {
9898 const n = new io(e, 0), s = new io(e, Number.POSITIVE_INFINITY), i = [];
9899 return this.gs.forEachInRange([ n, s ], (t => {
9900 const e = this.ys(t._s);
9901 i.push(e);
9902 })), Rt.resolve(i);
9903 }
9904 getAllMutationBatchesAffectingDocumentKeys(t, e) {
9905 let n = new Kt(tt);
9906 return e.forEach((t => {
9907 const e = new io(t, 0), s = new io(t, Number.POSITIVE_INFINITY);
9908 this.gs.forEachInRange([ e, s ], (t => {
9909 n = n.add(t._s);
9910 }));
9911 })), Rt.resolve(this.Is(n));
9912 }
9913 getAllMutationBatchesAffectingQuery(t, e) {
9914 // Use the query path as a prefix for testing if a document matches the
9915 // query.
9916 const n = e.path, s = n.length + 1;
9917 // Construct a document reference for actually scanning the index. Unlike
9918 // the prefix, the document key in this reference must have an even number of
9919 // segments. The empty segment can be used as a suffix of the query path
9920 // because it precedes all other segments in an ordered traversal.
9921 let i = n;
9922 at.isDocumentKey(i) || (i = i.child(""));
9923 const r = new io(new at(i), 0);
9924 // Find unique batchIDs referenced by all documents potentially matching the
9925 // query.
9926 let o = new Kt(tt);
9927 return this.gs.forEachWhile((t => {
9928 const e = t.key.path;
9929 return !!n.isPrefixOf(e) && (
9930 // Rows with document keys more than one segment longer than the query
9931 // path can't be matches. For example, a query on 'rooms' can't match
9932 // the document /rooms/abc/messages/xyx.
9933 // TODO(mcg): we'll need a different scanner when we implement
9934 // ancestor queries.
9935 e.length === s && (o = o.add(t._s)), !0);
9936 }), r), Rt.resolve(this.Is(o));
9937 }
9938 Is(t) {
9939 // Construct an array of matching batches, sorted by batchID to ensure that
9940 // multiple mutations affecting the same document key are applied in order.
9941 const e = [];
9942 return t.forEach((t => {
9943 const n = this.ys(t);
9944 null !== n && e.push(n);
9945 })), e;
9946 }
9947 removeMutationBatch(t, e) {
9948 F(0 === this.Ts(e.batchId, "removed")), this.mutationQueue.shift();
9949 let n = this.gs;
9950 return Rt.forEach(e.mutations, (s => {
9951 const i = new io(s.key, e.batchId);
9952 return n = n.delete(i), this.referenceDelegate.markPotentiallyOrphaned(t, s.key);
9953 })).next((() => {
9954 this.gs = n;
9955 }));
9956 }
9957 An(t) {
9958 // No-op since the memory mutation queue does not maintain a separate cache.
9959 }
9960 containsKey(t, e) {
9961 const n = new io(e, 0), s = this.gs.firstAfterOrEqual(n);
9962 return Rt.resolve(e.isEqual(s && s.key));
9963 }
9964 performConsistencyCheck(t) {
9965 return this.mutationQueue.length, Rt.resolve();
9966 }
9967 /**
9968 * Finds the index of the given batchId in the mutation queue and asserts that
9969 * the resulting index is within the bounds of the queue.
9970 *
9971 * @param batchId - The batchId to search for
9972 * @param action - A description of what the caller is doing, phrased in passive
9973 * form (e.g. "acknowledged" in a routine that acknowledges batches).
9974 */ Ts(t, e) {
9975 return this.ps(t);
9976 }
9977 /**
9978 * Finds the index of the given batchId in the mutation queue. This operation
9979 * is O(1).
9980 *
9981 * @returns The computed index of the batch with the given batchId, based on
9982 * the state of the queue. Note this index can be negative if the requested
9983 * batchId has already been removed from the queue or past the end of the
9984 * queue if the batchId is larger than the last added batch.
9985 */ ps(t) {
9986 if (0 === this.mutationQueue.length)
9987 // As an index this is past the end of the queue
9988 return 0;
9989 // Examine the front of the queue to figure out the difference between the
9990 // batchId and indexes in the array. Note that since the queue is ordered
9991 // by batchId, if the first batch has a larger batchId then the requested
9992 // batchId doesn't exist in the queue.
9993 return t - this.mutationQueue[0].batchId;
9994 }
9995 /**
9996 * A version of lookupMutationBatch that doesn't return a promise; this makes
9997 * other functions that use this code easier to read and more efficient.
9998 */ ys(t) {
9999 const e = this.ps(t);
10000 if (e < 0 || e >= this.mutationQueue.length) return null;
10001 return this.mutationQueue[e];
10002 }
10003}
10004
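// --- Illustrative sketch (not part of the SDK bundle) ---
// The O(1) index arithmetic the memory mutation queue above relies on:
// batch ids are assigned sequentially and the queue only ever loses batches
// from the front, so a batch's position is its id minus the id at the head
// of the queue. `exampleIndexOfBatchId` is a hypothetical helper name.
function exampleIndexOfBatchId(queue, batchId) {
    if (0 === queue.length) return 0; // past the end of an empty queue
    return batchId - queue[0].batchId; // negative if the batch was already removed
}

// Example: with [{ batchId: 4 }, { batchId: 5 }, { batchId: 6 }],
// exampleIndexOfBatchId(queue, 6) === 2 and exampleIndexOfBatchId(queue, 3) === -1.
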
10005/**
10006 * @license
10007 * Copyright 2017 Google LLC
10008 *
10009 * Licensed under the Apache License, Version 2.0 (the "License");
10010 * you may not use this file except in compliance with the License.
10011 * You may obtain a copy of the License at
10012 *
10013 * http://www.apache.org/licenses/LICENSE-2.0
10014 *
10015 * Unless required by applicable law or agreed to in writing, software
10016 * distributed under the License is distributed on an "AS IS" BASIS,
10017 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10018 * See the License for the specific language governing permissions and
10019 * limitations under the License.
10020 */
10021/**
10022 * The memory-only implementation of RemoteDocumentCache. To construct, invoke
10023 * `newMemoryRemoteDocumentCache()`.
10024 */
10025class oo {
10026 /**
10027 * @param sizer - Used to assess the size of a document. For eager GC, this is
10028 * expected to just return 0 to avoid unnecessarily doing the work of
10029 * calculating the size.
10030 */
10031 constructor(t) {
10032 this.Es = t,
10033 /** Underlying cache of documents and their read times. */
10034 this.docs = new Lt(at.comparator),
10035 /** Size of all cached documents. */
10036 this.size = 0;
10037 }
10038 setIndexManager(t) {
10039 this.indexManager = t;
10040 }
10041 /**
10042 * Adds the supplied entry to the cache and updates the cache size as appropriate.
10043 *
10044 * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
10045 * returned by `newChangeBuffer()`.
10046 */ addEntry(t, e) {
10047 const n = e.key, s = this.docs.get(n), i = s ? s.size : 0, r = this.Es(e);
10048 return this.docs = this.docs.insert(n, {
10049 document: e.mutableCopy(),
10050 size: r
10051 }), this.size += r - i, this.indexManager.addToCollectionParentIndex(t, n.path.popLast());
10052 }
10053 /**
10054 * Removes the specified entry from the cache and updates the cache size as appropriate.
10055 *
10056 * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
10057 * returned by `newChangeBuffer()`.
10058 */ removeEntry(t) {
10059 const e = this.docs.get(t);
10060 e && (this.docs = this.docs.remove(t), this.size -= e.size);
10061 }
10062 getEntry(t, e) {
10063 const n = this.docs.get(e);
10064 return Rt.resolve(n ? n.document.mutableCopy() : Ce.newInvalidDocument(e));
10065 }
10066 getEntries(t, e) {
10067 let n = rs();
10068 return e.forEach((t => {
10069 const e = this.docs.get(t);
10070 n = n.insert(t, e ? e.document.mutableCopy() : Ce.newInvalidDocument(t));
10071 })), Rt.resolve(n);
10072 }
10073 getAllFromCollection(t, e, n) {
10074 let s = rs();
10075 // Documents are ordered by key, so we can use a prefix scan to narrow down
10076 // the documents we need to match the query against.
10077 const i = new at(e.child("")), r = this.docs.getIteratorFrom(i);
10078 for (;r.hasNext(); ) {
10079 const {key: t, value: {document: i}} = r.getNext();
10080 if (!e.isPrefixOf(t.path)) break;
10081 t.path.length > e.length + 1 || (It(yt(i), n) <= 0 || (s = s.insert(i.key, i.mutableCopy())));
10082 }
10083 return Rt.resolve(s);
10084 }
10085 getAllFromCollectionGroup(t, e, n, s) {
10086 // This method should only be called from the IndexBackfiller if persistence
10087 // is enabled.
10088 M();
10089 }
10090 As(t, e) {
10091 return Rt.forEach(this.docs, (t => e(t)));
10092 }
10093 newChangeBuffer(t) {
10094 // `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps
10095 // a separate changelog and does not need special handling for removals.
10096 return new uo(this);
10097 }
10098 getSize(t) {
10099 return Rt.resolve(this.size);
10100 }
10101}
10102
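// --- Illustrative sketch (not part of the SDK bundle) ---
// The key-ordered prefix scan that `getAllFromCollection` above performs,
// using a plain array of [path, doc] pairs sorted by path in place of the
// SDK's sorted map. `exampleScanCollection` and the sample paths are
// hypothetical.
function exampleScanCollection(sortedEntries, collectionPath) {
    const prefix = collectionPath + "/";
    const results = [];
    for (const [path, doc] of sortedEntries) {
        if (path < prefix) continue; // still before the collection
        if (!path.startsWith(prefix)) break; // sorted order: we are past the collection
        // Skip documents that live in sub-collections (extra path segments).
        if (path.slice(prefix.length).indexOf("/") >= 0) continue;
        results.push(doc);
    }
    return results;
}

// Example:
// exampleScanCollection(
//     [["rooms/a", { id: "a" }], ["rooms/a/msgs/1", {}], ["users/x", {}]],
//     "rooms");
// -> [{ id: "a" }]
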
10103/**
10104 * Creates a new memory-only RemoteDocumentCache.
10105 *
10106 * @param sizer - Used to assess the size of a document. For eager GC, this is
10107 * expected to just return 0 to avoid unnecessarily doing the work of
10108 * calculating the size.
10109 */
10110/**
10111 * Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.
10112 */
10113class uo extends Gr {
10114 constructor(t) {
10115 super(), this.Yn = t;
10116 }
10117 applyChanges(t) {
10118 const e = [];
10119 return this.changes.forEach(((n, s) => {
10120 s.isValidDocument() ? e.push(this.Yn.addEntry(t, s)) : this.Yn.removeEntry(n);
10121 })), Rt.waitFor(e);
10122 }
10123 getFromCache(t, e) {
10124 return this.Yn.getEntry(t, e);
10125 }
10126 getAllFromCache(t, e) {
10127 return this.Yn.getEntries(t, e);
10128 }
10129}
10130
10131/**
10132 * @license
10133 * Copyright 2017 Google LLC
10134 *
10135 * Licensed under the Apache License, Version 2.0 (the "License");
10136 * you may not use this file except in compliance with the License.
10137 * You may obtain a copy of the License at
10138 *
10139 * http://www.apache.org/licenses/LICENSE-2.0
10140 *
10141 * Unless required by applicable law or agreed to in writing, software
10142 * distributed under the License is distributed on an "AS IS" BASIS,
10143 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10144 * See the License for the specific language governing permissions and
10145 * limitations under the License.
10146 */ class co {
10147 constructor(t) {
10148 this.persistence = t,
10149 /**
10150 * Maps a target to the data about that target
10151 */
10152 this.Rs = new ss((t => ke(t)), Me),
10153 /** The last received snapshot version. */
10154 this.lastRemoteSnapshotVersion = it.min(),
10155 /** The highest numbered target ID encountered. */
10156 this.highestTargetId = 0,
10157 /** The highest sequence number encountered. */
10158 this.bs = 0,
10159 /**
10160 * An ordered bidirectional mapping between documents and the remote target
10161 * IDs.
10162 */
10163 this.Ps = new so, this.targetCount = 0, this.vs = Nr.Pn();
10164 }
10165 forEachTarget(t, e) {
10166 return this.Rs.forEach(((t, n) => e(n))), Rt.resolve();
10167 }
10168 getLastRemoteSnapshotVersion(t) {
10169 return Rt.resolve(this.lastRemoteSnapshotVersion);
10170 }
10171 getHighestSequenceNumber(t) {
10172 return Rt.resolve(this.bs);
10173 }
10174 allocateTargetId(t) {
10175 return this.highestTargetId = this.vs.next(), Rt.resolve(this.highestTargetId);
10176 }
10177 setTargetsMetadata(t, e, n) {
10178 return n && (this.lastRemoteSnapshotVersion = n), e > this.bs && (this.bs = e),
10179 Rt.resolve();
10180 }
10181 Dn(t) {
10182 this.Rs.set(t.target, t);
10183 const e = t.targetId;
10184 e > this.highestTargetId && (this.vs = new Nr(e), this.highestTargetId = e), t.sequenceNumber > this.bs && (this.bs = t.sequenceNumber);
10185 }
10186 addTargetData(t, e) {
10187 return this.Dn(e), this.targetCount += 1, Rt.resolve();
10188 }
10189 updateTargetData(t, e) {
10190 return this.Dn(e), Rt.resolve();
10191 }
10192 removeTargetData(t, e) {
10193 return this.Rs.delete(e.target), this.Ps.ls(e.targetId), this.targetCount -= 1,
10194 Rt.resolve();
10195 }
10196 removeTargets(t, e, n) {
10197 let s = 0;
10198 const i = [];
10199 return this.Rs.forEach(((r, o) => {
10200 o.sequenceNumber <= e && null === n.get(o.targetId) && (this.Rs.delete(r), i.push(this.removeMatchingKeysForTargetId(t, o.targetId)),
10201 s++);
10202 })), Rt.waitFor(i).next((() => s));
10203 }
10204 getTargetCount(t) {
10205 return Rt.resolve(this.targetCount);
10206 }
10207 getTargetData(t, e) {
10208 const n = this.Rs.get(e) || null;
10209 return Rt.resolve(n);
10210 }
10211 addMatchingKeys(t, e, n) {
10212 return this.Ps.us(e, n), Rt.resolve();
10213 }
10214 removeMatchingKeys(t, e, n) {
10215 this.Ps.hs(e, n);
10216 const s = this.persistence.referenceDelegate, i = [];
10217 return s && e.forEach((e => {
10218 i.push(s.markPotentiallyOrphaned(t, e));
10219 })), Rt.waitFor(i);
10220 }
10221 removeMatchingKeysForTargetId(t, e) {
10222 return this.Ps.ls(e), Rt.resolve();
10223 }
10224 getMatchingKeysForTargetId(t, e) {
10225 const n = this.Ps.ds(e);
10226 return Rt.resolve(n);
10227 }
10228 containsKey(t, e) {
10229 return Rt.resolve(this.Ps.containsKey(e));
10230 }
10231}
10232
10233/**
10234 * @license
10235 * Copyright 2017 Google LLC
10236 *
10237 * Licensed under the Apache License, Version 2.0 (the "License");
10238 * you may not use this file except in compliance with the License.
10239 * You may obtain a copy of the License at
10240 *
10241 * http://www.apache.org/licenses/LICENSE-2.0
10242 *
10243 * Unless required by applicable law or agreed to in writing, software
10244 * distributed under the License is distributed on an "AS IS" BASIS,
10245 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10246 * See the License for the specific language governing permissions and
10247 * limitations under the License.
10248 */
10249/**
10250 * A memory-backed instance of Persistence. Data is stored only in RAM and
10251 * not persisted across sessions.
10252 */
10253class ao {
10254 /**
10255 * The constructor accepts a factory for creating a reference delegate. This
10256 * allows both the delegate and this instance to have strong references to
10257 * each other without having nullable fields that would then need to be
10258 * checked or asserted on every access.
10259 */
10260 constructor(t, e) {
10261 this.Vs = {}, this.overlays = {}, this.Ss = new Mt(0), this.Ds = !1, this.Ds = !0,
10262 this.referenceDelegate = t(this), this.Cs = new co(this);
10263 this.indexManager = new wr, this.remoteDocumentCache = function(t) {
10264 return new oo(t);
10265 }((t => this.referenceDelegate.xs(t))), this.It = new Li(e), this.Ns = new eo(this.It);
10266 }
10267 start() {
10268 return Promise.resolve();
10269 }
10270 shutdown() {
10271 // No durable state to ensure is closed on shutdown.
10272 return this.Ds = !1, Promise.resolve();
10273 }
10274 get started() {
10275 return this.Ds;
10276 }
10277 setDatabaseDeletedListener() {
10278 // No op.
10279 }
10280 setNetworkEnabled() {
10281 // No op.
10282 }
10283 getIndexManager(t) {
10284 // We do not currently support indices for memory persistence, so we can
10285 // return the same shared instance of the memory index manager.
10286 return this.indexManager;
10287 }
10288 getDocumentOverlayCache(t) {
10289 let e = this.overlays[t.toKey()];
10290 return e || (e = new no, this.overlays[t.toKey()] = e), e;
10291 }
10292 getMutationQueue(t, e) {
10293 let n = this.Vs[t.toKey()];
10294 return n || (n = new ro(e, this.referenceDelegate), this.Vs[t.toKey()] = n), n;
10295 }
10296 getTargetCache() {
10297 return this.Cs;
10298 }
10299 getRemoteDocumentCache() {
10300 return this.remoteDocumentCache;
10301 }
10302 getBundleCache() {
10303 return this.Ns;
10304 }
10305 runTransaction(t, e, n) {
10306 x("MemoryPersistence", "Starting transaction:", t);
10307 const s = new ho(this.Ss.next());
10308 return this.referenceDelegate.ks(), n(s).next((t => this.referenceDelegate.Os(s).next((() => t)))).toPromise().then((t => (s.raiseOnCommittedEvent(),
10309 t)));
10310 }
10311 Ms(t, e) {
10312 return Rt.or(Object.values(this.Vs).map((n => () => n.containsKey(t, e))));
10313 }
10314}
10315
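// --- Hedged usage sketch (not part of the SDK bundle) ---
// In the public Web API, this memory-backed persistence is what a Firestore
// instance uses when IndexedDB persistence is never enabled: cached documents
// live only for the lifetime of the page. The config values below are
// placeholders.
//
// import { initializeApp } from "firebase/app";
// import { getFirestore, doc, getDoc } from "firebase/firestore";
//
// const app = initializeApp({ projectId: "your-project-id" /* placeholder */ });
// const db = getFirestore(app); // memory persistence by default
// const snap = await getDoc(doc(db, "users", "alice"));
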
10316/**
10317 * Memory persistence is not actually transactional, but future implementations
10318 * may have transaction-scoped state.
10319 */ class ho extends Et {
10320 constructor(t) {
10321 super(), this.currentSequenceNumber = t;
10322 }
10323}
10324
10325class lo {
10326 constructor(t) {
10327 this.persistence = t,
10328 /** Tracks all documents that are active in Query views. */
10329 this.Fs = new so,
10330 /** The list of documents that are potentially GCed after each transaction. */
10331 this.$s = null;
10332 }
10333 static Bs(t) {
10334 return new lo(t);
10335 }
10336 get Ls() {
10337 if (this.$s) return this.$s;
10338 throw M();
10339 }
10340 addReference(t, e, n) {
10341 return this.Fs.addReference(n, e), this.Ls.delete(n.toString()), Rt.resolve();
10342 }
10343 removeReference(t, e, n) {
10344 return this.Fs.removeReference(n, e), this.Ls.add(n.toString()), Rt.resolve();
10345 }
10346 markPotentiallyOrphaned(t, e) {
10347 return this.Ls.add(e.toString()), Rt.resolve();
10348 }
10349 removeTarget(t, e) {
10350 this.Fs.ls(e.targetId).forEach((t => this.Ls.add(t.toString())));
10351 const n = this.persistence.getTargetCache();
10352 return n.getMatchingKeysForTargetId(t, e.targetId).next((t => {
10353 t.forEach((t => this.Ls.add(t.toString())));
10354 })).next((() => n.removeTargetData(t, e)));
10355 }
10356 ks() {
10357 this.$s = new Set;
10358 }
10359 Os(t) {
10360 // Remove newly orphaned documents.
10361 const e = this.persistence.getRemoteDocumentCache().newChangeBuffer();
10362 return Rt.forEach(this.Ls, (n => {
10363 const s = at.fromPath(n);
10364 return this.Us(t, s).next((t => {
10365 t || e.removeEntry(s, it.min());
10366 }));
10367 })).next((() => (this.$s = null, e.apply(t))));
10368 }
10369 updateLimboDocument(t, e) {
10370 return this.Us(t, e).next((t => {
10371 t ? this.Ls.delete(e.toString()) : this.Ls.add(e.toString());
10372 }));
10373 }
10374 xs(t) {
10375 // For eager GC, we don't care about the document size, there are no size thresholds.
10376 return 0;
10377 }
10378 Us(t, e) {
10379 return Rt.or([ () => Rt.resolve(this.Fs.containsKey(e)), () => this.persistence.getTargetCache().containsKey(t, e), () => this.persistence.Ms(t, e) ]);
10380 }
10381}
10382
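// --- Illustrative sketch (not part of the SDK bundle) ---
// The eager garbage-collection pattern implemented by the delegate above:
// keys that may have been orphaned during a transaction are re-checked at
// commit time and evicted if nothing references them anymore.
// `exampleCollectOrphanedDocs` and its parameters are hypothetical;
// `isReferenced` stands for the union of checks the real delegate performs
// (active views, the target cache, and every mutation queue).
function exampleCollectOrphanedDocs(cache, maybeOrphaned, isReferenced) {
    maybeOrphaned.forEach((docKey => {
        // Eager GC: unreferenced documents are dropped as soon as the
        // transaction that orphaned them commits.
        isReferenced(docKey) || cache.delete(docKey);
    }));
}

// Example:
// exampleCollectOrphanedDocs(new Map([["users/a", {}]]), new Set(["users/a"]), () => false);
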
10383/**
10384 * @license
10385 * Copyright 2020 Google LLC
10386 *
10387 * Licensed under the Apache License, Version 2.0 (the "License");
10388 * you may not use this file except in compliance with the License.
10389 * You may obtain a copy of the License at
10390 *
10391 * http://www.apache.org/licenses/LICENSE-2.0
10392 *
10393 * Unless required by applicable law or agreed to in writing, software
10394 * distributed under the License is distributed on an "AS IS" BASIS,
10395 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10396 * See the License for the specific language governing permissions and
10397 * limitations under the License.
10398 */
10399/** Performs database creation and schema upgrades. */ class fo {
10400 constructor(t) {
10401 this.It = t;
10402 }
10403 /**
10404 * Performs database creation and schema upgrades.
10405 *
10406 * Note that in production, this method is only ever used to upgrade the schema
10407 * to SCHEMA_VERSION. Different values of toVersion are only used for testing
10408 * and local feature development.
10409 */ $(t, e, n, s) {
10410 const i = new bt("createOrUpgrade", e);
10411 n < 1 && s >= 1 && (function(t) {
10412 t.createObjectStore("owner");
10413 }(t), function(t) {
10414 t.createObjectStore("mutationQueues", {
10415 keyPath: "userId"
10416 });
10417 t.createObjectStore("mutations", {
10418 keyPath: "batchId",
10419 autoIncrement: !0
10420 }).createIndex("userMutationsIndex", li, {
10421 unique: !0
10422 }), t.createObjectStore("documentMutations");
10423 }
10424 /**
10425 * Upgrade function to migrate the 'mutations' store from V1 to V3. Loads
10426 * and rewrites all data.
10427 */ (t), _o(t), function(t) {
10428 t.createObjectStore("remoteDocuments");
10429 }(t));
10430 // Migration 2 (populating the targetGlobal object) is no longer needed since
10431 // migration 3 unconditionally clears it.
10432 let r = Rt.resolve();
10433 return n < 3 && s >= 3 && (
10434 // Brand new clients don't need to drop and recreate--only clients that
10435 // potentially have corrupt data.
10436 0 !== n && (!function(t) {
10437 t.deleteObjectStore("targetDocuments"), t.deleteObjectStore("targets"), t.deleteObjectStore("targetGlobal");
10438 }(t), _o(t)), r = r.next((() =>
10439 /**
10440 * Creates the target global singleton row.
10441 *
10442 * @param txn - The version upgrade transaction for indexeddb
10443 */
10444 function(t) {
10445 const e = t.store("targetGlobal"), n = {
10446 highestTargetId: 0,
10447 highestListenSequenceNumber: 0,
10448 lastRemoteSnapshotVersion: it.min().toTimestamp(),
10449 targetCount: 0
10450 };
10451 return e.put("targetGlobalKey", n);
10452 }(i)))), n < 4 && s >= 4 && (0 !== n && (
10453 // Schema version 3 uses auto-generated keys to generate globally unique
10454 // mutation batch IDs (this was previously ensured internally by the
10455 // client). To migrate to the new schema, we have to read all mutations
10456 // and write them back out. We preserve the existing batch IDs to guarantee
10457 // consistency with other object stores. Any further mutation batch IDs will
10458 // be auto-generated.
10459 r = r.next((() => function(t, e) {
10460 return e.store("mutations").W().next((n => {
10461 t.deleteObjectStore("mutations");
10462 t.createObjectStore("mutations", {
10463 keyPath: "batchId",
10464 autoIncrement: !0
10465 }).createIndex("userMutationsIndex", li, {
10466 unique: !0
10467 });
10468 const s = e.store("mutations"), i = n.map((t => s.put(t)));
10469 return Rt.waitFor(i);
10470 }));
10471 }(t, i)))), r = r.next((() => {
10472 !function(t) {
10473 t.createObjectStore("clientMetadata", {
10474 keyPath: "clientId"
10475 });
10476 }(t);
10477 }))), n < 5 && s >= 5 && (r = r.next((() => this.qs(i)))), n < 6 && s >= 6 && (r = r.next((() => (function(t) {
10478 t.createObjectStore("remoteDocumentGlobal");
10479 }(t), this.Ks(i))))), n < 7 && s >= 7 && (r = r.next((() => this.Gs(i)))), n < 8 && s >= 8 && (r = r.next((() => this.Qs(t, i)))),
10480 n < 9 && s >= 9 && (r = r.next((() => {
10481 // Multi-Tab used to manage its own changelog, but this has been moved
10482 // to the DbRemoteDocument object store itself. Since the previous change
10483 // log only contained transient data, we can drop its object store.
10484 !function(t) {
10485 t.objectStoreNames.contains("remoteDocumentChanges") && t.deleteObjectStore("remoteDocumentChanges");
10486 }(t);
10487 // Note: Schema version 9 used to create a read time index for the
10488 // RemoteDocumentCache. This is now done with schema version 13.
10489 }))), n < 10 && s >= 10 && (r = r.next((() => this.js(i)))), n < 11 && s >= 11 && (r = r.next((() => {
10490 !function(t) {
10491 t.createObjectStore("bundles", {
10492 keyPath: "bundleId"
10493 });
10494 }(t), function(t) {
10495 t.createObjectStore("namedQueries", {
10496 keyPath: "name"
10497 });
10498 }(t);
10499 }))), n < 12 && s >= 12 && (r = r.next((() => {
10500 !function(t) {
10501 const e = t.createObjectStore("documentOverlays", {
10502 keyPath: Pi
10503 });
10504 e.createIndex("collectionPathOverlayIndex", vi, {
10505 unique: !1
10506 }), e.createIndex("collectionGroupOverlayIndex", Vi, {
10507 unique: !1
10508 });
10509 }(t);
10510 }))), n < 13 && s >= 13 && (r = r.next((() => function(t) {
10511 const e = t.createObjectStore("remoteDocumentsV14", {
10512 keyPath: wi
10513 });
10514 e.createIndex("documentKeyIndex", mi), e.createIndex("collectionGroupIndex", gi);
10515 }(t))).next((() => this.Ws(t, i))).next((() => t.deleteObjectStore("remoteDocuments")))),
10516 n < 14 && s >= 14 && (r = r.next((() => this.zs(t, i)))), n < 15 && s >= 15 && (r = r.next((() => function(t) {
10517 t.createObjectStore("indexConfiguration", {
10518 keyPath: "indexId",
10519 autoIncrement: !0
10520 }).createIndex("collectionGroupIndex", "collectionGroup", {
10521 unique: !1
10522 });
10523 t.createObjectStore("indexState", {
10524 keyPath: Ei
10525 }).createIndex("sequenceNumberIndex", Ai, {
10526 unique: !1
10527 });
10528 t.createObjectStore("indexEntries", {
10529 keyPath: Ri
10530 }).createIndex("documentKeyIndex", bi, {
10531 unique: !1
10532 });
10533 }(t)))), r;
10534 }
10535 Ks(t) {
10536 let e = 0;
10537 return t.store("remoteDocuments").Z(((t, n) => {
10538 e += vr(n);
10539 })).next((() => {
10540 const n = {
10541 byteSize: e
10542 };
10543 return t.store("remoteDocumentGlobal").put("remoteDocumentGlobalKey", n);
10544 }));
10545 }
10546 qs(t) {
10547 const e = t.store("mutationQueues"), n = t.store("mutations");
10548 return e.W().next((e => Rt.forEach(e, (e => {
10549 const s = IDBKeyRange.bound([ e.userId, -1 ], [ e.userId, e.lastAcknowledgedBatchId ]);
10550 return n.W("userMutationsIndex", s).next((n => Rt.forEach(n, (n => {
10551 F(n.userId === e.userId);
10552 const s = ji(this.It, n);
10553 return Pr(t, e.userId, s).next((() => {}));
10554 }))));
10555 }))));
10556 }
10557 /**
10558 * Ensures that every document in the remote document cache has a corresponding sentinel row
10559 * with a sequence number. Missing rows are given the most recently used sequence number.
10560 */ Gs(t) {
10561 const e = t.store("targetDocuments"), n = t.store("remoteDocuments");
10562 return t.store("targetGlobal").get("targetGlobalKey").next((t => {
10563 const s = [];
10564 return n.Z(((n, i) => {
10565 const r = new ot(n), o = function(t) {
10566 return [ 0, ui(t) ];
10567 }(r);
10568 s.push(e.get(o).next((n => n ? Rt.resolve() : (n => e.put({
10569 targetId: 0,
10570 path: ui(n),
10571 sequenceNumber: t.highestListenSequenceNumber
10572 }))(r))));
10573 })).next((() => Rt.waitFor(s)));
10574 }));
10575 }
10576 Qs(t, e) {
10577 // Create the index.
10578 t.createObjectStore("collectionParents", {
10579 keyPath: Ti
10580 });
10581 const n = e.store("collectionParents"), s = new mr, i = t => {
10582 if (s.add(t)) {
10583 const e = t.lastSegment(), s = t.popLast();
10584 return n.put({
10585 collectionId: e,
10586 parent: ui(s)
10587 });
10588 }
10589 };
10590 // Helper to add an index entry iff we haven't already written it.
10591 // Index existing remote documents.
10592 return e.store("remoteDocuments").Z({
10593 X: !0
10594 }, ((t, e) => {
10595 const n = new ot(t);
10596 return i(n.popLast());
10597 })).next((() => e.store("documentMutations").Z({
10598 X: !0
10599 }, (([t, e, n], s) => {
10600 const r = hi(e);
10601 return i(r.popLast());
10602 }))));
10603 }
10604 js(t) {
10605 const e = t.store("targets");
10606 return e.Z(((t, n) => {
10607 const s = Wi(n), i = zi(this.It, s);
10608 return e.put(i);
10609 }));
10610 }
10611 Ws(t, e) {
10612 const n = e.store("remoteDocuments"), s = [];
10613 return n.Z(((t, n) => {
10614 const i = e.store("remoteDocumentsV14"), r = (o = n, o.document ? new at(ot.fromString(o.document.name).popFirst(5)) : o.noDocument ? at.fromSegments(o.noDocument.path) : o.unknownDocument ? at.fromSegments(o.unknownDocument.path) : M()).path.toArray();
10615 var o;
10631 const u = {
10632 prefixPath: r.slice(0, r.length - 2),
10633 collectionGroup: r[r.length - 2],
10634 documentId: r[r.length - 1],
10635 readTime: n.readTime || [ 0, 0 ],
10636 unknownDocument: n.unknownDocument,
10637 noDocument: n.noDocument,
10638 document: n.document,
10639 hasCommittedMutations: !!n.hasCommittedMutations
10640 };
10641 s.push(i.put(u));
10642 })).next((() => Rt.waitFor(s)));
10643 }
10644 zs(t, e) {
10645 const n = e.store("mutations"), s = jr(this.It), i = new ao(lo.Bs, this.It.re);
10646 return n.W().next((t => {
10647 const n = new Map;
10648 return t.forEach((t => {
10649 var e;
10650 let s = null !== (e = n.get(t.userId)) && void 0 !== e ? e : _s();
10651 ji(this.It, t).keys().forEach((t => s = s.add(t))), n.set(t.userId, s);
10652 })), Rt.forEach(n, ((t, n) => {
10653 const r = new v(n), o = nr.oe(this.It, r), u = i.getIndexManager(r), c = Vr.oe(r, this.It, u, i.referenceDelegate);
10654 return new to(s, c, o, u).recalculateAndSaveOverlaysForDocumentKeys(new ki(e, Mt.at), t).next();
10655 }));
10656 }));
10657 }
10658}
10659
10660function _o(t) {
10661 t.createObjectStore("targetDocuments", {
10662 keyPath: pi
10663 }).createIndex("documentTargetsIndex", Ii, {
10664 unique: !0
10665 });
10666 // NOTE: This is unique only because the TargetId is the suffix.
10667 t.createObjectStore("targets", {
10668 keyPath: "targetId"
10669 }).createIndex("queryTargetsIndex", yi, {
10670 unique: !0
10671 }), t.createObjectStore("targetGlobal");
10672}
10673
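// --- Illustrative sketch (not part of the SDK bundle) ---
// The "fromVersion < N && toVersion >= N" pattern used by the schema
// converter above, written against the plain IndexedDB upgrade callback.
// The function and object-store names are hypothetical.
function exampleApplyMigrationsSketch(db, fromVersion, toVersion) {
    // Each step fires only when the upgrade crosses its version boundary, so
    // a client can jump several schema versions in a single upgrade.
    if (fromVersion < 1 && toVersion >= 1) db.createObjectStore("exampleOwner");
    if (fromVersion < 2 && toVersion >= 2) db.createObjectStore("exampleMutations", {
        keyPath: "batchId",
        autoIncrement: !0
    });
    if (fromVersion < 3 && toVersion >= 3) db.createObjectStore("exampleTargets", {
        keyPath: "targetId"
    });
}

// Typical call site (commented out; opening a database here would be a side effect):
// const request = indexedDB.open("example-db", 3);
// request.onupgradeneeded = event =>
//     exampleApplyMigrationsSketch(request.result, event.oldVersion, event.newVersion);
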
10674const wo = "Failed to obtain exclusive access to the persistence layer. To allow shared access, multi-tab synchronization has to be enabled in all tabs. If you are using `experimentalForceOwningTab:true`, make sure that only one tab has persistence enabled at any given time.";
10675
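// --- Hedged usage sketch (not part of the SDK bundle) ---
// The public-API counterpart of the error above: with the v9 modular API,
// either every tab opts into shared access, or exactly one tab forcefully
// takes ownership. `db` is assumed to be a Firestore instance obtained from
// `getFirestore()`.
//
// import { enableMultiTabIndexedDbPersistence,
//          enableIndexedDbPersistence } from "firebase/firestore";
//
// // Option 1: call this in every tab so they share a single IndexedDB lease.
// await enableMultiTabIndexedDbPersistence(db);
//
// // Option 2: single-tab persistence that forcefully claims the lease; other
// // tabs lose IndexedDB access (the `experimentalForceOwningTab` behavior).
// await enableIndexedDbPersistence(db, { forceOwnership: true });
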
10676/**
10677 * Oldest acceptable age in milliseconds for client metadata before the client
10678 * is considered inactive and its associated data is garbage collected.
10679 */
10680/**
10681 * An IndexedDB-backed instance of Persistence. Data is stored persistently
10682 * across sessions.
10683 *
10684 * On Web only, the Firestore SDK supports shared access to its persistence
10685 * layer. This allows multiple browser tabs to read and write to IndexedDb and
10686 * to synchronize state even without network connectivity. Shared access is
10687 * currently optional and not enabled unless all clients invoke
10688 * `enablePersistence()` with `{synchronizeTabs:true}`.
10689 *
10690 * In multi-tab mode, if multiple clients are active at the same time, the SDK
10691 * will designate one client as the “primary client”. An effort is made to pick
10692 * a visible, network-connected and active client, and this client is
10693 * responsible for letting other clients know about its presence. The primary
10694 * client writes a unique client-generated identifier (the client ID) to
10695 * IndexedDb’s “owner” store every 4 seconds. If the primary client fails to
10696 * update this entry, another client can acquire the lease and take over as
10697 * primary.
10698 *
10699 * Some persistence operations in the SDK are designated as primary-client only
10700 * operations. This includes the acknowledgment of mutations and all updates of
10701 * remote documents. The effects of these operations are written to persistence
10702 * and then broadcast to other tabs via LocalStorage (see
10703 * `WebStorageSharedClientState`), which then refresh their state from
10704 * persistence.
10705 *
10706 * Similarly, the primary client listens to notifications sent by secondary
10707 * clients to discover persistence changes written by secondary clients, such as
10708 * the addition of new mutations and query targets.
10709 *
10710 * If multi-tab is not enabled and another tab already obtained the primary
10711 * lease, IndexedDbPersistence enters a failed state and all subsequent
10712 * operations will automatically fail.
10713 *
10714 * Additionally, there is an optimization so that when a tab is closed, the
10715 * primary lease is released immediately (this is especially important to make
10716 * sure that a refreshed tab is able to immediately re-acquire the primary
10717 * lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
10718 * since it is an asynchronous API. So in addition to attempting to give up the
10719 * lease, the leaseholder writes its client ID to a "zombiedClient" entry in
10720 * LocalStorage which acts as an indicator that another tab should go ahead and
10721 * take the primary lease immediately regardless of the current lease timestamp.
10722 *
10723 * TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
10724 * longer optional.
10725 */
10726class mo {
10727 constructor(
10728 /**
10729 * Whether to synchronize the in-memory state of multiple tabs and share
10730 * access to local persistence.
10731 */
10732 t, e, n, s, i, r, o, u, c,
10733 /**
10734 * If set to true, forcefully obtains database access. Existing tabs will
10735 * no longer be able to access IndexedDB.
10736 */
10737 a, h = 15) {
10738 if (this.allowTabSynchronization = t, this.persistenceKey = e, this.clientId = n,
10739 this.Hs = i, this.window = r, this.document = o, this.Js = c, this.Ys = a, this.Xs = h,
10740 this.Ss = null, this.Ds = !1, this.isPrimary = !1, this.networkEnabled = !0,
10741 /** Our window.unload handler, if registered. */
10742 this.Zs = null, this.inForeground = !1,
10743 /** Our 'visibilitychange' listener if registered. */
10744 this.ti = null,
10745 /** The client metadata refresh task. */
10746 this.ei = null,
10747 /** The last time we garbage collected the client metadata object store. */
10748 this.ni = Number.NEGATIVE_INFINITY,
10749 /** A listener to notify on primary state changes. */
10750 this.si = t => Promise.resolve(), !mo.C()) throw new U(L.UNIMPLEMENTED, "This platform is either missing IndexedDB or is known to have an incomplete implementation. Offline persistence has been disabled.");
10751 this.referenceDelegate = new qr(this, s), this.ii = e + "main", this.It = new Li(u),
10752 this.ri = new Pt(this.ii, this.Xs, new fo(this.It)), this.Cs = new kr(this.referenceDelegate, this.It),
10753 this.remoteDocumentCache = jr(this.It), this.Ns = new Zi, this.window && this.window.localStorage ? this.oi = this.window.localStorage : (this.oi = null,
10754 !1 === a && N("IndexedDbPersistence", "LocalStorage is unavailable. As a result, persistence may not work reliably. In particular enablePersistence() could fail immediately after refreshing the page."));
10755 }
10756 /**
10757 * Attempt to start IndexedDb persistence.
10758 *
10759 * @returns Whether persistence was enabled.
10760 */ start() {
10761 // NOTE: This is expected to fail sometimes (in the case of another tab
10762 // already having the persistence lock), so it's the first thing we should
10763 // do.
10764 return this.ui().then((() => {
10765 if (!this.isPrimary && !this.allowTabSynchronization)
10766 // Fail `start()` if `synchronizeTabs` is disabled and we cannot
10767 // obtain the primary lease.
10768 throw new U(L.FAILED_PRECONDITION, wo);
10769 return this.ci(), this.ai(), this.hi(), this.runTransaction("getHighestListenSequenceNumber", "readonly", (t => this.Cs.getHighestSequenceNumber(t)));
10770 })).then((t => {
10771 this.Ss = new Mt(t, this.Js);
10772 })).then((() => {
10773 this.Ds = !0;
10774 })).catch((t => (this.ri && this.ri.close(), Promise.reject(t))));
10775 }
10776 /**
10777 * Registers a listener that gets called when the primary state of the
10778 * instance changes. Upon registering, this listener is invoked immediately
10779 * with the current primary state.
10780 *
10781 * PORTING NOTE: This is only used for Web multi-tab.
10782 */ li(t) {
10783 return this.si = async e => {
10784 if (this.started) return t(e);
10785 }, t(this.isPrimary);
10786 }
10787 /**
10788 * Registers a listener that gets called when the database receives a
10789 * version change event indicating that it has been deleted.
10790 *
10791 * PORTING NOTE: This is only used for Web multi-tab.
10792 */ setDatabaseDeletedListener(t) {
10793 this.ri.L((async e => {
10794 // Check if an attempt is made to delete IndexedDB.
10795 null === e.newVersion && await t();
10796 }));
10797 }
10798 /**
10799 * Adjusts the current network state in the client's metadata, potentially
10800 * affecting the primary lease.
10801 *
10802 * PORTING NOTE: This is only used for Web multi-tab.
10803 */ setNetworkEnabled(t) {
10804 this.networkEnabled !== t && (this.networkEnabled = t,
10805 // Schedule a primary lease refresh for immediate execution. The eventual
10806 // lease update will be propagated via `primaryStateListener`.
10807 this.Hs.enqueueAndForget((async () => {
10808 this.started && await this.ui();
10809 })));
10810 }
10811 /**
10812 * Updates the client metadata in IndexedDb and attempts to either obtain or
10813 * extend the primary lease for the local client. Asynchronously notifies the
10814 * primary state listener if the client either newly obtained or released its
10815 * primary lease.
10816 */ ui() {
10817 return this.runTransaction("updateClientMetadataAndTryBecomePrimary", "readwrite", (t => yo(t).put({
10818 clientId: this.clientId,
10819 updateTimeMs: Date.now(),
10820 networkEnabled: this.networkEnabled,
10821 inForeground: this.inForeground
10822 }).next((() => {
10823 if (this.isPrimary) return this.fi(t).next((t => {
10824 t || (this.isPrimary = !1, this.Hs.enqueueRetryable((() => this.si(!1))));
10825 }));
10826 })).next((() => this.di(t))).next((e => this.isPrimary && !e ? this._i(t).next((() => !1)) : !!e && this.wi(t).next((() => !0)))))).catch((t => {
10827 if (St(t))
10828 // Proceed with the existing state. Any subsequent access to
10829 // IndexedDB will verify the lease.
10830 return x("IndexedDbPersistence", "Failed to extend owner lease: ", t), this.isPrimary;
10831 if (!this.allowTabSynchronization) throw t;
10832 return x("IndexedDbPersistence", "Releasing owner lease after error during lease refresh", t),
10833 /* isPrimary= */ !1;
10834 })).then((t => {
10835 this.isPrimary !== t && this.Hs.enqueueRetryable((() => this.si(t))), this.isPrimary = t;
10836 }));
10837 }
10838 fi(t) {
10839 return go(t).get("owner").next((t => Rt.resolve(this.mi(t))));
10840 }
10841 gi(t) {
10842 return yo(t).delete(this.clientId);
10843 }
10844 /**
10845 * If the garbage collection threshold has passed, prunes the
10846 * RemoteDocumentChanges and the ClientMetadata store based on the last update
10847 * time of all clients.
10848 */ async yi() {
10849 if (this.isPrimary && !this.pi(this.ni, 18e5)) {
10850 this.ni = Date.now();
10851 const t = await this.runTransaction("maybeGarbageCollectMultiClientState", "readwrite-primary", (t => {
10852 const e = Oi(t, "clientMetadata");
10853 return e.W().next((t => {
10854 const n = this.Ii(t, 18e5), s = t.filter((t => -1 === n.indexOf(t)));
10855 // Delete metadata for clients that are no longer considered active.
10856 return Rt.forEach(s, (t => e.delete(t.clientId))).next((() => s));
10857 }));
10858 })).catch((() => []));
10859 // Delete potential leftover entries that may continue to mark the
10860 // inactive clients as zombied in LocalStorage.
10861 // Ideally we'd delete the IndexedDb and LocalStorage zombie entries for
10862 // the client atomically, but we can't. So we opt to delete the IndexedDb
10863 // entries first to avoid potentially reviving a zombied client.
10864 if (this.oi) for (const e of t) this.oi.removeItem(this.Ti(e.clientId));
10865 }
10866 }
10867 /**
10868 * Schedules a recurring timer to update the client metadata and to either
10869 * extend or acquire the primary lease if the client is eligible.
10870 */ hi() {
10871 this.ei = this.Hs.enqueueAfterDelay("client_metadata_refresh" /* ClientMetadataRefresh */ , 4e3, (() => this.ui().then((() => this.yi())).then((() => this.hi()))));
10872 }
10873 /** Checks whether `client` is the local client. */ mi(t) {
10874 return !!t && t.ownerId === this.clientId;
10875 }
10876 /**
10877 * Evaluate the state of all active clients and determine whether the local
10878 * client is or can act as the holder of the primary lease. Returns whether
10879 * the client is eligible for the lease, but does not actually acquire it.
10880 * May return 'false' even if there is no active leaseholder and another
10881 * (foreground) client should become leaseholder instead.
10882 */ di(t) {
10883 if (this.Ys) return Rt.resolve(!0);
10884 return go(t).get("owner").next((e => {
10885 // A client is eligible for the primary lease if:
10886 // - its network is enabled and the client's tab is in the foreground.
10887 // - its network is enabled and no other client's tab is in the
10888 // foreground.
 10889 // - every client's network is disabled and the client's tab is in the
 10890 // foreground.
 10891 // - every client's network is disabled and no other client's tab is in
10892 // the foreground.
10893 // - the `forceOwningTab` setting was passed in.
10894 if (null !== e && this.pi(e.leaseTimestampMs, 5e3) && !this.Ei(e.ownerId)) {
10895 if (this.mi(e) && this.networkEnabled) return !0;
10896 if (!this.mi(e)) {
10897 if (!e.allowTabSynchronization)
10898 // Fail the `canActAsPrimary` check if the current leaseholder has
10899 // not opted into multi-tab synchronization. If this happens at
10900 // client startup, we reject the Promise returned by
10901 // `enablePersistence()` and the user can continue to use Firestore
10902 // with in-memory persistence.
10903 // If this fails during a lease refresh, we will instead block the
10904 // AsyncQueue from executing further operations. Note that this is
10905 // acceptable since mixing & matching different `synchronizeTabs`
10906 // settings is not supported.
10907 // TODO(b/114226234): Remove this check when `synchronizeTabs` can
10908 // no longer be turned off.
10909 throw new U(L.FAILED_PRECONDITION, wo);
10910 return !1;
10911 }
10912 }
10913 return !(!this.networkEnabled || !this.inForeground) || yo(t).W().next((t => void 0 === this.Ii(t, 5e3).find((t => {
10914 if (this.clientId !== t.clientId) {
10915 const e = !this.networkEnabled && t.networkEnabled, n = !this.inForeground && t.inForeground, s = this.networkEnabled === t.networkEnabled;
10916 if (e || n && s) return !0;
10917 }
10918 return !1;
10919 }))));
10920 })).next((t => (this.isPrimary !== t && x("IndexedDbPersistence", `Client ${t ? "is" : "is not"} eligible for a primary lease.`),
10921 t)));
10922 }
10923 async shutdown() {
10924 // The shutdown() operations are idempotent and can be called even when
10925 // start() aborted (e.g. because it couldn't acquire the persistence lease).
10926 this.Ds = !1, this.Ai(), this.ei && (this.ei.cancel(), this.ei = null), this.Ri(),
10927 this.bi(),
10928 // Use `SimpleDb.runTransaction` directly to avoid failing if another tab
10929 // has obtained the primary lease.
10930 await this.ri.runTransaction("shutdown", "readwrite", [ "owner", "clientMetadata" ], (t => {
10931 const e = new ki(t, Mt.at);
10932 return this._i(e).next((() => this.gi(e)));
10933 })), this.ri.close(),
10934 // Remove the entry marking the client as zombied from LocalStorage since
10935 // we successfully deleted its metadata from IndexedDb.
10936 this.Pi();
10937 }
10938 /**
10939 * Returns clients that are not zombied and have an updateTime within the
10940 * provided threshold.
10941 */ Ii(t, e) {
10942 return t.filter((t => this.pi(t.updateTimeMs, e) && !this.Ei(t.clientId)));
10943 }
10944 /**
10945 * Returns the IDs of the clients that are currently active. If multi-tab
10946 * is not supported, returns an array that only contains the local client's
10947 * ID.
10948 *
10949 * PORTING NOTE: This is only used for Web multi-tab.
10950 */ vi() {
10951 return this.runTransaction("getActiveClients", "readonly", (t => yo(t).W().next((t => this.Ii(t, 18e5).map((t => t.clientId))))));
10952 }
10953 get started() {
10954 return this.Ds;
10955 }
10956 getMutationQueue(t, e) {
10957 return Vr.oe(t, this.It, e, this.referenceDelegate);
10958 }
10959 getTargetCache() {
10960 return this.Cs;
10961 }
10962 getRemoteDocumentCache() {
10963 return this.remoteDocumentCache;
10964 }
10965 getIndexManager(t) {
10966 return new yr(t, this.It.re.databaseId);
10967 }
10968 getDocumentOverlayCache(t) {
10969 return nr.oe(this.It, t);
10970 }
10971 getBundleCache() {
10972 return this.Ns;
10973 }
10974 runTransaction(t, e, n) {
10975 x("IndexedDbPersistence", "Starting transaction:", t);
10976 const s = "readonly" === e ? "readonly" : "readwrite", i = 15 === (r = this.Xs) ? Ni : 14 === r ? xi : 13 === r ? Ci : 12 === r ? Di : 11 === r ? Si : void M();
10977 /** Returns the object stores for the provided schema. */
10978 var r;
10979 let o;
10980 // Do all transactions as readwrite against all object stores, since we
10981 // are the only reader/writer.
10982 return this.ri.runTransaction(t, s, i, (s => (o = new ki(s, this.Ss ? this.Ss.next() : Mt.at),
10983 "readwrite-primary" === e ? this.fi(o).next((t => !!t || this.di(o))).next((e => {
10984 if (!e) throw N(`Failed to obtain primary lease for action '${t}'.`), this.isPrimary = !1,
10985 this.Hs.enqueueRetryable((() => this.si(!1))), new U(L.FAILED_PRECONDITION, Tt);
10986 return n(o);
10987 })).next((t => this.wi(o).next((() => t)))) : this.Vi(o).next((() => n(o)))))).then((t => (o.raiseOnCommittedEvent(),
10988 t)));
10989 }
10990 /**
10991 * Verifies that the current tab is the primary leaseholder or alternatively
10992 * that the leaseholder has opted into multi-tab synchronization.
10993 */
10994 // TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer
10995 // be turned off.
10996 Vi(t) {
10997 return go(t).get("owner").next((t => {
10998 if (null !== t && this.pi(t.leaseTimestampMs, 5e3) && !this.Ei(t.ownerId) && !this.mi(t) && !(this.Ys || this.allowTabSynchronization && t.allowTabSynchronization)) throw new U(L.FAILED_PRECONDITION, wo);
10999 }));
11000 }
11001 /**
11002 * Obtains or extends the new primary lease for the local client. This
11003 * method does not verify that the client is eligible for this lease.
11004 */ wi(t) {
11005 const e = {
11006 ownerId: this.clientId,
11007 allowTabSynchronization: this.allowTabSynchronization,
11008 leaseTimestampMs: Date.now()
11009 };
11010 return go(t).put("owner", e);
11011 }
11012 static C() {
11013 return Pt.C();
11014 }
11015 /** Checks the primary lease and removes it if we are the current primary. */ _i(t) {
11016 const e = go(t);
11017 return e.get("owner").next((t => this.mi(t) ? (x("IndexedDbPersistence", "Releasing primary lease."),
11018 e.delete("owner")) : Rt.resolve()));
11019 }
11020 /** Verifies that `updateTimeMs` is within `maxAgeMs`. */ pi(t, e) {
11021 const n = Date.now();
11022 return !(t < n - e) && (!(t > n) || (N(`Detected an update time that is in the future: ${t} > ${n}`),
11023 !1));
11024 }
11025 ci() {
11026 null !== this.document && "function" == typeof this.document.addEventListener && (this.ti = () => {
11027 this.Hs.enqueueAndForget((() => (this.inForeground = "visible" === this.document.visibilityState,
11028 this.ui())));
11029 }, this.document.addEventListener("visibilitychange", this.ti), this.inForeground = "visible" === this.document.visibilityState);
11030 }
11031 Ri() {
11032 this.ti && (this.document.removeEventListener("visibilitychange", this.ti), this.ti = null);
11033 }
11034 /**
11035 * Attaches a window.unload handler that will synchronously write our
11036 * clientId to a "zombie client id" location in LocalStorage. This can be used
11037 * by tabs trying to acquire the primary lease to determine that the lease
11038 * is no longer valid even if the timestamp is recent. This is particularly
11039 * important for the refresh case (so the tab correctly re-acquires the
11040 * primary lease). LocalStorage is used for this rather than IndexedDb because
11041 * it is a synchronous API and so can be used reliably from an unload
11042 * handler.
11043 */ ai() {
11044 var t;
11045 "function" == typeof (null === (t = this.window) || void 0 === t ? void 0 : t.addEventListener) && (this.Zs = () => {
11046 // Note: In theory, this should be scheduled on the AsyncQueue since it
11047 // accesses internal state. We execute this code directly during shutdown
11048 // to make sure it gets a chance to run.
11049 this.Ai(), f() && navigator.appVersion.match(/Version\/1[45]/) &&
11050 // On Safari 14 and 15, we do not run any cleanup actions as it might
11051 // trigger a bug that prevents Safari from re-opening IndexedDB during
11052 // the next page load.
11053 // See https://bugs.webkit.org/show_bug.cgi?id=226547
11054 this.Hs.enterRestrictedMode(/* purgeExistingTasks= */ !0), this.Hs.enqueueAndForget((() => this.shutdown()));
11055 }, this.window.addEventListener("pagehide", this.Zs));
11056 }
11057 bi() {
11058 this.Zs && (this.window.removeEventListener("pagehide", this.Zs), this.Zs = null);
11059 }
11060 /**
11061 * Returns whether a client is "zombied" based on its LocalStorage entry.
11062 * Clients become zombied when their tab closes without running all of the
11063 * cleanup logic in `shutdown()`.
11064 */ Ei(t) {
11065 var e;
11066 try {
11067 const n = null !== (null === (e = this.oi) || void 0 === e ? void 0 : e.getItem(this.Ti(t)));
11068 return x("IndexedDbPersistence", `Client '${t}' ${n ? "is" : "is not"} zombied in LocalStorage`),
11069 n;
11070 } catch (t) {
11071 // Gracefully handle if LocalStorage isn't working.
11072 return N("IndexedDbPersistence", "Failed to get zombied client id.", t), !1;
11073 }
11074 }
11075 /**
11076 * Record client as zombied (a client that had its tab closed). Zombied
11077 * clients are ignored during primary tab selection.
11078 */ Ai() {
11079 if (this.oi) try {
11080 this.oi.setItem(this.Ti(this.clientId), String(Date.now()));
11081 } catch (t) {
11082 // Gracefully handle if LocalStorage isn't available / working.
11083 N("Failed to set zombie client id.", t);
11084 }
11085 }
11086 /** Removes the zombied client entry if it exists. */ Pi() {
11087 if (this.oi) try {
11088 this.oi.removeItem(this.Ti(this.clientId));
11089 } catch (t) {
11090 // Ignore
11091 }
11092 }
11093 Ti(t) {
11094 return `firestore_zombie_${this.persistenceKey}_${t}`;
11095 }
11096}
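/**
 * Illustrative sketch, not part of the SDK: how the "zombie client" marker
 * described in the class above could be written and checked via LocalStorage.
 * The key format mirrors the `Ti()` method; the function names and the
 * `persistenceKey`/`clientId` parameters are invented for illustration, and
 * the functions are declared but never called, so the bundle's behavior is
 * unchanged.
 */
function markClientZombiedSketch(persistenceKey, clientId) {
    // Written synchronously from a "pagehide"/unload handler so that other
    // tabs trying to acquire the primary lease can tell this client's lease
    // is stale even if its updateTimeMs is recent.
    try {
        window.localStorage.setItem(`firestore_zombie_${persistenceKey}_${clientId}`, String(Date.now()));
    } catch (e) {
        // LocalStorage may be unavailable (e.g. private browsing); ignore.
    }
}

function isClientZombiedSketch(persistenceKey, clientId) {
    // A non-null entry means the client's tab closed without a clean shutdown.
    try {
        return null !== window.localStorage.getItem(`firestore_zombie_${persistenceKey}_${clientId}`);
    } catch (e) {
        return false;
    }
}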
11097
11098/**
11099 * Helper to get a typed SimpleDbStore for the primary client object store.
11100 */ function go(t) {
11101 return Oi(t, "owner");
11102}
11103
11104/**
11105 * Helper to get a typed SimpleDbStore for the client metadata object store.
11106 */ function yo(t) {
11107 return Oi(t, "clientMetadata");
11108}
11109
11110/**
11111 * Generates a string used as a prefix when storing data in IndexedDB and
11112 * LocalStorage.
11113 */ function po(t, e) {
11114 // Use two different prefix formats:
11115 // * firestore / persistenceKey / projectID . databaseID / ...
11116 // * firestore / persistenceKey / projectID / ...
11117 // projectIDs are DNS-compatible names and cannot contain dots
11118 // so there's no danger of collisions.
11119 let n = t.projectId;
11120 return t.isDefaultDatabase || (n += "." + t.database), "firestore/" + e + "/" + n + "/";
11121}
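/**
 * Illustrative sketch, not part of the SDK: the same prefix logic as `po`
 * above, written with descriptive names. `databaseId` is a plain object
 * standing in for the SDK's DatabaseId. For example,
 * buildStoragePrefixSketch({ projectId: "my-project", database: "my-db", isDefaultDatabase: false }, "main")
 * returns "firestore/main/my-project.my-db/".
 */
function buildStoragePrefixSketch(databaseId, persistenceKey) {
    let database = databaseId.projectId;
    if (!databaseId.isDefaultDatabase) {
        // Project IDs are DNS-compatible and cannot contain dots, so the
        // "<projectId>.<databaseId>" form cannot collide with another project.
        database += "." + databaseId.database;
    }
    return "firestore/" + persistenceKey + "/" + database + "/";
}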
11122
11123/**
11124 * @license
11125 * Copyright 2017 Google LLC
11126 *
11127 * Licensed under the Apache License, Version 2.0 (the "License");
11128 * you may not use this file except in compliance with the License.
11129 * You may obtain a copy of the License at
11130 *
11131 * http://www.apache.org/licenses/LICENSE-2.0
11132 *
11133 * Unless required by applicable law or agreed to in writing, software
11134 * distributed under the License is distributed on an "AS IS" BASIS,
11135 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11136 * See the License for the specific language governing permissions and
11137 * limitations under the License.
11138 */
11139/**
11140 * A set of changes to what documents are currently in view and out of view for
11141 * a given query. These changes are sent to the LocalStore by the View (via
11142 * the SyncEngine) and are used to pin / unpin documents as appropriate.
11143 */
11144class Io {
11145 constructor(t, e, n, s) {
11146 this.targetId = t, this.fromCache = e, this.Si = n, this.Di = s;
11147 }
11148 static Ci(t, e) {
11149 let n = _s(), s = _s();
11150 for (const t of e.docChanges) switch (t.type) {
11151 case 0 /* Added */ :
11152 n = n.add(t.doc.key);
11153 break;
11154
11155 case 1 /* Removed */ :
11156 s = s.add(t.doc.key);
11157 // do nothing
11158 }
11159 return new Io(t, e.fromCache, n, s);
11160 }
11161}
11162
11163/**
11164 * @license
11165 * Copyright 2019 Google LLC
11166 *
11167 * Licensed under the Apache License, Version 2.0 (the "License");
11168 * you may not use this file except in compliance with the License.
11169 * You may obtain a copy of the License at
11170 *
11171 * http://www.apache.org/licenses/LICENSE-2.0
11172 *
11173 * Unless required by applicable law or agreed to in writing, software
11174 * distributed under the License is distributed on an "AS IS" BASIS,
11175 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11176 * See the License for the specific language governing permissions and
11177 * limitations under the License.
11178 */
11179/**
11180 * The Firestore query engine.
11181 *
11182 * Firestore queries can be executed in three modes. The Query Engine determines
11183 * what mode to use based on what data is persisted. The mode only determines
11184 * the runtime complexity of the query - the result set is equivalent across all
11185 * implementations.
11186 *
 11187 * The Query engine will use index-based execution if a user has configured
 11188 * any index that can be used to execute the query (via `setIndexConfiguration()`).
11189 * Otherwise, the engine will try to optimize the query by re-using a previously
11190 * persisted query result. If that is not possible, the query will be executed
11191 * via a full collection scan.
11192 *
11193 * Index-based execution is the default when available. The query engine
11194 * supports partial indexed execution and merges the result from the index
11195 * lookup with documents that have not yet been indexed. The index evaluation
11196 * matches the backend's format and as such, the SDK can use indexing for all
11197 * queries that the backend supports.
11198 *
11199 * If no index exists, the query engine tries to take advantage of the target
 11200 * document mapping in the TargetCache. These mappings exist for all queries
11201 * that have been synced with the backend at least once and allow the query
11202 * engine to only read documents that previously matched a query plus any
11203 * documents that were edited after the query was last listened to.
11204 *
11205 * There are some cases when this optimization is not guaranteed to produce
11206 * the same results as full collection scans. In these cases, query
11207 * processing falls back to full scans. These cases are:
11208 *
11209 * - Limit queries where a document that matched the query previously no longer
11210 * matches the query.
11211 *
11212 * - Limit queries where a document edit may cause the document to sort below
11213 * another document that is in the local cache.
11214 *
11215 * - Queries that have never been CURRENT or free of limbo documents.
11216 */ class To {
11217 constructor() {
11218 this.xi = !1;
11219 }
11220 /** Sets the document view to query against. */ initialize(t, e) {
11221 this.Ni = t, this.indexManager = e, this.xi = !0;
11222 }
11223 /** Returns all local documents matching the specified query. */ getDocumentsMatchingQuery(t, e, n, s) {
11224 return this.ki(t, e).next((i => i || this.Oi(t, e, s, n))).next((n => n || this.Mi(t, e)));
11225 }
11226 /**
11227 * Performs an indexed query that evaluates the query based on a collection's
11228 * persisted index values. Returns `null` if an index is not available.
11229 */ ki(t, e) {
11230 if (rn(e))
11231 // Queries that match all documents don't benefit from using
11232 // key-based lookups. It is more efficient to scan all documents in a
11233 // collection, rather than to perform individual lookups.
11234 return Rt.resolve(null);
11235 let n = hn(e);
11236 return this.indexManager.getIndexType(t, n).next((s => 0 /* NONE */ === s ? null : (null !== e.limit && 1 /* PARTIAL */ === s && (
11237 // We cannot apply a limit for targets that are served using a partial
11238 // index. If a partial index will be used to serve the target, the
11239 // query may return a superset of documents that match the target
11240 // (e.g. if the index doesn't include all the target's filters), or
11241 // may return the correct set of documents in the wrong order (e.g. if
11242 // the index doesn't include a segment for one of the orderBys).
11243 // Therefore, a limit should not be applied in such cases.
11244 e = ln(e, null, "F" /* First */), n = hn(e)), this.indexManager.getDocumentsMatchingTarget(t, n).next((s => {
11245 const i = _s(...s);
11246 return this.Ni.getDocuments(t, i).next((s => this.indexManager.getMinOffset(t, n).next((n => {
11247 const r = this.Fi(e, s);
11248 return this.$i(e, r, i, n.readTime) ? this.ki(t, ln(e, null, "F" /* First */)) : this.Bi(t, r, e, n);
11249 }))));
11250 })))));
11251 }
11252 /**
11253 * Performs a query based on the target's persisted query mapping. Returns
11254 * `null` if the mapping is not available or cannot be used.
11255 */ Oi(t, e, n, s) {
11256 return rn(e) || s.isEqual(it.min()) ? this.Mi(t, e) : this.Ni.getDocuments(t, n).next((i => {
11257 const r = this.Fi(e, i);
11258 return this.$i(e, r, n, s) ? this.Mi(t, e) : (D() <= u.DEBUG && x("QueryEngine", "Re-using previous result from %s to execute query: %s", s.toString(), _n(e)),
11259 this.Bi(t, r, e, gt(s, -1)));
11260 }));
11261 // Queries that have never seen a snapshot without limbo free documents
11262 // should also be run as a full collection scan.
11263 }
11264 /** Applies the query filter and sorting to the provided documents. */ Fi(t, e) {
11265 // Sort the documents and re-apply the query filter since previously
11266 // matching documents do not necessarily still match the query.
11267 let n = new Kt(gn(t));
11268 return e.forEach(((e, s) => {
11269 wn(t, s) && (n = n.add(s));
11270 })), n;
11271 }
11272 /**
11273 * Determines if a limit query needs to be refilled from cache, making it
11274 * ineligible for index-free execution.
11275 *
11276 * @param query - The query.
11277 * @param sortedPreviousResults - The documents that matched the query when it
11278 * was last synchronized, sorted by the query's comparator.
11279 * @param remoteKeys - The document keys that matched the query at the last
11280 * snapshot.
11281 * @param limboFreeSnapshotVersion - The version of the snapshot when the
11282 * query was last synchronized.
11283 */ $i(t, e, n, s) {
11284 if (null === t.limit)
11285 // Queries without limits do not need to be refilled.
11286 return !1;
11287 if (n.size !== e.size)
11288 // The query needs to be refilled if a previously matching document no
11289 // longer matches.
11290 return !0;
11291 // Limit queries are not eligible for index-free query execution if there is
11292 // a potential that an older document from cache now sorts before a document
11293 // that was previously part of the limit. This, however, can only happen if
11294 // the document at the edge of the limit goes out of limit.
11295 // If a document that is not the limit boundary sorts differently,
11296 // the boundary of the limit itself did not change and documents from cache
11297 // will continue to be "rejected" by this boundary. Therefore, we can ignore
11298 // any modifications that don't affect the last document.
11299 const i = "F" /* First */ === t.limitType ? e.last() : e.first();
11300 return !!i && (i.hasPendingWrites || i.version.compareTo(s) > 0);
11301 }
11302 Mi(t, e) {
11303 return D() <= u.DEBUG && x("QueryEngine", "Using full collection scan to execute query:", _n(e)),
11304 this.Ni.getDocumentsMatchingQuery(t, e, pt.min());
11305 }
11306 /**
11307 * Combines the results from an indexed execution with the remaining documents
11308 * that have not yet been indexed.
11309 */ Bi(t, e, n, s) {
11310 // Retrieve all results for documents that were updated since the offset.
11311 return this.Ni.getDocumentsMatchingQuery(t, n, s).next((t => (
11312 // Merge with existing results
11313 e.forEach((e => {
11314 t = t.insert(e.key, e);
11315 })), t)));
11316 }
11317}
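/**
 * Illustrative sketch, not part of the SDK: the three execution modes the
 * query engine above chooses between, written as a plain async function. The
 * `engine` argument and its three methods are hypothetical stand-ins for the
 * minified `ki` (indexed), `Oi` (previous results) and `Mi` (full scan)
 * methods; a `null` return means "this mode could not be used".
 */
async function executeQuerySketch(engine, txn, query, lastLimboFreeSnapshotVersion, remoteKeys) {
    // 1. Prefer index-based execution when a usable client-side index exists.
    const fromIndex = await engine.queryUsingIndex(txn, query);
    if (fromIndex !== null) return fromIndex;
    // 2. Otherwise try to re-use the previously persisted query result,
    //    reading only documents that matched at the last snapshot plus any
    //    documents edited since then.
    const fromPreviousResults = await engine.queryUsingPreviousResults(txn, query, remoteKeys, lastLimboFreeSnapshotVersion);
    if (fromPreviousResults !== null) return fromPreviousResults;
    // 3. Fall back to a full collection scan.
    return engine.queryWithFullCollectionScan(txn, query);
}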
11318
11319/**
11320 * @license
11321 * Copyright 2020 Google LLC
11322 *
11323 * Licensed under the Apache License, Version 2.0 (the "License");
11324 * you may not use this file except in compliance with the License.
11325 * You may obtain a copy of the License at
11326 *
11327 * http://www.apache.org/licenses/LICENSE-2.0
11328 *
11329 * Unless required by applicable law or agreed to in writing, software
11330 * distributed under the License is distributed on an "AS IS" BASIS,
11331 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11332 * See the License for the specific language governing permissions and
11333 * limitations under the License.
11334 */
11335/**
11336 * Implements `LocalStore` interface.
11337 *
 11338 * Note: some fields defined in this class might have a public access level, but
11339 * the class is not exported so they are only accessible from this module.
11340 * This is useful to implement optional features (like bundles) in free
11341 * functions, such that they are tree-shakeable.
11342 */
11343class Eo {
11344 constructor(
11345 /** Manages our in-memory or durable persistence. */
11346 t, e, n, s) {
11347 this.persistence = t, this.Li = e, this.It = s,
11348 /**
11349 * Maps a targetID to data about its target.
11350 *
11351 * PORTING NOTE: We are using an immutable data structure on Web to make re-runs
11352 * of `applyRemoteEvent()` idempotent.
11353 */
11354 this.Ui = new Lt(tt),
11355 /** Maps a target to its targetID. */
11356 // TODO(wuandy): Evaluate if TargetId can be part of Target.
11357 this.qi = new ss((t => ke(t)), Me),
11358 /**
11359 * A per collection group index of the last read time processed by
11360 * `getNewDocumentChanges()`.
11361 *
11362 * PORTING NOTE: This is only used for multi-tab synchronization.
11363 */
11364 this.Ki = new Map, this.Gi = t.getRemoteDocumentCache(), this.Cs = t.getTargetCache(),
11365 this.Ns = t.getBundleCache(), this.Qi(n);
11366 }
11367 Qi(t) {
11368 // TODO(indexing): Add spec tests that test these components change after a
11369 // user change
11370 this.documentOverlayCache = this.persistence.getDocumentOverlayCache(t), this.indexManager = this.persistence.getIndexManager(t),
11371 this.mutationQueue = this.persistence.getMutationQueue(t, this.indexManager), this.localDocuments = new to(this.Gi, this.mutationQueue, this.documentOverlayCache, this.indexManager),
11372 this.Gi.setIndexManager(this.indexManager), this.Li.initialize(this.localDocuments, this.indexManager);
11373 }
11374 collectGarbage(t) {
11375 return this.persistence.runTransaction("Collect garbage", "readwrite-primary", (e => t.collect(e, this.Ui)));
11376 }
11377}
11378
11379function Ao(
11380/** Manages our in-memory or durable persistence. */
11381t, e, n, s) {
11382 return new Eo(t, e, n, s);
11383}
11384
11385/**
11386 * Tells the LocalStore that the currently authenticated user has changed.
11387 *
11388 * In response the local store switches the mutation queue to the new user and
11389 * returns any resulting document changes.
11390 */
11391// PORTING NOTE: Android and iOS only return the documents affected by the
11392// change.
11393async function Ro(t, e) {
11394 const n = B(t);
11395 return await n.persistence.runTransaction("Handle user change", "readonly", (t => {
11396 // Swap out the mutation queue, grabbing the pending mutation batches
11397 // before and after.
11398 let s;
11399 return n.mutationQueue.getAllMutationBatches(t).next((i => (s = i, n.Qi(e), n.mutationQueue.getAllMutationBatches(t)))).next((e => {
11400 const i = [], r = [];
11401 // Union the old/new changed keys.
11402 let o = _s();
11403 for (const t of s) {
11404 i.push(t.batchId);
11405 for (const e of t.mutations) o = o.add(e.key);
11406 }
11407 for (const t of e) {
11408 r.push(t.batchId);
11409 for (const e of t.mutations) o = o.add(e.key);
11410 }
11411 // Return the set of all (potentially) changed documents and the list
 11412 // of mutation batch IDs that were affected by the change.
11413 return n.localDocuments.getDocuments(t, o).next((t => ({
11414 ji: t,
11415 removedBatchIds: i,
11416 addedBatchIds: r
11417 })));
11418 }));
11419 }));
11420}
11421
11422/* Accepts locally generated Mutations and commits them to storage. */
11423/**
11424 * Acknowledges the given batch.
11425 *
11426 * On the happy path when a batch is acknowledged, the local store will
11427 *
11428 * + remove the batch from the mutation queue;
11429 * + apply the changes to the remote document cache;
11430 * + recalculate the latency compensated view implied by those changes (there
11431 * may be mutations in the queue that affect the documents but haven't been
11432 * acknowledged yet); and
 11433 * + give the changed documents back to the sync engine
11434 *
11435 * @returns The resulting (modified) documents.
11436 */
11437function bo(t, e) {
11438 const n = B(t);
11439 return n.persistence.runTransaction("Acknowledge batch", "readwrite-primary", (t => {
11440 const s = e.batch.keys(), i = n.Gi.newChangeBuffer({
11441 trackRemovals: !0
11442 });
11443 return function(t, e, n, s) {
11444 const i = n.batch, r = i.keys();
11445 let o = Rt.resolve();
11446 return r.forEach((t => {
11447 o = o.next((() => s.getEntry(e, t))).next((e => {
11448 const r = n.docVersions.get(t);
11449 F(null !== r), e.version.compareTo(r) < 0 && (i.applyToRemoteDocument(e, n), e.isValidDocument() && (
11450 // We use the commitVersion as the readTime rather than the
11451 // document's updateTime since the updateTime is not advanced
11452 // for updates that do not modify the underlying document.
11453 e.setReadTime(n.commitVersion), s.addEntry(e)));
11454 }));
11455 })), o.next((() => t.mutationQueue.removeMutationBatch(e, i)));
11456 }
11457 /** Returns the local view of the documents affected by a mutation batch. */
11458 // PORTING NOTE: Multi-Tab only.
11459 (n, t, e, i).next((() => i.apply(t))).next((() => n.mutationQueue.performConsistencyCheck(t))).next((() => n.documentOverlayCache.removeOverlaysForBatchId(t, s, e.batch.batchId))).next((() => n.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(t, function(t) {
11460 let e = _s();
11461 for (let n = 0; n < t.mutationResults.length; ++n) {
11462 t.mutationResults[n].transformResults.length > 0 && (e = e.add(t.batch.mutations[n].key));
11463 }
11464 return e;
11465 }
11466 /**
11467 * Removes mutations from the MutationQueue for the specified batch;
11468 * LocalDocuments will be recalculated.
11469 *
11470 * @returns The resulting modified documents.
11471 */ (e)))).next((() => n.localDocuments.getDocuments(t, s)));
11472 }));
11473}
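/**
 * Illustrative sketch, not part of the SDK: the happy-path steps listed in
 * the "Acknowledges the given batch" comment above, spelled out with
 * descriptive, hypothetical method names (the real transaction in `bo`
 * interleaves these steps through its PersistencePromise chain).
 */
async function acknowledgeBatchSketch(localStore, txn, ackedBatch) {
    const keys = ackedBatch.batch.keys();
    // 1. Remove the batch from the mutation queue.
    await localStore.removeAcknowledgedBatch(txn, ackedBatch);
    // 2. Apply the acknowledged changes to the remote document cache.
    await localStore.applyWriteToRemoteDocuments(txn, ackedBatch);
    // 3. Recalculate the latency-compensated view for documents that still
    //    have unacknowledged mutations in the queue.
    await localStore.recalculateOverlays(txn, keys);
    // 4. Give the changed documents back to the sync engine.
    return localStore.getDocuments(txn, keys);
}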
11474
11475/**
11476 * Returns the last consistent snapshot processed (used by the RemoteStore to
11477 * determine whether to buffer incoming snapshots from the backend).
11478 */
11479function Po(t) {
11480 const e = B(t);
11481 return e.persistence.runTransaction("Get last remote snapshot version", "readonly", (t => e.Cs.getLastRemoteSnapshotVersion(t)));
11482}
11483
11484/**
11485 * Updates the "ground-state" (remote) documents. We assume that the remote
11486 * event reflects any write batches that have been acknowledged or rejected
11487 * (i.e. we do not re-apply local mutations to updates from this event).
11488 *
11489 * LocalDocuments are re-calculated if there are remaining mutations in the
11490 * queue.
11491 */ function vo(t, e) {
11492 const n = B(t), s = e.snapshotVersion;
11493 let i = n.Ui;
11494 return n.persistence.runTransaction("Apply remote event", "readwrite-primary", (t => {
11495 const r = n.Gi.newChangeBuffer({
11496 trackRemovals: !0
11497 });
11498 // Reset newTargetDataByTargetMap in case this transaction gets re-run.
11499 i = n.Ui;
11500 const o = [];
11501 e.targetChanges.forEach(((r, u) => {
11502 const c = i.get(u);
11503 if (!c) return;
11504 // Only update the remote keys if the target is still active. This
11505 // ensures that we can persist the updated target data along with
11506 // the updated assignment.
11507 o.push(n.Cs.removeMatchingKeys(t, r.removedDocuments, u).next((() => n.Cs.addMatchingKeys(t, r.addedDocuments, u))));
11508 let a = c.withSequenceNumber(t.currentSequenceNumber);
11509 e.targetMismatches.has(u) ? a = a.withResumeToken(Ht.EMPTY_BYTE_STRING, it.min()).withLastLimboFreeSnapshotVersion(it.min()) : r.resumeToken.approximateByteSize() > 0 && (a = a.withResumeToken(r.resumeToken, s)),
11510 i = i.insert(u, a),
11511 // Update the target data if there are target changes (or if
11512 // sufficient time has passed since the last update).
11513 /**
11514 * Returns true if the newTargetData should be persisted during an update of
11515 * an active target. TargetData should always be persisted when a target is
 11516 * being released, in which case this function should not be called.
11517 *
11518 * While the target is active, TargetData updates can be omitted when nothing
11519 * about the target has changed except metadata like the resume token or
11520 * snapshot version. Occasionally it's worth the extra write to prevent these
11521 * values from getting too stale after a crash, but this doesn't have to be
11522 * too frequent.
11523 */
11524 function(t, e, n) {
11525 // Always persist target data if we don't already have a resume token.
11526 if (0 === t.resumeToken.approximateByteSize()) return !0;
11527 // Don't allow resume token changes to be buffered indefinitely. This
11528 // allows us to be reasonably up-to-date after a crash and avoids needing
11529 // to loop over all active queries on shutdown. Especially in the browser
11530 // we may not get time to do anything interesting while the current tab is
11531 // closing.
11532 if (e.snapshotVersion.toMicroseconds() - t.snapshotVersion.toMicroseconds() >= 3e8) return !0;
11533 // Otherwise if the only thing that has changed about a target is its resume
11534 // token it's not worth persisting. Note that the RemoteStore keeps an
11535 // in-memory view of the currently active targets which includes the current
11536 // resume token, so stream failure or user changes will still use an
11537 // up-to-date resume token regardless of what we do here.
11538 return n.addedDocuments.size + n.modifiedDocuments.size + n.removedDocuments.size > 0;
11539 }
11540 /**
11541 * Notifies local store of the changed views to locally pin documents.
11542 */ (c, a, r) && o.push(n.Cs.updateTargetData(t, a));
11543 }));
11544 let u = rs(), c = _s();
11545 // HACK: The only reason we allow a null snapshot version is so that we
11546 // can synthesize remote events when we get permission denied errors while
11547 // trying to resolve the state of a locally cached document that is in
11548 // limbo.
11549 if (e.documentUpdates.forEach((s => {
11550 e.resolvedLimboDocuments.has(s) && o.push(n.persistence.referenceDelegate.updateLimboDocument(t, s));
11551 })),
11552 // Each loop iteration only affects its "own" doc, so it's safe to get all
11553 // the remote documents in advance in a single call.
11554 o.push(Vo(t, r, e.documentUpdates).next((t => {
11555 u = t.Wi, c = t.zi;
11556 }))), !s.isEqual(it.min())) {
11557 const e = n.Cs.getLastRemoteSnapshotVersion(t).next((e => n.Cs.setTargetsMetadata(t, t.currentSequenceNumber, s)));
11558 o.push(e);
11559 }
11560 return Rt.waitFor(o).next((() => r.apply(t))).next((() => n.localDocuments.getLocalViewOfDocuments(t, u, c))).next((() => u));
11561 })).then((t => (n.Ui = i, t)));
11562}
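/**
 * Illustrative sketch, not part of the SDK: the "should the updated
 * TargetData be persisted?" heuristic that `vo` applies inline above, with
 * descriptive names. The 3e8-microsecond threshold is five minutes.
 */
function shouldPersistTargetDataSketch(oldTargetData, newTargetData, change) {
    // Always persist if we do not have a resume token yet.
    if (oldTargetData.resumeToken.approximateByteSize() === 0) return true;
    // Don't buffer resume-token-only updates for more than ~5 minutes, so we
    // stay reasonably up to date after a crash.
    const deltaMicros = newTargetData.snapshotVersion.toMicroseconds() - oldTargetData.snapshotVersion.toMicroseconds();
    if (deltaMicros >= 3e8) return true;
    // Otherwise persist only when the target's document membership changed.
    return change.addedDocuments.size + change.modifiedDocuments.size + change.removedDocuments.size > 0;
}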
11563
11564/**
11565 * Populates document change buffer with documents from backend or a bundle.
11566 * Returns the document changes resulting from applying those documents, and
 11567 * also a set of documents whose existence state is changed as a result.
11568 *
11569 * @param txn - Transaction to use to read existing documents from storage.
 11570 * @param documentBuffer - Document buffer to collect the resulting changes to be
11571 * applied to storage.
11572 * @param documents - Documents to be applied.
11573 */ function Vo(t, e, n) {
11574 let s = _s(), i = _s();
11575 return n.forEach((t => s = s.add(t))), e.getEntries(t, s).next((t => {
11576 let s = rs();
11577 return n.forEach(((n, r) => {
11578 const o = t.get(n);
 11579 // Check to see if there is an existence state change for this document.
11580 r.isFoundDocument() !== o.isFoundDocument() && (i = i.add(n)),
11581 // Note: The order of the steps below is important, since we want
11582 // to ensure that rejected limbo resolutions (which fabricate
11583 // NoDocuments with SnapshotVersion.min()) never add documents to
11584 // cache.
11585 r.isNoDocument() && r.version.isEqual(it.min()) ? (
11586 // NoDocuments with SnapshotVersion.min() are used in manufactured
11587 // events. We remove these documents from cache since we lost
11588 // access.
11589 e.removeEntry(n, r.readTime), s = s.insert(n, r)) : !o.isValidDocument() || r.version.compareTo(o.version) > 0 || 0 === r.version.compareTo(o.version) && o.hasPendingWrites ? (e.addEntry(r),
11590 s = s.insert(n, r)) : x("LocalStore", "Ignoring outdated watch update for ", n, ". Current version:", o.version, " Watch version:", r.version);
11591 })), {
11592 Wi: s,
11593 zi: i
11594 };
11595 }));
11596}
11597
11598/**
11599 * Gets the mutation batch after the passed in batchId in the mutation queue
11600 * or null if empty.
11601 * @param afterBatchId - If provided, the batch to search after.
11602 * @returns The next mutation or null if there wasn't one.
11603 */
11604function So(t, e) {
11605 const n = B(t);
11606 return n.persistence.runTransaction("Get next mutation batch", "readonly", (t => (void 0 === e && (e = -1),
11607 n.mutationQueue.getNextMutationBatchAfterBatchId(t, e))));
11608}
11609
11610/**
11611 * Reads the current value of a Document with a given key or null if not
11612 * found - used for testing.
11613 */
11614/**
 11615 * Assigns the given target an internal ID so that its results can be pinned and
 11616 * will not be garbage collected. A target must be allocated in the local store before
11617 * the store can be used to manage its view.
11618 *
11619 * Allocating an already allocated `Target` will return the existing `TargetData`
11620 * for that `Target`.
11621 */
11622function Do(t, e) {
11623 const n = B(t);
11624 return n.persistence.runTransaction("Allocate target", "readwrite", (t => {
11625 let s;
11626 return n.Cs.getTargetData(t, e).next((i => i ? (
11627 // This target has been listened to previously, so reuse the
11628 // previous targetID.
11629 // TODO(mcg): freshen last accessed date?
11630 s = i, Rt.resolve(s)) : n.Cs.allocateTargetId(t).next((i => (s = new Bi(e, i, 0 /* Listen */ , t.currentSequenceNumber),
11631 n.Cs.addTargetData(t, s).next((() => s)))))));
11632 })).then((t => {
11633 // If Multi-Tab is enabled, the existing target data may be newer than
11634 // the in-memory data
11635 const s = n.Ui.get(t.targetId);
11636 return (null === s || t.snapshotVersion.compareTo(s.snapshotVersion) > 0) && (n.Ui = n.Ui.insert(t.targetId, t),
11637 n.qi.set(e, t.targetId)), t;
11638 }));
11639}
11640
11641/**
11642 * Returns the TargetData as seen by the LocalStore, including updates that may
11643 * have not yet been persisted to the TargetCache.
11644 */
11645// Visible for testing.
11646/**
11647 * Unpins all the documents associated with the given target. If
 11648 * `keepPersistedTargetData` is set to false and Eager GC is enabled, the method
11649 * directly removes the associated target data from the target cache.
11650 *
11651 * Releasing a non-existing `Target` is a no-op.
11652 */
11653// PORTING NOTE: `keepPersistedTargetData` is multi-tab only.
11654async function Co(t, e, n) {
11655 const s = B(t), i = s.Ui.get(e), r = n ? "readwrite" : "readwrite-primary";
11656 try {
11657 n || await s.persistence.runTransaction("Release target", r, (t => s.persistence.referenceDelegate.removeTarget(t, i)));
11658 } catch (t) {
11659 if (!St(t)) throw t;
11660 // All `releaseTarget` does is record the final metadata state for the
11661 // target, but we've been recording this periodically during target
 11662 // activity. If we lose this write, it could cause a very slight
11663 // difference in the order of target deletion during GC, but we
11664 // don't define exact LRU semantics so this is acceptable.
11665 x("LocalStore", `Failed to update sequence numbers for target ${e}: ${t}`);
11666 }
11667 s.Ui = s.Ui.remove(e), s.qi.delete(i.target);
11668}
11669
11670/**
11671 * Runs the specified query against the local store and returns the results,
11672 * potentially taking advantage of query data from previous executions (such
11673 * as the set of remote keys).
11674 *
11675 * @param usePreviousResults - Whether results from previous executions can
11676 * be used to optimize this query execution.
11677 */ function xo(t, e, n) {
11678 const s = B(t);
11679 let i = it.min(), r = _s();
11680 return s.persistence.runTransaction("Execute query", "readonly", (t => function(t, e, n) {
11681 const s = B(t), i = s.qi.get(n);
11682 return void 0 !== i ? Rt.resolve(s.Ui.get(i)) : s.Cs.getTargetData(e, n);
11683 }(s, t, hn(e)).next((e => {
11684 if (e) return i = e.lastLimboFreeSnapshotVersion, s.Cs.getMatchingKeysForTargetId(t, e.targetId).next((t => {
11685 r = t;
11686 }));
11687 })).next((() => s.Li.getDocumentsMatchingQuery(t, e, n ? i : it.min(), n ? r : _s()))).next((t => (Oo(s, mn(e), t),
11688 {
11689 documents: t,
11690 Hi: r
11691 })))));
11692}
11693
11694// PORTING NOTE: Multi-Tab only.
11695function No(t, e) {
11696 const n = B(t), s = B(n.Cs), i = n.Ui.get(e);
11697 return i ? Promise.resolve(i.target) : n.persistence.runTransaction("Get target data", "readonly", (t => s.se(t, e).next((t => t ? t.target : null))));
11698}
11699
11700/**
11701 * Returns the set of documents that have been updated since the last call.
11702 * If this is the first call, returns the set of changes since client
 11703 * initialization. Further invocations will return documents that have changed
11704 * since the prior call.
11705 */
11706// PORTING NOTE: Multi-Tab only.
11707function ko(t, e) {
11708 const n = B(t), s = n.Ki.get(e) || it.min();
11709 // Get the current maximum read time for the collection. This should always
11710 // exist, but to reduce the chance for regressions we default to
11711 // SnapshotVersion.Min()
11712 // TODO(indexing): Consider removing the default value.
11713 return n.persistence.runTransaction("Get new document changes", "readonly", (t => n.Gi.getAllFromCollectionGroup(t, e, gt(s, -1),
11714 /* limit= */ Number.MAX_SAFE_INTEGER))).then((t => (Oo(n, e, t), t)));
11715}
11716
11717/** Sets the collection group's maximum read time from the given documents. */
11718// PORTING NOTE: Multi-Tab only.
11719function Oo(t, e, n) {
11720 let s = t.Ki.get(e) || it.min();
11721 n.forEach(((t, e) => {
11722 e.readTime.compareTo(s) > 0 && (s = e.readTime);
11723 })), t.Ki.set(e, s);
11724}
11725
11726/**
11727 * Creates a new target using the given bundle name, which will be used to
11728 * hold the keys of all documents from the bundle in query-document mappings.
11729 * This ensures that the loaded documents do not get garbage collected
11730 * right away.
11731 */
11732/**
11733 * Applies the documents from a bundle to the "ground-state" (remote)
11734 * documents.
11735 *
11736 * LocalDocuments are re-calculated if there are remaining mutations in the
11737 * queue.
11738 */
11739async function Mo(t, e, n, s) {
11740 const i = B(t);
11741 let r = _s(), o = rs();
11742 for (const t of n) {
11743 const n = e.Ji(t.metadata.name);
11744 t.document && (r = r.add(n));
11745 const s = e.Yi(t);
11746 s.setReadTime(e.Xi(t.metadata.readTime)), o = o.insert(n, s);
11747 }
11748 const u = i.Gi.newChangeBuffer({
11749 trackRemovals: !0
11750 }), c = await Do(i, function(t) {
11751 // It is OK that the path used for the query is not valid, because this will
 11752 // not be read or queried.
11753 return hn(sn(ot.fromString(`__bundle__/docs/${t}`)));
11754 }(s));
11755 // Allocates a target to hold all document keys from the bundle, such that
11756 // they will not get garbage collected right away.
11757 return i.persistence.runTransaction("Apply bundle documents", "readwrite", (t => Vo(t, u, o).next((e => (u.apply(t),
11758 e))).next((e => i.Cs.removeMatchingKeysForTargetId(t, c.targetId).next((() => i.Cs.addMatchingKeys(t, r, c.targetId))).next((() => i.localDocuments.getLocalViewOfDocuments(t, e.Wi, e.zi))).next((() => e.Wi))))));
11759}
11760
11761/**
11762 * Returns a promise of a boolean to indicate if the given bundle has already
 11763 * been loaded and its create time is newer than that of the currently loading bundle.
11764 */
11765/**
11766 * Saves the given `NamedQuery` to local persistence.
11767 */
11768async function Fo(t, e, n = _s()) {
11769 // Allocate a target for the named query such that it can be resumed
 11770 // from its associated read time if users use it to listen.
11771 // NOTE: this also means if no corresponding target exists, the new target
11772 // will remain active and will not get collected, unless users happen to
11773 // unlisten the query somehow.
11774 const s = await Do(t, hn(Hi(e.bundledQuery))), i = B(t);
11775 return i.persistence.runTransaction("Save named query", "readwrite", (t => {
11776 const r = xs(e.readTime);
11777 // Simply save the query itself if it is older than what the SDK already
11778 // has.
11779 if (s.snapshotVersion.compareTo(r) >= 0) return i.Ns.saveNamedQuery(t, e);
11780 // Update existing target data because the query from the bundle is newer.
11781 const o = s.withResumeToken(Ht.EMPTY_BYTE_STRING, r);
11782 return i.Ui = i.Ui.insert(o.targetId, o), i.Cs.updateTargetData(t, o).next((() => i.Cs.removeMatchingKeysForTargetId(t, s.targetId))).next((() => i.Cs.addMatchingKeys(t, n, s.targetId))).next((() => i.Ns.saveNamedQuery(t, e)));
11783 }));
11784}
11785
11786/** Assembles the key for a client state in WebStorage */
11787function $o(t, e) {
11788 return `firestore_clients_${t}_${e}`;
11789}
11790
11791// The format of the WebStorage key that stores the mutation state is:
11792// firestore_mutations_<persistence_prefix>_<batch_id>
11793// (for unauthenticated users)
11794// or: firestore_mutations_<persistence_prefix>_<batch_id>_<user_uid>
11795
11796// 'user_uid' is last to avoid needing to escape '_' characters that it might
11797// contain.
11798/** Assembles the key for a mutation batch in WebStorage */
11799function Bo(t, e, n) {
11800 let s = `firestore_mutations_${t}_${n}`;
11801 return e.isAuthenticated() && (s += `_${e.uid}`), s;
11802}
11803
11804// The format of the WebStorage key that stores a query target's metadata is:
11805// firestore_targets_<persistence_prefix>_<target_id>
11806/** Assembles the key for a query state in WebStorage */
11807function Lo(t, e) {
11808 return `firestore_targets_${t}_${e}`;
11809}
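/**
 * Illustrative examples, not part of the SDK, of the WebStorage keys the
 * helpers above produce for a hypothetical persistence prefix "main",
 * client ID "client42", batch ID 7 and target ID 12:
 *
 *   $o("main", "client42")               -> "firestore_clients_main_client42"
 *   Bo("main", unauthenticatedUser, 7)   -> "firestore_mutations_main_7"
 *   Bo("main", userWithUid("alice"), 7)  -> "firestore_mutations_main_7_alice"
 *   Lo("main", 12)                       -> "firestore_targets_main_12"
 */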
11810
11811// The WebStorage prefix that stores the primary tab's online state. The
11812// format of the key is:
11813// firestore_online_state_<persistence_prefix>
11814/**
11815 * Holds the state of a mutation batch, including its user ID, batch ID and
11816 * whether the batch is 'pending', 'acknowledged' or 'rejected'.
11817 */
11818// Visible for testing
11819class Uo {
11820 constructor(t, e, n, s) {
11821 this.user = t, this.batchId = e, this.state = n, this.error = s;
11822 }
11823 /**
11824 * Parses a MutationMetadata from its JSON representation in WebStorage.
11825 * Logs a warning and returns null if the format of the data is not valid.
11826 */ static Zi(t, e, n) {
11827 const s = JSON.parse(n);
11828 let i, r = "object" == typeof s && -1 !== [ "pending", "acknowledged", "rejected" ].indexOf(s.state) && (void 0 === s.error || "object" == typeof s.error);
11829 return r && s.error && (r = "string" == typeof s.error.message && "string" == typeof s.error.code,
11830 r && (i = new U(s.error.code, s.error.message))), r ? new Uo(t, e, s.state, i) : (N("SharedClientState", `Failed to parse mutation state for ID '${e}': ${n}`),
11831 null);
11832 }
11833 tr() {
11834 const t = {
11835 state: this.state,
11836 updateTimeMs: Date.now()
11837 };
11838 return this.error && (t.error = {
11839 code: this.error.code,
11840 message: this.error.message
11841 }), JSON.stringify(t);
11842 }
11843}
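/**
 * Illustrative example, not part of the SDK: the WebStorage value produced by
 * `Uo.tr()` above for a rejected batch looks roughly like
 *
 *   {"state":"rejected","updateTimeMs":1234567890123,"error":{"code":"permission-denied","message":"..."}}
 *
 * and `Uo.Zi()` only accepts the states "pending", "acknowledged" and
 * "rejected", logging a warning and returning null for anything else.
 */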
11844
11845/**
11846 * Holds the state of a query target, including its target ID and whether the
11847 * target is 'not-current', 'current' or 'rejected'.
11848 */
11849// Visible for testing
11850class qo {
11851 constructor(t, e, n) {
11852 this.targetId = t, this.state = e, this.error = n;
11853 }
11854 /**
11855 * Parses a QueryTargetMetadata from its JSON representation in WebStorage.
11856 * Logs a warning and returns null if the format of the data is not valid.
11857 */ static Zi(t, e) {
11858 const n = JSON.parse(e);
11859 let s, i = "object" == typeof n && -1 !== [ "not-current", "current", "rejected" ].indexOf(n.state) && (void 0 === n.error || "object" == typeof n.error);
11860 return i && n.error && (i = "string" == typeof n.error.message && "string" == typeof n.error.code,
11861 i && (s = new U(n.error.code, n.error.message))), i ? new qo(t, n.state, s) : (N("SharedClientState", `Failed to parse target state for ID '${t}': ${e}`),
11862 null);
11863 }
11864 tr() {
11865 const t = {
11866 state: this.state,
11867 updateTimeMs: Date.now()
11868 };
11869 return this.error && (t.error = {
11870 code: this.error.code,
11871 message: this.error.message
11872 }), JSON.stringify(t);
11873 }
11874}
11875
11876/**
11877 * This class represents the immutable ClientState for a client read from
11878 * WebStorage, containing the list of active query targets.
11879 */ class Ko {
11880 constructor(t, e) {
11881 this.clientId = t, this.activeTargetIds = e;
11882 }
11883 /**
11884 * Parses a RemoteClientState from the JSON representation in WebStorage.
11885 * Logs a warning and returns null if the format of the data is not valid.
11886 */ static Zi(t, e) {
11887 const n = JSON.parse(e);
11888 let s = "object" == typeof n && n.activeTargetIds instanceof Array, i = ms();
11889 for (let t = 0; s && t < n.activeTargetIds.length; ++t) s = ue(n.activeTargetIds[t]),
11890 i = i.add(n.activeTargetIds[t]);
11891 return s ? new Ko(t, i) : (N("SharedClientState", `Failed to parse client data for instance '${t}': ${e}`),
11892 null);
11893 }
11894}
11895
11896/**
11897 * This class represents the online state for all clients participating in
11898 * multi-tab. The online state is only written to by the primary client, and
11899 * used in secondary clients to update their query views.
11900 */ class Go {
11901 constructor(t, e) {
11902 this.clientId = t, this.onlineState = e;
11903 }
11904 /**
11905 * Parses a SharedOnlineState from its JSON representation in WebStorage.
11906 * Logs a warning and returns null if the format of the data is not valid.
11907 */ static Zi(t) {
11908 const e = JSON.parse(t);
11909 return "object" == typeof e && -1 !== [ "Unknown", "Online", "Offline" ].indexOf(e.onlineState) && "string" == typeof e.clientId ? new Go(e.clientId, e.onlineState) : (N("SharedClientState", `Failed to parse online state: ${t}`),
11910 null);
11911 }
11912}
11913
11914/**
11915 * Metadata state of the local client. Unlike `RemoteClientState`, this class is
11916 * mutable and keeps track of all pending mutations, which allows us to
11917 * update the range of pending mutation batch IDs as new mutations are added or
11918 * removed.
11919 *
11920 * The data in `LocalClientState` is not read from WebStorage and instead
11921 * updated via its instance methods. The updated state can be serialized via
11922 * `toWebStorageJSON()`.
11923 */
11924// Visible for testing.
11925class Qo {
11926 constructor() {
11927 this.activeTargetIds = ms();
11928 }
11929 er(t) {
11930 this.activeTargetIds = this.activeTargetIds.add(t);
11931 }
11932 nr(t) {
11933 this.activeTargetIds = this.activeTargetIds.delete(t);
11934 }
11935 /**
11936 * Converts this entry into a JSON-encoded format we can use for WebStorage.
11937 * Does not encode `clientId` as it is part of the key in WebStorage.
11938 */ tr() {
11939 const t = {
11940 activeTargetIds: this.activeTargetIds.toArray(),
11941 updateTimeMs: Date.now()
11942 };
11943 return JSON.stringify(t);
11944 }
11945}
11946
11947/**
11948 * `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
11949 * backing store for the SharedClientState. It keeps track of all active
11950 * clients and supports modifications of the local client's data.
11951 */ class jo {
11952 constructor(t, e, n, s, i) {
11953 this.window = t, this.Hs = e, this.persistenceKey = n, this.sr = s, this.syncEngine = null,
11954 this.onlineStateHandler = null, this.sequenceNumberHandler = null, this.ir = this.rr.bind(this),
11955 this.ur = new Lt(tt), this.started = !1,
11956 /**
11957 * Captures WebStorage events that occur before `start()` is called. These
11958 * events are replayed once `WebStorageSharedClientState` is started.
11959 */
11960 this.cr = [];
11961 // Escape the special characters mentioned here:
11962 // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
11963 const r = n.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
11964 this.storage = this.window.localStorage, this.currentUser = i, this.ar = $o(this.persistenceKey, this.sr),
11965 this.hr =
11966 /** Assembles the key for the current sequence number. */
11967 function(t) {
11968 return `firestore_sequence_number_${t}`;
11969 }
11970 /**
11971 * @license
11972 * Copyright 2018 Google LLC
11973 *
11974 * Licensed under the Apache License, Version 2.0 (the "License");
11975 * you may not use this file except in compliance with the License.
11976 * You may obtain a copy of the License at
11977 *
11978 * http://www.apache.org/licenses/LICENSE-2.0
11979 *
11980 * Unless required by applicable law or agreed to in writing, software
11981 * distributed under the License is distributed on an "AS IS" BASIS,
11982 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11983 * See the License for the specific language governing permissions and
11984 * limitations under the License.
11985 */ (this.persistenceKey), this.ur = this.ur.insert(this.sr, new Qo), this.lr = new RegExp(`^firestore_clients_${r}_([^_]*)$`),
11986 this.dr = new RegExp(`^firestore_mutations_${r}_(\\d+)(?:_(.*))?$`), this._r = new RegExp(`^firestore_targets_${r}_(\\d+)$`),
11987 this.wr =
11988 /** Assembles the key for the online state of the primary tab. */
11989 function(t) {
11990 return `firestore_online_state_${t}`;
11991 }
 11992 // The WebStorage prefix that serves as an event to indicate that the remote documents
 11993 // might have changed because a secondary tab loaded a bundle.
 11994 // The format of the key is:
11995 // firestore_bundle_loaded_v2_<persistenceKey>
11996 // The version ending with "v2" stores the list of modified collection groups.
11997 (this.persistenceKey), this.mr = function(t) {
11998 return `firestore_bundle_loaded_v2_${t}`;
11999 }
12000 // The WebStorage key prefix for the key that stores the last sequence number allocated. The key
12001 // looks like 'firestore_sequence_number_<persistence_prefix>'.
12002 (this.persistenceKey),
12003 // Rather than adding the storage observer during start(), we add the
12004 // storage observer during initialization. This ensures that we collect
12005 // events before other components populate their initial state (during their
12006 // respective start() calls). Otherwise, we might for example miss a
12007 // mutation that is added after LocalStore's start() processed the existing
12008 // mutations but before we observe WebStorage events.
12009 this.window.addEventListener("storage", this.ir);
12010 }
12011 /** Returns 'true' if WebStorage is available in the current environment. */ static C(t) {
12012 return !(!t || !t.localStorage);
12013 }
12014 async start() {
12015 // Retrieve the list of existing clients to backfill the data in
12016 // SharedClientState.
12017 const t = await this.syncEngine.vi();
12018 for (const e of t) {
12019 if (e === this.sr) continue;
12020 const t = this.getItem($o(this.persistenceKey, e));
12021 if (t) {
12022 const n = Ko.Zi(e, t);
12023 n && (this.ur = this.ur.insert(n.clientId, n));
12024 }
12025 }
12026 this.gr();
12027 // Check if there is an existing online state and call the callback handler
12028 // if applicable.
12029 const e = this.storage.getItem(this.wr);
12030 if (e) {
12031 const t = this.yr(e);
12032 t && this.pr(t);
12033 }
12034 for (const t of this.cr) this.rr(t);
12035 this.cr = [],
12036 // Register a window unload hook to remove the client metadata entry from
12037 // WebStorage even if `shutdown()` was not called.
12038 this.window.addEventListener("pagehide", (() => this.shutdown())), this.started = !0;
12039 }
12040 writeSequenceNumber(t) {
12041 this.setItem(this.hr, JSON.stringify(t));
12042 }
12043 getAllActiveQueryTargets() {
12044 return this.Ir(this.ur);
12045 }
12046 isActiveQueryTarget(t) {
12047 let e = !1;
12048 return this.ur.forEach(((n, s) => {
12049 s.activeTargetIds.has(t) && (e = !0);
12050 })), e;
12051 }
12052 addPendingMutation(t) {
12053 this.Tr(t, "pending");
12054 }
12055 updateMutationState(t, e, n) {
12056 this.Tr(t, e, n),
12057 // Once a final mutation result is observed by other clients, they no longer
12058 // access the mutation's metadata entry. Since WebStorage replays events
12059 // in order, it is safe to delete the entry right after updating it.
12060 this.Er(t);
12061 }
12062 addLocalQueryTarget(t) {
12063 let e = "not-current";
 12064 // Look up an existing query state if the target ID was already registered
12065 // by another tab
12066 if (this.isActiveQueryTarget(t)) {
12067 const n = this.storage.getItem(Lo(this.persistenceKey, t));
12068 if (n) {
12069 const s = qo.Zi(t, n);
12070 s && (e = s.state);
12071 }
12072 }
12073 return this.Ar.er(t), this.gr(), e;
12074 }
12075 removeLocalQueryTarget(t) {
12076 this.Ar.nr(t), this.gr();
12077 }
12078 isLocalQueryTarget(t) {
12079 return this.Ar.activeTargetIds.has(t);
12080 }
12081 clearQueryState(t) {
12082 this.removeItem(Lo(this.persistenceKey, t));
12083 }
12084 updateQueryState(t, e, n) {
12085 this.Rr(t, e, n);
12086 }
12087 handleUserChange(t, e, n) {
12088 e.forEach((t => {
12089 this.Er(t);
12090 })), this.currentUser = t, n.forEach((t => {
12091 this.addPendingMutation(t);
12092 }));
12093 }
12094 setOnlineState(t) {
12095 this.br(t);
12096 }
12097 notifyBundleLoaded(t) {
12098 this.Pr(t);
12099 }
12100 shutdown() {
12101 this.started && (this.window.removeEventListener("storage", this.ir), this.removeItem(this.ar),
12102 this.started = !1);
12103 }
12104 getItem(t) {
12105 const e = this.storage.getItem(t);
12106 return x("SharedClientState", "READ", t, e), e;
12107 }
12108 setItem(t, e) {
12109 x("SharedClientState", "SET", t, e), this.storage.setItem(t, e);
12110 }
12111 removeItem(t) {
12112 x("SharedClientState", "REMOVE", t), this.storage.removeItem(t);
12113 }
12114 rr(t) {
12115 // Note: The function is typed to take Event to be interface-compatible with
12116 // `Window.addEventListener`.
12117 const e = t;
12118 if (e.storageArea === this.storage) {
12119 if (x("SharedClientState", "EVENT", e.key, e.newValue), e.key === this.ar) return void N("Received WebStorage notification for local change. Another client might have garbage-collected our state");
12120 this.Hs.enqueueRetryable((async () => {
12121 if (this.started) {
12122 if (null !== e.key) if (this.lr.test(e.key)) {
12123 if (null == e.newValue) {
12124 const t = this.vr(e.key);
12125 return this.Vr(t, null);
12126 }
12127 {
12128 const t = this.Sr(e.key, e.newValue);
12129 if (t) return this.Vr(t.clientId, t);
12130 }
12131 } else if (this.dr.test(e.key)) {
12132 if (null !== e.newValue) {
12133 const t = this.Dr(e.key, e.newValue);
12134 if (t) return this.Cr(t);
12135 }
12136 } else if (this._r.test(e.key)) {
12137 if (null !== e.newValue) {
12138 const t = this.Nr(e.key, e.newValue);
12139 if (t) return this.kr(t);
12140 }
12141 } else if (e.key === this.wr) {
12142 if (null !== e.newValue) {
12143 const t = this.yr(e.newValue);
12144 if (t) return this.pr(t);
12145 }
12146 } else if (e.key === this.hr) {
12147 const t = function(t) {
12148 let e = Mt.at;
12149 if (null != t) try {
12150 const n = JSON.parse(t);
12151 F("number" == typeof n), e = n;
12152 } catch (t) {
12153 N("SharedClientState", "Failed to read sequence number from WebStorage", t);
12154 }
12155 return e;
12156 }
12157 /**
12158 * `MemorySharedClientState` is a simple implementation of SharedClientState for
12159 * clients using memory persistence. The state in this class remains fully
12160 * isolated and no synchronization is performed.
12161 */ (e.newValue);
12162 t !== Mt.at && this.sequenceNumberHandler(t);
12163 } else if (e.key === this.mr) {
12164 const t = this.Or(e.newValue);
12165 await Promise.all(t.map((t => this.syncEngine.Mr(t))));
12166 }
12167 } else this.cr.push(e);
12168 }));
12169 }
12170 }
12171 get Ar() {
12172 return this.ur.get(this.sr);
12173 }
12174 gr() {
12175 this.setItem(this.ar, this.Ar.tr());
12176 }
12177 Tr(t, e, n) {
12178 const s = new Uo(this.currentUser, t, e, n), i = Bo(this.persistenceKey, this.currentUser, t);
12179 this.setItem(i, s.tr());
12180 }
12181 Er(t) {
12182 const e = Bo(this.persistenceKey, this.currentUser, t);
12183 this.removeItem(e);
12184 }
12185 br(t) {
12186 const e = {
12187 clientId: this.sr,
12188 onlineState: t
12189 };
12190 this.storage.setItem(this.wr, JSON.stringify(e));
12191 }
12192 Rr(t, e, n) {
12193 const s = Lo(this.persistenceKey, t), i = new qo(t, e, n);
12194 this.setItem(s, i.tr());
12195 }
12196 Pr(t) {
12197 const e = JSON.stringify(Array.from(t));
12198 this.setItem(this.mr, e);
12199 }
12200 /**
12201 * Parses a client state key in WebStorage. Returns null if the key does not
12202 * match the expected key format.
12203 */ vr(t) {
12204 const e = this.lr.exec(t);
12205 return e ? e[1] : null;
12206 }
12207 /**
12208 * Parses a client state in WebStorage. Returns 'null' if the value could not
12209 * be parsed.
12210 */ Sr(t, e) {
12211 const n = this.vr(t);
12212 return Ko.Zi(n, e);
12213 }
12214 /**
12215 * Parses a mutation batch state in WebStorage. Returns 'null' if the value
12216 * could not be parsed.
12217 */ Dr(t, e) {
12218 const n = this.dr.exec(t), s = Number(n[1]), i = void 0 !== n[2] ? n[2] : null;
12219 return Uo.Zi(new v(i), s, e);
12220 }
12221 /**
12222 * Parses a query target state from WebStorage. Returns 'null' if the value
12223 * could not be parsed.
12224 */ Nr(t, e) {
12225 const n = this._r.exec(t), s = Number(n[1]);
12226 return qo.Zi(s, e);
12227 }
12228 /**
12229 * Parses an online state from WebStorage. Returns 'null' if the value
12230 * could not be parsed.
12231 */ yr(t) {
12232 return Go.Zi(t);
12233 }
12234 Or(t) {
12235 return JSON.parse(t);
12236 }
12237 async Cr(t) {
12238 if (t.user.uid === this.currentUser.uid) return this.syncEngine.Fr(t.batchId, t.state, t.error);
12239 x("SharedClientState", `Ignoring mutation for non-active user ${t.user.uid}`);
12240 }
12241 kr(t) {
12242 return this.syncEngine.$r(t.targetId, t.state, t.error);
12243 }
12244 Vr(t, e) {
12245 const n = e ? this.ur.insert(t, e) : this.ur.remove(t), s = this.Ir(this.ur), i = this.Ir(n), r = [], o = [];
12246 return i.forEach((t => {
12247 s.has(t) || r.push(t);
12248 })), s.forEach((t => {
12249 i.has(t) || o.push(t);
12250 })), this.syncEngine.Br(r, o).then((() => {
12251 this.ur = n;
12252 }));
12253 }
12254 pr(t) {
12255 // We check whether the client that wrote this online state is still active
12256 // by comparing its client ID to the list of clients kept active in
12257 // IndexedDb. If a client does not update their IndexedDb client state
12258 // within 5 seconds, it is considered inactive and we don't emit an online
12259 // state event.
12260 this.ur.get(t.clientId) && this.onlineStateHandler(t.onlineState);
12261 }
12262 Ir(t) {
12263 let e = ms();
12264 return t.forEach(((t, n) => {
12265 e = e.unionWith(n.activeTargetIds);
12266 })), e;
12267 }
12268}
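// Illustrative sketch (not part of the SDK): the general cross-tab technique used by
// the class above. Each tab writes its own state under a per-client key and listens
// for the 'storage' event, which the browser fires in *other* same-origin tabs
// whenever localStorage changes. `DemoTabStateChannel` and its key prefix are
// hypothetical names used only for illustration.
class DemoTabStateChannel {
    constructor(clientId, onRemoteChange) {
        this.clientId = clientId;
        this.keyPrefix = "demo_tab_state_";
        // 'storage' fires only for changes made by other tabs, never for our own writes.
        this.listener = event => {
            if (event.storageArea === window.localStorage && event.key && event.key.startsWith(this.keyPrefix) && event.key !== this.keyPrefix + this.clientId) {
                onRemoteChange(event.key.slice(this.keyPrefix.length), event.newValue ? JSON.parse(event.newValue) : null);
            }
        };
    }
    start() {
        window.addEventListener("storage", this.listener);
    }
    publish(state) {
        // Other tabs observe this write through their own 'storage' listeners.
        window.localStorage.setItem(this.keyPrefix + this.clientId, JSON.stringify(state));
    }
    shutdown() {
        window.removeEventListener("storage", this.listener);
        window.localStorage.removeItem(this.keyPrefix + this.clientId);
    }
}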
12269
12270class Wo {
12271 constructor() {
12272 this.Lr = new Qo, this.Ur = {}, this.onlineStateHandler = null, this.sequenceNumberHandler = null;
12273 }
12274 addPendingMutation(t) {
12275 // No op.
12276 }
12277 updateMutationState(t, e, n) {
12278 // No op.
12279 }
12280 addLocalQueryTarget(t) {
12281 return this.Lr.er(t), this.Ur[t] || "not-current";
12282 }
12283 updateQueryState(t, e, n) {
12284 this.Ur[t] = e;
12285 }
12286 removeLocalQueryTarget(t) {
12287 this.Lr.nr(t);
12288 }
12289 isLocalQueryTarget(t) {
12290 return this.Lr.activeTargetIds.has(t);
12291 }
12292 clearQueryState(t) {
12293 delete this.Ur[t];
12294 }
12295 getAllActiveQueryTargets() {
12296 return this.Lr.activeTargetIds;
12297 }
12298 isActiveQueryTarget(t) {
12299 return this.Lr.activeTargetIds.has(t);
12300 }
12301 start() {
12302 return this.Lr = new Qo, Promise.resolve();
12303 }
12304 handleUserChange(t, e, n) {
12305 // No op.
12306 }
12307 setOnlineState(t) {
12308 // No op.
12309 }
12310 shutdown() {}
12311 writeSequenceNumber(t) {}
12312 notifyBundleLoaded(t) {
12313 // No op.
12314 }
12315}
12316
12317/**
12318 * @license
12319 * Copyright 2019 Google LLC
12320 *
12321 * Licensed under the Apache License, Version 2.0 (the "License");
12322 * you may not use this file except in compliance with the License.
12323 * You may obtain a copy of the License at
12324 *
12325 * http://www.apache.org/licenses/LICENSE-2.0
12326 *
12327 * Unless required by applicable law or agreed to in writing, software
12328 * distributed under the License is distributed on an "AS IS" BASIS,
12329 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12330 * See the License for the specific language governing permissions and
12331 * limitations under the License.
12332 */ class zo {
12333 qr(t) {
12334 // No-op.
12335 }
12336 shutdown() {
12337 // No-op.
12338 }
12339}
12340
12341/**
12342 * @license
12343 * Copyright 2019 Google LLC
12344 *
12345 * Licensed under the Apache License, Version 2.0 (the "License");
12346 * you may not use this file except in compliance with the License.
12347 * You may obtain a copy of the License at
12348 *
12349 * http://www.apache.org/licenses/LICENSE-2.0
12350 *
12351 * Unless required by applicable law or agreed to in writing, software
12352 * distributed under the License is distributed on an "AS IS" BASIS,
12353 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12354 * See the License for the specific language governing permissions and
12355 * limitations under the License.
12356 */
12357// References to `window` are guarded by BrowserConnectivityMonitor.isAvailable()
12358/* eslint-disable no-restricted-globals */
12359/**
12360 * Browser implementation of ConnectivityMonitor.
12361 */
12362class Ho {
12363 constructor() {
12364 this.Kr = () => this.Gr(), this.Qr = () => this.jr(), this.Wr = [], this.zr();
12365 }
12366 qr(t) {
12367 this.Wr.push(t);
12368 }
12369 shutdown() {
12370 window.removeEventListener("online", this.Kr), window.removeEventListener("offline", this.Qr);
12371 }
12372 zr() {
12373 window.addEventListener("online", this.Kr), window.addEventListener("offline", this.Qr);
12374 }
12375 Gr() {
12376 x("ConnectivityMonitor", "Network connectivity changed: AVAILABLE");
12377 for (const t of this.Wr) t(0 /* AVAILABLE */);
12378 }
12379 jr() {
12380 x("ConnectivityMonitor", "Network connectivity changed: UNAVAILABLE");
12381 for (const t of this.Wr) t(1 /* UNAVAILABLE */);
12382 }
12383 // TODO(chenbrian): Consider passing a window into this component, or into this
12384 // method, for testing via FakeWindow.
12385 /** Checks that all used attributes of window are available. */
12386 static C() {
12387 return "undefined" != typeof window && void 0 !== window.addEventListener && void 0 !== window.removeEventListener;
12388 }
12389}
12390
12391/**
12392 * @license
12393 * Copyright 2020 Google LLC
12394 *
12395 * Licensed under the Apache License, Version 2.0 (the "License");
12396 * you may not use this file except in compliance with the License.
12397 * You may obtain a copy of the License at
12398 *
12399 * http://www.apache.org/licenses/LICENSE-2.0
12400 *
12401 * Unless required by applicable law or agreed to in writing, software
12402 * distributed under the License is distributed on an "AS IS" BASIS,
12403 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12404 * See the License for the specific language governing permissions and
12405 * limitations under the License.
12406 */ const Jo = {
12407 BatchGetDocuments: "batchGet",
12408 Commit: "commit",
12409 RunQuery: "runQuery",
12410 RunAggregationQuery: "runAggregationQuery"
12411};
12412
12413/**
12414 * Maps RPC names to the corresponding REST endpoint name.
12415 *
12416 * We use array notation to avoid mangling.
12417 */
12418/**
12419 * @license
12420 * Copyright 2017 Google LLC
12421 *
12422 * Licensed under the Apache License, Version 2.0 (the "License");
12423 * you may not use this file except in compliance with the License.
12424 * You may obtain a copy of the License at
12425 *
12426 * http://www.apache.org/licenses/LICENSE-2.0
12427 *
12428 * Unless required by applicable law or agreed to in writing, software
12429 * distributed under the License is distributed on an "AS IS" BASIS,
12430 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12431 * See the License for the specific language governing permissions and
12432 * limitations under the License.
12433 */
12434/**
12435 * Provides a simple helper class that implements the Stream interface to
12436 * bridge to other implementations that are streams but do not implement the
12437 * interface. The stream callbacks are invoked with the callOn... methods.
12438 */
12439class Yo {
12440 constructor(t) {
12441 this.Hr = t.Hr, this.Jr = t.Jr;
12442 }
12443 Yr(t) {
12444 this.Xr = t;
12445 }
12446 Zr(t) {
12447 this.eo = t;
12448 }
12449 onMessage(t) {
12450 this.no = t;
12451 }
12452 close() {
12453 this.Jr();
12454 }
12455 send(t) {
12456 this.Hr(t);
12457 }
12458 so() {
12459 this.Xr();
12460 }
12461 io(t) {
12462 this.eo(t);
12463 }
12464 ro(t) {
12465 this.no(t);
12466 }
12467}
12468
12469/**
12470 * @license
12471 * Copyright 2017 Google LLC
12472 *
12473 * Licensed under the Apache License, Version 2.0 (the "License");
12474 * you may not use this file except in compliance with the License.
12475 * You may obtain a copy of the License at
12476 *
12477 * http://www.apache.org/licenses/LICENSE-2.0
12478 *
12479 * Unless required by applicable law or agreed to in writing, software
12480 * distributed under the License is distributed on an "AS IS" BASIS,
12481 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12482 * See the License for the specific language governing permissions and
12483 * limitations under the License.
12484 */ class Xo extends
12485/**
12486 * Base class for all Rest-based connections to the backend (WebChannel and
12487 * HTTP).
12488 */
12489class {
12490 constructor(t) {
12491 this.databaseInfo = t, this.databaseId = t.databaseId;
12492 const e = t.ssl ? "https" : "http";
12493 this.oo = e + "://" + t.host, this.uo = "projects/" + this.databaseId.projectId + "/databases/" + this.databaseId.database + "/documents";
12494 }
12495 get co() {
12496 // Both `invokeRPC()` and `invokeStreamingRPC()` use their `path` arguments to determine
12497 // where to run the query, and expect the `request` to NOT specify the "path".
12498 return !1;
12499 }
12500 ao(t, e, n, s, i) {
12501 const r = this.ho(t, e);
12502 x("RestConnection", "Sending: ", r, n);
12503 const o = {};
12504 return this.lo(o, s, i), this.fo(t, r, o, n).then((t => (x("RestConnection", "Received: ", t),
12505 t)), (e => {
12506 throw k("RestConnection", `${t} failed with error: `, e, "url: ", r, "request:", n),
12507 e;
12508 }));
12509 }
12510 _o(t, e, n, s, i, r) {
12511 // The REST API automatically aggregates all of the streamed results, so we
12512 // can just use the normal invoke() method.
12513 return this.ao(t, e, n, s, i);
12514 }
12515 /**
12516 * Modifies the headers for a request, adding any authorization token if
12517 * present and any additional headers for the request.
12518 */ lo(t, e, n) {
12519 t["X-Goog-Api-Client"] = "gl-js/ fire/" + V,
12520 // Content-Type: text/plain will avoid preflight requests which might
12521 // mess with CORS and redirects by proxies. If we add custom headers
12522 // we will need to change this code to potentially use the $httpOverwrite
12523 // parameter supported by ESF to avoid triggering preflight requests.
12524 t["Content-Type"] = "text/plain", this.databaseInfo.appId && (t["X-Firebase-GMPID"] = this.databaseInfo.appId),
12525 e && e.headers.forEach(((e, n) => t[n] = e)), n && n.headers.forEach(((e, n) => t[n] = e));
12526 }
12527 ho(t, e) {
12528 const n = Jo[t];
12529 return `${this.oo}/v1/${e}:${n}`;
12530 }
12531} {
12532 constructor(t) {
12533 super(t), this.forceLongPolling = t.forceLongPolling, this.autoDetectLongPolling = t.autoDetectLongPolling,
12534 this.useFetchStreams = t.useFetchStreams;
12535 }
12536 fo(t, e, n, s) {
12537 return new Promise(((i, r) => {
12538 const o = new g;
12539 o.setWithCredentials(!0), o.listenOnce(y.COMPLETE, (() => {
12540 var e;
12541 try {
12542 switch (o.getLastErrorCode()) {
12543 case p.NO_ERROR:
12544 const n = o.getResponseJson();
12545 x("Connection", "XHR received:", JSON.stringify(n)), i(n);
12546 break;
12547
12548 case p.TIMEOUT:
12549 x("Connection", 'RPC "' + t + '" timed out'), r(new U(L.DEADLINE_EXCEEDED, "Request time out"));
12550 break;
12551
12552 case p.HTTP_ERROR:
12553 const s = o.getStatus();
12554 if (x("Connection", 'RPC "' + t + '" failed with status:', s, "response text:", o.getResponseText()),
12555 s > 0) {
12556 let t = o.getResponseJson();
12557 Array.isArray(t) && (t = t[0]);
12558 const n = null === (e = t) || void 0 === e ? void 0 : e.error;
12559 if (n && n.status && n.message) {
12560 const t = function(t) {
12561 const e = t.toLowerCase().replace(/_/g, "-");
12562 return Object.values(L).indexOf(e) >= 0 ? e : L.UNKNOWN;
12563 }(n.status);
12564 r(new U(t, n.message));
12565 } else r(new U(L.UNKNOWN, "Server responded with status " + o.getStatus()));
12566 } else
12567 // If we received an HTTP_ERROR but there's no status code,
12568 // it's most probably a connection issue
12569 r(new U(L.UNAVAILABLE, "Connection failed."));
12570 break;
12571
12572 default:
12573 M();
12574 }
12575 } finally {
12576 x("Connection", 'RPC "' + t + '" completed.');
12577 }
12578 }));
12579 const u = JSON.stringify(s);
12580 o.send(e, "POST", u, n, 15);
12581 }));
12582 }
12583 wo(t, e, n) {
12584 const s = [ this.oo, "/", "google.firestore.v1.Firestore", "/", t, "/channel" ], i = I(), r = T(), o = {
12585 // Required for backend stickiness, routing behavior is based on this
12586 // parameter.
12587 httpSessionIdParam: "gsessionid",
12588 initMessageHeaders: {},
12589 messageUrlParams: {
12590 // This param is used to improve routing and project isolation by the
12591 // backend and must be included in every request.
12592 database: `projects/${this.databaseId.projectId}/databases/${this.databaseId.database}`
12593 },
12594 sendRawJson: !0,
12595 supportsCrossDomainXhr: !0,
12596 internalChannelParams: {
12597 // Override the default timeout (randomized between 10-20 seconds) since
12598 // a large write batch on a slow internet connection may take a long
12599 // time to send to the backend. Rather than have WebChannel impose a
12600 // tight timeout which could lead to infinite timeouts and retries, we
12601 // set it very large (5-10 minutes) and rely on the browser's builtin
12602 // timeouts to kick in if the request isn't working.
12603 forwardChannelRequestTimeoutMs: 6e5
12604 },
12605 forceLongPolling: this.forceLongPolling,
12606 detectBufferingProxy: this.autoDetectLongPolling
12607 };
12608 this.useFetchStreams && (o.xmlHttpFactory = new E({})), this.lo(o.initMessageHeaders, e, n),
12609 // Sending the custom headers we just added to request.initMessageHeaders
12610 // (Authorization, etc.) will trigger the browser to make a CORS preflight
12611 // request because the XHR will no longer meet the criteria for a "simple"
12612 // CORS request:
12613 // https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#Simple_requests
12614 // Therefore to avoid the CORS preflight request (an extra network
12615 // roundtrip), we use the encodeInitMessageHeaders option to specify that
12616 // the headers should instead be encoded in the request's POST payload,
12617 // which is recognized by the webchannel backend.
12618 o.encodeInitMessageHeaders = !0;
12619 const u = s.join("");
12620 x("Connection", "Creating WebChannel: " + u, o);
12621 const c = i.createWebChannel(u, o);
12622 // WebChannel supports sending the first message with the handshake - saving
12623 // a network round trip. However, it will have to call send in the same
12624 // JS event loop as open. In order to enforce this, we delay actually
12625 // opening the WebChannel until send is called. Whether we have called
12626 // open is tracked with this variable.
12627 let a = !1, h = !1;
12628 // A flag to determine whether the stream was closed (by us or through an
12629 // error/close event) to avoid delivering multiple close events or sending
12630 // on a closed stream
12631 const l = new Yo({
12632 Hr: t => {
12633 h ? x("Connection", "Not sending because WebChannel is closed:", t) : (a || (x("Connection", "Opening WebChannel transport."),
12634 c.open(), a = !0), x("Connection", "WebChannel sending:", t), c.send(t));
12635 },
12636 Jr: () => c.close()
12637 }), f = (t, e, n) => {
12638 // TODO(dimond): closure typing seems broken because WebChannel does
12639 // not implement goog.events.Listenable
12640 t.listen(e, (t => {
12641 try {
12642 n(t);
12643 } catch (t) {
12644 setTimeout((() => {
12645 throw t;
12646 }), 0);
12647 }
12648 }));
12649 };
12650 // Closure events are guarded and exceptions are swallowed, so catch any
12651 // exception and rethrow using a setTimeout so they become visible again.
12652 // Note that eventually this function could go away if we are confident
12653 // enough the code is exception free.
12654 return f(c, A.EventType.OPEN, (() => {
12655 h || x("Connection", "WebChannel transport opened.");
12656 })), f(c, A.EventType.CLOSE, (() => {
12657 h || (h = !0, x("Connection", "WebChannel transport closed"), l.io());
12658 })), f(c, A.EventType.ERROR, (t => {
12659 h || (h = !0, k("Connection", "WebChannel transport errored:", t), l.io(new U(L.UNAVAILABLE, "The operation could not be completed")));
12660 })), f(c, A.EventType.MESSAGE, (t => {
12661 var e;
12662 if (!h) {
12663 const n = t.data[0];
12664 F(!!n);
12665 // TODO(b/35143891): There is a bug in One Platform that caused errors
12666 // (and only errors) to be wrapped in an extra array. To be forward
12667 // compatible with the bug we need to check either condition. The latter
12668 // can be removed once the fix has been rolled out.
12669 // Use any because msgData.error is not typed.
12670 const s = n, i = s.error || (null === (e = s[0]) || void 0 === e ? void 0 : e.error);
12671 if (i) {
12672 x("Connection", "WebChannel received error:", i);
12673 // error.status will be a string like 'OK' or 'NOT_FOUND'.
12674 const t = i.status;
12675 let e =
12676 /**
12677 * Maps an error Code from a GRPC status identifier like 'NOT_FOUND'.
12678 *
12679 * @returns The Code equivalent to the given status string or undefined if
12680 * there is no match.
12681 */
12682 function(t) {
12683 // lookup by string
12684 // eslint-disable-next-line @typescript-eslint/no-explicit-any
12685 const e = Zn[t];
12686 if (void 0 !== e) return ns(e);
12687 }(t), n = i.message;
12688 void 0 === e && (e = L.INTERNAL, n = "Unknown error status: " + t + " with message " + i.message),
12689 // Mark closed so no further events are propagated
12690 h = !0, l.io(new U(e, n)), c.close();
12691 } else x("Connection", "WebChannel received:", n), l.ro(n);
12692 }
12693 })), f(r, R.STAT_EVENT, (t => {
12694 t.stat === b.PROXY ? x("Connection", "Detected buffering proxy") : t.stat === b.NOPROXY && x("Connection", "Detected no buffering proxy");
12695 })), setTimeout((() => {
12696 // Technically we could/should wait for the WebChannel opened event,
12697 // but because we want to send the first message with the WebChannel
12698 // handshake we pretend the channel opened here (asynchronously), and
12699 // then delay the actual open until the first message is sent.
12700 l.so();
12701 }), 0), l;
12702 }
12703}
12704
12705/**
12706 * @license
12707 * Copyright 2020 Google LLC
12708 *
12709 * Licensed under the Apache License, Version 2.0 (the "License");
12710 * you may not use this file except in compliance with the License.
12711 * You may obtain a copy of the License at
12712 *
12713 * http://www.apache.org/licenses/LICENSE-2.0
12714 *
12715 * Unless required by applicable law or agreed to in writing, software
12716 * distributed under the License is distributed on an "AS IS" BASIS,
12717 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12718 * See the License for the specific language governing permissions and
12719 * limitations under the License.
12720 */
12721/** Initializes the WebChannelConnection for the browser. */
12722/**
12723 * @license
12724 * Copyright 2020 Google LLC
12725 *
12726 * Licensed under the Apache License, Version 2.0 (the "License");
12727 * you may not use this file except in compliance with the License.
12728 * You may obtain a copy of the License at
12729 *
12730 * http://www.apache.org/licenses/LICENSE-2.0
12731 *
12732 * Unless required by applicable law or agreed to in writing, software
12733 * distributed under the License is distributed on an "AS IS" BASIS,
12734 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12735 * See the License for the specific language governing permissions and
12736 * limitations under the License.
12737 */
12738/** The Platform's 'window' implementation or null if not available. */
12739function Zo() {
12740 // `window` is not always available, e.g. in ReactNative and WebWorkers.
12741 // eslint-disable-next-line no-restricted-globals
12742 return "undefined" != typeof window ? window : null;
12743}
12744
12745/** The Platform's 'document' implementation or null if not available. */ function tu() {
12746 // `document` is not always available, e.g. in ReactNative and WebWorkers.
12747 // eslint-disable-next-line no-restricted-globals
12748 return "undefined" != typeof document ? document : null;
12749}
12750
12751/**
12752 * @license
12753 * Copyright 2020 Google LLC
12754 *
12755 * Licensed under the Apache License, Version 2.0 (the "License");
12756 * you may not use this file except in compliance with the License.
12757 * You may obtain a copy of the License at
12758 *
12759 * http://www.apache.org/licenses/LICENSE-2.0
12760 *
12761 * Unless required by applicable law or agreed to in writing, software
12762 * distributed under the License is distributed on an "AS IS" BASIS,
12763 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12764 * See the License for the specific language governing permissions and
12765 * limitations under the License.
12766 */ function eu(t) {
12767 return new Vs(t, /* useProto3Json= */ !0);
12768}
12769
12770/**
12771 * An instance of the Platform's 'TextEncoder' implementation.
12772 */
12773/**
12774 * A helper for running delayed tasks following an exponential backoff curve
12775 * between attempts.
12776 *
12777 * Each delay is made up of a "base" delay which follows the exponential
12778 * backoff curve, and a +/- 50% "jitter" that is calculated and added to the
12779 * base delay. This prevents clients from accidentally synchronizing their
12780 * delays causing spikes of load to the backend.
12781 */
12782class nu {
12783 constructor(
12784 /**
12785 * The AsyncQueue to run backoff operations on.
12786 */
12787 t,
12788 /**
12789 * The ID to use when scheduling backoff operations on the AsyncQueue.
12790 */
12791 e,
12792 /**
12793 * The initial delay (used as the base delay on the first retry attempt).
12794 * Note that jitter will still be applied, so the actual delay could be as
12795 * little as 0.5*initialDelayMs.
12796 */
12797 n = 1e3
12798 /**
12799 * The multiplier to use to determine the extended base delay after each
12800 * attempt.
12801 */ , s = 1.5
12802 /**
12803 * The maximum base delay after which no further backoff is performed.
12804 * Note that jitter will still be applied, so the actual delay could be as
12805 * much as 1.5*maxDelayMs.
12806 */ , i = 6e4) {
12807 this.Hs = t, this.timerId = e, this.mo = n, this.yo = s, this.po = i, this.Io = 0,
12808 this.To = null,
12809 /** The last backoff attempt, as epoch milliseconds. */
12810 this.Eo = Date.now(), this.reset();
12811 }
12812 /**
12813 * Resets the backoff delay.
12814 *
12815 * The very next backoffAndWait() will have no delay. If it is called again
12816 * (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
12817 * subsequent ones will increase according to the backoffFactor.
12818 */ reset() {
12819 this.Io = 0;
12820 }
12821 /**
12822 * Resets the backoff delay to the maximum delay (e.g. for use after a
12823 * RESOURCE_EXHAUSTED error).
12824 */ Ao() {
12825 this.Io = this.po;
12826 }
12827 /**
12828 * Returns a promise that resolves after currentDelayMs, and increases the
12829 * delay for any subsequent attempts. If there was a pending backoff operation
12830 * already, it will be canceled.
12831 */ Ro(t) {
12832 // Cancel any pending backoff operation.
12833 this.cancel();
12834 // First schedule using the current base (which may be 0 and should be
12835 // honored as such).
12836 const e = Math.floor(this.Io + this.bo()), n = Math.max(0, Date.now() - this.Eo), s = Math.max(0, e - n);
12837 // Guard against lastAttemptTime being in the future due to a clock change.
12838 s > 0 && x("ExponentialBackoff", `Backing off for ${s} ms (base delay: ${this.Io} ms, delay with jitter: ${e} ms, last attempt: ${n} ms ago)`),
12839 this.To = this.Hs.enqueueAfterDelay(this.timerId, s, (() => (this.Eo = Date.now(),
12840 t()))),
12841 // Apply backoff factor to determine next delay and ensure it is within
12842 // bounds.
12843 this.Io *= this.yo, this.Io < this.mo && (this.Io = this.mo), this.Io > this.po && (this.Io = this.po);
12844 }
12845 Po() {
12846 null !== this.To && (this.To.skipDelay(), this.To = null);
12847 }
12848 cancel() {
12849 null !== this.To && (this.To.cancel(), this.To = null);
12850 }
12851 /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */ bo() {
12852 return (Math.random() - .5) * this.Io;
12853 }
12854}
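// Illustrative sketch (not part of the SDK): the delay schedule described above.
// Each retry waits for an exponentially growing "base" delay, clamped between the
// initial and maximum delays, plus a +/-50% jitter so that clients do not retry in
// lockstep. `demoBackoffDelays` is a hypothetical helper used only for illustration.
function demoBackoffDelays(attempts, initialDelayMs = 1e3, backoffFactor = 1.5, maxDelayMs = 6e4) {
    const delays = [];
    // The first attempt after reset() has no base delay.
    let base = 0;
    for (let i = 0; i < attempts; i++) {
        const jitter = (Math.random() - .5) * base;
        delays.push(Math.max(0, Math.floor(base + jitter)));
        // Grow the base for the next attempt and keep it within [initial, max].
        base = Math.min(Math.max(base * backoffFactor, initialDelayMs), maxDelayMs);
    }
    return delays;
}
// For example, demoBackoffDelays(5) yields roughly [0, ~1000, ~1500, ~2250, ~3375] ms,
// with each non-zero entry varying by up to +/-50% because of the jitter.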
12855
12856/**
12857 * @license
12858 * Copyright 2017 Google LLC
12859 *
12860 * Licensed under the Apache License, Version 2.0 (the "License");
12861 * you may not use this file except in compliance with the License.
12862 * You may obtain a copy of the License at
12863 *
12864 * http://www.apache.org/licenses/LICENSE-2.0
12865 *
12866 * Unless required by applicable law or agreed to in writing, software
12867 * distributed under the License is distributed on an "AS IS" BASIS,
12868 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12869 * See the License for the specific language governing permissions and
12870 * limitations under the License.
12871 */
12872/**
12873 * A PersistentStream is an abstract base class that represents a streaming RPC
12874 * to the Firestore backend. It's built on top of the connection's own support
12875 * for streaming RPCs, and adds several critical features for our clients:
12876 *
12877 * - Exponential backoff on failure
12878 * - Authentication via CredentialsProvider
12879 * - Dispatching all callbacks into the shared worker queue
12880 * - Closing idle streams after 60 seconds of inactivity
12881 *
12882 * Subclasses of PersistentStream implement serialization of models to and
12883 * from the JSON representation of the protocol buffers for a specific
12884 * streaming RPC.
12885 *
12886 * ## Starting and Stopping
12887 *
12888 * Streaming RPCs are stateful and need to be start()ed before messages can
12889 * be sent and received. The PersistentStream will call the onOpen() function
12890 * of the listener once the stream is ready to accept requests.
12891 *
12892 * Should a start() fail, PersistentStream will call the registered onClose()
12893 * listener with a FirestoreError indicating what went wrong.
12894 *
12895 * A PersistentStream can be started and stopped repeatedly.
12896 *
12897 * Generic types:
12898 * SendType: The type of the outgoing message of the underlying
12899 * connection stream
12900 * ReceiveType: The type of the incoming message of the underlying
12901 * connection stream
12902 * ListenerType: The type of the listener that will be used for callbacks
12903 */
12904class su {
12905 constructor(t, e, n, s, i, r, o, u) {
12906 this.Hs = t, this.vo = n, this.Vo = s, this.connection = i, this.authCredentialsProvider = r,
12907 this.appCheckCredentialsProvider = o, this.listener = u, this.state = 0 /* Initial */ ,
12908 /**
12909 * A close count that's incremented every time the stream is closed; used by
12910 * getCloseGuardedDispatcher() to invalidate callbacks that happen after
12911 * close.
12912 */
12913 this.So = 0, this.Do = null, this.Co = null, this.stream = null, this.xo = new nu(t, e);
12914 }
12915 /**
12916 * Returns true if start() has been called and no error has occurred. True
12917 * indicates the stream is open or in the process of opening (which
12918 * encompasses respecting backoff, getting auth tokens, and starting the
12919 * actual RPC). Use isOpen() to determine if the stream is open and ready for
12920 * outbound requests.
12921 */ No() {
12922 return 1 /* Starting */ === this.state || 5 /* Backoff */ === this.state || this.ko();
12923 }
12924 /**
12925 * Returns true if the underlying RPC is open (the onOpen() listener has been
12926 * called) and the stream is ready for outbound requests.
12927 */ ko() {
12928 return 2 /* Open */ === this.state || 3 /* Healthy */ === this.state;
12929 }
12930 /**
12931 * Starts the RPC. Only allowed if isStarted() returns false. The stream is
12932 * not immediately ready for use: onOpen() will be invoked when the RPC is
12933 * ready for outbound requests, at which point isOpen() will return true.
12934 *
12935 * When start returns, isStarted() will return true.
12936 */ start() {
12937 4 /* Error */ !== this.state ? this.auth() : this.Oo();
12938 }
12939 /**
12940 * Stops the RPC. This call is idempotent and allowed regardless of the
12941 * current isStarted() state.
12942 *
12943 * When stop returns, isStarted() and isOpen() will both return false.
12944 */ async stop() {
12945 this.No() && await this.close(0 /* Initial */);
12946 }
12947 /**
12948 * After an error the stream will usually back off on the next attempt to
12949 * start it. If the error warrants an immediate restart of the stream, the
12950 * sender can use this to indicate that the receiver should not back off.
12951 *
12952 * Each error will call the onClose() listener. That function can decide to
12953 * inhibit backoff if required.
12954 */ Mo() {
12955 this.state = 0 /* Initial */ , this.xo.reset();
12956 }
12957 /**
12958 * Marks this stream as idle. If no further actions are performed on the
12959 * stream for one minute, the stream will automatically close itself and
12960 * notify the stream's onClose() handler with Status.OK. The stream will then
12961 * be in a !isStarted() state, requiring the caller to start the stream again
12962 * before further use.
12963 *
12964 * Only streams that are in state 'Open' can be marked idle, as all other
12965 * states imply pending network operations.
12966 */ Fo() {
12967 // Starts the idle time if we are in state 'Open' and are not yet already
12968 // running a timer (in which case the previous idle timeout still applies).
12969 this.ko() && null === this.Do && (this.Do = this.Hs.enqueueAfterDelay(this.vo, 6e4, (() => this.$o())));
12970 }
12971 /** Sends a message to the underlying stream. */ Bo(t) {
12972 this.Lo(), this.stream.send(t);
12973 }
12974 /** Called by the idle timer when the stream should close due to inactivity. */ async $o() {
12975 if (this.ko())
12976 // When timing out an idle stream there's no reason to force the stream into backoff when
12977 // it restarts so set the stream state to Initial instead of Error.
12978 return this.close(0 /* Initial */);
12979 }
12980 /** Marks the stream as active again. */ Lo() {
12981 this.Do && (this.Do.cancel(), this.Do = null);
12982 }
12983 /** Cancels the health check delayed operation. */ Uo() {
12984 this.Co && (this.Co.cancel(), this.Co = null);
12985 }
12986 /**
12987 * Closes the stream and cleans up as necessary:
12988 *
12989 * * closes the underlying GRPC stream;
12990 * * calls the onClose handler with the given 'error';
12991 * * sets internal stream state to 'finalState';
12992 * * adjusts the backoff timer based on the error
12993 *
12994 * A new stream can be opened by calling start().
12995 *
12996 * @param finalState - the intended state of the stream after closing.
12997 * @param error - the error the connection was closed with.
12998 */ async close(t, e) {
12999 // Cancel any outstanding timers (they're guaranteed not to execute).
13000 this.Lo(), this.Uo(), this.xo.cancel(),
13001 // Invalidates any stream-related callbacks (e.g. from auth or the
13002 // underlying stream), guaranteeing they won't execute.
13003 this.So++, 4 /* Error */ !== t ?
13004 // If this is an intentional close ensure we don't delay our next connection attempt.
13005 this.xo.reset() : e && e.code === L.RESOURCE_EXHAUSTED ? (
13006 // Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
13007 N(e.toString()), N("Using maximum backoff delay to prevent overloading the backend."),
13008 this.xo.Ao()) : e && e.code === L.UNAUTHENTICATED && 3 /* Healthy */ !== this.state && (
13009 // "unauthenticated" error means the token was rejected. This should rarely
13010 // happen since both Auth and AppCheck ensure a sufficient TTL when we
13011 // request a token. If a user manually resets their system clock this can
13012 // fail, however. In this case, we should get a Code.UNAUTHENTICATED error
13013 // before we received the first message and we need to invalidate the token
13014 // to ensure that we fetch a new token.
13015 this.authCredentialsProvider.invalidateToken(), this.appCheckCredentialsProvider.invalidateToken()),
13016 // Clean up the underlying stream because we are no longer interested in events.
13017 null !== this.stream && (this.qo(), this.stream.close(), this.stream = null),
13018 // This state must be assigned before calling onClose() to allow the callback to
13019 // inhibit backoff or otherwise manipulate the state in its non-started state.
13020 this.state = t,
13021 // Notify the listener that the stream closed.
13022 await this.listener.Zr(e);
13023 }
13024 /**
13025 * Can be overridden to perform additional cleanup before the stream is closed.
13026 * Calling super.tearDown() is not required.
13027 */ qo() {}
13028 auth() {
13029 this.state = 1 /* Starting */;
13030 const t = this.Ko(this.So), e = this.So;
13031 // TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
13032 Promise.all([ this.authCredentialsProvider.getToken(), this.appCheckCredentialsProvider.getToken() ]).then((([t, n]) => {
13033 // Stream can be stopped while waiting for authentication.
13034 // TODO(mikelehen): We really should just use dispatchIfNotClosed
13035 // and let this dispatch onto the queue, but that opened a spec test can
13036 // of worms that I don't want to deal with in this PR.
13037 this.So === e &&
13038 // Normally we'd have to schedule the callback on the AsyncQueue.
13039 // However, the following calls are safe to be called outside the
13040 // AsyncQueue since they don't chain asynchronous calls
13041 this.Go(t, n);
13042 }), (e => {
13043 t((() => {
13044 const t = new U(L.UNKNOWN, "Fetching auth token failed: " + e.message);
13045 return this.Qo(t);
13046 }));
13047 }));
13048 }
13049 Go(t, e) {
13050 const n = this.Ko(this.So);
13051 this.stream = this.jo(t, e), this.stream.Yr((() => {
13052 n((() => (this.state = 2 /* Open */ , this.Co = this.Hs.enqueueAfterDelay(this.Vo, 1e4, (() => (this.ko() && (this.state = 3 /* Healthy */),
13053 Promise.resolve()))), this.listener.Yr())));
13054 })), this.stream.Zr((t => {
13055 n((() => this.Qo(t)));
13056 })), this.stream.onMessage((t => {
13057 n((() => this.onMessage(t)));
13058 }));
13059 }
13060 Oo() {
13061 this.state = 5 /* Backoff */ , this.xo.Ro((async () => {
13062 this.state = 0 /* Initial */ , this.start();
13063 }));
13064 }
13065 // Visible for tests
13066 Qo(t) {
13067 // In theory the stream could close cleanly, however, in our current model
13068 // we never expect this to happen because if we stop a stream ourselves,
13069 // this callback will never be called. To prevent cases where we retry
13070 // without a backoff accidentally, we set the stream to error in all cases.
13071 return x("PersistentStream", `close with error: ${t}`), this.stream = null, this.close(4 /* Error */ , t);
13072 }
13073 /**
13074 * Returns a "dispatcher" function that dispatches operations onto the
13075 * AsyncQueue but only runs them if closeCount remains unchanged. This allows
13076 * us to turn auth / stream callbacks into no-ops if the stream is closed /
13077 * re-opened, etc.
13078 */ Ko(t) {
13079 return e => {
13080 this.Hs.enqueueAndForget((() => this.So === t ? e() : (x("PersistentStream", "stream callback skipped by getCloseGuardedDispatcher."),
13081 Promise.resolve())));
13082 };
13083 }
13084}
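// Illustrative sketch (not part of the SDK): the "close-guarded dispatcher" pattern
// used by the stream above. Callbacks capture the close count that was current when
// they were created, so anything that fires after the stream has been closed or
// re-opened silently becomes a no-op. `DemoCloseGuard` is a hypothetical name.
class DemoCloseGuard {
    constructor() {
        this.closeCount = 0;
    }
    close() {
        // Invalidates every dispatcher handed out before this point.
        this.closeCount++;
    }
    dispatcher() {
        const expected = this.closeCount;
        return callback => {
            if (this.closeCount === expected) return callback();
            // Stale callback from a previous stream generation: skip it.
        };
    }
}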
13085
13086/**
13087 * A PersistentStream that implements the Listen RPC.
13088 *
13089 * Once the Listen stream has called the onOpen() listener, any number of
13090 * listen() and unlisten() calls can be made to control what changes will be
13091 * sent from the server for ListenResponses.
13092 */ class iu extends su {
13093 constructor(t, e, n, s, i, r) {
13094 super(t, "listen_stream_connection_backoff" /* ListenStreamConnectionBackoff */ , "listen_stream_idle" /* ListenStreamIdle */ , "health_check_timeout" /* HealthCheckTimeout */ , e, n, s, r),
13095 this.It = i;
13096 }
13097 jo(t, e) {
13098 return this.connection.wo("Listen", t, e);
13099 }
13100 onMessage(t) {
13101 // A successful response means the stream is healthy
13102 this.xo.reset();
13103 const e = Gs(this.It, t), n = function(t) {
13104 // We have only reached a consistent snapshot for the entire stream if there
13105 // is a read_time set and it applies to all targets (i.e. the list of
13106 // targets is empty). The backend is guaranteed to send such responses.
13107 if (!("targetChange" in t)) return it.min();
13108 const e = t.targetChange;
13109 return e.targetIds && e.targetIds.length ? it.min() : e.readTime ? xs(e.readTime) : it.min();
13110 }(t);
13111 return this.listener.Wo(e, n);
13112 }
13113 /**
13114 * Registers interest in the results of the given target. If the target
13115 * includes a resumeToken it will be included in the request. Results that
13116 * affect the target will be streamed back as WatchChange messages that
13117 * reference the targetId.
13118 */ zo(t) {
13119 const e = {};
13120 e.database = Bs(this.It), e.addTarget = function(t, e) {
13121 let n;
13122 const s = e.target;
13123 return n = Fe(s) ? {
13124 documents: zs(t, s)
13125 } : {
13126 query: Hs(t, s)
13127 }, n.targetId = e.targetId, e.resumeToken.approximateByteSize() > 0 ? n.resumeToken = Ds(t, e.resumeToken) : e.snapshotVersion.compareTo(it.min()) > 0 && (
13128 // TODO(wuandy): Consider removing above check because it is most likely true.
13129 // Right now, many tests depend on this behaviour though (leaving min() out
13130 // of serialization).
13131 n.readTime = Ss(t, e.snapshotVersion.toTimestamp())), n;
13132 }(this.It, t);
13133 const n = Ys(this.It, t);
13134 n && (e.labels = n), this.Bo(e);
13135 }
13136 /**
13137 * Unregisters interest in the results of the target associated with the
13138 * given targetId.
13139 */ Ho(t) {
13140 const e = {};
13141 e.database = Bs(this.It), e.removeTarget = t, this.Bo(e);
13142 }
13143}
13144
13145/**
13146 * A Stream that implements the Write RPC.
13147 *
13148 * The Write RPC requires the caller to maintain special streamToken
13149 * state in between calls, to help the server understand which responses the
13150 * client has processed by the time the next request is made. Every response
13151 * will contain a streamToken; this value must be passed to the next
13152 * request.
13153 *
13154 * After calling start() on this stream, the next request must be a handshake,
13155 * containing whatever streamToken is on hand. Once a response to this
13156 * request is received, all pending mutations may be submitted. When
13157 * submitting multiple batches of mutations at the same time, it's
13158 * okay to use the same streamToken for the calls to writeMutations.
13159 *
13160 * TODO(b/33271235): Use proto types
13161 */ class ru extends su {
13162 constructor(t, e, n, s, i, r) {
13163 super(t, "write_stream_connection_backoff" /* WriteStreamConnectionBackoff */ , "write_stream_idle" /* WriteStreamIdle */ , "health_check_timeout" /* HealthCheckTimeout */ , e, n, s, r),
13164 this.It = i, this.Jo = !1;
13165 }
13166 /**
13167 * Tracks whether or not a handshake has been successfully exchanged and
13168 * the stream is ready to accept mutations.
13169 */ get Yo() {
13170 return this.Jo;
13171 }
13172 // Override of PersistentStream.start
13173 start() {
13174 this.Jo = !1, this.lastStreamToken = void 0, super.start();
13175 }
13176 qo() {
13177 this.Jo && this.Xo([]);
13178 }
13179 jo(t, e) {
13180 return this.connection.wo("Write", t, e);
13181 }
13182 onMessage(t) {
13183 if (
13184 // Always capture the last stream token.
13185 F(!!t.streamToken), this.lastStreamToken = t.streamToken, this.Jo) {
13186 // A successful first write response means the stream is healthy,
13187 // Note, that we could consider a successful handshake healthy, however,
13188 // the write itself might be causing an error we want to back off from.
13189 this.xo.reset();
13190 const e = Ws(t.writeResults, t.commitTime), n = xs(t.commitTime);
13191 return this.listener.Zo(n, e);
13192 }
13193 // The first response is always the handshake response
13194 return F(!t.writeResults || 0 === t.writeResults.length), this.Jo = !0, this.listener.tu();
13195 }
13196 /**
13197 * Sends an initial streamToken to the server, performing the handshake
13198 * required to make the StreamingWrite RPC work. Subsequent
13199 * calls should wait until onHandshakeComplete was called.
13200 */ eu() {
13201 // TODO(dimond): Support stream resumption. We intentionally do not set the
13202 // stream token on the handshake, ignoring any stream token we might have.
13203 const t = {};
13204 t.database = Bs(this.It), this.Bo(t);
13205 }
13206 /** Sends a group of mutations to the Firestore backend to apply. */ Xo(t) {
13207 const e = {
13208 streamToken: this.lastStreamToken,
13209 writes: t.map((t => Qs(this.It, t)))
13210 };
13211 this.Bo(e);
13212 }
13213}
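// Illustrative sketch (not part of the SDK): the stream-token protocol described in
// the class comment above. The first request after start() is a handshake that only
// names the database; every response carries a streamToken, and that token must be
// echoed on the next request carrying writes. `DemoWriteProtocol` is a hypothetical
// name used only for illustration.
class DemoWriteProtocol {
    constructor(database) {
        this.database = database;
        this.lastStreamToken = void 0;
        this.handshakeComplete = false;
    }
    handshakeRequest() {
        // Intentionally omits any stream token (see the TODO about stream resumption above).
        return {
            database: this.database
        };
    }
    onResponse(response) {
        // Always capture the latest token; the first response acknowledges the handshake.
        this.lastStreamToken = response.streamToken;
        this.handshakeComplete = true;
    }
    writeRequest(writes) {
        if (!this.handshakeComplete) throw new Error("Handshake must complete before writing mutations.");
        return {
            streamToken: this.lastStreamToken,
            writes: writes
        };
    }
}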
13214
13215/**
13216 * @license
13217 * Copyright 2017 Google LLC
13218 *
13219 * Licensed under the Apache License, Version 2.0 (the "License");
13220 * you may not use this file except in compliance with the License.
13221 * You may obtain a copy of the License at
13222 *
13223 * http://www.apache.org/licenses/LICENSE-2.0
13224 *
13225 * Unless required by applicable law or agreed to in writing, software
13226 * distributed under the License is distributed on an "AS IS" BASIS,
13227 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13228 * See the License for the specific language governing permissions and
13229 * limitations under the License.
13230 */
13231/**
13232 * Datastore and its related methods are a wrapper around the external Google
13233 * Cloud Datastore grpc API, which provides an interface that is more convenient
13234 * for the rest of the client SDK architecture to consume.
13235 */
13236/**
13237 * An implementation of Datastore that exposes additional state for internal
13238 * consumption.
13239 */
13240class ou extends class {} {
13241 constructor(t, e, n, s) {
13242 super(), this.authCredentials = t, this.appCheckCredentials = e, this.connection = n,
13243 this.It = s, this.nu = !1;
13244 }
13245 su() {
13246 if (this.nu) throw new U(L.FAILED_PRECONDITION, "The client has already been terminated.");
13247 }
13248 /** Invokes the provided RPC with auth and AppCheck tokens. */ ao(t, e, n) {
13249 return this.su(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([s, i]) => this.connection.ao(t, e, n, s, i))).catch((t => {
13250 throw "FirebaseError" === t.name ? (t.code === L.UNAUTHENTICATED && (this.authCredentials.invalidateToken(),
13251 this.appCheckCredentials.invalidateToken()), t) : new U(L.UNKNOWN, t.toString());
13252 }));
13253 }
13254 /** Invokes the provided RPC with streamed results with auth and AppCheck tokens. */ _o(t, e, n, s) {
13255 return this.su(), Promise.all([ this.authCredentials.getToken(), this.appCheckCredentials.getToken() ]).then((([i, r]) => this.connection._o(t, e, n, i, r, s))).catch((t => {
13256 throw "FirebaseError" === t.name ? (t.code === L.UNAUTHENTICATED && (this.authCredentials.invalidateToken(),
13257 this.appCheckCredentials.invalidateToken()), t) : new U(L.UNKNOWN, t.toString());
13258 }));
13259 }
13260 terminate() {
13261 this.nu = !0;
13262 }
13263}
13264
13265// TODO(firestorexp): Make sure there is only one Datastore instance per
13266// firestore-exp client.
13267async function uu(t, e) {
13268 const n = B(t), s = function(t, e) {
13269 const n = Hs(t, e);
13270 return {
13271 structuredAggregationQuery: {
13272 aggregations: [ {
13273 count: {},
13274 alias: "count_alias"
13275 } ],
13276 structuredQuery: n.structuredQuery
13277 },
13278 parent: n.parent
13279 };
13280 }(n.It, hn(e)), i = s.parent;
13281 n.connection.co || delete s.parent;
13282 return (await n._o("RunAggregationQuery", i, s, /*expectedResponseCount=*/ 1)).filter((t => !!t.result)).map((t => t.result.aggregateFields));
13283}
13284
13285/**
13286 * A component used by the RemoteStore to track the OnlineState (that is,
13287 * whether or not the client as a whole should be considered to be online or
13288 * offline), implementing the appropriate heuristics.
13289 *
13290 * In particular, when the client is trying to connect to the backend, we
13291 * allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
13292 * a connection to succeed. If we have too many failures or the timeout elapses,
13293 * then we set the OnlineState to Offline, and the client will behave as if
13294 * it is offline (get()s will return cached data, etc.).
13295 */
13296class cu {
13297 constructor(t, e) {
13298 this.asyncQueue = t, this.onlineStateHandler = e,
13299 /** The current OnlineState. */
13300 this.state = "Unknown" /* Unknown */ ,
13301 /**
13302 * A count of consecutive failures to open the stream. If it reaches the
13303 * maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
13304 * Offline.
13305 */
13306 this.iu = 0,
13307 /**
13308 * A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
13309 * transition from OnlineState.Unknown to OnlineState.Offline without waiting
13310 * for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
13311 */
13312 this.ru = null,
13313 /**
13314 * Whether the client should log a warning message if it fails to connect to
13315 * the backend (initially true, cleared after a successful stream, or if we've
13316 * logged the message already).
13317 */
13318 this.ou = !0;
13319 }
13320 /**
13321 * Called by RemoteStore when a watch stream is started (including on each
13322 * backoff attempt).
13323 *
13324 * If this is the first attempt, it sets the OnlineState to Unknown and starts
13325 * the onlineStateTimer.
13326 */ uu() {
13327 0 === this.iu && (this.cu("Unknown" /* Unknown */), this.ru = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* OnlineStateTimeout */ , 1e4, (() => (this.ru = null,
13328 this.au("Backend didn't respond within 10 seconds."), this.cu("Offline" /* Offline */),
13329 Promise.resolve()))));
13330 }
13331 /**
13332 * Updates our OnlineState as appropriate after the watch stream reports a
13333 * failure. The first failure moves us to the 'Unknown' state. We then may
13334 * allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
13335 * actually transition to the 'Offline' state.
13336 */ hu(t) {
13337 "Online" /* Online */ === this.state ? this.cu("Unknown" /* Unknown */) : (this.iu++,
13338 this.iu >= 1 && (this.lu(), this.au(`Connection failed 1 times. Most recent error: ${t.toString()}`),
13339 this.cu("Offline" /* Offline */)));
13340 }
13341 /**
13342 * Explicitly sets the OnlineState to the specified state.
13343 *
13344 * Note that this resets our timers / failure counters, etc. used by our
13345 * Offline heuristics, so must not be used in place of
13346 * handleWatchStreamStart() and handleWatchStreamFailure().
13347 */ set(t) {
13348 this.lu(), this.iu = 0, "Online" /* Online */ === t && (
13349 // We've connected to watch at least once. Don't warn the developer
13350 // about being offline going forward.
13351 this.ou = !1), this.cu(t);
13352 }
13353 cu(t) {
13354 t !== this.state && (this.state = t, this.onlineStateHandler(t));
13355 }
13356 au(t) {
13357 const e = `Could not reach Cloud Firestore backend. ${t}\nThis typically indicates that your device does not have a healthy Internet connection at the moment. The client will operate in offline mode until it is able to successfully connect to the backend.`;
13358 this.ou ? (N(e), this.ou = !1) : x("OnlineStateTracker", e);
13359 }
13360 lu() {
13361 null !== this.ru && (this.ru.cancel(), this.ru = null);
13362 }
13363}
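// Illustrative sketch (not part of the SDK): the heuristic described above, reduced
// to its two triggers. The client reports "Offline" either after the watch stream has
// failed the maximum number of times or after the online-state timeout elapses without
// a successful connection, whichever comes first; otherwise it stays in "Unknown" and
// keeps serving cached data. The helper and its defaults are hypothetical, mirroring
// the constants visible in the minified code above (1 failure, 10 seconds).
function demoResolveOnlineState(failureCount, elapsedMs, maxFailures = 1, timeoutMs = 1e4) {
    if (failureCount >= maxFailures || elapsedMs >= timeoutMs) return "Offline";
    return "Unknown";
}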
13364
13365/**
13366 * @license
13367 * Copyright 2017 Google LLC
13368 *
13369 * Licensed under the Apache License, Version 2.0 (the "License");
13370 * you may not use this file except in compliance with the License.
13371 * You may obtain a copy of the License at
13372 *
13373 * http://www.apache.org/licenses/LICENSE-2.0
13374 *
13375 * Unless required by applicable law or agreed to in writing, software
13376 * distributed under the License is distributed on an "AS IS" BASIS,
13377 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13378 * See the License for the specific language governing permissions and
13379 * limitations under the License.
13380 */ class au {
13381 constructor(
13382 /**
13383 * The local store, used to fill the write pipeline with outbound mutations.
13384 */
13385 t,
13386 /** The client-side proxy for interacting with the backend. */
13387 e, n, s, i) {
13388 this.localStore = t, this.datastore = e, this.asyncQueue = n, this.remoteSyncer = {},
13389 /**
13390 * A list of up to MAX_PENDING_WRITES writes that we have fetched from the
13391 * LocalStore via fillWritePipeline() and have or will send to the write
13392 * stream.
13393 *
13394 * Whenever writePipeline.length > 0 the RemoteStore will attempt to start or
13395 * restart the write stream. When the stream is established the writes in the
13396 * pipeline will be sent in order.
13397 *
13398 * Writes remain in writePipeline until they are acknowledged by the backend
13399 * and thus will automatically be re-sent if the stream is interrupted /
13400 * restarted before they're acknowledged.
13401 *
13402 * Write responses from the backend are linked to their originating request
13403 * purely based on order, and so we can just shift() writes from the front of
13404 * the writePipeline as we receive responses.
13405 */
13406 this.fu = [],
13407 /**
13408 * A mapping of watched targets that the client cares about tracking and the
13409 * user has explicitly called a 'listen' for this target.
13410 *
13411 * These targets may or may not have been sent to or acknowledged by the
13412 * server. On re-establishing the listen stream, these targets should be sent
13413 * to the server. The targets removed with unlistens are removed eagerly
13414 * without waiting for confirmation from the listen stream.
13415 */
13416 this.du = new Map,
13417 /**
13418 * A set of reasons for why the RemoteStore may be offline. If empty, the
13419 * RemoteStore may start its network connections.
13420 */
13421 this._u = new Set,
13422 /**
13423 * Event handlers that get called when the network is disabled or enabled.
13424 *
13425 * PORTING NOTE: These functions are used on the Web client to create the
13426 * underlying streams (to support tree-shakeable streams). On Android and iOS,
13427 * the streams are created during construction of RemoteStore.
13428 */
13429 this.wu = [], this.mu = i, this.mu.qr((t => {
13430 n.enqueueAndForget((async () => {
13431 // Porting Note: Unlike iOS, `restartNetwork()` is called even when the
13432 // network becomes unreachable as we don't have any other way to tear
13433 // down our streams.
13434 yu(this) && (x("RemoteStore", "Restarting streams for network reachability change."),
13435 await async function(t) {
13436 const e = B(t);
13437 e._u.add(4 /* ConnectivityChange */), await lu(e), e.gu.set("Unknown" /* Unknown */),
13438 e._u.delete(4 /* ConnectivityChange */), await hu(e);
13439 }(this));
13440 }));
13441 })), this.gu = new cu(n, s);
13442 }
13443}
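// Illustrative sketch (not part of the SDK): the ordering invariant described for the
// write pipeline above. Responses from the backend are matched to requests purely by
// order, so the oldest pending batch is always shift()ed off the front when an
// acknowledgement arrives, and unacknowledged batches remain queued so they can be
// re-sent after a reconnect. `DemoWritePipeline` is a hypothetical name.
class DemoWritePipeline {
    constructor() {
        this.pending = [];
    }
    enqueue(batch) {
        // Batches stay here until acknowledged by the backend.
        this.pending.push(batch);
    }
    onAck() {
        // The backend acknowledges writes in the order they were sent.
        return this.pending.shift();
    }
}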
13444
13445async function hu(t) {
13446 if (yu(t)) for (const e of t.wu) await e(/* enabled= */ !0);
13447}
13448
13449/**
13450 * Temporarily disables the network. The network can be re-enabled using
13451 * enableNetwork().
13452 */ async function lu(t) {
13453 for (const e of t.wu) await e(/* enabled= */ !1);
13454}
13455
13456/**
13457 * Starts new listen for the given target. Uses resume token if provided. It
13458 * is a no-op if the target of given `TargetData` is already being listened to.
13459 */
13460function fu(t, e) {
13461 const n = B(t);
13462 n.du.has(e.targetId) || (
13463 // Mark this as something the client is currently listening for.
13464 n.du.set(e.targetId, e), gu(n) ?
13465 // The listen will be sent in onWatchStreamOpen
13466 mu(n) : Mu(n).ko() && _u(n, e));
13467}
13468
13469/**
13470 * Removes the listen from server. It is a no-op if the given target id is
13471 * not being listened to.
13472 */ function du(t, e) {
13473 const n = B(t), s = Mu(n);
13474 n.du.delete(e), s.ko() && wu(n, e), 0 === n.du.size && (s.ko() ? s.Fo() : yu(n) &&
13475 // Revert to OnlineState.Unknown if the watch stream is not open and we
13476 // have no listeners, since without any listens to send we cannot
13477 // confirm if the stream is healthy and upgrade to OnlineState.Online.
13478 n.gu.set("Unknown" /* Unknown */));
13479}
13480
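/**
 * Editorial sketch (not part of the SDK): the listen/unlisten bookkeeping in
 * the two functions above, restated with descriptive, hypothetical names.
 * Targets are tracked locally, (re)sent whenever the watch stream is open,
 * and removed eagerly on unlisten without waiting for server confirmation.
 */
class _SketchListenTargets {
    constructor() {
        this.targets = new Map(); // targetId -> target data
    }
    listen(targetId, targetData, streamIsOpen, sendWatchRequest) {
        if (this.targets.has(targetId)) return; // already listening: no-op
        this.targets.set(targetId, targetData);
        if (streamIsOpen) sendWatchRequest(targetData);
    }
    unlisten(targetId, streamIsOpen, sendUnwatchRequest) {
        this.targets.delete(targetId); // removed eagerly
        if (streamIsOpen) sendUnwatchRequest(targetId);
    }
}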
13481/**
13482 * We need to increment the expected number of pending responses we're due
13483 * from watch so we wait for the ack to process any messages from this target.
13484 */ function _u(t, e) {
13485 t.yu.Mt(e.targetId), Mu(t).zo(e);
13486}
13487
13488/**
13489 * We need to increment the expected number of pending responses we're due
13490 * from watch so we wait for the removal on the server before we process any
13491 * messages from this target.
13492 */ function wu(t, e) {
13493 t.yu.Mt(e), Mu(t).Ho(e);
13494}
13495
13496function mu(t) {
13497 t.yu = new As({
13498 getRemoteKeysForTarget: e => t.remoteSyncer.getRemoteKeysForTarget(e),
13499 se: e => t.du.get(e) || null
13500 }), Mu(t).start(), t.gu.uu();
13501}
13502
13503/**
13504 * Returns whether the watch stream should be started because it's necessary
13505 * and has not yet been started.
13506 */ function gu(t) {
13507 return yu(t) && !Mu(t).No() && t.du.size > 0;
13508}
13509
13510function yu(t) {
13511 return 0 === B(t)._u.size;
13512}
13513
13514function pu(t) {
13515 t.yu = void 0;
13516}
13517
13518async function Iu(t) {
13519 t.du.forEach(((e, n) => {
13520 _u(t, e);
13521 }));
13522}
13523
13524async function Tu(t, e) {
13525 pu(t),
13526 // If we still need the watch stream, retry the connection.
13527 gu(t) ? (t.gu.hu(e), mu(t)) :
13528 // No need to restart watch stream because there are no active targets.
13529 // The online state is set to unknown because there is no active attempt
13530 // at establishing a connection
13531 t.gu.set("Unknown" /* Unknown */);
13532}
13533
13534async function Eu(t, e, n) {
13535 if (
13536 // Mark the client as online since we got a message from the server
13537 t.gu.set("Online" /* Online */), e instanceof Ts && 2 /* Removed */ === e.state && e.cause)
13538 // There was an error on a target, don't wait for a consistent snapshot
13539 // to raise events
13540 try {
13541 await
13542 /** Handles an error on a target */
13543 async function(t, e) {
13544 const n = e.cause;
13545 for (const s of e.targetIds)
13546 // A watched target might have been removed already.
13547 t.du.has(s) && (await t.remoteSyncer.rejectListen(s, n), t.du.delete(s), t.yu.removeTarget(s));
13548 }
13549 /**
13550 * Attempts to fill our write pipeline with writes from the LocalStore.
13551 *
13552 * Called internally to bootstrap or refill the write pipeline and by
13553 * SyncEngine whenever there are new mutations to process.
13554 *
13555 * Starts the write stream if necessary.
13556 */ (t, e);
13557 } catch (n) {
13558 x("RemoteStore", "Failed to remove targets %s: %s ", e.targetIds.join(","), n),
13559 await Au(t, n);
13560 } else if (e instanceof ps ? t.yu.Gt(e) : e instanceof Is ? t.yu.Yt(e) : t.yu.Wt(e),
13561 !n.isEqual(it.min())) try {
13562 const e = await Po(t.localStore);
13563 n.compareTo(e) >= 0 &&
13564 // We have received a target change with a global snapshot if the snapshot
13565 // version is not equal to SnapshotVersion.min().
13566 await
13567 /**
13568 * Takes a batch of changes from the Datastore, repackages them as a
13569 * RemoteEvent, and passes that on to the listener, which is typically the
13570 * SyncEngine.
13571 */
13572 function(t, e) {
13573 const n = t.yu.te(e);
13574 // Update in-memory resume tokens. LocalStore will update the
13575 // persistent view of these when applying the completed RemoteEvent.
13576 return n.targetChanges.forEach(((n, s) => {
13577 if (n.resumeToken.approximateByteSize() > 0) {
13578 const i = t.du.get(s);
13579 // A watched target might have been removed already.
13580 i && t.du.set(s, i.withResumeToken(n.resumeToken, e));
13581 }
13582 })),
13583 // Re-establish listens for the targets that have been invalidated by
13584 // existence filter mismatches.
13585 n.targetMismatches.forEach((e => {
13586 const n = t.du.get(e);
13587 if (!n)
13588 // A watched target might have been removed already.
13589 return;
13590 // Clear the resume token for the target, since we're in a known mismatch
13591 // state.
13592 t.du.set(e, n.withResumeToken(Ht.EMPTY_BYTE_STRING, n.snapshotVersion)),
13593 // Cause a hard reset by unwatching and rewatching immediately, but
13594 // deliberately don't send a resume token so that we get a full update.
13595 wu(t, e);
13596 // Mark the target we send as being on behalf of an existence filter
13597 // mismatch, but don't actually retain that in listenTargets. This ensures
13598 // that we flag the first re-listen this way without impacting future
13599 // listens of this target (that might happen e.g. on reconnect).
13600 const s = new Bi(n.target, e, 1 /* ExistenceFilterMismatch */ , n.sequenceNumber);
13601 _u(t, s);
13602 })), t.remoteSyncer.applyRemoteEvent(n);
13603 }(t, n);
13604 } catch (e) {
13605 x("RemoteStore", "Failed to raise snapshot:", e), await Au(t, e);
13606 }
13607}
13608
13609/**
13610 * Recovery logic for IndexedDB errors that takes the network offline until
13611 * `op` succeeds. Retries are scheduled with backoff using
13612 * `enqueueRetryable()`. If `op()` is not provided, IndexedDB access is
13613 * validated via a generic operation.
13614 *
13615 * The returned Promise is resolved once the network is disabled and before
13616 * any retry attempt.
13617 */ async function Au(t, e, n) {
13618 if (!St(e)) throw e;
13619 t._u.add(1 /* IndexedDbFailed */),
13620 // Disable network and raise offline snapshots
13621 await lu(t), t.gu.set("Offline" /* Offline */), n || (
13622 // Use a simple read operation to determine if IndexedDB recovered.
13623 // Ideally, we would expose a health check directly on SimpleDb, but
13624 // RemoteStore only has access to persistence through LocalStore.
13625 n = () => Po(t.localStore)),
13626 // Probe IndexedDB periodically and re-enable network
13627 t.asyncQueue.enqueueRetryable((async () => {
13628 x("RemoteStore", "Retrying IndexedDB access"), await n(), t._u.delete(1 /* IndexedDbFailed */),
13629 await hu(t);
13630 }));
13631}
13632
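/**
 * Editorial sketch (not part of the SDK): the recovery pattern above in broad
 * strokes. `queue`, `disableNetwork`, `enableNetwork` and `probe` are
 * hypothetical stand-ins; `enqueueRetryable` is assumed to retry its callback
 * with backoff until it resolves, mirroring the AsyncQueue used above.
 */
async function _sketchRecoverFromStorageError(queue, disableNetwork, enableNetwork, probe) {
    await disableNetwork(); // raise offline snapshots while storage is unusable
    queue.enqueueRetryable(async () => {
        await probe();          // throws until IndexedDB access works again
        await enableNetwork();  // only reached once the probe succeeds
    });
}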
13633/**
13634 * Executes `op`. If `op` fails, takes the network offline until `op`
13635 * succeeds. Returns after the first attempt.
13636 */ function Ru(t, e) {
13637 return e().catch((n => Au(t, n, e)));
13638}
13639
13640async function bu(t) {
13641 const e = B(t), n = Fu(e);
13642 let s = e.fu.length > 0 ? e.fu[e.fu.length - 1].batchId : -1;
13643 for (;Pu(e); ) try {
13644 const t = await So(e.localStore, s);
13645 if (null === t) {
13646 0 === e.fu.length && n.Fo();
13647 break;
13648 }
13649 s = t.batchId, vu(e, t);
13650 } catch (t) {
13651 await Au(e, t);
13652 }
13653 Vu(e) && Su(e);
13654}
13655
13656/**
13657 * Returns true if we can add to the write pipeline (i.e. the network is
13658 * enabled and the write pipeline is not full).
13659 */ function Pu(t) {
13660 return yu(t) && t.fu.length < 10;
13661}
13662
13663/**
13664 * Queues additional writes to be sent to the write stream, sending them
13665 * immediately if the write stream is established.
13666 */ function vu(t, e) {
13667 t.fu.push(e);
13668 const n = Fu(t);
13669 n.ko() && n.Yo && n.Xo(e.mutations);
13670}
13671
13672function Vu(t) {
13673 return yu(t) && !Fu(t).No() && t.fu.length > 0;
13674}
13675
13676function Su(t) {
13677 Fu(t).start();
13678}
13679
13680async function Du(t) {
13681 Fu(t).eu();
13682}
13683
13684async function Cu(t) {
13685 const e = Fu(t);
13686 // Send the write pipeline now that the stream is established.
13687 for (const n of t.fu) e.Xo(n.mutations);
13688}
13689
13690async function xu(t, e, n) {
13691 const s = t.fu.shift(), i = Fi.from(s, e, n);
13692 await Ru(t, (() => t.remoteSyncer.applySuccessfulWrite(i))),
13693 // It's possible that with the completion of this mutation another
13694 // slot has freed up.
13695 await bu(t);
13696}
13697
13698async function Nu(t, e) {
13699 // If the write stream closed after the write handshake completes, a write
13700 // operation failed and we fail the pending operation.
13701 e && Fu(t).Yo &&
13702 // This error affects the actual write.
13703 await async function(t, e) {
13704 // Only handle permanent errors here. If it's transient, just let the retry
13705 // logic kick in.
13706 if (n = e.code, es(n) && n !== L.ABORTED) {
13707 // This was a permanent error, the request itself was the problem
13708 // so it's not going to succeed if we resend it.
13709 const n = t.fu.shift();
13710 // In this case it's also unlikely that the server itself is melting
13711 // down -- this was just a bad request so inhibit backoff on the next
13712 // restart.
13713 Fu(t).Mo(), await Ru(t, (() => t.remoteSyncer.rejectFailedWrite(n.batchId, e))),
13714 // It's possible that with the completion of this mutation
13715 // another slot has freed up.
13716 await bu(t);
13717 }
13718 var n;
13719 }(t, e),
13720 // The write stream might have been started by refilling the write
13721 // pipeline for failed writes
13722 Vu(t) && Su(t);
13723}
13724
13725async function ku(t, e) {
13726 const n = B(t);
13727 n.asyncQueue.verifyOperationInProgress(), x("RemoteStore", "RemoteStore received new credentials");
13728 const s = yu(n);
13729 // Tear down and re-create our network streams. This will ensure we get a
13730 // fresh auth token for the new user and re-fill the write pipeline with
13731 // new mutations from the LocalStore (since mutations are per-user).
13732 n._u.add(3 /* CredentialChange */), await lu(n), s &&
13733 // Don't set the network status to Unknown if we are offline.
13734 n.gu.set("Unknown" /* Unknown */), await n.remoteSyncer.handleCredentialChange(e),
13735 n._u.delete(3 /* CredentialChange */), await hu(n);
13736}
13737
13738/**
13739 * Toggles the network state when the client gains or loses its primary lease.
13740 */ async function Ou(t, e) {
13741 const n = B(t);
13742 e ? (n._u.delete(2 /* IsSecondary */), await hu(n)) : e || (n._u.add(2 /* IsSecondary */),
13743 await lu(n), n.gu.set("Unknown" /* Unknown */));
13744}
13745
13746/**
13747 * If not yet initialized, registers the WatchStream and its network state
13748 * callback with `remoteStoreImpl`. Returns the existing stream if one is
13749 * already available.
13750 *
13751 * PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
13752 * This is not done on Web to allow it to be tree-shaken.
13753 */ function Mu(t) {
13754 return t.pu || (
13755 // Create stream (but note that it is not started yet).
13756 t.pu = function(t, e, n) {
13757 const s = B(t);
13758 return s.su(), new iu(e, s.connection, s.authCredentials, s.appCheckCredentials, s.It, n);
13759 }
13760 /**
13761 * @license
13762 * Copyright 2018 Google LLC
13763 *
13764 * Licensed under the Apache License, Version 2.0 (the "License");
13765 * you may not use this file except in compliance with the License.
13766 * You may obtain a copy of the License at
13767 *
13768 * http://www.apache.org/licenses/LICENSE-2.0
13769 *
13770 * Unless required by applicable law or agreed to in writing, software
13771 * distributed under the License is distributed on an "AS IS" BASIS,
13772 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13773 * See the License for the specific language governing permissions and
13774 * limitations under the License.
13775 */ (t.datastore, t.asyncQueue, {
13776 Yr: Iu.bind(null, t),
13777 Zr: Tu.bind(null, t),
13778 Wo: Eu.bind(null, t)
13779 }), t.wu.push((async e => {
13780 e ? (t.pu.Mo(), gu(t) ? mu(t) : t.gu.set("Unknown" /* Unknown */)) : (await t.pu.stop(),
13781 pu(t));
13782 }))), t.pu;
13783}
13784
13785/**
13786 * If not yet initialized, registers the WriteStream and its network state
13787 * callback with `remoteStoreImpl`. Returns the existing stream if one is
13788 * already available.
13789 *
13790 * PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
13791 * This is not done on Web to allow it to be tree-shaken.
13792 */ function Fu(t) {
13793 return t.Iu || (
13794 // Create stream (but note that it is not started yet).
13795 t.Iu = function(t, e, n) {
13796 const s = B(t);
13797 return s.su(), new ru(e, s.connection, s.authCredentials, s.appCheckCredentials, s.It, n);
13798 }(t.datastore, t.asyncQueue, {
13799 Yr: Du.bind(null, t),
13800 Zr: Nu.bind(null, t),
13801 tu: Cu.bind(null, t),
13802 Zo: xu.bind(null, t)
13803 }), t.wu.push((async e => {
13804 e ? (t.Iu.Mo(),
13805 // This will start the write stream if necessary.
13806 await bu(t)) : (await t.Iu.stop(), t.fu.length > 0 && (x("RemoteStore", `Stopping write stream with ${t.fu.length} pending writes`),
13807 t.fu = []));
13808 }))), t.Iu;
13809}
13810
13811/**
13812 * @license
13813 * Copyright 2017 Google LLC
13814 *
13815 * Licensed under the Apache License, Version 2.0 (the "License");
13816 * you may not use this file except in compliance with the License.
13817 * You may obtain a copy of the License at
13818 *
13819 * http://www.apache.org/licenses/LICENSE-2.0
13820 *
13821 * Unless required by applicable law or agreed to in writing, software
13822 * distributed under the License is distributed on an "AS IS" BASIS,
13823 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13824 * See the License for the specific language governing permissions and
13825 * limitations under the License.
13826 */
13827/**
13828 * Represents an operation scheduled to be run in the future on an AsyncQueue.
13829 *
13830 * It is created via DelayedOperation.createAndSchedule().
13831 *
13832 * Supports cancellation (via cancel()) and early execution (via skipDelay()).
13833 *
13834 * Note: We implement `PromiseLike` instead of `Promise`, as the `Promise` type
13835 * in newer versions of TypeScript defines `finally`, which is not available in
13836 * IE.
13837 */
13838class $u {
13839 constructor(t, e, n, s, i) {
13840 this.asyncQueue = t, this.timerId = e, this.targetTimeMs = n, this.op = s, this.removalCallback = i,
13841 this.deferred = new q, this.then = this.deferred.promise.then.bind(this.deferred.promise),
13842 // It's normal for the deferred promise to be rejected (due to cancellation)
13843 // and so we attach a dummy catch callback to avoid
13844 // 'UnhandledPromiseRejectionWarning' log spam.
13845 this.deferred.promise.catch((t => {}));
13846 }
13847 /**
13848 * Creates and returns a DelayedOperation that has been scheduled to be
13849 * executed on the provided asyncQueue after the provided delayMs.
13850 *
13851 * @param asyncQueue - The queue to schedule the operation on.
13852 * @param id - A Timer ID identifying the type of operation this is.
13853 * @param delayMs - The delay (ms) before the operation should be scheduled.
13854 * @param op - The operation to run.
13855 * @param removalCallback - A callback to be called synchronously once the
13856 * operation is executed or canceled, notifying the AsyncQueue to remove it
13857 * from its delayedOperations list.
13858 * PORTING NOTE: This exists to prevent making removeDelayedOperation() and
13859 * the DelayedOperation class public.
13860 */ static createAndSchedule(t, e, n, s, i) {
13861 const r = Date.now() + n, o = new $u(t, e, r, s, i);
13862 return o.start(n), o;
13863 }
13864 /**
13865 * Starts the timer. This is called immediately after construction by
13866 * createAndSchedule().
13867 */ start(t) {
13868 this.timerHandle = setTimeout((() => this.handleDelayElapsed()), t);
13869 }
13870 /**
13871 * Queues the operation to run immediately (if it hasn't already been run or
13872 * canceled).
13873 */ skipDelay() {
13874 return this.handleDelayElapsed();
13875 }
13876 /**
13877 * Cancels the operation if it hasn't already been executed or canceled. The
13878 * promise will be rejected.
13879 *
13880 * As long as the operation has not yet been run, calling cancel() provides a
13881 * guarantee that the operation will not be run.
13882 */ cancel(t) {
13883 null !== this.timerHandle && (this.clearTimeout(), this.deferred.reject(new U(L.CANCELLED, "Operation cancelled" + (t ? ": " + t : ""))));
13884 }
13885 handleDelayElapsed() {
13886 this.asyncQueue.enqueueAndForget((() => null !== this.timerHandle ? (this.clearTimeout(),
13887 this.op().then((t => this.deferred.resolve(t)))) : Promise.resolve()));
13888 }
13889 clearTimeout() {
13890 null !== this.timerHandle && (this.removalCallback(this), clearTimeout(this.timerHandle),
13891 this.timerHandle = null);
13892 }
13893}
13894
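/**
 * Editorial sketch (not part of the SDK build): how the DelayedOperation-style
 * class above is typically consumed. The timer id string and the callbacks are
 * hypothetical; only the createAndSchedule/then/cancel surface comes from the
 * class itself.
 */
function _sketchDelayedOperationUsage(asyncQueue) {
    const op = $u.createAndSchedule(
        asyncQueue,
        "sketch-timer" /* hypothetical timer id */,
        5000 /* delayMs */,
        () => Promise.resolve("done"),
        () => {} /* removal callback */);
    // The operation is then-able; cancel() rejects it, skipDelay() runs it now.
    op.then((result) => console.log(result), (err) => console.log("cancelled", err));
    return op;
}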
13895/**
13896 * Returns a FirestoreError that can be surfaced to the user if the provided
13897 * error is an IndexedDbTransactionError. Re-throws the error otherwise.
13898 */ function Bu(t, e) {
13899 if (N("AsyncQueue", `${e}: ${t}`), St(t)) return new U(L.UNAVAILABLE, `${e}: ${t}`);
13900 throw t;
13901}
13902
13903/**
13904 * @license
13905 * Copyright 2017 Google LLC
13906 *
13907 * Licensed under the Apache License, Version 2.0 (the "License");
13908 * you may not use this file except in compliance with the License.
13909 * You may obtain a copy of the License at
13910 *
13911 * http://www.apache.org/licenses/LICENSE-2.0
13912 *
13913 * Unless required by applicable law or agreed to in writing, software
13914 * distributed under the License is distributed on an "AS IS" BASIS,
13915 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13916 * See the License for the specific language governing permissions and
13917 * limitations under the License.
13918 */
13919/**
13920 * DocumentSet is an immutable (copy-on-write) collection that holds documents
13921 * in order specified by the provided comparator. We always add a document key
13922 * comparator on top of what is provided to guarantee document equality based on
13923 * the key.
13924 */ class Lu {
13925 /** The default ordering is by key if the comparator is omitted */
13926 constructor(t) {
13927 // We are adding document key comparator to the end as it's the only
13928 // guaranteed unique property of a document.
13929 this.comparator = t ? (e, n) => t(e, n) || at.comparator(e.key, n.key) : (t, e) => at.comparator(t.key, e.key),
13930 this.keyedMap = us(), this.sortedSet = new Lt(this.comparator);
13931 }
13932 /**
13933 * Returns an empty copy of the existing DocumentSet, using the same
13934 * comparator.
13935 */ static emptySet(t) {
13936 return new Lu(t.comparator);
13937 }
13938 has(t) {
13939 return null != this.keyedMap.get(t);
13940 }
13941 get(t) {
13942 return this.keyedMap.get(t);
13943 }
13944 first() {
13945 return this.sortedSet.minKey();
13946 }
13947 last() {
13948 return this.sortedSet.maxKey();
13949 }
13950 isEmpty() {
13951 return this.sortedSet.isEmpty();
13952 }
13953 /**
13954 * Returns the index of the provided key in the document set, or -1 if the
13955 * document key is not present in the set.
13956 */ indexOf(t) {
13957 const e = this.keyedMap.get(t);
13958 return e ? this.sortedSet.indexOf(e) : -1;
13959 }
13960 get size() {
13961 return this.sortedSet.size;
13962 }
13963 /** Iterates documents in order defined by "comparator" */ forEach(t) {
13964 this.sortedSet.inorderTraversal(((e, n) => (t(e), !1)));
13965 }
13966 /** Inserts or updates a document with the same key */ add(t) {
13967 // First remove the element if we have it.
13968 const e = this.delete(t.key);
13969 return e.copy(e.keyedMap.insert(t.key, t), e.sortedSet.insert(t, null));
13970 }
13971 /** Deletes a document with a given key */ delete(t) {
13972 const e = this.get(t);
13973 return e ? this.copy(this.keyedMap.remove(t), this.sortedSet.remove(e)) : this;
13974 }
13975 isEqual(t) {
13976 if (!(t instanceof Lu)) return !1;
13977 if (this.size !== t.size) return !1;
13978 const e = this.sortedSet.getIterator(), n = t.sortedSet.getIterator();
13979 for (;e.hasNext(); ) {
13980 const t = e.getNext().key, s = n.getNext().key;
13981 if (!t.isEqual(s)) return !1;
13982 }
13983 return !0;
13984 }
13985 toString() {
13986 const t = [];
13987 return this.forEach((e => {
13988 t.push(e.toString());
13989 })), 0 === t.length ? "DocumentSet ()" : "DocumentSet (\n " + t.join(" \n") + "\n)";
13990 }
13991 copy(t, e) {
13992 const n = new Lu;
13993 return n.comparator = this.comparator, n.keyedMap = t, n.sortedSet = e, n;
13994 }
13995}
13996
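/**
 * Editorial sketch (not part of the SDK): DocumentSet above is copy-on-write,
 * so add() and delete() return new sets and never mutate the receiver. A tiny
 * standalone analogue of that behaviour, with hypothetical names:
 */
class _SketchCopyOnWriteSet {
    constructor(items) {
        this.items = items || [];
    }
    add(item) {
        // Remove any existing copy first, then return a brand-new instance.
        return new _SketchCopyOnWriteSet(this.items.filter((i) => i !== item).concat([ item ]));
    }
    delete(item) {
        return new _SketchCopyOnWriteSet(this.items.filter((i) => i !== item));
    }
}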
13997/**
13998 * @license
13999 * Copyright 2017 Google LLC
14000 *
14001 * Licensed under the Apache License, Version 2.0 (the "License");
14002 * you may not use this file except in compliance with the License.
14003 * You may obtain a copy of the License at
14004 *
14005 * http://www.apache.org/licenses/LICENSE-2.0
14006 *
14007 * Unless required by applicable law or agreed to in writing, software
14008 * distributed under the License is distributed on an "AS IS" BASIS,
14009 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14010 * See the License for the specific language governing permissions and
14011 * limitations under the License.
14012 */
14013/**
14014 * DocumentChangeSet keeps track of a set of changes to docs in a query, merging
14015 * duplicate events for the same doc.
14016 */ class Uu {
14017 constructor() {
14018 this.Tu = new Lt(at.comparator);
14019 }
14020 track(t) {
14021 const e = t.doc.key, n = this.Tu.get(e);
14022 n ?
14023 // Merge the new change with the existing change.
14024 0 /* Added */ !== t.type && 3 /* Metadata */ === n.type ? this.Tu = this.Tu.insert(e, t) : 3 /* Metadata */ === t.type && 1 /* Removed */ !== n.type ? this.Tu = this.Tu.insert(e, {
14025 type: n.type,
14026 doc: t.doc
14027 }) : 2 /* Modified */ === t.type && 2 /* Modified */ === n.type ? this.Tu = this.Tu.insert(e, {
14028 type: 2 /* Modified */ ,
14029 doc: t.doc
14030 }) : 2 /* Modified */ === t.type && 0 /* Added */ === n.type ? this.Tu = this.Tu.insert(e, {
14031 type: 0 /* Added */ ,
14032 doc: t.doc
14033 }) : 1 /* Removed */ === t.type && 0 /* Added */ === n.type ? this.Tu = this.Tu.remove(e) : 1 /* Removed */ === t.type && 2 /* Modified */ === n.type ? this.Tu = this.Tu.insert(e, {
14034 type: 1 /* Removed */ ,
14035 doc: n.doc
14036 }) : 0 /* Added */ === t.type && 1 /* Removed */ === n.type ? this.Tu = this.Tu.insert(e, {
14037 type: 2 /* Modified */ ,
14038 doc: t.doc
14039 }) :
14040 // This includes these cases, which don't make sense:
14041 // Added->Added
14042 // Removed->Removed
14043 // Modified->Added
14044 // Removed->Modified
14045 // Metadata->Added
14046 // Removed->Metadata
14047 M() : this.Tu = this.Tu.insert(e, t);
14048 }
14049 Eu() {
14050 const t = [];
14051 return this.Tu.inorderTraversal(((e, n) => {
14052 t.push(n);
14053 })), t;
14054 }
14055}
14056
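/**
 * Editorial note (not part of the SDK): a few concrete outcomes of the
 * change-merging in track() above, using the numeric types from the inline
 * comments (0 = Added, 1 = Removed, 2 = Modified, 3 = Metadata):
 *
 *   Added    then Modified => Added       (still a brand-new doc for the view)
 *   Added    then Removed  => (dropped)   (the pair cancels out)
 *   Modified then Removed  => Removed
 *   Removed  then Added    => Modified    (the doc never left the view)
 */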
14057class qu {
14058 constructor(t, e, n, s, i, r, o, u, c) {
14059 this.query = t, this.docs = e, this.oldDocs = n, this.docChanges = s, this.mutatedKeys = i,
14060 this.fromCache = r, this.syncStateChanged = o, this.excludesMetadataChanges = u,
14061 this.hasCachedResults = c;
14062 }
14063 /** Returns a view snapshot as if all documents in the snapshot were added. */ static fromInitialDocuments(t, e, n, s, i) {
14064 const r = [];
14065 return e.forEach((t => {
14066 r.push({
14067 type: 0 /* Added */ ,
14068 doc: t
14069 });
14070 })), new qu(t, e, Lu.emptySet(e), r, n, s,
14071 /* syncStateChanged= */ !0,
14072 /* excludesMetadataChanges= */ !1, i);
14073 }
14074 get hasPendingWrites() {
14075 return !this.mutatedKeys.isEmpty();
14076 }
14077 isEqual(t) {
14078 if (!(this.fromCache === t.fromCache && this.hasCachedResults === t.hasCachedResults && this.syncStateChanged === t.syncStateChanged && this.mutatedKeys.isEqual(t.mutatedKeys) && fn(this.query, t.query) && this.docs.isEqual(t.docs) && this.oldDocs.isEqual(t.oldDocs))) return !1;
14079 const e = this.docChanges, n = t.docChanges;
14080 if (e.length !== n.length) return !1;
14081 for (let t = 0; t < e.length; t++) if (e[t].type !== n[t].type || !e[t].doc.isEqual(n[t].doc)) return !1;
14082 return !0;
14083 }
14084}
14085
14086/**
14087 * @license
14088 * Copyright 2017 Google LLC
14089 *
14090 * Licensed under the Apache License, Version 2.0 (the "License");
14091 * you may not use this file except in compliance with the License.
14092 * You may obtain a copy of the License at
14093 *
14094 * http://www.apache.org/licenses/LICENSE-2.0
14095 *
14096 * Unless required by applicable law or agreed to in writing, software
14097 * distributed under the License is distributed on an "AS IS" BASIS,
14098 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14099 * See the License for the specific language governing permissions and
14100 * limitations under the License.
14101 */
14102/**
14103 * Holds the listeners and the last received ViewSnapshot for a query being
14104 * tracked by EventManager.
14105 */ class Ku {
14106 constructor() {
14107 this.Au = void 0, this.listeners = [];
14108 }
14109}
14110
14111class Gu {
14112 constructor() {
14113 this.queries = new ss((t => dn(t)), fn), this.onlineState = "Unknown" /* Unknown */ ,
14114 this.Ru = new Set;
14115 }
14116}
14117
14118async function Qu(t, e) {
14119 const n = B(t), s = e.query;
14120 let i = !1, r = n.queries.get(s);
14121 if (r || (i = !0, r = new Ku), i) try {
14122 r.Au = await n.onListen(s);
14123 } catch (t) {
14124 const n = Bu(t, `Initialization of query '${_n(e.query)}' failed`);
14125 return void e.onError(n);
14126 }
14127 if (n.queries.set(s, r), r.listeners.push(e),
14128 // Run global snapshot listeners if a consistent snapshot has been emitted.
14129 e.bu(n.onlineState), r.Au) {
14130 e.Pu(r.Au) && Hu(n);
14131 }
14132}
14133
14134async function ju(t, e) {
14135 const n = B(t), s = e.query;
14136 let i = !1;
14137 const r = n.queries.get(s);
14138 if (r) {
14139 const t = r.listeners.indexOf(e);
14140 t >= 0 && (r.listeners.splice(t, 1), i = 0 === r.listeners.length);
14141 }
14142 if (i) return n.queries.delete(s), n.onUnlisten(s);
14143}
14144
14145function Wu(t, e) {
14146 const n = B(t);
14147 let s = !1;
14148 for (const t of e) {
14149 const e = t.query, i = n.queries.get(e);
14150 if (i) {
14151 for (const e of i.listeners) e.Pu(t) && (s = !0);
14152 i.Au = t;
14153 }
14154 }
14155 s && Hu(n);
14156}
14157
14158function zu(t, e, n) {
14159 const s = B(t), i = s.queries.get(e);
14160 if (i) for (const t of i.listeners) t.onError(n);
14161 // Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
14162 // after an error.
14163 s.queries.delete(e);
14164}
14165
14166// Call all global snapshot listeners that have been set.
14167function Hu(t) {
14168 t.Ru.forEach((t => {
14169 t.next();
14170 }));
14171}
14172
14173/**
14174 * QueryListener takes a series of internal view snapshots and determines
14175 * when to raise the event.
14176 *
14177 * It uses an Observer to dispatch events.
14178 */ class Ju {
14179 constructor(t, e, n) {
14180 this.query = t, this.vu = e,
14181 /**
14182 * Initial snapshots (e.g. from cache) may not be propagated to the wrapped
14183 * observer. This flag is set to true once we've actually raised an event.
14184 */
14185 this.Vu = !1, this.Su = null, this.onlineState = "Unknown" /* Unknown */ , this.options = n || {};
14186 }
14187 /**
14188 * Applies the new ViewSnapshot to this listener, raising a user-facing event
14189 * if applicable (depending on what changed, whether the user has opted into
14190 * metadata-only changes, etc.). Returns true if a user-facing event was
14191 * indeed raised.
14192 */ Pu(t) {
14193 if (!this.options.includeMetadataChanges) {
14194 // Remove the metadata only changes.
14195 const e = [];
14196 for (const n of t.docChanges) 3 /* Metadata */ !== n.type && e.push(n);
14197 t = new qu(t.query, t.docs, t.oldDocs, e, t.mutatedKeys, t.fromCache, t.syncStateChanged,
14198 /* excludesMetadataChanges= */ !0, t.hasCachedResults);
14199 }
14200 let e = !1;
14201 return this.Vu ? this.Du(t) && (this.vu.next(t), e = !0) : this.Cu(t, this.onlineState) && (this.xu(t),
14202 e = !0), this.Su = t, e;
14203 }
14204 onError(t) {
14205 this.vu.error(t);
14206 }
14207 /** Returns whether a snapshot was raised. */ bu(t) {
14208 this.onlineState = t;
14209 let e = !1;
14210 return this.Su && !this.Vu && this.Cu(this.Su, t) && (this.xu(this.Su), e = !0),
14211 e;
14212 }
14213 Cu(t, e) {
14214 // Always raise the first event when we're synced
14215 if (!t.fromCache) return !0;
14216 // NOTE: We consider OnlineState.Unknown as online (it should become Offline
14217 // or Online if we wait long enough).
14218 const n = "Offline" /* Offline */ !== e;
14219 // Don't raise the event if we're online, aren't synced yet (checked
14220 // above) and are waiting for a sync.
14221 return (!this.options.Nu || !n) && (!t.docs.isEmpty() || t.hasCachedResults || "Offline" /* Offline */ === e);
14222 // Raise data from cache if we have any documents, have cached results before,
14223 // or we are offline.
14224 }
14225 Du(t) {
14226 // We don't need to handle includeDocumentMetadataChanges here because
14227 // the Metadata only changes have already been stripped out if needed.
14228 // At this point the only changes we will see are the ones we should
14229 // propagate.
14230 if (t.docChanges.length > 0) return !0;
14231 const e = this.Su && this.Su.hasPendingWrites !== t.hasPendingWrites;
14232 return !(!t.syncStateChanged && !e) && !0 === this.options.includeMetadataChanges;
14233 // Generally we should have hit one of the cases above, but it's possible
14234 // to get here if there were only metadata docChanges and they got
14235 // stripped out.
14236 }
14237 xu(t) {
14238 t = qu.fromInitialDocuments(t.query, t.docs, t.mutatedKeys, t.fromCache, t.hasCachedResults),
14239 this.Vu = !0, this.vu.next(t);
14240 }
14241}
14242
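/**
 * Editorial sketch (not part of the SDK): the gist of the "should we raise an
 * initial event from cache" decision in the class above, with descriptive
 * names standing in for the minified ones.
 */
function _sketchShouldRaiseInitialEvent(snapshot, onlineState, waitForSyncWhenOnline) {
    // Synced (non-cache) snapshots are always raised.
    if (!snapshot.fromCache) return true;
    // OnlineState.Unknown is treated as online; it settles eventually.
    const maybeOnline = onlineState !== "Offline";
    if (waitForSyncWhenOnline && maybeOnline) return false;
    // Otherwise raise cached data if there is anything to show, we have shown
    // cached results before, or we are offline.
    return !snapshot.docs.isEmpty() || snapshot.hasCachedResults || onlineState === "Offline";
}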
14243/**
14244 * @license
14245 * Copyright 2020 Google LLC
14246 *
14247 * Licensed under the Apache License, Version 2.0 (the "License");
14248 * you may not use this file except in compliance with the License.
14249 * You may obtain a copy of the License at
14250 *
14251 * http://www.apache.org/licenses/LICENSE-2.0
14252 *
14253 * Unless required by applicable law or agreed to in writing, software
14254 * distributed under the License is distributed on an "AS IS" BASIS,
14255 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14256 * See the License for the specific language governing permissions and
14257 * limitations under the License.
14258 */
14259/**
14260 * A complete element in the bundle stream, together with the byte length it
14261 * occupies in the stream.
14262 */ class Yu {
14263 constructor(t,
14264 // How many bytes this element takes to store in the bundle.
14265 e) {
14266 this.ku = t, this.byteLength = e;
14267 }
14268 Ou() {
14269 return "metadata" in this.ku;
14270 }
14271}
14272
14273/**
14274 * @license
14275 * Copyright 2020 Google LLC
14276 *
14277 * Licensed under the Apache License, Version 2.0 (the "License");
14278 * you may not use this file except in compliance with the License.
14279 * You may obtain a copy of the License at
14280 *
14281 * http://www.apache.org/licenses/LICENSE-2.0
14282 *
14283 * Unless required by applicable law or agreed to in writing, software
14284 * distributed under the License is distributed on an "AS IS" BASIS,
14285 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14286 * See the License for the specific language governing permissions and
14287 * limitations under the License.
14288 */
14289/**
14290 * Helper to convert objects from bundles to model objects in the SDK.
14291 */ class Xu {
14292 constructor(t) {
14293 this.It = t;
14294 }
14295 Ji(t) {
14296 return Ms(this.It, t);
14297 }
14298 /**
14299 * Converts a BundleDocument to a MutableDocument.
14300 */ Yi(t) {
14301 return t.metadata.exists ? qs(this.It, t.document, !1) : Ce.newNoDocument(this.Ji(t.metadata.name), this.Xi(t.metadata.readTime));
14302 }
14303 Xi(t) {
14304 return xs(t);
14305 }
14306}
14307
14308/**
14309 * A class to process the elements from a bundle, load them into local
14310 * storage and provide progress updates while loading.
14311 */ class Zu {
14312 constructor(t, e, n) {
14313 this.Mu = t, this.localStore = e, this.It = n,
14314 /** Batched queries to be saved into storage */
14315 this.queries = [],
14316 /** Batched documents to be saved into storage */
14317 this.documents = [],
14318 /** The collection groups affected by this bundle. */
14319 this.collectionGroups = new Set, this.progress = tc(t);
14320 }
14321 /**
14322 * Adds an element from the bundle to the loader.
14323 *
14324 * Returns a new progress update if adding the element leads to new progress;
14325 * otherwise returns null.
14326 */ Fu(t) {
14327 this.progress.bytesLoaded += t.byteLength;
14328 let e = this.progress.documentsLoaded;
14329 if (t.ku.namedQuery) this.queries.push(t.ku.namedQuery); else if (t.ku.documentMetadata) {
14330 this.documents.push({
14331 metadata: t.ku.documentMetadata
14332 }), t.ku.documentMetadata.exists || ++e;
14333 const n = ot.fromString(t.ku.documentMetadata.name);
14334 this.collectionGroups.add(n.get(n.length - 2));
14335 } else t.ku.document && (this.documents[this.documents.length - 1].document = t.ku.document,
14336 ++e);
14337 return e !== this.progress.documentsLoaded ? (this.progress.documentsLoaded = e,
14338 Object.assign({}, this.progress)) : null;
14339 }
14340 $u(t) {
14341 const e = new Map, n = new Xu(this.It);
14342 for (const s of t) if (s.metadata.queries) {
14343 const t = n.Ji(s.metadata.name);
14344 for (const n of s.metadata.queries) {
14345 const s = (e.get(n) || _s()).add(t);
14346 e.set(n, s);
14347 }
14348 }
14349 return e;
14350 }
14351 /**
14352 * Update the progress to 'Success' and return the updated progress.
14353 */ async complete() {
14354 const t = await Mo(this.localStore, new Xu(this.It), this.documents, this.Mu.id), e = this.$u(this.documents);
14355 for (const t of this.queries) await Fo(this.localStore, t, e.get(t.name));
14356 return this.progress.taskState = "Success", {
14357 progress: this.progress,
14358 Bu: this.collectionGroups,
14359 Lu: t
14360 };
14361 }
14362}
14363
14364/**
14365 * Returns a `LoadBundleTaskProgress` representing the initial progress of
14366 * loading a bundle.
14367 */ function tc(t) {
14368 return {
14369 taskState: "Running",
14370 documentsLoaded: 0,
14371 bytesLoaded: 0,
14372 totalDocuments: t.totalDocuments,
14373 totalBytes: t.totalBytes
14374 };
14375}
14376
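/**
 * Editorial sketch (not part of the SDK): the shape of the progress object
 * produced above and one way a caller might render it. The formatting helper
 * is hypothetical; the field names follow the object returned by the function
 * above.
 */
function _sketchFormatBundleProgress(progress) {
    const pct = progress.totalBytes > 0
        ? Math.round((progress.bytesLoaded / progress.totalBytes) * 100)
        : 0;
    return `${progress.taskState}: ${progress.documentsLoaded}/${progress.totalDocuments} documents (${pct}%)`;
}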
14377/**
14378 * Returns a `LoadBundleTaskProgress` representing the progress of a load that
14379 * has succeeded.
14380 */
14381/**
14382 * @license
14383 * Copyright 2017 Google LLC
14384 *
14385 * Licensed under the Apache License, Version 2.0 (the "License");
14386 * you may not use this file except in compliance with the License.
14387 * You may obtain a copy of the License at
14388 *
14389 * http://www.apache.org/licenses/LICENSE-2.0
14390 *
14391 * Unless required by applicable law or agreed to in writing, software
14392 * distributed under the License is distributed on an "AS IS" BASIS,
14393 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14394 * See the License for the specific language governing permissions and
14395 * limitations under the License.
14396 */
14397class ec {
14398 constructor(t) {
14399 this.key = t;
14400 }
14401}
14402
14403class nc {
14404 constructor(t) {
14405 this.key = t;
14406 }
14407}
14408
14409/**
14410 * View is responsible for computing the final merged truth of what docs are in
14411 * a query. It gets notified of local and remote changes to docs, and applies
14412 * the query filters and limits to determine the most correct possible results.
14413 */ class sc {
14414 constructor(t,
14415 /** Documents included in the remote target */
14416 e) {
14417 this.query = t, this.Uu = e, this.qu = null, this.hasCachedResults = !1,
14418 /**
14419 * A flag indicating whether the view is current with the backend. A view is considered
14420 * current after it has seen the current flag from the backend and did not
14421 * lose consistency within the watch stream (e.g. because of an existence
14422 * filter mismatch).
14423 */
14424 this.current = !1,
14425 /** Documents in the view but not in the remote target */
14426 this.Ku = _s(),
14427 /** Document Keys that have local changes */
14428 this.mutatedKeys = _s(), this.Gu = gn(t), this.Qu = new Lu(this.Gu);
14429 }
14430 /**
14431 * The set of remote documents that the server has told us belong to the target associated with
14432 * this view.
14433 */ get ju() {
14434 return this.Uu;
14435 }
14436 /**
14437 * Iterates over a set of doc changes, applies the query limit, and computes
14438 * what the new results should be, what the changes were, and whether we may
14439 * need to go back to the local cache for more results. Does not make any
14440 * changes to the view.
14441 * @param docChanges - The doc changes to apply to this view.
14442 * @param previousChanges - If this is being called with a refill, then start
14443 * with this set of docs and changes instead of the current view.
14444 * @returns a new set of docs, changes, and refill flag.
14445 */ Wu(t, e) {
14446 const n = e ? e.zu : new Uu, s = e ? e.Qu : this.Qu;
14447 let i = e ? e.mutatedKeys : this.mutatedKeys, r = s, o = !1;
14448 // Track the last doc in a (full) limit. This is necessary, because some
14449 // update (a delete, or an update moving a doc past the old limit) might
14450 // mean there is some other document in the local cache that either should
14451 // come (1) between the old last limit doc and the new last document, in the
14452 // case of updates, or (2) after the new last document, in the case of
14453 // deletes. So we keep this doc at the old limit to compare the updates to.
14454 // Note that this should never get used in a refill (when previousChanges is
14455 // set), because there will only be adds -- no deletes or updates.
14456 const u = "F" /* First */ === this.query.limitType && s.size === this.query.limit ? s.last() : null, c = "L" /* Last */ === this.query.limitType && s.size === this.query.limit ? s.first() : null;
14457 // Drop documents out to meet limit/limitToLast requirement.
14458 if (t.inorderTraversal(((t, e) => {
14459 const a = s.get(t), h = wn(this.query, e) ? e : null, l = !!a && this.mutatedKeys.has(a.key), f = !!h && (h.hasLocalMutations ||
14460 // We only consider committed mutations for documents that were
14461 // mutated during the lifetime of the view.
14462 this.mutatedKeys.has(h.key) && h.hasCommittedMutations);
14463 let d = !1;
14464 // Calculate change
14465 if (a && h) {
14466 a.data.isEqual(h.data) ? l !== f && (n.track({
14467 type: 3 /* Metadata */ ,
14468 doc: h
14469 }), d = !0) : this.Hu(a, h) || (n.track({
14470 type: 2 /* Modified */ ,
14471 doc: h
14472 }), d = !0, (u && this.Gu(h, u) > 0 || c && this.Gu(h, c) < 0) && (
14473 // This doc moved from inside the limit to outside the limit.
14474 // That means there may be some other doc in the local cache
14475 // that should be included instead.
14476 o = !0));
14477 } else !a && h ? (n.track({
14478 type: 0 /* Added */ ,
14479 doc: h
14480 }), d = !0) : a && !h && (n.track({
14481 type: 1 /* Removed */ ,
14482 doc: a
14483 }), d = !0, (u || c) && (
14484 // A doc was removed from a full limit query. We'll need to
14485 // requery from the local cache to see if we know about some other
14486 // doc that should be in the results.
14487 o = !0));
14488 d && (h ? (r = r.add(h), i = f ? i.add(t) : i.delete(t)) : (r = r.delete(t), i = i.delete(t)));
14489 })), null !== this.query.limit) for (;r.size > this.query.limit; ) {
14490 const t = "F" /* First */ === this.query.limitType ? r.last() : r.first();
14491 r = r.delete(t.key), i = i.delete(t.key), n.track({
14492 type: 1 /* Removed */ ,
14493 doc: t
14494 });
14495 }
14496 return {
14497 Qu: r,
14498 zu: n,
14499 $i: o,
14500 mutatedKeys: i
14501 };
14502 }
14503 Hu(t, e) {
14504 // We suppress the initial change event for documents that were modified as
14505 // part of a write acknowledgment (e.g. when the value of a server transform
14506 // is applied) as Watch will send us the same document again.
14507 // By suppressing the event, we only raise two user visible events (one with
14508 // `hasPendingWrites` and the final state of the document) instead of three
14509 // (one with `hasPendingWrites`, the modified document with
14510 // `hasPendingWrites` and the final state of the document).
14511 return t.hasLocalMutations && e.hasCommittedMutations && !e.hasLocalMutations;
14512 }
14513 /**
14514 * Updates the view with the given ViewDocumentChanges and optionally updates
14515 * limbo docs and sync state from the provided target change.
14516 * @param docChanges - The set of changes to make to the view's docs.
14517 * @param updateLimboDocuments - Whether to update limbo documents based on
14518 * this change.
14519 * @param targetChange - A target change to apply for computing limbo docs and
14520 * sync state.
14521 * @returns A new ViewChange with the given docs, changes, and sync state.
14522 */
14523 // PORTING NOTE: The iOS/Android clients always compute limbo document changes.
14524 applyChanges(t, e, n) {
14525 const s = this.Qu;
14526 this.Qu = t.Qu, this.mutatedKeys = t.mutatedKeys;
14527 // Sort changes based on type and query comparator
14528 const i = t.zu.Eu();
14529 i.sort(((t, e) => function(t, e) {
14530 const n = t => {
14531 switch (t) {
14532 case 0 /* Added */ :
14533 return 1;
14534
14535 case 2 /* Modified */ :
14536 case 3 /* Metadata */ :
14537 // A metadata change is converted to a modified change at the public
14538 // api layer. Since we sort by document key and then change type,
14539 // metadata and modified changes must be sorted equivalently.
14540 return 2;
14541
14542 case 1 /* Removed */ :
14543 return 0;
14544
14545 default:
14546 return M();
14547 }
14548 };
14549 return n(t) - n(e);
14550 }
14551 /**
14552 * @license
14553 * Copyright 2020 Google LLC
14554 *
14555 * Licensed under the Apache License, Version 2.0 (the "License");
14556 * you may not use this file except in compliance with the License.
14557 * You may obtain a copy of the License at
14558 *
14559 * http://www.apache.org/licenses/LICENSE-2.0
14560 *
14561 * Unless required by applicable law or agreed to in writing, software
14562 * distributed under the License is distributed on an "AS IS" BASIS,
14563 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14564 * See the License for the specific language governing permissions and
14565 * limitations under the License.
14566 */ (t.type, e.type) || this.Gu(t.doc, e.doc))), this.Ju(n);
14567 const r = e ? this.Yu() : [], o = 0 === this.Ku.size && this.current ? 1 /* Synced */ : 0 /* Local */ , u = o !== this.qu;
14568 if (this.qu = o, 0 !== i.length || u) {
14569 return {
14570 snapshot: new qu(this.query, t.Qu, s, i, t.mutatedKeys, 0 /* Local */ === o, u,
14571 /* excludesMetadataChanges= */ !1, !!n && n.resumeToken.approximateByteSize() > 0),
14572 Xu: r
14573 };
14574 }
14575 // no changes
14576 return {
14577 Xu: r
14578 };
14579 }
14580 /**
14581 * Applies an OnlineState change to the view, potentially generating a
14582 * ViewChange if the view's syncState changes as a result.
14583 */ bu(t) {
14584 return this.current && "Offline" /* Offline */ === t ? (
14585 // If we're offline, set `current` to false and then call applyChanges()
14586 // to refresh our syncState and generate a ViewChange as appropriate. We
14587 // are guaranteed to get a new TargetChange that sets `current` back to
14588 // true once the client is back online.
14589 this.current = !1, this.applyChanges({
14590 Qu: this.Qu,
14591 zu: new Uu,
14592 mutatedKeys: this.mutatedKeys,
14593 $i: !1
14594 },
14595 /* updateLimboDocuments= */ !1)) : {
14596 Xu: []
14597 };
14598 }
14599 /**
14600 * Returns whether the doc for the given key should be in limbo.
14601 */ Zu(t) {
14602 // If the remote end says it's part of this query, it's not in limbo.
14603 return !this.Uu.has(t) && (
14604 // The local store doesn't think it's a result, so it shouldn't be in limbo.
14605 !!this.Qu.has(t) && !this.Qu.get(t).hasLocalMutations);
14606 }
14607 /**
14608 * Updates syncedDocuments, current, and limbo docs based on the given change.
14609 * Returns the list of changes describing which docs entered or left limbo.
14610 */ Ju(t) {
14611 t && (t.addedDocuments.forEach((t => this.Uu = this.Uu.add(t))), t.modifiedDocuments.forEach((t => {})),
14612 t.removedDocuments.forEach((t => this.Uu = this.Uu.delete(t))), this.current = t.current);
14613 }
14614 Yu() {
14615 // We can only determine limbo documents when we're in-sync with the server.
14616 if (!this.current) return [];
14617 // TODO(klimt): Do this incrementally so that it's not quadratic when
14618 // updating many documents.
14619 const t = this.Ku;
14620 this.Ku = _s(), this.Qu.forEach((t => {
14621 this.Zu(t.key) && (this.Ku = this.Ku.add(t.key));
14622 }));
14623 // Diff the new limbo docs with the old limbo docs.
14624 const e = [];
14625 return t.forEach((t => {
14626 this.Ku.has(t) || e.push(new nc(t));
14627 })), this.Ku.forEach((n => {
14628 t.has(n) || e.push(new ec(n));
14629 })), e;
14630 }
14631 /**
14632 * Update the in-memory state of the current view with the state read from
14633 * persistence.
14634 *
14635 * We update the query view whenever a client's primary status changes:
14636 * - When a client transitions from primary to secondary, it can miss
14637 * LocalStorage updates and its query views may temporarily not be
14638 * synchronized with the state on disk.
14639 * - For secondary to primary transitions, the client needs to update the list
14640 * of `syncedDocuments` since secondary clients update their query views
14641 * based purely on synthesized RemoteEvents.
14642 *
14643 * @param queryResult.documents - The documents that match the query according
14644 * to the LocalStore.
14645 * @param queryResult.remoteKeys - The keys of the documents that match the
14646 * query according to the backend.
14647 *
14648 * @returns The ViewChange that resulted from this synchronization.
14649 */
14650 // PORTING NOTE: Multi-tab only.
14651 tc(t) {
14652 this.Uu = t.Hi, this.Ku = _s();
14653 const e = this.Wu(t.documents);
14654 return this.applyChanges(e, /*updateLimboDocuments=*/ !0);
14655 }
14656 /**
14657 * Returns a view snapshot as if this query was just listened to. Contains
14658 * a document add for every existing document and the `fromCache` and
14659 * `hasPendingWrites` status of the already established view.
14660 */
14661 // PORTING NOTE: Multi-tab only.
14662 ec() {
14663 return qu.fromInitialDocuments(this.query, this.Qu, this.mutatedKeys, 0 /* Local */ === this.qu, this.hasCachedResults);
14664 }
14665}
14666
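/**
 * Editorial sketch (not part of the SDK): the limbo test in the View class
 * above, restated with descriptive, hypothetical names. A document is "in
 * limbo" when the local view still contains it but the server has not
 * confirmed that it belongs to the target.
 */
function _sketchIsInLimbo(key, syncedRemoteKeys, viewDocs) {
    if (syncedRemoteKeys.has(key)) return false; // server says it matches
    const doc = viewDocs.get(key);
    if (!doc) return false;                      // not a local result either
    return !doc.hasLocalMutations;               // pending writes would explain the mismatch
}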
14667/**
14668 * QueryView contains all of the data that SyncEngine needs to keep track of for
14669 * a particular query.
14670 */
14671class ic {
14672 constructor(
14673 /**
14674 * The query itself.
14675 */
14676 t,
14677 /**
14678 * The target number created by the client that is used in the watch
14679 * stream to identify this query.
14680 */
14681 e,
14682 /**
14683 * The view is responsible for computing the final merged truth of what
14684 * docs are in the query. It gets notified of local and remote changes,
14685 * and applies the query filters and limits to determine the most correct
14686 * possible results.
14687 */
14688 n) {
14689 this.query = t, this.targetId = e, this.view = n;
14690 }
14691}
14692
14693/** Tracks a limbo resolution. */ class rc {
14694 constructor(t) {
14695 this.key = t,
14696 /**
14697 * Set to true once we've received a document. This is used in
14698 * getRemoteKeysForTarget() and ultimately used by WatchChangeAggregator to
14699 * decide whether it needs to manufacture a delete event for the target once
14700 * the target is CURRENT.
14701 */
14702 this.nc = !1;
14703 }
14704}
14705
14706/**
14707 * An implementation of `SyncEngine` coordinating with other parts of the SDK.
14708 *
14709 * The parts of SyncEngine that act as a callback to RemoteStore need to be
14710 * registered individually. This is done in `syncEngineWrite()` and
14711 * `syncEngineListen()` (as well as `applyPrimaryState()`) as these methods
14712 * serve as entry points to RemoteStore's functionality.
14713 *
14714 * Note: some fields defined in this class might have a public access level, but
14715 * the class is not exported so they are only accessible from this module.
14716 * This is useful to implement optional features (like bundles) in free
14717 * functions, such that they are tree-shakeable.
14718 */ class oc {
14719 constructor(t, e, n,
14720 // PORTING NOTE: Manages state synchronization in multi-tab environments.
14721 s, i, r) {
14722 this.localStore = t, this.remoteStore = e, this.eventManager = n, this.sharedClientState = s,
14723 this.currentUser = i, this.maxConcurrentLimboResolutions = r, this.sc = {}, this.ic = new ss((t => dn(t)), fn),
14724 this.rc = new Map,
14725 /**
14726 * The keys of documents that are in limbo for which we haven't yet started a
14727 * limbo resolution query. The strings in this set are the result of calling
14728 * `key.path.canonicalString()` where `key` is a `DocumentKey` object.
14729 *
14730 * The `Set` type was chosen because it provides efficient lookup and removal
14731 * of arbitrary elements and it also maintains insertion order, providing the
14732 * desired queue-like FIFO semantics.
14733 */
14734 this.oc = new Set,
14735 /**
14736 * Keeps track of the target ID for each document that is in limbo with an
14737 * active target.
14738 */
14739 this.uc = new Lt(at.comparator),
14740 /**
14741 * Keeps track of the information about an active limbo resolution for each
14742 * active target ID that was started for the purpose of limbo resolution.
14743 */
14744 this.cc = new Map, this.ac = new so,
14745 /** Stores user completion handlers, indexed by User and BatchId. */
14746 this.hc = {},
14747 /** Stores user callbacks waiting for all pending writes to be acknowledged. */
14748 this.lc = new Map, this.fc = Nr.vn(), this.onlineState = "Unknown" /* Unknown */ ,
14749 // The primary state is set to `true` or `false` immediately after Firestore
14750 // startup. In the interim, a client should only be considered primary if
14751 // `isPrimary` is true.
14752 this.dc = void 0;
14753 }
14754 get isPrimaryClient() {
14755 return !0 === this.dc;
14756 }
14757}
14758
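/**
 * Editorial sketch (not part of the SDK): why a Set works as the queue of
 * enqueued limbo resolutions described above -- it offers O(1) membership
 * checks and deletions while iterating in insertion order, i.e. FIFO.
 */
function _sketchDequeueNextLimboKey(enqueuedLimboKeys /* Set<string> */) {
    for (const key of enqueuedLimboKeys) {
        enqueuedLimboKeys.delete(key); // take the oldest entry first
        return key;
    }
    return null; // nothing queued
}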
14759/**
14760 * Initiates a new listen and resolves the promise once the listen has been enqueued to the
14761 * server. All subsequent view snapshots or errors are sent to the
14762 * subscribed handlers. Returns the initial snapshot.
14763 */
14764async function uc(t, e) {
14765 const n = Mc(t);
14766 let s, i;
14767 const r = n.ic.get(e);
14768 if (r)
14769 // PORTING NOTE: With Multi-Tab Web, it is possible that a query view
14770 // already exists when EventManager calls us for the first time. This
14771 // happens when the primary tab is already listening to this query on
14772 // behalf of another tab and the user of the primary also starts listening
14773 // to the query. EventManager will not have an assigned target ID in this
14774 // case and calls `listen` to obtain this ID.
14775 s = r.targetId, n.sharedClientState.addLocalQueryTarget(s), i = r.view.ec(); else {
14776 const t = await Do(n.localStore, hn(e));
14777 n.isPrimaryClient && fu(n.remoteStore, t);
14778 const r = n.sharedClientState.addLocalQueryTarget(t.targetId);
14779 s = t.targetId, i = await cc(n, e, s, "current" === r, t.resumeToken);
14780 }
14781 return i;
14782}
14783
14784/**
14785 * Registers a view for a previously unknown query and computes its initial
14786 * snapshot.
14787 */ async function cc(t, e, n, s, i) {
14788 // PORTING NOTE: On Web only, we inject the code that registers new Limbo
14789 // targets based on view changes. This allows us to only depend on Limbo
14790 // changes when user code includes queries.
14791 t._c = (e, n, s) => async function(t, e, n, s) {
14792 let i = e.view.Wu(n);
14793 i.$i && (
14794 // The query has a limit and some docs were removed, so we need
14795 // to re-run the query against the local store to make sure we
14796 // didn't lose any good docs that had been past the limit.
14797 i = await xo(t.localStore, e.query,
14798 /* usePreviousResults= */ !1).then((({documents: t}) => e.view.Wu(t, i))));
14799 const r = s && s.targetChanges.get(e.targetId), o = e.view.applyChanges(i,
14800 /* updateLimboDocuments= */ t.isPrimaryClient, r);
14801 return Tc(t, e.targetId, o.Xu), o.snapshot;
14802 }(t, e, n, s);
14803 const r = await xo(t.localStore, e,
14804 /* usePreviousResults= */ !0), o = new sc(e, r.Hi), u = o.Wu(r.documents), c = ys.createSynthesizedTargetChangeForCurrentChange(n, s && "Offline" /* Offline */ !== t.onlineState, i), a = o.applyChanges(u,
14805 /* updateLimboDocuments= */ t.isPrimaryClient, c);
14806 Tc(t, n, a.Xu);
14807 const h = new ic(e, n, o);
14808 return t.ic.set(e, h), t.rc.has(n) ? t.rc.get(n).push(e) : t.rc.set(n, [ e ]), a.snapshot;
14809}
14810
14811/** Stops listening to the query. */ async function ac(t, e) {
14812 const n = B(t), s = n.ic.get(e), i = n.rc.get(s.targetId);
14813 if (i.length > 1) return n.rc.set(s.targetId, i.filter((t => !fn(t, e)))), void n.ic.delete(e);
14814 // No other queries are mapped to the target, clean up the query and the target.
14815 if (n.isPrimaryClient) {
14816 // We need to remove the local query target first to allow us to verify
14817 // whether any other client is still interested in this target.
14818 n.sharedClientState.removeLocalQueryTarget(s.targetId);
14819 n.sharedClientState.isActiveQueryTarget(s.targetId) || await Co(n.localStore, s.targetId,
14820 /*keepPersistedTargetData=*/ !1).then((() => {
14821 n.sharedClientState.clearQueryState(s.targetId), du(n.remoteStore, s.targetId),
14822 pc(n, s.targetId);
14823 })).catch(At);
14824 } else pc(n, s.targetId), await Co(n.localStore, s.targetId,
14825 /*keepPersistedTargetData=*/ !0);
14826}
14827
14828/**
14829 * Initiates the write of a local mutation batch, which involves adding the
14830 * writes to the mutation queue, notifying the remote store about new
14831 * mutations and raising events for any changes this write caused.
14832 *
14833 * The promise returned by this call is resolved when the above steps
14834 * have completed, *not* when the write was acked by the backend. The
14835 * userCallback is resolved once the write was acked/rejected by the
14836 * backend (or failed locally for any other reason).
14837 */ async function hc(t, e, n) {
14838 const s = Fc(t);
14839 try {
14840 const t = await function(t, e) {
14841 const n = B(t), s = st.now(), i = e.reduce(((t, e) => t.add(e.key)), _s());
14842 let r, o;
14843 return n.persistence.runTransaction("Locally write mutations", "readwrite", (t => {
14844 // Figure out which keys do not have a remote version in the cache, this
14845 // is needed to create the right overlay mutation: if no remote version
14846 // presents, we do not need to create overlays as patch mutations.
14847 // TODO(Overlay): Is there a better way to determine this? Using the
14848 // document version does not work because local mutations set them back
14849 // to 0.
14850 let u = rs(), c = _s();
14851 return n.Gi.getEntries(t, i).next((t => {
14852 u = t, u.forEach(((t, e) => {
14853 e.isValidDocument() || (c = c.add(t));
14854 }));
14855 })).next((() => n.localDocuments.getOverlayedDocuments(t, u))).next((i => {
14856 r = i;
14857 // For non-idempotent mutations (such as `FieldValue.increment()`),
14858 // we record the base state in a separate patch mutation. This is
14859 // later used to guarantee consistent values and prevents flicker
14860 // even if the backend sends us an update that already includes our
14861 // transform.
14862 const o = [];
14863 for (const t of e) {
14864 const e = Kn(t, r.get(t.key).overlayedDocument);
14865 null != e &&
14866 // NOTE: The base state should only be applied if there's some
14867 // existing document to override, so use a Precondition of
14868 // exists=true
14869 o.push(new jn(t.key, e, De(e.value.mapValue), Fn.exists(!0)));
14870 }
14871 return n.mutationQueue.addMutationBatch(t, s, o, e);
14872 })).next((e => {
14873 o = e;
14874 const s = e.applyToLocalDocumentSet(r, c);
14875 return n.documentOverlayCache.saveOverlays(t, e.batchId, s);
14876 }));
14877 })).then((() => ({
14878 batchId: o.batchId,
14879 changes: cs(r)
14880 })));
14881 }(s.localStore, e);
14882 s.sharedClientState.addPendingMutation(t.batchId), function(t, e, n) {
14883 let s = t.hc[t.currentUser.toKey()];
14884 s || (s = new Lt(tt));
14885 s = s.insert(e, n), t.hc[t.currentUser.toKey()] = s;
14886 }
14887 /**
14888 * Resolves or rejects the user callback for the given batch and then discards
14889 * it.
14890 */ (s, t.batchId, n), await Rc(s, t.changes), await bu(s.remoteStore);
14891 } catch (t) {
14892 // If we can't persist the mutation, we reject the user callback and
14893 // don't send the mutation. The user can then retry the write.
14894 const e = Bu(t, "Failed to persist write");
14895 n.reject(e);
14896 }
14897}
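/**
 * Illustrative sketch (not part of the bundle): the split described above is
 * observable through the public modular API. The promise returned by a write
 * such as `setDoc()` settles only once the backend has acked (or rejected) the
 * write, while locally raised snapshots arrive earlier and carry
 * `metadata.hasPendingWrites === true`. The `app` instance below is assumed to
 * be initialized elsewhere.
 *
 *   import { getFirestore, doc, setDoc, onSnapshot } from "firebase/firestore";
 *
 *   const db = getFirestore(app);
 *   const ref = doc(db, "cities/SF");
 *
 *   onSnapshot(ref, { includeMetadataChanges: true }, (snap) => {
 *     // Fires first with hasPendingWrites === true (local event), then again
 *     // with false once the backend acknowledges the write.
 *     console.log("pending:", snap.metadata.hasPendingWrites);
 *   });
 *
 *   setDoc(ref, { population: 860000 }).then(() => console.log("acked"));
 */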
14898
14899/**
14900 * Applies one remote event to the sync engine, notifying any views of the
14901 * changes, and releasing any pending mutation batches that would become
14902 * visible because of the snapshot version the remote event contains.
14903 */ async function lc(t, e) {
14904 const n = B(t);
14905 try {
14906 const t = await vo(n.localStore, e);
14907 // Update `receivedDocument` as appropriate for any limbo targets.
14908 e.targetChanges.forEach(((t, e) => {
14909 const s = n.cc.get(e);
14910 s && (
14911 // Since this is a limbo resolution lookup, it's for a single document
14912 // and it could be added, modified, or removed, but not a combination.
14913 F(t.addedDocuments.size + t.modifiedDocuments.size + t.removedDocuments.size <= 1),
14914 t.addedDocuments.size > 0 ? s.nc = !0 : t.modifiedDocuments.size > 0 ? F(s.nc) : t.removedDocuments.size > 0 && (F(s.nc),
14915 s.nc = !1));
14916 })), await Rc(n, t, e);
14917 } catch (t) {
14918 await At(t);
14919 }
14920}
14921
14922/**
14923 * Applies an OnlineState change to the sync engine and notifies any views of
14924 * the change.
14925 */ function fc(t, e, n) {
14926 const s = B(t);
14927 // If we are the secondary client, we explicitly ignore the remote store's
14928 // online state (the local client may go offline, even though the primary
14929 // tab remains online) and only apply the primary tab's online state from
14930 // SharedClientState.
14931 if (s.isPrimaryClient && 0 /* RemoteStore */ === n || !s.isPrimaryClient && 1 /* SharedClientState */ === n) {
14932 const t = [];
14933 s.ic.forEach(((n, s) => {
14934 const i = s.view.bu(e);
14935 i.snapshot && t.push(i.snapshot);
14936 })), function(t, e) {
14937 const n = B(t);
14938 n.onlineState = e;
14939 let s = !1;
14940 n.queries.forEach(((t, n) => {
14941 for (const t of n.listeners)
14942 // Run global snapshot listeners if a consistent snapshot has been emitted.
14943 t.bu(e) && (s = !0);
14944 })), s && Hu(n);
14945 }(s.eventManager, e), t.length && s.sc.Wo(t), s.onlineState = e, s.isPrimaryClient && s.sharedClientState.setOnlineState(e);
14946 }
14947}
14948
14949/**
14950 * Rejects the listen for the given targetID. This can be triggered by the
14951 * backend for any active target.
14952 *
14953 * @param syncEngine - The sync engine implementation.
14954 * @param targetId - The targetID corresponding to one previously initiated by the
14955 * user as part of TargetData passed to listen() on RemoteStore.
14956 * @param err - A description of the condition that has forced the rejection.
14957 * Nearly always this will be an indication that the user is no longer
14958 * authorized to see the data matching the target.
14959 */ async function dc(t, e, n) {
14960 const s = B(t);
14961 // PORTING NOTE: Multi-tab only.
14962 s.sharedClientState.updateQueryState(e, "rejected", n);
14963 const i = s.cc.get(e), r = i && i.key;
14964 if (r) {
14965 // TODO(klimt): We really only should do the following on permission
14966 // denied errors, but we don't have the cause code here.
14967 // It's a limbo doc. Create a synthetic event saying it was deleted.
14968 // This is kind of a hack. Ideally, we would have a method in the local
14969 // store to purge a document. However, it would be tricky to keep all of
14970 // the local store's invariants with another method.
14971 let t = new Lt(at.comparator);
14972 // TODO(b/217189216): This limbo document should ideally have a read time,
14973 // so that it is picked up by any read-time based scans. The backend,
14974 // however, does not send a read time for target removals.
14975 t = t.insert(r, Ce.newNoDocument(r, it.min()));
14976 const n = _s().add(r), i = new gs(it.min(),
14977 /* targetChanges= */ new Map,
14978 /* targetMismatches= */ new Kt(tt), t, n);
14979 await lc(s, i),
14980 // Since this query failed, we won't want to manually unlisten to it.
14981 // We only remove it from bookkeeping after we successfully applied the
14982 // RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
14983 // this query when the RemoteStore restarts the Watch stream, which should
14984 // re-trigger the target failure.
14985 s.uc = s.uc.remove(r), s.cc.delete(e), Ac(s);
14986 } else await Co(s.localStore, e,
14987 /* keepPersistedTargetData */ !1).then((() => pc(s, e, n))).catch(At);
14988}
14989
14990async function _c(t, e) {
14991 const n = B(t), s = e.batch.batchId;
14992 try {
14993 const t = await bo(n.localStore, e);
14994 // The local store may or may not be able to apply the write result and
14995 // raise events immediately (depending on whether the watcher is caught
14996 // up), so we raise user callbacks first so that they consistently happen
14997 // before listen events.
14998 yc(n, s, /*error=*/ null), gc(n, s), n.sharedClientState.updateMutationState(s, "acknowledged"),
14999 await Rc(n, t);
15000 } catch (t) {
15001 await At(t);
15002 }
15003}
15004
15005async function wc(t, e, n) {
15006 const s = B(t);
15007 try {
15008 const t = await function(t, e) {
15009 const n = B(t);
15010 return n.persistence.runTransaction("Reject batch", "readwrite-primary", (t => {
15011 let s;
15012 return n.mutationQueue.lookupMutationBatch(t, e).next((e => (F(null !== e), s = e.keys(),
15013 n.mutationQueue.removeMutationBatch(t, e)))).next((() => n.mutationQueue.performConsistencyCheck(t))).next((() => n.documentOverlayCache.removeOverlaysForBatchId(t, s, e))).next((() => n.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(t, s))).next((() => n.localDocuments.getDocuments(t, s)));
15014 }));
15015 }
15016 /**
15017 * Returns the largest (latest) batch id in the mutation queue that is pending
15018 * server response.
15019 *
15020 * Returns `BATCHID_UNKNOWN` if the queue is empty.
15021 */ (s.localStore, e);
15022 // The local store may or may not be able to apply the write result and
15023 // raise events immediately (depending on whether the watcher is caught up),
15024 // so we raise user callbacks first so that they consistently happen before
15025 // listen events.
15026 yc(s, e, n), gc(s, e), s.sharedClientState.updateMutationState(e, "rejected", n),
15027 await Rc(s, t);
15028 } catch (n) {
15029 await At(n);
15030 }
15031}
15032
15033/**
15034 * Registers a user callback that resolves when all pending mutations at the moment of calling
15035 * are acknowledged.
15036 */ async function mc(t, e) {
15037 const n = B(t);
15038 yu(n.remoteStore) || x("SyncEngine", "The network is disabled. The task returned by 'awaitPendingWrites()' will not complete until the network is enabled.");
15039 try {
15040 const t = await function(t) {
15041 const e = B(t);
15042 return e.persistence.runTransaction("Get highest unacknowledged batch id", "readonly", (t => e.mutationQueue.getHighestUnacknowledgedBatchId(t)));
15043 }(n.localStore);
15044 if (-1 === t)
15045 // Trigger the callback right away if there are no pending writes at the moment.
15046 return void e.resolve();
15047 const s = n.lc.get(t) || [];
15048 s.push(e), n.lc.set(t, s);
15049 } catch (t) {
15050 const n = Bu(t, "Initialization of waitForPendingWrites() operation failed");
15051 e.reject(n);
15052 }
15053}
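/**
 * Illustrative sketch (not part of the bundle): the registration above backs
 * the public `waitForPendingWrites()` API, whose promise resolves once every
 * write issued before the call has been acknowledged by the backend. Assumes
 * `db` was obtained from `getFirestore(app)`.
 *
 *   import { waitForPendingWrites } from "firebase/firestore";
 *
 *   // Note: the promise is rejected if the user changes before the writes are
 *   // acknowledged (see the user-change handling further below).
 *   waitForPendingWrites(db).then(() => {
 *     console.log("All writes pending at call time have been acknowledged.");
 *   });
 */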
15054
15055/**
15056 * Triggers the callbacks that are waiting for this batch id to get acknowledged by the server,
15057 * if there are any.
15058 */ function gc(t, e) {
15059 (t.lc.get(e) || []).forEach((t => {
15060 t.resolve();
15061 })), t.lc.delete(e);
15062}
15063
15064/** Reject all outstanding callbacks waiting for pending writes to complete. */ function yc(t, e, n) {
15065 const s = B(t);
15066 let i = s.hc[s.currentUser.toKey()];
15067 // NOTE: Mutations restored from persistence won't have callbacks, so it's
15068 // okay for there to be no callback for this ID.
15069 if (i) {
15070 const t = i.get(e);
15071 t && (n ? t.reject(n) : t.resolve(), i = i.remove(e)), s.hc[s.currentUser.toKey()] = i;
15072 }
15073}
15074
15075function pc(t, e, n = null) {
15076 t.sharedClientState.removeLocalQueryTarget(e);
15077 for (const s of t.rc.get(e)) t.ic.delete(s), n && t.sc.wc(s, n);
15078 if (t.rc.delete(e), t.isPrimaryClient) {
15079 t.ac.ls(e).forEach((e => {
15080 t.ac.containsKey(e) ||
15081 // We removed the last reference for this key
15082 Ic(t, e);
15083 }));
15084 }
15085}
15086
15087function Ic(t, e) {
15088 t.oc.delete(e.path.canonicalString());
15089 // It's possible that the target already got removed because the query failed. In that case,
15090 // the key won't exist in `limboTargetsByKey`. Only do the cleanup if we still have the target.
15091 const n = t.uc.get(e);
15092 null !== n && (du(t.remoteStore, n), t.uc = t.uc.remove(e), t.cc.delete(n), Ac(t));
15093}
15094
15095function Tc(t, e, n) {
15096 for (const s of n) if (s instanceof ec) t.ac.addReference(s.key, e), Ec(t, s); else if (s instanceof nc) {
15097 x("SyncEngine", "Document no longer in limbo: " + s.key), t.ac.removeReference(s.key, e);
15098 t.ac.containsKey(s.key) ||
15099 // We removed the last reference for this key
15100 Ic(t, s.key);
15101 } else M();
15102}
15103
15104function Ec(t, e) {
15105 const n = e.key, s = n.path.canonicalString();
15106 t.uc.get(n) || t.oc.has(s) || (x("SyncEngine", "New document in limbo: " + n), t.oc.add(s),
15107 Ac(t));
15108}
15109
15110/**
15111 * Starts listens for documents in limbo that are enqueued for resolution,
15112 * subject to a maximum number of concurrent resolutions.
15113 *
15114 * Without bounding the number of concurrent resolutions, the server can fail
15115 * with "resource exhausted" errors which can lead to pathological client
15116 * behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
15117 */ function Ac(t) {
15118 for (;t.oc.size > 0 && t.uc.size < t.maxConcurrentLimboResolutions; ) {
15119 const e = t.oc.values().next().value;
15120 t.oc.delete(e);
15121 const n = new at(ot.fromString(e)), s = t.fc.next();
15122 t.cc.set(s, new rc(n)), t.uc = t.uc.insert(n, s), fu(t.remoteStore, new Bi(hn(sn(n.path)), s, 2 /* LimboResolution */ , Mt.at));
15123 }
15124}
15125
15126async function Rc(t, e, n) {
15127 const s = B(t), i = [], r = [], o = [];
15128 s.ic.isEmpty() || (s.ic.forEach(((t, u) => {
15129 o.push(s._c(u, e, n).then((t => {
15130 // Update views if there are actual changes.
15131 if (
15132 // If there are changes, or we are handling a global snapshot, notify
15133 // secondary clients to update query state.
15134 (t || n) && s.isPrimaryClient && s.sharedClientState.updateQueryState(u.targetId, (null == t ? void 0 : t.fromCache) ? "not-current" : "current"),
15135 t) {
15136 i.push(t);
15137 const e = Io.Ci(u.targetId, t);
15138 r.push(e);
15139 }
15140 })));
15141 })), await Promise.all(o), s.sc.Wo(i), await async function(t, e) {
15142 const n = B(t);
15143 try {
15144 await n.persistence.runTransaction("notifyLocalViewChanges", "readwrite", (t => Rt.forEach(e, (e => Rt.forEach(e.Si, (s => n.persistence.referenceDelegate.addReference(t, e.targetId, s))).next((() => Rt.forEach(e.Di, (s => n.persistence.referenceDelegate.removeReference(t, e.targetId, s)))))))));
15145 } catch (t) {
15146 if (!St(t)) throw t;
15147 // If `notifyLocalViewChanges` fails, we did not advance the sequence
15148 // number for the documents that were included in this transaction.
15149 // This might trigger them to be deleted earlier than they otherwise
15150 // would have, but it should not invalidate the integrity of the data.
15151 x("LocalStore", "Failed to update sequence numbers: " + t);
15152 }
15153 for (const t of e) {
15154 const e = t.targetId;
15155 if (!t.fromCache) {
15156 const t = n.Ui.get(e), s = t.snapshotVersion, i = t.withLastLimboFreeSnapshotVersion(s);
15157 // Advance the last limbo free snapshot version
15158 n.Ui = n.Ui.insert(e, i);
15159 }
15160 }
15161 }(s.localStore, r));
15162}
15163
15164async function bc(t, e) {
15165 const n = B(t);
15166 if (!n.currentUser.isEqual(e)) {
15167 x("SyncEngine", "User change. New user:", e.toKey());
15168 const t = await Ro(n.localStore, e);
15169 n.currentUser = e,
15170 // Fails tasks waiting for pending writes requested by the previous user.
15171 function(t, e) {
15172 t.lc.forEach((t => {
15173 t.forEach((t => {
15174 t.reject(new U(L.CANCELLED, e));
15175 }));
15176 })), t.lc.clear();
15177 }(n, "'waitForPendingWrites' promise is rejected due to a user change."),
15178 // TODO(b/114226417): Consider calling this only in the primary tab.
15179 n.sharedClientState.handleUserChange(e, t.removedBatchIds, t.addedBatchIds), await Rc(n, t.ji);
15180 }
15181}
15182
15183function Pc(t, e) {
15184 const n = B(t), s = n.cc.get(e);
15185 if (s && s.nc) return _s().add(s.key);
15186 {
15187 let t = _s();
15188 const s = n.rc.get(e);
15189 if (!s) return t;
15190 for (const e of s) {
15191 const s = n.ic.get(e);
15192 t = t.unionWith(s.view.ju);
15193 }
15194 return t;
15195 }
15196}
15197
15198/**
15199 * Reconcile the list of synced documents in an existing view with those
15200 * from persistence.
15201 */ async function vc(t, e) {
15202 const n = B(t), s = await xo(n.localStore, e.query,
15203 /* usePreviousResults= */ !0), i = e.view.tc(s);
15204 return n.isPrimaryClient && Tc(n, e.targetId, i.Xu), i;
15205}
15206
15207/**
15208 * Retrieves newly changed documents from the remote document cache and raises
15209 * snapshots if needed.
15210 */
15211// PORTING NOTE: Multi-Tab only.
15212async function Vc(t, e) {
15213 const n = B(t);
15214 return ko(n.localStore, e).then((t => Rc(n, t)));
15215}
15216
15217/** Applies a mutation state to an existing batch. */
15218// PORTING NOTE: Multi-Tab only.
15219async function Sc(t, e, n, s) {
15220 const i = B(t), r = await function(t, e) {
15221 const n = B(t), s = B(n.mutationQueue);
15222 return n.persistence.runTransaction("Lookup mutation documents", "readonly", (t => s.Tn(t, e).next((e => e ? n.localDocuments.getDocuments(t, e) : Rt.resolve(null)))));
15223 }
15224 // PORTING NOTE: Multi-Tab only.
15225 (i.localStore, e);
15226 null !== r ? ("pending" === n ?
15227 // If we are the primary client, we need to send this write to the
15228 // backend. Secondary clients will ignore these writes since their remote
15229 // connection is disabled.
15230 await bu(i.remoteStore) : "acknowledged" === n || "rejected" === n ? (
15231 // NOTE: Both these methods are no-ops for batches that originated from
15232 // other clients.
15233 yc(i, e, s || null), gc(i, e), function(t, e) {
15234 B(B(t).mutationQueue).An(e);
15235 }
15236 // PORTING NOTE: Multi-Tab only.
15237 (i.localStore, e)) : M(), await Rc(i, r)) :
15238 // A throttled tab may not have seen the mutation before it was completed
15239 // and removed from the mutation queue, in which case we won't have cached
15240 // the affected documents. In this case we can safely ignore the update
15241 // since that means we didn't apply the mutation locally at all (if we
15242 // had, we would have cached the affected documents), and so we will just
15243 // see any resulting document changes via normal remote document updates
15244 // as applicable.
15245 x("SyncEngine", "Cannot apply mutation batch with id: " + e);
15246}
15247
15248/** Applies a query target change from a different tab. */
15249// PORTING NOTE: Multi-Tab only.
15250async function Dc(t, e) {
15251 const n = B(t);
15252 if (Mc(n), Fc(n), !0 === e && !0 !== n.dc) {
15253 // Secondary tabs only maintain Views for their local listeners and the
15254 // Views' internal state may not be 100% populated (in particular
15255 // secondary tabs don't track syncedDocuments, the set of documents the
15256 // server considers to be in the target). So when a secondary becomes
15257 // primary, we need to make sure that all views for all targets
15258 // match the state on disk.
15259 const t = n.sharedClientState.getAllActiveQueryTargets(), e = await Cc(n, t.toArray());
15260 n.dc = !0, await Ou(n.remoteStore, !0);
15261 for (const t of e) fu(n.remoteStore, t);
15262 } else if (!1 === e && !1 !== n.dc) {
15263 const t = [];
15264 let e = Promise.resolve();
15265 n.rc.forEach(((s, i) => {
15266 n.sharedClientState.isLocalQueryTarget(i) ? t.push(i) : e = e.then((() => (pc(n, i),
15267 Co(n.localStore, i,
15268 /*keepPersistedTargetData=*/ !0)))), du(n.remoteStore, i);
15269 })), await e, await Cc(n, t),
15270 // PORTING NOTE: Multi-Tab only.
15271 function(t) {
15272 const e = B(t);
15273 e.cc.forEach(((t, n) => {
15274 du(e.remoteStore, n);
15275 })), e.ac.fs(), e.cc = new Map, e.uc = new Lt(at.comparator);
15276 }
15277 /**
15278 * Reconcile the query views of the provided query targets with the state from
15279 * persistence. Raises snapshots for any changes that affect the local
15280 * client and returns the updated state of all targets' query data.
15281 *
15282 * @param syncEngine - The sync engine implementation
15283 * @param targets - the list of targets with views that need to be recomputed
15284 * @param transitionToPrimary - `true` iff the tab transitions from a secondary
15285 * tab to a primary tab
15286 */
15287 // PORTING NOTE: Multi-Tab only.
15288 (n), n.dc = !1, await Ou(n.remoteStore, !1);
15289 }
15290}
15291
15292async function Cc(t, e, n) {
15293 const s = B(t), i = [], r = [];
15294 for (const t of e) {
15295 let e;
15296 const n = s.rc.get(t);
15297 if (n && 0 !== n.length) {
15298 // For queries that have a local View, we fetch their current state
15299 // from LocalStore (as the resume token and the snapshot version
15300 // might have changed) and reconcile their views with the persisted
15301 // state (the list of syncedDocuments may have gotten out of sync).
15302 e = await Do(s.localStore, hn(n[0]));
15303 for (const t of n) {
15304 const e = s.ic.get(t), n = await vc(s, e);
15305 n.snapshot && r.push(n.snapshot);
15306 }
15307 } else {
15308 // For queries that never executed on this client, we need to
15309 // allocate the target in LocalStore and initialize a new View.
15310 const n = await No(s.localStore, t);
15311 e = await Do(s.localStore, n), await cc(s, xc(n), t,
15312 /*current=*/ !1, e.resumeToken);
15313 }
15314 i.push(e);
15315 }
15316 return s.sc.Wo(r), i;
15317}
15318
15319/**
15320 * Creates a `Query` object from the specified `Target`. There is no way to
15321 * obtain the original `Query`, so we synthesize a `Query` from the `Target`
15322 * object.
15323 *
15324 * The synthesized result might be different from the original `Query`, but
15325 * since the synthesized `Query` should return the same results as the
15326 * original one (only the presentation of results might differ), the potential
15327 * difference will not cause issues.
15328 */
15329// PORTING NOTE: Multi-Tab only.
15330function xc(t) {
15331 return nn(t.path, t.collectionGroup, t.orderBy, t.filters, t.limit, "F" /* First */ , t.startAt, t.endAt);
15332}
15333
15334/** Returns the IDs of the clients that are currently active. */
15335// PORTING NOTE: Multi-Tab only.
15336function Nc(t) {
15337 const e = B(t);
15338 return B(B(e.localStore).persistence).vi();
15339}
15340
15341/** Applies a query target change from a different tab. */
15342// PORTING NOTE: Multi-Tab only.
15343async function kc(t, e, n, s) {
15344 const i = B(t);
15345 if (i.dc)
15346 // If we receive a target state notification via WebStorage, we are
15347 // either already secondary or another tab has taken the primary lease.
15348 return void x("SyncEngine", "Ignoring unexpected query state notification.");
15349 const r = i.rc.get(e);
15350 if (r && r.length > 0) switch (n) {
15351 case "current":
15352 case "not-current":
15353 {
15354 const t = await ko(i.localStore, mn(r[0])), s = gs.createSynthesizedRemoteEventForCurrentChange(e, "current" === n, Ht.EMPTY_BYTE_STRING);
15355 await Rc(i, t, s);
15356 break;
15357 }
15358
15359 case "rejected":
15360 await Co(i.localStore, e,
15361 /* keepPersistedTargetData */ !0), pc(i, e, s);
15362 break;
15363
15364 default:
15365 M();
15366 }
15367}
15368
15369/** Adds or removes Watch targets for queries from different tabs. */ async function Oc(t, e, n) {
15370 const s = Mc(t);
15371 if (s.dc) {
15372 for (const t of e) {
15373 if (s.rc.has(t)) {
15374 // A target might have been added in a previous attempt
15375 x("SyncEngine", "Adding an already active target " + t);
15376 continue;
15377 }
15378 const e = await No(s.localStore, t), n = await Do(s.localStore, e);
15379 await cc(s, xc(e), n.targetId,
15380 /*current=*/ !1, n.resumeToken), fu(s.remoteStore, n);
15381 }
15382 for (const t of n)
15383 // Check that the target is still active since the target might have been
15384 // removed if it has been rejected by the backend.
15385 s.rc.has(t) &&
15386 // Release queries that are still active.
15387 await Co(s.localStore, t,
15388 /* keepPersistedTargetData */ !1).then((() => {
15389 du(s.remoteStore, t), pc(s, t);
15390 })).catch(At);
15391 }
15392}
15393
15394function Mc(t) {
15395 const e = B(t);
15396 return e.remoteStore.remoteSyncer.applyRemoteEvent = lc.bind(null, e), e.remoteStore.remoteSyncer.getRemoteKeysForTarget = Pc.bind(null, e),
15397 e.remoteStore.remoteSyncer.rejectListen = dc.bind(null, e), e.sc.Wo = Wu.bind(null, e.eventManager),
15398 e.sc.wc = zu.bind(null, e.eventManager), e;
15399}
15400
15401function Fc(t) {
15402 const e = B(t);
15403 return e.remoteStore.remoteSyncer.applySuccessfulWrite = _c.bind(null, e), e.remoteStore.remoteSyncer.rejectFailedWrite = wc.bind(null, e),
15404 e;
15405}
15406
15407/**
15408 * Loads a Firestore bundle into the SDK. The returned promise resolves when
15409 * the bundle has finished loading.
15410 *
15411 * @param syncEngine - SyncEngine to use.
15412 * @param bundleReader - Bundle to load into the SDK.
15413 * @param task - LoadBundleTask used to update the loading progress to the public API.
15414 */ function $c(t, e, n) {
15415 const s = B(t);
15416 // eslint-disable-next-line @typescript-eslint/no-floating-promises
15417 (
15418 /** Loads a bundle and returns the list of affected collection groups. */
15419 async function(t, e, n) {
15420 try {
15421 const s = await e.getMetadata();
15422 if (await function(t, e) {
15423 const n = B(t), s = xs(e.createTime);
15424 return n.persistence.runTransaction("hasNewerBundle", "readonly", (t => n.Ns.getBundleMetadata(t, e.id))).then((t => !!t && t.createTime.compareTo(s) >= 0));
15425 }
15426 /**
15427 * Saves the given `BundleMetadata` to local persistence.
15428 */ (t.localStore, s)) return await e.close(), n._completeWith(function(t) {
15429 return {
15430 taskState: "Success",
15431 documentsLoaded: t.totalDocuments,
15432 bytesLoaded: t.totalBytes,
15433 totalDocuments: t.totalDocuments,
15434 totalBytes: t.totalBytes
15435 };
15436 }(s)), Promise.resolve(new Set);
15437 n._updateProgress(tc(s));
15438 const i = new Zu(s, t.localStore, e.It);
15439 let r = await e.mc();
15440 for (;r; ) {
15441 const t = await i.Fu(r);
15442 t && n._updateProgress(t), r = await e.mc();
15443 }
15444 const o = await i.complete();
15445 return await Rc(t, o.Lu,
15446 /* remoteEvent */ void 0),
15447 // Save metadata, so loading the same bundle again will be skipped.
15448 await function(t, e) {
15449 const n = B(t);
15450 return n.persistence.runTransaction("Save bundle", "readwrite", (t => n.Ns.saveBundleMetadata(t, e)));
15451 }
15452 /**
15453 * Returns a promise of a `NamedQuery` associated with the given query name. The
15454 * promise resolves to undefined if no persisted data can be found.
15455 */ (t.localStore, s), n._completeWith(o.progress), Promise.resolve(o.Bu);
15456 } catch (t) {
15457 return k("SyncEngine", `Loading bundle failed with ${t}`), n._failWith(t), Promise.resolve(new Set);
15458 }
15459 }
15460 /**
15461 * @license
15462 * Copyright 2020 Google LLC
15463 *
15464 * Licensed under the Apache License, Version 2.0 (the "License");
15465 * you may not use this file except in compliance with the License.
15466 * You may obtain a copy of the License at
15467 *
15468 * http://www.apache.org/licenses/LICENSE-2.0
15469 *
15470 * Unless required by applicable law or agreed to in writing, software
15471 * distributed under the License is distributed on an "AS IS" BASIS,
15472 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15473 * See the License for the specific language governing permissions and
15474 * limitations under the License.
15475 */
15476 /**
15477 * Provides all components needed for Firestore with in-memory persistence.
15478 * Uses EagerGC garbage collection.
15479 */)(s, e, n).then((t => {
15480 s.sharedClientState.notifyBundleLoaded(t);
15481 }));
15482}
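/**
 * Illustrative sketch (not part of the bundle): the loader above is driven by
 * the public `loadBundle()` API, and `namedQuery()` can then read queries that
 * the bundle persisted. `db` and `bundleData` (a string, ArrayBuffer or
 * ReadableStream produced by a server-side bundle builder) are assumed.
 *
 *   import { loadBundle, namedQuery, getDocsFromCache } from "firebase/firestore";
 *
 *   await loadBundle(db, bundleData);          // resolves when loading finishes
 *   const q = await namedQuery(db, "latest-stories");
 *   if (q) {
 *     const snap = await getDocsFromCache(q);  // served from the bundled documents
 *     console.log(snap.size, "documents loaded from the bundle");
 *   }
 */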
15483
15484class Bc {
15485 constructor() {
15486 this.synchronizeTabs = !1;
15487 }
15488 async initialize(t) {
15489 this.It = eu(t.databaseInfo.databaseId), this.sharedClientState = this.gc(t), this.persistence = this.yc(t),
15490 await this.persistence.start(), this.localStore = this.Ic(t), this.gcScheduler = this.Tc(t, this.localStore),
15491 this.indexBackfillerScheduler = this.Ec(t, this.localStore);
15492 }
15493 Tc(t, e) {
15494 return null;
15495 }
15496 Ec(t, e) {
15497 return null;
15498 }
15499 Ic(t) {
15500 return Ao(this.persistence, new To, t.initialUser, this.It);
15501 }
15502 yc(t) {
15503 return new ao(lo.Bs, this.It);
15504 }
15505 gc(t) {
15506 return new Wo;
15507 }
15508 async terminate() {
15509 this.gcScheduler && this.gcScheduler.stop(), await this.sharedClientState.shutdown(),
15510 await this.persistence.shutdown();
15511 }
15512}
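/**
 * Illustrative note (not part of the bundle): this in-memory provider is what
 * a plain `getFirestore(app)` instance uses in this SDK line until IndexedDB
 * persistence is explicitly enabled, so the cache is dropped on page unload.
 *
 *   import { getFirestore } from "firebase/firestore";
 *
 *   const db = getFirestore(app); // memory-only cache by default
 */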
15513
15514/**
15515 * Provides all components needed for Firestore with IndexedDB persistence.
15516 */ class Lc extends Bc {
15517 constructor(t, e, n) {
15518 super(), this.Ac = t, this.cacheSizeBytes = e, this.forceOwnership = n, this.synchronizeTabs = !1;
15519 }
15520 async initialize(t) {
15521 await super.initialize(t), await this.Ac.initialize(this, t),
15522 // Enqueue writes from a previous session
15523 await Fc(this.Ac.syncEngine), await bu(this.Ac.remoteStore),
15524 // NOTE: This will immediately call the listener, so we make sure to
15525 // set it after localStore / remoteStore are started.
15526 await this.persistence.li((() => (this.gcScheduler && !this.gcScheduler.started && this.gcScheduler.start(),
15527 this.indexBackfillerScheduler && !this.indexBackfillerScheduler.started && this.indexBackfillerScheduler.start(),
15528 Promise.resolve())));
15529 }
15530 Ic(t) {
15531 return Ao(this.persistence, new To, t.initialUser, this.It);
15532 }
15533 Tc(t, e) {
15534 const n = this.persistence.referenceDelegate.garbageCollector;
15535 return new Lr(n, t.asyncQueue, e);
15536 }
15537 Ec(t, e) {
15538 const n = new Ot(e, this.persistence);
15539 return new kt(t.asyncQueue, n);
15540 }
15541 yc(t) {
15542 const e = po(t.databaseInfo.databaseId, t.databaseInfo.persistenceKey), n = void 0 !== this.cacheSizeBytes ? br.withCacheSize(this.cacheSizeBytes) : br.DEFAULT;
15543 return new mo(this.synchronizeTabs, e, t.clientId, n, t.asyncQueue, Zo(), tu(), this.It, this.sharedClientState, !!this.forceOwnership);
15544 }
15545 gc(t) {
15546 return new Wo;
15547 }
15548}
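/**
 * Illustrative sketch (not part of the bundle): this provider is selected by
 * the public `enableIndexedDbPersistence()` API; the `forceOwnership` flag
 * above maps to the optional persistence settings.
 *
 *   import { getFirestore, enableIndexedDbPersistence } from "firebase/firestore";
 *
 *   const db = getFirestore(app);
 *   enableIndexedDbPersistence(db, { forceOwnership: true }).catch((err) => {
 *     // e.g. "failed-precondition" (another tab) or "unimplemented" (no IndexedDB)
 *     console.warn("Persistence unavailable:", err.code);
 *   });
 */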
15549
15550/**
15551 * Provides all components needed for Firestore with multi-tab IndexedDB
15552 * persistence.
15553 *
15554 * In the legacy client, this provider is used to provide both multi-tab and
15555 * non-multi-tab persistence since we cannot tell at build time whether
15556 * `synchronizeTabs` will be enabled.
15557 */ class Uc extends Lc {
15558 constructor(t, e) {
15559 super(t, e, /* forceOwnership= */ !1), this.Ac = t, this.cacheSizeBytes = e, this.synchronizeTabs = !0;
15560 }
15561 async initialize(t) {
15562 await super.initialize(t);
15563 const e = this.Ac.syncEngine;
15564 this.sharedClientState instanceof jo && (this.sharedClientState.syncEngine = {
15565 Fr: Sc.bind(null, e),
15566 $r: kc.bind(null, e),
15567 Br: Oc.bind(null, e),
15568 vi: Nc.bind(null, e),
15569 Mr: Vc.bind(null, e)
15570 }, await this.sharedClientState.start()),
15571 // NOTE: This will immediately call the listener, so we make sure to
15572 // set it after localStore / remoteStore are started.
15573 await this.persistence.li((async t => {
15574 await Dc(this.Ac.syncEngine, t), this.gcScheduler && (t && !this.gcScheduler.started ? this.gcScheduler.start() : t || this.gcScheduler.stop()),
15575 this.indexBackfillerScheduler && (t && !this.indexBackfillerScheduler.started ? this.indexBackfillerScheduler.start() : t || this.indexBackfillerScheduler.stop());
15576 }));
15577 }
15578 gc(t) {
15579 const e = Zo();
15580 if (!jo.C(e)) throw new U(L.UNIMPLEMENTED, "IndexedDB persistence is only available on platforms that support LocalStorage.");
15581 const n = po(t.databaseInfo.databaseId, t.databaseInfo.persistenceKey);
15582 return new jo(e, t.asyncQueue, n, t.clientId, t.initialUser);
15583 }
15584}
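/**
 * Illustrative sketch (not part of the bundle): the multi-tab provider above
 * is selected by `enableMultiTabIndexedDbPersistence()`, which keeps the cache
 * and query state synchronized across browser tabs via WebStorage.
 *
 *   import { enableMultiTabIndexedDbPersistence } from "firebase/firestore";
 *
 *   enableMultiTabIndexedDbPersistence(db).catch((err) => {
 *     if (err.code === "failed-precondition") {
 *       // persistence already enabled in an incompatible mode
 *     } else if (err.code === "unimplemented") {
 *       // browser lacks IndexedDB/LocalStorage support
 *     }
 *   });
 */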
15585
15586/**
15587 * Initializes and wires the components that are needed to interface with the
15588 * network.
15589 */ class qc {
15590 async initialize(t, e) {
15591 this.localStore || (this.localStore = t.localStore, this.sharedClientState = t.sharedClientState,
15592 this.datastore = this.createDatastore(e), this.remoteStore = this.createRemoteStore(e),
15593 this.eventManager = this.createEventManager(e), this.syncEngine = this.createSyncEngine(e,
15594 /* startAsPrimary=*/ !t.synchronizeTabs), this.sharedClientState.onlineStateHandler = t => fc(this.syncEngine, t, 1 /* SharedClientState */),
15595 this.remoteStore.remoteSyncer.handleCredentialChange = bc.bind(null, this.syncEngine),
15596 await Ou(this.remoteStore, this.syncEngine.isPrimaryClient));
15597 }
15598 createEventManager(t) {
15599 return new Gu;
15600 }
15601 createDatastore(t) {
15602 const e = eu(t.databaseInfo.databaseId), n = (s = t.databaseInfo, new Xo(s));
15603 var s;
15604 /** Return the Platform-specific connectivity monitor. */ return function(t, e, n, s) {
15605 return new ou(t, e, n, s);
15606 }(t.authCredentials, t.appCheckCredentials, n, e);
15607 }
15608 createRemoteStore(t) {
15609 return e = this.localStore, n = this.datastore, s = t.asyncQueue, i = t => fc(this.syncEngine, t, 0 /* RemoteStore */),
15610 r = Ho.C() ? new Ho : new zo, new au(e, n, s, i, r);
15611 var e, n, s, i, r;
15612 /** Re-enables the network. Idempotent. */ }
15613 createSyncEngine(t, e) {
15614 return function(t, e, n,
15615 // PORTING NOTE: Manages state synchronization in multi-tab environments.
15616 s, i, r, o) {
15617 const u = new oc(t, e, n, s, i, r);
15618 return o && (u.dc = !0), u;
15619 }(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, t.initialUser, t.maxConcurrentLimboResolutions, e);
15620 }
15621 terminate() {
15622 return async function(t) {
15623 const e = B(t);
15624 x("RemoteStore", "RemoteStore shutting down."), e._u.add(5 /* Shutdown */), await lu(e),
15625 e.mu.shutdown(),
15626 // Set the OnlineState to Unknown (rather than Offline) to avoid potentially
15627 // triggering spurious listener events with cached data, etc.
15628 e.gu.set("Unknown" /* Unknown */);
15629 }(this.remoteStore);
15630 }
15631}
15632
15633/**
15634 * @license
15635 * Copyright 2017 Google LLC
15636 *
15637 * Licensed under the Apache License, Version 2.0 (the "License");
15638 * you may not use this file except in compliance with the License.
15639 * You may obtain a copy of the License at
15640 *
15641 * http://www.apache.org/licenses/LICENSE-2.0
15642 *
15643 * Unless required by applicable law or agreed to in writing, software
15644 * distributed under the License is distributed on an "AS IS" BASIS,
15645 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15646 * See the License for the specific language governing permissions and
15647 * limitations under the License.
15648 */ function Kc(t, e, n) {
15649 if (!n) throw new U(L.INVALID_ARGUMENT, `Function ${t}() cannot be called with an empty ${e}.`);
15650}
15651
15652/**
15653 * Validates that two boolean options are not set at the same time.
15654 * @internal
15655 */ function Gc(t, e, n, s) {
15656 if (!0 === e && !0 === s) throw new U(L.INVALID_ARGUMENT, `${t} and ${n} cannot be used together.`);
15657}
15658
15659/**
15660 * Validates that `path` refers to a document (indicated by the fact it contains
15661 * an even number of segments).
15662 */ function Qc(t) {
15663 if (!at.isDocumentKey(t)) throw new U(L.INVALID_ARGUMENT, `Invalid document reference. Document references must have an even number of segments, but ${t} has ${t.length}.`);
15664}
15665
15666/**
15667 * Validates that `path` refers to a collection (indicated by the fact it
15668 * contains an odd number of segments).
15669 */ function jc(t) {
15670 if (at.isDocumentKey(t)) throw new U(L.INVALID_ARGUMENT, `Invalid collection reference. Collection references must have an odd number of segments, but ${t} has ${t.length}.`);
15671}
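/**
 * Illustrative note (not part of the bundle): the even/odd segment checks
 * above are what make mismatched paths throw at the public API level.
 *
 *   doc(db, "users/alice");        // 2 segments  -> valid document path
 *   collection(db, "users");       // 1 segment   -> valid collection path
 *   doc(db, "users");              // throws: odd number of segments
 *   collection(db, "users/alice"); // throws: even number of segments
 */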
15672
15673/**
15674 * Returns true if it's a non-null object without a custom prototype
15675 * (i.e. excludes Array, Date, etc.).
15676 */
15677/** Returns a string describing the type / value of the provided input. */
15678function Wc(t) {
15679 if (void 0 === t) return "undefined";
15680 if (null === t) return "null";
15681 if ("string" == typeof t) return t.length > 20 && (t = `${t.substring(0, 20)}...`),
15682 JSON.stringify(t);
15683 if ("number" == typeof t || "boolean" == typeof t) return "" + t;
15684 if ("object" == typeof t) {
15685 if (t instanceof Array) return "an array";
15686 {
15687 const e =
15688 /** try to get the constructor name for an object. */
15689 function(t) {
15690 if (t.constructor) return t.constructor.name;
15691 return null;
15692 }
15693 /**
15694 * Casts `obj` to `T`, optionally unwrapping Compat types to expose the
15695 * underlying instance. Throws if `obj` is not an instance of `T`.
15696 *
15697 * This cast is used in the Lite and Full SDK to verify instance types for
15698 * arguments passed to the public API.
15699 * @internal
15700 */ (t);
15701 return e ? `a custom ${e} object` : "an object";
15702 }
15703 }
15704 return "function" == typeof t ? "a function" : M();
15705}
15706
15707function zc(t,
15708// eslint-disable-next-line @typescript-eslint/no-explicit-any
15709e) {
15710 if ("_delegate" in t && (
15711 // Unwrap Compat types
15712 // eslint-disable-next-line @typescript-eslint/no-explicit-any
15713 t = t._delegate), !(t instanceof e)) {
15714 if (e.name === t.constructor.name) throw new U(L.INVALID_ARGUMENT, "Type does not match the expected instance. Did you pass a reference from a different Firestore SDK?");
15715 {
15716 const n = Wc(t);
15717 throw new U(L.INVALID_ARGUMENT, `Expected type '${e.name}', but it was: ${n}`);
15718 }
15719 }
15720 return t;
15721}
15722
15723function Hc(t, e) {
15724 if (e <= 0) throw new U(L.INVALID_ARGUMENT, `Function ${t}() requires a positive number, but it was: ${e}.`);
15725}
15726
15727/**
15728 * @license
15729 * Copyright 2020 Google LLC
15730 *
15731 * Licensed under the Apache License, Version 2.0 (the "License");
15732 * you may not use this file except in compliance with the License.
15733 * You may obtain a copy of the License at
15734 *
15735 * http://www.apache.org/licenses/LICENSE-2.0
15736 *
15737 * Unless required by applicable law or agreed to in writing, software
15738 * distributed under the License is distributed on an "AS IS" BASIS,
15739 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15740 * See the License for the specific language governing permissions and
15741 * limitations under the License.
15742 */ const Jc = new Map;
15743
15744/**
15745 * An instance map that ensures only one Datastore exists per Firestore
15746 * instance.
15747 */
15748/**
15749 * A concrete type describing all the values that can be applied via a
15750 * user-supplied `FirestoreSettings` object. This is a separate type so that
15751 * defaults can be supplied and the value can be checked for equality.
15752 */
15753class Yc {
15754 constructor(t) {
15755 var e;
15756 if (void 0 === t.host) {
15757 if (void 0 !== t.ssl) throw new U(L.INVALID_ARGUMENT, "Can't provide ssl option if host option is not set");
15758 this.host = "firestore.googleapis.com", this.ssl = true;
15759 } else this.host = t.host, this.ssl = null === (e = t.ssl) || void 0 === e || e;
15760 if (this.credentials = t.credentials, this.ignoreUndefinedProperties = !!t.ignoreUndefinedProperties,
15761 void 0 === t.cacheSizeBytes) this.cacheSizeBytes = 41943040; else {
15762 if (-1 !== t.cacheSizeBytes && t.cacheSizeBytes < 1048576) throw new U(L.INVALID_ARGUMENT, "cacheSizeBytes must be at least 1048576");
15763 this.cacheSizeBytes = t.cacheSizeBytes;
15764 }
15765 this.experimentalForceLongPolling = !!t.experimentalForceLongPolling, this.experimentalAutoDetectLongPolling = !!t.experimentalAutoDetectLongPolling,
15766 this.useFetchStreams = !!t.useFetchStreams, Gc("experimentalForceLongPolling", t.experimentalForceLongPolling, "experimentalAutoDetectLongPolling", t.experimentalAutoDetectLongPolling);
15767 }
15768 isEqual(t) {
15769 return this.host === t.host && this.ssl === t.ssl && this.credentials === t.credentials && this.cacheSizeBytes === t.cacheSizeBytes && this.experimentalForceLongPolling === t.experimentalForceLongPolling && this.experimentalAutoDetectLongPolling === t.experimentalAutoDetectLongPolling && this.ignoreUndefinedProperties === t.ignoreUndefinedProperties && this.useFetchStreams === t.useFetchStreams;
15770 }
15771}
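/**
 * Illustrative sketch (not part of the bundle): these defaults and checks are
 * exercised through `initializeFirestore()`. The 41943040 default above is
 * 40 MiB, the 1048576 floor is 1 MiB, and -1 (`CACHE_SIZE_UNLIMITED`) disables
 * the size bound; `experimentalForceLongPolling` and
 * `experimentalAutoDetectLongPolling` cannot both be set.
 *
 *   import { initializeFirestore, CACHE_SIZE_UNLIMITED } from "firebase/firestore";
 *
 *   const db = initializeFirestore(app, {
 *     cacheSizeBytes: CACHE_SIZE_UNLIMITED,
 *     ignoreUndefinedProperties: true,
 *     experimentalAutoDetectLongPolling: true,
 *   });
 */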
15772
15773/**
15774 * @license
15775 * Copyright 2020 Google LLC
15776 *
15777 * Licensed under the Apache License, Version 2.0 (the "License");
15778 * you may not use this file except in compliance with the License.
15779 * You may obtain a copy of the License at
15780 *
15781 * http://www.apache.org/licenses/LICENSE-2.0
15782 *
15783 * Unless required by applicable law or agreed to in writing, software
15784 * distributed under the License is distributed on an "AS IS" BASIS,
15785 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15786 * See the License for the specific language governing permissions and
15787 * limitations under the License.
15788 */
15789/**
15790 * The Cloud Firestore service interface.
15791 *
15792 * Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
15793 */ class Xc {
15794 /** @hideconstructor */
15795 constructor(t, e, n, s) {
15796 this._authCredentials = t, this._appCheckCredentials = e, this._databaseId = n,
15797 this._app = s,
15798 /**
15799 * Whether it's a Firestore or Firestore Lite instance.
15800 */
15801 this.type = "firestore-lite", this._persistenceKey = "(lite)", this._settings = new Yc({}),
15802 this._settingsFrozen = !1;
15803 }
15804 /**
15805 * The {@link @firebase/app#FirebaseApp} associated with this `Firestore` service
15806 * instance.
15807 */ get app() {
15808 if (!this._app) throw new U(L.FAILED_PRECONDITION, "Firestore was not initialized using the Firebase SDK. 'app' is not available");
15809 return this._app;
15810 }
15811 get _initialized() {
15812 return this._settingsFrozen;
15813 }
15814 get _terminated() {
15815 return void 0 !== this._terminateTask;
15816 }
15817 _setSettings(t) {
15818 if (this._settingsFrozen) throw new U(L.FAILED_PRECONDITION, "Firestore has already been started and its settings can no longer be changed. You can only modify settings before calling any other methods on a Firestore object.");
15819 this._settings = new Yc(t), void 0 !== t.credentials && (this._authCredentials = function(t) {
15820 if (!t) return new G;
15821 switch (t.type) {
15822 case "gapi":
15823 const e = t.client;
15824 return new z(e, t.sessionIndex || "0", t.iamToken || null, t.authTokenFactory || null);
15825
15826 case "provider":
15827 return t.client;
15828
15829 default:
15830 throw new U(L.INVALID_ARGUMENT, "makeAuthCredentialsProvider failed due to invalid credential type");
15831 }
15832 }(t.credentials));
15833 }
15834 _getSettings() {
15835 return this._settings;
15836 }
15837 _freezeSettings() {
15838 return this._settingsFrozen = !0, this._settings;
15839 }
15840 _delete() {
15841 return this._terminateTask || (this._terminateTask = this._terminate()), this._terminateTask;
15842 }
15843 /** Returns a JSON-serializable representation of this `Firestore` instance. */ toJSON() {
15844 return {
15845 app: this._app,
15846 databaseId: this._databaseId,
15847 settings: this._settings
15848 };
15849 }
15850 /**
15851 * Terminates all components used by this client. Subclasses can override
15852 * this method to clean up their own dependencies, but must also call this
15853 * method.
15854 *
15855 * Only ever called once.
15856 */ _terminate() {
15857 /**
15858 * Removes all components associated with the provided instance. Must be called
15859 * when the `Firestore` instance is terminated.
15860 */
15861 return function(t) {
15862 const e = Jc.get(t);
15863 e && (x("ComponentProvider", "Removing Datastore"), Jc.delete(t), e.terminate());
15864 }(this), Promise.resolve();
15865 }
15866}
15867
15868/**
15869 * Modify this instance to communicate with the Cloud Firestore emulator.
15870 *
15871 * Note: This must be called before this instance has been used to do any
15872 * operations.
15873 *
15874 * @param firestore - The `Firestore` instance to configure to connect to the
15875 * emulator.
15876 * @param host - the emulator host (ex: localhost).
15877 * @param port - the emulator port (ex: 9000).
15878 * @param options.mockUserToken - the mock auth token to use for unit testing
15879 * Security Rules.
15880 */ function Zc(t, e, n, s = {}) {
15881 var i;
15882 const r = (t = zc(t, Xc))._getSettings();
15883 if ("firestore.googleapis.com" !== r.host && r.host !== e && k("Host has been set in both settings() and useEmulator(), emulator host will be used"),
15884 t._setSettings(Object.assign(Object.assign({}, r), {
15885 host: `${e}:${n}`,
15886 ssl: !1
15887 })), s.mockUserToken) {
15888 let e, n;
15889 if ("string" == typeof s.mockUserToken) e = s.mockUserToken, n = v.MOCK_USER; else {
15890 // Let createMockUserToken validate first (catches common mistakes like
15891 // invalid field "uid" and missing field "sub" / "user_id".)
15892 e = d(s.mockUserToken, null === (i = t._app) || void 0 === i ? void 0 : i.options.projectId);
15893 const r = s.mockUserToken.sub || s.mockUserToken.user_id;
15894 if (!r) throw new U(L.INVALID_ARGUMENT, "mockUserToken must contain 'sub' or 'user_id' field!");
15895 n = new v(r);
15896 }
15897 t._authCredentials = new Q(new K(e, n));
15898 }
15899}
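/**
 * Illustrative sketch (not part of the bundle): the function above appears to
 * back the public `connectFirestoreEmulator()` API. Host and port values are
 * examples only.
 *
 *   import { connectFirestoreEmulator } from "firebase/firestore";
 *
 *   connectFirestoreEmulator(db, "localhost", 8080, {
 *     mockUserToken: { sub: "test-user" }, // used when testing Security Rules
 *   });
 */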
15900
15901/**
15902 * @license
15903 * Copyright 2020 Google LLC
15904 *
15905 * Licensed under the Apache License, Version 2.0 (the "License");
15906 * you may not use this file except in compliance with the License.
15907 * You may obtain a copy of the License at
15908 *
15909 * http://www.apache.org/licenses/LICENSE-2.0
15910 *
15911 * Unless required by applicable law or agreed to in writing, software
15912 * distributed under the License is distributed on an "AS IS" BASIS,
15913 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15914 * See the License for the specific language governing permissions and
15915 * limitations under the License.
15916 */
15917/**
15918 * A `DocumentReference` refers to a document location in a Firestore database
15919 * and can be used to write, read, or listen to the location. The document at
15920 * the referenced location may or may not exist.
15921 */ class ta {
15922 /** @hideconstructor */
15923 constructor(t,
15924 /**
15925 * If provided, the `FirestoreDataConverter` associated with this instance.
15926 */
15927 e, n) {
15928 this.converter = e, this._key = n,
15929 /** The type of this Firestore reference. */
15930 this.type = "document", this.firestore = t;
15931 }
15932 get _path() {
15933 return this._key.path;
15934 }
15935 /**
15936 * The document's identifier within its collection.
15937 */ get id() {
15938 return this._key.path.lastSegment();
15939 }
15940 /**
15941 * A string representing the path of the referenced document (relative
15942 * to the root of the database).
15943 */ get path() {
15944 return this._key.path.canonicalString();
15945 }
15946 /**
15947 * The collection this `DocumentReference` belongs to.
15948 */ get parent() {
15949 return new na(this.firestore, this.converter, this._key.path.popLast());
15950 }
15951 withConverter(t) {
15952 return new ta(this.firestore, t, this._key);
15953 }
15954}
15955
15956/**
15957 * A `Query` refers to a query which you can read or listen to. You can also
15958 * construct refined `Query` objects by adding filters and ordering.
15959 */ class ea {
15960 // This is the lite version of the Query class in the main SDK.
15961 /** @hideconstructor protected */
15962 constructor(t,
15963 /**
15964 * If provided, the `FirestoreDataConverter` associated with this instance.
15965 */
15966 e, n) {
15967 this.converter = e, this._query = n,
15968 /** The type of this Firestore reference. */
15969 this.type = "query", this.firestore = t;
15970 }
15971 withConverter(t) {
15972 return new ea(this.firestore, t, this._query);
15973 }
15974}
15975
15976/**
15977 * A `CollectionReference` object can be used for adding documents, getting
15978 * document references, and querying for documents (using {@link query}).
15979 */ class na extends ea {
15980 /** @hideconstructor */
15981 constructor(t, e, n) {
15982 super(t, e, sn(n)), this._path = n,
15983 /** The type of this Firestore reference. */
15984 this.type = "collection";
15985 }
15986 /** The collection's identifier. */ get id() {
15987 return this._query.path.lastSegment();
15988 }
15989 /**
15990 * A string representing the path of the referenced collection (relative
15991 * to the root of the database).
15992 */ get path() {
15993 return this._query.path.canonicalString();
15994 }
15995 /**
15996 * A reference to the containing `DocumentReference` if this is a
15997 * subcollection. If this isn't a subcollection, the reference is null.
15998 */ get parent() {
15999 const t = this._path.popLast();
16000 return t.isEmpty() ? null : new ta(this.firestore,
16001 /* converter= */ null, new at(t));
16002 }
16003 withConverter(t) {
16004 return new na(this.firestore, t, this._path);
16005 }
16006}
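/**
 * Illustrative sketch (not part of the bundle): how the reference types above
 * relate through `id`, `path` and `parent`.
 *
 *   import { collection, doc } from "firebase/firestore";
 *
 *   const cities = collection(db, "cities");   // CollectionReference
 *   const sf = doc(cities, "SF");              // DocumentReference
 *
 *   sf.id;           // "SF"
 *   sf.path;         // "cities/SF"
 *   sf.parent.path;  // "cities"
 *   cities.parent;   // null (top-level collection)
 */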
16007
16008function sa(t, e, ...n) {
16009 if (t = _(t), Kc("collection", "path", e), t instanceof Xc) {
16010 const s = ot.fromString(e, ...n);
16011 return jc(s), new na(t, /* converter= */ null, s);
16012 }
16013 {
16014 if (!(t instanceof ta || t instanceof na)) throw new U(L.INVALID_ARGUMENT, "Expected first argument to collection() to be a CollectionReference, a DocumentReference or FirebaseFirestore");
16015 const s = t._path.child(ot.fromString(e, ...n));
16016 return jc(s), new na(t.firestore,
16017 /* converter= */ null, s);
16018 }
16019}
16020
16021// TODO(firestorelite): Consider using ErrorFactory -
16022// https://github.com/firebase/firebase-js-sdk/blob/0131e1f/packages/util/src/errors.ts#L106
16023/**
16024 * Creates and returns a new `Query` instance that includes all documents in the
16025 * database that are contained in a collection or subcollection with the
16026 * given `collectionId`.
16027 *
16028 * @param firestore - A reference to the root `Firestore` instance.
16029 * @param collectionId - Identifies the collections to query over. Every
16030 * collection or subcollection with this ID as the last segment of its path
16031 * will be included. Cannot contain a slash.
16032 * @returns The created `Query`.
16033 */ function ia(t, e) {
16034 if (t = zc(t, Xc), Kc("collectionGroup", "collection id", e), e.indexOf("/") >= 0) throw new U(L.INVALID_ARGUMENT, `Invalid collection ID '${e}' passed to function collectionGroup(). Collection IDs must not contain '/'.`);
16035 return new ea(t,
16036 /* converter= */ null,
16037 /**
16038 * Creates a new Query for a collection group query that matches all documents
16039 * within the provided collection group.
16040 */
16041 function(t) {
16042 return new en(ot.emptyPath(), t);
16043 }(e));
16044}
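/**
 * Illustrative sketch (not part of the bundle): querying every collection or
 * subcollection named "landmarks", regardless of its parent document.
 *
 *   import { collectionGroup, getDocs } from "firebase/firestore";
 *
 *   const landmarks = await getDocs(collectionGroup(db, "landmarks"));
 *   landmarks.forEach((d) => console.log(d.ref.path));
 */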
16045
16046function ra(t, e, ...n) {
16047 if (t = _(t),
16048 // We allow omission of 'pathString' but explicitly prohibit passing in both
16049 // 'undefined' and 'null'.
16050 1 === arguments.length && (e = Z.R()), Kc("doc", "path", e), t instanceof Xc) {
16051 const s = ot.fromString(e, ...n);
16052 return Qc(s), new ta(t,
16053 /* converter= */ null, new at(s));
16054 }
16055 {
16056 if (!(t instanceof ta || t instanceof na)) throw new U(L.INVALID_ARGUMENT, "Expected first argument to collection() to be a CollectionReference, a DocumentReference or FirebaseFirestore");
16057 const s = t._path.child(ot.fromString(e, ...n));
16058 return Qc(s), new ta(t.firestore, t instanceof na ? t.converter : null, new at(s));
16059 }
16060}
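/**
 * Illustrative sketch (not part of the bundle): the overloads above accept
 * both explicit paths and auto-generated IDs.
 *
 *   import { collection, doc } from "firebase/firestore";
 *
 *   doc(db, "cities/SF");            // absolute path from the root
 *   doc(db, "cities", "SF");         // equivalent, using extra segments
 *   doc(collection(db, "cities"));   // auto-generated document ID
 */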
16061
16062/**
16063 * Returns true if the provided references are equal.
16064 *
16065 * @param left - A reference to compare.
16066 * @param right - A reference to compare.
16067 * @returns true if the references point to the same location in the same
16068 * Firestore database.
16069 */ function oa(t, e) {
16070 return t = _(t), e = _(e), (t instanceof ta || t instanceof na) && (e instanceof ta || e instanceof na) && (t.firestore === e.firestore && t.path === e.path && t.converter === e.converter);
16071}
16072
16073/**
16074 * Returns true if the provided queries point to the same collection and apply
16075 * the same constraints.
16076 *
16077 * @param left - A `Query` to compare.
16078 * @param right - A `Query` to compare.
16079 * @returns true if the references point to the same location in the same
16080 * Firestore database.
16081 */ function ua(t, e) {
16082 return t = _(t), e = _(e), t instanceof ea && e instanceof ea && (t.firestore === e.firestore && fn(t._query, e._query) && t.converter === e.converter);
16083}
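/**
 * Illustrative sketch (not part of the bundle): the equality helpers above
 * compare location, converter and (for queries) constraints.
 *
 *   import { collection, doc, query, where, refEqual, queryEqual } from "firebase/firestore";
 *
 *   refEqual(doc(db, "cities/SF"), doc(db, "cities", "SF"));   // true
 *   refEqual(doc(db, "cities/SF"), doc(db, "cities/LA"));      // false
 *
 *   const base = collection(db, "cities");
 *   queryEqual(query(base, where("pop", ">", 1)),
 *              query(base, where("pop", ">", 1)));             // true
 */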
16084
16085/**
16086 * @license
16087 * Copyright 2020 Google LLC
16088 *
16089 * Licensed under the Apache License, Version 2.0 (the "License");
16090 * you may not use this file except in compliance with the License.
16091 * You may obtain a copy of the License at
16092 *
16093 * http://www.apache.org/licenses/LICENSE-2.0
16094 *
16095 * Unless required by applicable law or agreed to in writing, software
16096 * distributed under the License is distributed on an "AS IS" BASIS,
16097 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16098 * See the License for the specific language governing permissions and
16099 * limitations under the License.
16100 */
16101/**
16102 * How many bytes to read each time `ReadableStreamReader.read()` is
16103 * called. Only applicable for byte streams that we control (e.g. those backed
16104 * by a UInt8Array).
16105 */
16106/**
16107 * Builds a `ByteStreamReader` from a UInt8Array.
16108 * @param source - The data source to use.
16109 * @param bytesPerRead - How many bytes each `read()` from the returned reader
16110 * will read.
16111 */
16112function ca(t, e = 10240) {
16113 let n = 0;
16114 // The TypeScript definition for ReadableStreamReader changed. We use
16115 // `any` here to allow this code to compile with different versions.
16116 // See https://github.com/microsoft/TypeScript/issues/42970
16117 // eslint-disable-next-line @typescript-eslint/no-explicit-any
16118 return {
16119 // eslint-disable-next-line @typescript-eslint/no-explicit-any
16120 async read() {
16121 if (n < t.byteLength) {
16122 const s = {
16123 value: t.slice(n, n + e),
16124 done: !1
16125 };
16126 return n += e, s;
16127 }
16128 return {
16129 done: !0
16130 };
16131 },
16132 async cancel() {},
16133 releaseLock() {},
16134 closed: Promise.reject("unimplemented")
16135 };
16136}
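/**
 * Illustrative sketch (not part of the bundle): a standalone re-implementation
 * of the chunked reader above, showing the ReadableStreamReader-like contract
 * it fulfils ({ value, done } results until the backing bytes are exhausted).
 *
 *   function chunkedReader(bytes, bytesPerRead = 10240) {
 *     let offset = 0;
 *     return {
 *       async read() {
 *         if (offset < bytes.byteLength) {
 *           const value = bytes.slice(offset, offset + bytesPerRead);
 *           offset += bytesPerRead;
 *           return { value, done: false };
 *         }
 *         return { done: true };
 *       },
 *       async cancel() {},
 *     };
 *   }
 *
 *   // const reader = chunkedReader(new TextEncoder().encode("hello"), 2);
 *   // reader.read() yields "he", then "ll", then "o", then { done: true }.
 */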
16137
16138/**
16139 * @license
16140 * Copyright 2020 Google LLC
16141 *
16142 * Licensed under the Apache License, Version 2.0 (the "License");
16143 * you may not use this file except in compliance with the License.
16144 * You may obtain a copy of the License at
16145 *
16146 * http://www.apache.org/licenses/LICENSE-2.0
16147 *
16148 * Unless required by applicable law or agreed to in writing, software
16149 * distributed under the License is distributed on an "AS IS" BASIS,
16150 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16151 * See the License for the specific language governing permissions and
16152 * limitations under the License.
16153 */
16154/**
16155 * On web, a `ReadableStream` is wrapped by a `ByteStreamReader`.
16156 */
16157/**
16158 * @license
16159 * Copyright 2017 Google LLC
16160 *
16161 * Licensed under the Apache License, Version 2.0 (the "License");
16162 * you may not use this file except in compliance with the License.
16163 * You may obtain a copy of the License at
16164 *
16165 * http://www.apache.org/licenses/LICENSE-2.0
16166 *
16167 * Unless required by applicable law or agreed to in writing, software
16168 * distributed under the License is distributed on an "AS IS" BASIS,
16169 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16170 * See the License for the specific language governing permissions and
16171 * limitations under the License.
16172 */
16173/*
16174 * A wrapper implementation of Observer<T> that will dispatch events
16175 * asynchronously. To allow immediate silencing, a mute call is added which
16176 * causes already-scheduled events to no longer be raised.
16177 */
16178class aa {
16179 constructor(t) {
16180 this.observer = t,
16181 /**
16182 * When set to true, will not raise future events. Necessary to deal with
16183 * async detachment of the listener.
16184 */
16185 this.muted = !1;
16186 }
16187 next(t) {
16188 this.observer.next && this.Rc(this.observer.next, t);
16189 }
16190 error(t) {
16191 this.observer.error ? this.Rc(this.observer.error, t) : N("Uncaught Error in snapshot listener:", t.toString());
16192 }
16193 bc() {
16194 this.muted = !0;
16195 }
16196 Rc(t, e) {
16197 this.muted || setTimeout((() => {
16198 this.muted || t(e);
16199 }), 0);
16200 }
16201}
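/**
 * Illustrative sketch (not part of the bundle): the muting pattern above in
 * plain form. Callbacks are deferred with setTimeout, and the muted flag is
 * checked both before scheduling and inside the scheduled task, so an
 * already-queued event is dropped if the listener detaches in between.
 *
 *   class AsyncObserverSketch {
 *     constructor(observer) { this.observer = observer; this.muted = false; }
 *     next(value) { this.schedule(this.observer.next, value); }
 *     mute() { this.muted = true; }
 *     schedule(fn, arg) {
 *       if (this.muted || !fn) return;
 *       setTimeout(() => { if (!this.muted) fn(arg); }, 0);
 *     }
 *   }
 */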
16202
16203/**
16204 * @license
16205 * Copyright 2020 Google LLC
16206 *
16207 * Licensed under the Apache License, Version 2.0 (the "License");
16208 * you may not use this file except in compliance with the License.
16209 * You may obtain a copy of the License at
16210 *
16211 * http://www.apache.org/licenses/LICENSE-2.0
16212 *
16213 * Unless required by applicable law or agreed to in writing, software
16214 * distributed under the License is distributed on an "AS IS" BASIS,
16215 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16216 * See the License for the specific language governing permissions and
16217 * limitations under the License.
16218 */
16219/**
16220 * A class representing a bundle.
16221 *
16222 * Takes a bundle stream or buffer, and presents abstractions to read bundled
16223 * elements out of the underlying content.
16224 */ class ha {
16225 constructor(
16226 /** The reader to read from underlying binary bundle data source. */
16227 t, e) {
16228 this.Pc = t, this.It = e,
16229 /** Cached bundle metadata. */
16230 this.metadata = new q,
16231 /**
16232 * Internal buffer to hold bundle content, accumulating incomplete element
16233 * content.
16234 */
16235 this.buffer = new Uint8Array, this.vc = new TextDecoder("utf-8"),
16236 // Read the metadata (which is the first element).
16237 this.Vc().then((t => {
16238 t && t.Ou() ? this.metadata.resolve(t.ku.metadata) : this.metadata.reject(new Error(`The first element of the bundle is not a metadata, it is\n ${JSON.stringify(null == t ? void 0 : t.ku)}`));
16239 }), (t => this.metadata.reject(t)));
16240 }
16241 close() {
16242 return this.Pc.cancel();
16243 }
16244 async getMetadata() {
16245 return this.metadata.promise;
16246 }
16247 async mc() {
16248 // Makes sure metadata is read before proceeding.
16249 return await this.getMetadata(), this.Vc();
16250 }
16251 /**
16252 * Reads from the head of the internal buffer, pulling more data from the
16253 * underlying stream if a complete element cannot be found, until an
16254 * element (including the prefixed length and the JSON string) is found.
16255 *
16256 * Once a complete element is read, it is dropped from the internal buffer.
16257 *
16258 * Returns either the bundled element, or null if we have reached the end of
16259 * the stream.
16260 */ async Vc() {
16261 const t = await this.Sc();
16262 if (null === t) return null;
16263 const e = this.vc.decode(t), n = Number(e);
16264 isNaN(n) && this.Dc(`length string (${e}) is not valid number`);
16265 const s = await this.Cc(n);
16266 return new Yu(JSON.parse(s), t.length + n);
16267 }
16268 /** First index of '{' in the underlying buffer. */ xc() {
16269 return this.buffer.findIndex((t => t === "{".charCodeAt(0)));
16270 }
16271 /**
16272 * Reads from the beginning of the internal buffer up to the first '{', and
16273 * returns the content.
16274 *
16275 * Returns null if the end of the stream has been reached.
16276 */ async Sc() {
16277 for (;this.xc() < 0; ) {
16278 if (await this.Nc()) break;
16279 }
16280 // Broke out of the loop because underlying stream is closed, and there
16281 // happens to be no more data to process.
16282 if (0 === this.buffer.length) return null;
16283 const t = this.xc();
16284 // Broke out of the loop because underlying stream is closed, but still
16285 // cannot find an open bracket.
16286 t < 0 && this.Dc("Reached the end of bundle when a length string is expected.");
16287 const e = this.buffer.slice(0, t);
16288 // Update the internal buffer to drop the read length.
16289 return this.buffer = this.buffer.slice(t), e;
16290 }
16291 /**
16292 * Reads a specified number of bytes from the head of the internal buffer,
16293 * pulling more data from the underlying stream if needed.
16294 *
16295 * Returns a string decoded from the read bytes.
16296 */ async Cc(t) {
16297 for (;this.buffer.length < t; ) {
16298 await this.Nc() && this.Dc("Reached the end of bundle when more is expected.");
16299 }
16300 const e = this.vc.decode(this.buffer.slice(0, t));
16301 // Update the internal buffer to drop the read json string.
16302 return this.buffer = this.buffer.slice(t), e;
16303 }
16304 Dc(t) {
16305 // eslint-disable-next-line @typescript-eslint/no-floating-promises
16306 throw this.Pc.cancel(), new Error(`Invalid bundle format: ${t}`);
16307 }
16308 /**
16309 * Pulls more data from the underlying stream into the internal buffer.
16310 * Returns a boolean indicating whether the stream is finished.
16311 */ async Nc() {
16312 const t = await this.Pc.read();
16313 if (!t.done) {
16314 const e = new Uint8Array(this.buffer.length + t.value.length);
16315 e.set(this.buffer), e.set(t.value, this.buffer.length), this.buffer = e;
16316 }
16317 return t.done;
16318 }
16319}
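/*
 * Editorial sketch: the framing consumed by the reader above is a decimal
 * length immediately followed by that many bytes of JSON, repeated element by
 * element. A simplified, synchronous parse of one element from a Uint8Array
 * (illustrative names only):
 *
 *   function readBundleElement(bytes, offset) {
 *     const decoder = new TextDecoder("utf-8");
 *     let brace = offset;
 *     // The length string ends at the first '{' of the JSON payload.
 *     while (brace < bytes.length && bytes[brace] !== "{".charCodeAt(0)) brace++;
 *     const length = Number(decoder.decode(bytes.slice(offset, brace)));
 *     const json = decoder.decode(bytes.slice(brace, brace + length));
 *     return { element: JSON.parse(json), nextOffset: brace + length };
 *   }
 */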
16320
16321/**
16322 * @license
16323 * Copyright 2022 Google LLC
16324 *
16325 * Licensed under the Apache License, Version 2.0 (the "License");
16326 * you may not use this file except in compliance with the License.
16327 * You may obtain a copy of the License at
16328 *
16329 * http://www.apache.org/licenses/LICENSE-2.0
16330 *
16331 * Unless required by applicable law or agreed to in writing, software
16332 * distributed under the License is distributed on an "AS IS" BASIS,
16333 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16334 * See the License for the specific language governing permissions and
16335 * limitations under the License.
16336 */
16337/**
16338 * Represents an aggregation that can be performed by Firestore.
16339 */
16340// eslint-disable-next-line @typescript-eslint/no-unused-vars
16341class la {
16342 constructor() {
16343 /** A type string to uniquely identify instances of this class. */
16344 this.type = "AggregateField";
16345 }
16346}
16347
16348/**
16349 * The results of executing an aggregation query.
16350 */ class fa {
16351 /** @hideconstructor */
16352 constructor(t, e) {
16353 this._data = e,
16354 /** A type string to uniquely identify instances of this class. */
16355 this.type = "AggregateQuerySnapshot", this.query = t;
16356 }
16357 /**
16358 * Returns the results of the aggregations performed over the underlying
16359 * query.
16360 *
16361 * The keys of the returned object will be the same as those of the
16362 * `AggregateSpec` object specified to the aggregation method, and the values
16363 * will be the corresponding aggregation result.
16364 *
16365 * @returns The results of the aggregations performed over the underlying
16366 * query.
16367 */ data() {
16368 return this._data;
16369 }
16370}
16371
16372/**
16373 * @license
16374 * Copyright 2022 Google LLC
16375 *
16376 * Licensed under the Apache License, Version 2.0 (the "License");
16377 * you may not use this file except in compliance with the License.
16378 * You may obtain a copy of the License at
16379 *
16380 * http://www.apache.org/licenses/LICENSE-2.0
16381 *
16382 * Unless required by applicable law or agreed to in writing, software
16383 * distributed under the License is distributed on an "AS IS" BASIS,
16384 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16385 * See the License for the specific language governing permissions and
16386 * limitations under the License.
16387 */
16388/**
16389 * CountQueryRunner encapsulates the logic needed to run the count aggregation
16390 * queries.
16391 */ class da {
16392 constructor(t, e, n) {
16393 this.query = t, this.datastore = e, this.userDataWriter = n;
16394 }
16395 run() {
16396 return uu(this.datastore, this.query._query).then((t => {
16397 F(void 0 !== t[0]);
16398 const e = Object.entries(t[0]).filter((([t, e]) => "count_alias" === t)).map((([t, e]) => this.userDataWriter.convertValue(e)))[0];
16399 return F("number" == typeof e), Promise.resolve(new fa(this.query, {
16400 count: e
16401 }));
16402 }));
16403 }
16404}
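/*
 * Usage sketch: the public count aggregation that this runner appears to back.
 * Assumes the modular "firebase/firestore" entry point and an initialized
 * Firestore instance `db`; "cities" is a placeholder collection.
 *
 *   import { collection, getCountFromServer } from "firebase/firestore";
 *
 *   async function countCities(db) {
 *     const snapshot = await getCountFromServer(collection(db, "cities"));
 *     // The AggregateQuerySnapshot exposes the result via data().
 *     console.log("count:", snapshot.data().count);
 *   }
 */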
16405
16406/**
16407 * @license
16408 * Copyright 2017 Google LLC
16409 *
16410 * Licensed under the Apache License, Version 2.0 (the "License");
16411 * you may not use this file except in compliance with the License.
16412 * You may obtain a copy of the License at
16413 *
16414 * http://www.apache.org/licenses/LICENSE-2.0
16415 *
16416 * Unless required by applicable law or agreed to in writing, software
16417 * distributed under the License is distributed on an "AS IS" BASIS,
16418 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16419 * See the License for the specific language governing permissions and
16420 * limitations under the License.
16421 */
16422/**
16423 * Internal transaction object responsible for accumulating the mutations to
16424 * perform and the base versions for any documents read.
16425 */ class _a {
16426 constructor(t) {
16427 this.datastore = t,
16428 // The version of each document that was read during this transaction.
16429 this.readVersions = new Map, this.mutations = [], this.committed = !1,
16430 /**
16431 * A deferred usage error that occurred previously in this transaction that
16432 * will cause the transaction to fail once it actually commits.
16433 */
16434 this.lastWriteError = null,
16435 /**
16436 * Set of documents that have been written in the transaction.
16437 *
16438 * When there's more than one write to the same key in a transaction, any
16439 * writes after the first are handled differently.
16440 */
16441 this.writtenDocs = new Set;
16442 }
16443 async lookup(t) {
16444 if (this.ensureCommitNotCalled(), this.mutations.length > 0) throw new U(L.INVALID_ARGUMENT, "Firestore transactions require all reads to be executed before all writes.");
16445 const e = await async function(t, e) {
16446 const n = B(t), s = Bs(n.It) + "/documents", i = {
16447 documents: e.map((t => Os(n.It, t)))
16448 }, r = await n._o("BatchGetDocuments", s, i, e.length), o = new Map;
16449 r.forEach((t => {
16450 const e = Ks(n.It, t);
16451 o.set(e.key.toString(), e);
16452 }));
16453 const u = [];
16454 return e.forEach((t => {
16455 const e = o.get(t.toString());
16456 F(!!e), u.push(e);
16457 })), u;
16458 }(this.datastore, t);
16459 return e.forEach((t => this.recordVersion(t))), e;
16460 }
16461 set(t, e) {
16462 this.write(e.toMutation(t, this.precondition(t))), this.writtenDocs.add(t.toString());
16463 }
16464 update(t, e) {
16465 try {
16466 this.write(e.toMutation(t, this.preconditionForUpdate(t)));
16467 } catch (t) {
16468 this.lastWriteError = t;
16469 }
16470 this.writtenDocs.add(t.toString());
16471 }
16472 delete(t) {
16473 this.write(new Jn(t, this.precondition(t))), this.writtenDocs.add(t.toString());
16474 }
16475 async commit() {
16476 if (this.ensureCommitNotCalled(), this.lastWriteError) throw this.lastWriteError;
16477 const t = this.readVersions;
16478 // For each mutation, note that the doc was written.
16479 this.mutations.forEach((e => {
16480 t.delete(e.key.toString());
16481 })),
16482 // For each document that was read but not written to, we want to perform
16483 // a `verify` operation.
16484 t.forEach(((t, e) => {
16485 const n = at.fromPath(e);
16486 this.mutations.push(new Yn(n, this.precondition(n)));
16487 })), await async function(t, e) {
16488 const n = B(t), s = Bs(n.It) + "/documents", i = {
16489 writes: e.map((t => Qs(n.It, t)))
16490 };
16491 await n.ao("Commit", s, i);
16492 }(this.datastore, this.mutations), this.committed = !0;
16493 }
16494 recordVersion(t) {
16495 let e;
16496 if (t.isFoundDocument()) e = t.version; else {
16497 if (!t.isNoDocument()) throw M();
16498 // Represent a deleted doc using SnapshotVersion.min().
16499 e = it.min();
16500 }
16501 const n = this.readVersions.get(t.key.toString());
16502 if (n) {
16503 if (!e.isEqual(n))
16504 // This transaction will fail no matter what.
16505 throw new U(L.ABORTED, "Document version changed between two reads.");
16506 } else this.readVersions.set(t.key.toString(), e);
16507 }
16508 /**
16509 * Returns the version of this document when it was read in this transaction,
16510 * as a precondition, or no precondition if it was not read.
16511 */ precondition(t) {
16512 const e = this.readVersions.get(t.toString());
16513 return !this.writtenDocs.has(t.toString()) && e ? e.isEqual(it.min()) ? Fn.exists(!1) : Fn.updateTime(e) : Fn.none();
16514 }
16515 /**
16516 * Returns the precondition for a document if the operation is an update.
16517 */ preconditionForUpdate(t) {
16518 const e = this.readVersions.get(t.toString());
16519 // The first time a document is written, we want to take into account the
16520 // read time and existence
16521 if (!this.writtenDocs.has(t.toString()) && e) {
16522 if (e.isEqual(it.min()))
16523 // The document doesn't exist, so fail the transaction.
16524 // This has to be validated locally because you can't send a
16525 // precondition that a document does not exist without changing the
16526 // semantics of the backend write to be an insert. This is the reverse
16527 // of what we want, since we want to assert that the document doesn't
16528 // exist but then send the update and have it fail. Since we can't
16529 // express that to the backend, we have to validate locally.
16530 // Note: this can change once we can send separate verify writes in the
16531 // transaction.
16532 throw new U(L.INVALID_ARGUMENT, "Can't update a document that doesn't exist.");
16533 // Document exists, base precondition on document update time.
16534 return Fn.updateTime(e);
16535 }
16536 // Document was not read, so we just use the preconditions for a blind
16537 // update.
16538 return Fn.exists(!0);
16539 }
16540 write(t) {
16541 this.ensureCommitNotCalled(), this.mutations.push(t);
16542 }
16543 ensureCommitNotCalled() {}
16544}
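/*
 * Usage sketch: the public runTransaction() API that drives an internal
 * transaction object like the one above. All reads must happen before any
 * writes, as enforced in lookup(). Assumes an initialized `db`; the document
 * path is a placeholder.
 *
 *   import { doc, runTransaction } from "firebase/firestore";
 *
 *   async function incrementPopulation(db) {
 *     const ref = doc(db, "cities", "SF");
 *     await runTransaction(db, async (txn) => {
 *       const snap = await txn.get(ref);               // read first
 *       const population = (snap.data().population || 0) + 1;
 *       txn.update(ref, { population });               // then write
 *     });
 *   }
 */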
16545
16546/**
16547 * @license
16548 * Copyright 2019 Google LLC
16549 *
16550 * Licensed under the Apache License, Version 2.0 (the "License");
16551 * you may not use this file except in compliance with the License.
16552 * You may obtain a copy of the License at
16553 *
16554 * http://www.apache.org/licenses/LICENSE-2.0
16555 *
16556 * Unless required by applicable law or agreed to in writing, software
16557 * distributed under the License is distributed on an "AS IS" BASIS,
16558 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16559 * See the License for the specific language governing permissions and
16560 * limitations under the License.
16561 */
16562/**
16563 * TransactionRunner encapsulates the logic needed to run and retry transactions
16564 * with backoff.
16565 */ class wa {
16566 constructor(t, e, n, s, i) {
16567 this.asyncQueue = t, this.datastore = e, this.options = n, this.updateFunction = s,
16568 this.deferred = i, this.kc = n.maxAttempts, this.xo = new nu(this.asyncQueue, "transaction_retry" /* TransactionRetry */);
16569 }
16570 /** Runs the transaction and sets the result on deferred. */ run() {
16571 this.kc -= 1, this.Oc();
16572 }
16573 Oc() {
16574 this.xo.Ro((async () => {
16575 const t = new _a(this.datastore), e = this.Mc(t);
16576 e && e.then((e => {
16577 this.asyncQueue.enqueueAndForget((() => t.commit().then((() => {
16578 this.deferred.resolve(e);
16579 })).catch((t => {
16580 this.Fc(t);
16581 }))));
16582 })).catch((t => {
16583 this.Fc(t);
16584 }));
16585 }));
16586 }
16587 Mc(t) {
16588 try {
16589 const e = this.updateFunction(t);
16590 return !re(e) && e.catch && e.then ? e : (this.deferred.reject(Error("Transaction callback must return a Promise")),
16591 null);
16592 } catch (t) {
16593 // Do not retry errors thrown by user provided updateFunction.
16594 return this.deferred.reject(t), null;
16595 }
16596 }
16597 Fc(t) {
16598 this.kc > 0 && this.$c(t) ? (this.kc -= 1, this.asyncQueue.enqueueAndForget((() => (this.Oc(),
16599 Promise.resolve())))) : this.deferred.reject(t);
16600 }
16601 $c(t) {
16602 if ("FirebaseError" === t.name) {
16603 // In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
16604 // non-matching document versions with ABORTED. These errors should be retried.
16605 const e = t.code;
16606 return "aborted" === e || "failed-precondition" === e || "already-exists" === e || !es(e);
16607 }
16608 return !1;
16609 }
16610}
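/*
 * Usage sketch: the retry budget above comes from the transaction options
 * (maxAttempts). The modular API appears to accept these options as a third
 * argument to runTransaction(); illustrative only.
 *
 *   import { runTransaction } from "firebase/firestore";
 *
 *   function runWithFewerRetries(db, updateFn) {
 *     // Retry at most twice after the initial attempt.
 *     return runTransaction(db, updateFn, { maxAttempts: 3 });
 *   }
 */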
16611
16612/**
16613 * @license
16614 * Copyright 2017 Google LLC
16615 *
16616 * Licensed under the Apache License, Version 2.0 (the "License");
16617 * you may not use this file except in compliance with the License.
16618 * You may obtain a copy of the License at
16619 *
16620 * http://www.apache.org/licenses/LICENSE-2.0
16621 *
16622 * Unless required by applicable law or agreed to in writing, software
16623 * distributed under the License is distributed on an "AS IS" BASIS,
16624 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16625 * See the License for the specific language governing permissions and
16626 * limitations under the License.
16627 */
16628/**
16629 * FirestoreClient is a top-level class that constructs and owns all of the
16630 * pieces of the client SDK architecture. It is responsible for creating the
16631 * async queue that is shared by all of the other components in the system.
16632 */
16633class ma {
16634 constructor(t, e,
16635 /**
16636 * Asynchronous queue responsible for all of our internal processing. When
16637 * we get incoming work from the user (via public API) or the network
16638 * (incoming GRPC messages), we should always schedule onto this queue.
16639 * This ensures all of our work is properly serialized (e.g. we don't
16640 * start processing a new operation while the previous one is waiting for
16641 * an async I/O to complete).
16642 */
16643 n, s) {
16644 this.authCredentials = t, this.appCheckCredentials = e, this.asyncQueue = n, this.databaseInfo = s,
16645 this.user = v.UNAUTHENTICATED, this.clientId = Z.R(), this.authCredentialListener = () => Promise.resolve(),
16646 this.appCheckCredentialListener = () => Promise.resolve(), this.authCredentials.start(n, (async t => {
16647 x("FirestoreClient", "Received user=", t.uid), await this.authCredentialListener(t),
16648 this.user = t;
16649 })), this.appCheckCredentials.start(n, (t => (x("FirestoreClient", "Received new app check token=", t),
16650 this.appCheckCredentialListener(t, this.user))));
16651 }
16652 async getConfiguration() {
16653 return {
16654 asyncQueue: this.asyncQueue,
16655 databaseInfo: this.databaseInfo,
16656 clientId: this.clientId,
16657 authCredentials: this.authCredentials,
16658 appCheckCredentials: this.appCheckCredentials,
16659 initialUser: this.user,
16660 maxConcurrentLimboResolutions: 100
16661 };
16662 }
16663 setCredentialChangeListener(t) {
16664 this.authCredentialListener = t;
16665 }
16666 setAppCheckTokenChangeListener(t) {
16667 this.appCheckCredentialListener = t;
16668 }
16669 /**
16670 * Checks that the client has not been terminated. Ensures that other methods on
16671 * this class cannot be called after the client is terminated.
16672 */ verifyNotTerminated() {
16673 if (this.asyncQueue.isShuttingDown) throw new U(L.FAILED_PRECONDITION, "The client has already been terminated.");
16674 }
16675 terminate() {
16676 this.asyncQueue.enterRestrictedMode();
16677 const t = new q;
16678 return this.asyncQueue.enqueueAndForgetEvenWhileRestricted((async () => {
16679 try {
16680 this.onlineComponents && await this.onlineComponents.terminate(), this.offlineComponents && await this.offlineComponents.terminate(),
16681 // The credentials provider must be terminated after shutting down the
16682 // RemoteStore as it will prevent the RemoteStore from retrieving auth
16683 // tokens.
16684 this.authCredentials.shutdown(), this.appCheckCredentials.shutdown(), t.resolve();
16685 } catch (e) {
16686 const n = Bu(e, "Failed to shutdown persistence");
16687 t.reject(n);
16688 }
16689 })), t.promise;
16690 }
16691}
16692
16693async function ga(t, e) {
16694 t.asyncQueue.verifyOperationInProgress(), x("FirestoreClient", "Initializing OfflineComponentProvider");
16695 const n = await t.getConfiguration();
16696 await e.initialize(n);
16697 let s = n.initialUser;
16698 t.setCredentialChangeListener((async t => {
16699 s.isEqual(t) || (await Ro(e.localStore, t), s = t);
16700 })),
16701 // When a user calls clearPersistence() in one client, all other clients
16702 // need to be terminated to allow the delete to succeed.
16703 e.persistence.setDatabaseDeletedListener((() => t.terminate())), t.offlineComponents = e;
16704}
16705
16706async function ya(t, e) {
16707 t.asyncQueue.verifyOperationInProgress();
16708 const n = await pa(t);
16709 x("FirestoreClient", "Initializing OnlineComponentProvider");
16710 const s = await t.getConfiguration();
16711 await e.initialize(n, s),
16712 // The CredentialChangeListener of the online component provider takes
16713 // precedence over the offline component provider.
16714 t.setCredentialChangeListener((t => ku(e.remoteStore, t))), t.setAppCheckTokenChangeListener(((t, n) => ku(e.remoteStore, n))),
16715 t.onlineComponents = e;
16716}
16717
16718async function pa(t) {
16719 return t.offlineComponents || (x("FirestoreClient", "Using default OfflineComponentProvider"),
16720 await ga(t, new Bc)), t.offlineComponents;
16721}
16722
16723async function Ia(t) {
16724 return t.onlineComponents || (x("FirestoreClient", "Using default OnlineComponentProvider"),
16725 await ya(t, new qc)), t.onlineComponents;
16726}
16727
16728function Ta(t) {
16729 return pa(t).then((t => t.persistence));
16730}
16731
16732function Ea(t) {
16733 return pa(t).then((t => t.localStore));
16734}
16735
16736function Aa(t) {
16737 return Ia(t).then((t => t.remoteStore));
16738}
16739
16740function Ra(t) {
16741 return Ia(t).then((t => t.syncEngine));
16742}
16743
16744function ba(t) {
16745 return Ia(t).then((t => t.datastore));
16746}
16747
16748async function Pa(t) {
16749 const e = await Ia(t), n = e.eventManager;
16750 return n.onListen = uc.bind(null, e.syncEngine), n.onUnlisten = ac.bind(null, e.syncEngine),
16751 n;
16752}
16753
16754/** Enables the network connection and re-enqueues all pending operations. */ function va(t) {
16755 return t.asyncQueue.enqueue((async () => {
16756 const e = await Ta(t), n = await Aa(t);
16757 return e.setNetworkEnabled(!0), function(t) {
16758 const e = B(t);
16759 return e._u.delete(0 /* UserDisabled */), hu(e);
16760 }(n);
16761 }));
16762}
16763
16764/** Disables the network connection. Pending operations will not complete. */ function Va(t) {
16765 return t.asyncQueue.enqueue((async () => {
16766 const e = await Ta(t), n = await Aa(t);
16767 return e.setNetworkEnabled(!1), async function(t) {
16768 const e = B(t);
16769 e._u.add(0 /* UserDisabled */), await lu(e),
16770 // Set the OnlineState to Offline so get()s return from cache, etc.
16771 e.gu.set("Offline" /* Offline */);
16772 }(n);
16773 }));
16774}
16775
16776/**
16777 * Returns a Promise that resolves when all writes that were pending at the time
16778 * this method was called received server acknowledgement. An acknowledgement
16779 * can be either acceptance or rejection.
16780 */ function Sa(t, e) {
16781 const n = new q;
16782 return t.asyncQueue.enqueueAndForget((async () => async function(t, e, n) {
16783 try {
16784 const s = await function(t, e) {
16785 const n = B(t);
16786 return n.persistence.runTransaction("read document", "readonly", (t => n.localDocuments.getDocument(t, e)));
16787 }(t, e);
16788 s.isFoundDocument() ? n.resolve(s) : s.isNoDocument() ? n.resolve(null) : n.reject(new U(L.UNAVAILABLE, "Failed to get document from cache. (However, this document may exist on the server. Run again without setting 'source' in the GetOptions to attempt to retrieve the document from the server.)"));
16789 } catch (t) {
16790 const s = Bu(t, `Failed to get document '${e}' from cache`);
16791 n.reject(s);
16792 }
16793 }
16794 /**
16795 * Retrieves a latency-compensated document from the backend via a
16796 * SnapshotListener.
16797 */ (await Ea(t), e, n))), n.promise;
16798}
16799
16800function Da(t, e, n = {}) {
16801 const s = new q;
16802 return t.asyncQueue.enqueueAndForget((async () => function(t, e, n, s, i) {
16803 const r = new aa({
16804 next: r => {
16805 // Remove query first before passing event to user to avoid
16806 // user actions affecting the now stale query.
16807 e.enqueueAndForget((() => ju(t, o)));
16808 const u = r.docs.has(n);
16809 !u && r.fromCache ?
16810 // TODO(dimond): If we're online and the document doesn't
16811 // exist then we resolve with a doc.exists set to false. If
16812 // we're offline however, we reject the Promise in this
16813 // case. Two options: 1) Cache the negative response from
16814 // the server so we can deliver that even when you're
16815 // offline 2) Actually reject the Promise in the online case
16816 // if the document doesn't exist.
16817 i.reject(new U(L.UNAVAILABLE, "Failed to get document because the client is offline.")) : u && r.fromCache && s && "server" === s.source ? i.reject(new U(L.UNAVAILABLE, 'Failed to get document from server. (However, this document does exist in the local cache. Run again without setting source to "server" to retrieve the cached document.)')) : i.resolve(r);
16818 },
16819 error: t => i.reject(t)
16820 }), o = new Ju(sn(n.path), r, {
16821 includeMetadataChanges: !0,
16822 Nu: !0
16823 });
16824 return Qu(t, o);
16825 }(await Pa(t), t.asyncQueue, e, n, s))), s.promise;
16826}
16827
16828function Ca(t, e) {
16829 const n = new q;
16830 return t.asyncQueue.enqueueAndForget((async () => async function(t, e, n) {
16831 try {
16832 const s = await xo(t, e,
16833 /* usePreviousResults= */ !0), i = new sc(e, s.Hi), r = i.Wu(s.documents), o = i.applyChanges(r,
16834 /* updateLimboDocuments= */ !1);
16835 n.resolve(o.snapshot);
16836 } catch (t) {
16837 const s = Bu(t, `Failed to execute query '${e}' against cache`);
16838 n.reject(s);
16839 }
16840 }
16841 /**
16842 * Retrieves a latency-compensated query snapshot from the backend via a
16843 * SnapshotListener.
16844 */ (await Ea(t), e, n))), n.promise;
16845}
16846
16847function xa(t, e, n = {}) {
16848 const s = new q;
16849 return t.asyncQueue.enqueueAndForget((async () => function(t, e, n, s, i) {
16850 const r = new aa({
16851 next: n => {
16852 // Remove query first before passing event to user to avoid
16853 // user actions affecting the now stale query.
16854 e.enqueueAndForget((() => ju(t, o))), n.fromCache && "server" === s.source ? i.reject(new U(L.UNAVAILABLE, 'Failed to get documents from server. (However, these documents may exist in the local cache. Run again without setting source to "server" to retrieve the cached documents.)')) : i.resolve(n);
16855 },
16856 error: t => i.reject(t)
16857 }), o = new Ju(n, r, {
16858 includeMetadataChanges: !0,
16859 Nu: !0
16860 });
16861 return Qu(t, o);
16862 }(await Pa(t), t.asyncQueue, e, n, s))), s.promise;
16863}
16864
16865function Na(t, e) {
16866 const n = new aa(e);
16867 return t.asyncQueue.enqueueAndForget((async () => function(t, e) {
16868 B(t).Ru.add(e),
16869 // Immediately fire an initial event, indicating all existing listeners
16870 // are in-sync.
16871 e.next();
16872 }(await Pa(t), n))), () => {
16873 n.bc(), t.asyncQueue.enqueueAndForget((async () => function(t, e) {
16874 B(t).Ru.delete(e);
16875 }(await Pa(t), n)));
16876 };
16877}
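/*
 * Usage sketch: this helper appears to back the public onSnapshotsInSync()
 * listener, which fires once immediately and then whenever all active
 * listeners are in sync with each other.
 *
 *   import { onSnapshotsInSync } from "firebase/firestore";
 *
 *   const unsubscribe = onSnapshotsInSync(db, () => {
 *     console.log("all snapshot listeners are in sync");
 *   });
 *   // Later, stop listening:
 *   // unsubscribe();
 */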
16878
16879/**
16880 * Takes an updateFunction in which a set of reads and writes can be performed
16881 * atomically. In the updateFunction, the client can read and write values
16882 * using the supplied transaction object. After the updateFunction, all
16883 * changes will be committed. If a retryable error occurs (ex: some other
16884 * client has changed any of the data referenced), then the updateFunction
16885 * will be called again after a backoff. If the updateFunction still fails
16886 * after all retries, then the transaction will be rejected.
16887 *
16888 * The transaction object passed to the updateFunction contains methods for
16889 * accessing documents and collections. Unlike other datastore access, data
16890 * accessed with the transaction will not reflect local changes that have not
16891 * been committed. For this reason, it is required that all reads are
16892 * performed before any writes. Transactions must be performed while online.
16893 */ function ka(t, e, n, s) {
16894 const i = function(t, e) {
16895 let n;
16896 n = "string" == typeof t ? (new TextEncoder).encode(t) : t;
16897 return function(t, e) {
16898 return new ha(t, e);
16899 }(function(t, e) {
16900 if (t instanceof Uint8Array) return ca(t, e);
16901 if (t instanceof ArrayBuffer) return ca(new Uint8Array(t), e);
16902 if (t instanceof ReadableStream) return t.getReader();
16903 throw new Error("Source of `toByteStreamReader` has to be an ArrayBuffer or ReadableStream");
16904 }(n), e);
16905 }
16906 /**
16907 * @license
16908 * Copyright 2020 Google LLC
16909 *
16910 * Licensed under the Apache License, Version 2.0 (the "License");
16911 * you may not use this file except in compliance with the License.
16912 * You may obtain a copy of the License at
16913 *
16914 * http://www.apache.org/licenses/LICENSE-2.0
16915 *
16916 * Unless required by applicable law or agreed to in writing, software
16917 * distributed under the License is distributed on an "AS IS" BASIS,
16918 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16919 * See the License for the specific language governing permissions and
16920 * limitations under the License.
16921 */ (n, eu(e));
16922 t.asyncQueue.enqueueAndForget((async () => {
16923 $c(await Ra(t), i, s);
16924 }));
16925}
16926
16927function Oa(t, e) {
16928 return t.asyncQueue.enqueue((async () => function(t, e) {
16929 const n = B(t);
16930 return n.persistence.runTransaction("Get named query", "readonly", (t => n.Ns.getNamedQuery(t, e)));
16931 }(await Ea(t), e)));
16932}
16933
16934class Ma {
16935 constructor() {
16936 // The last promise in the queue.
16937 this.Bc = Promise.resolve(),
16938 // A list of retryable operations. Retryable operations are run in order and
16939 // retried with backoff.
16940 this.Lc = [],
16941 // Is this AsyncQueue being shut down? Once it is set to true, it will not
16942 // be changed again.
16943 this.Uc = !1,
16944 // Operations scheduled to be queued in the future. Operations are
16945 // automatically removed after they are run or canceled.
16946 this.qc = [],
16947 // visible for testing
16948 this.Kc = null,
16949 // Flag set while there's an outstanding AsyncQueue operation, used for
16950 // assertion sanity-checks.
16951 this.Gc = !1,
16952 // Enabled during shutdown on Safari to prevent future access to IndexedDB.
16953 this.Qc = !1,
16954 // List of TimerIds to fast-forward delays for.
16955 this.jc = [],
16956 // Backoff timer used to schedule retries for retryable operations
16957 this.xo = new nu(this, "async_queue_retry" /* AsyncQueueRetry */),
16958 // Visibility handler that triggers an immediate retry of all retryable
16959 // operations. Meant to speed up recovery when we regain file system access
16960 // after the page comes into the foreground.
16961 this.Wc = () => {
16962 const t = tu();
16963 t && x("AsyncQueue", "Visibility state changed to " + t.visibilityState), this.xo.Po();
16964 };
16965 const t = tu();
16966 t && "function" == typeof t.addEventListener && t.addEventListener("visibilitychange", this.Wc);
16967 }
16968 get isShuttingDown() {
16969 return this.Uc;
16970 }
16971 /**
16972 * Adds a new operation to the queue without waiting for it to complete (i.e.
16973 * we ignore the Promise result).
16974 */ enqueueAndForget(t) {
16975 // eslint-disable-next-line @typescript-eslint/no-floating-promises
16976 this.enqueue(t);
16977 }
16978 enqueueAndForgetEvenWhileRestricted(t) {
16979 this.zc(),
16980 // eslint-disable-next-line @typescript-eslint/no-floating-promises
16981 this.Hc(t);
16982 }
16983 enterRestrictedMode(t) {
16984 if (!this.Uc) {
16985 this.Uc = !0, this.Qc = t || !1;
16986 const e = tu();
16987 e && "function" == typeof e.removeEventListener && e.removeEventListener("visibilitychange", this.Wc);
16988 }
16989 }
16990 enqueue(t) {
16991 if (this.zc(), this.Uc)
16992 // Return a Promise which never resolves.
16993 return new Promise((() => {}));
16994 // Create a deferred Promise that we can return to the callee. This
16995 // allows us to return a "hanging Promise" only to the callee and still
16996 // advance the queue even when the operation is not run.
16997 const e = new q;
16998 return this.Hc((() => this.Uc && this.Qc ? Promise.resolve() : (t().then(e.resolve, e.reject),
16999 e.promise))).then((() => e.promise));
17000 }
17001 enqueueRetryable(t) {
17002 this.enqueueAndForget((() => (this.Lc.push(t), this.Jc())));
17003 }
17004 /**
17005 * Runs the next operation from the retryable queue. If the operation fails,
17006 * reschedules with backoff.
17007 */ async Jc() {
17008 if (0 !== this.Lc.length) {
17009 try {
17010 await this.Lc[0](), this.Lc.shift(), this.xo.reset();
17011 } catch (t) {
17012 if (!St(t)) throw t;
17013 // Failure will be handled by AsyncQueue
17014 x("AsyncQueue", "Operation failed with retryable error: " + t);
17015 }
17016 this.Lc.length > 0 &&
17017 // If there are additional operations, we re-schedule `retryNextOp()`.
17018 // This is necessary to run retryable operations that failed during
17019 // their initial attempt since we don't know whether they are already
17020 // enqueued. If, for example, `op1`, `op2`, `op3` are enqueued and `op1`
17021 // needs to be re-run, we will run `op1`, `op1`, `op2` using the
17022 // already enqueued calls to `retryNextOp()`. `op3()` will then run in the
17023 // call scheduled here.
17024 // Since `backoffAndRun()` cancels an existing backoff and schedules a
17025 // new backoff on every call, there is only ever a single additional
17026 // operation in the queue.
17027 this.xo.Ro((() => this.Jc()));
17028 }
17029 }
17030 Hc(t) {
17031 const e = this.Bc.then((() => (this.Gc = !0, t().catch((t => {
17032 this.Kc = t, this.Gc = !1;
17033 const e =
17034 /**
17035 * Chrome includes Error.message in Error.stack. Other browsers do not.
17036 * This returns expected output of message + stack when available.
17037 * @param error - Error or FirestoreError
17038 */
17039 function(t) {
17040 let e = t.message || "";
17041 t.stack && (e = t.stack.includes(t.message) ? t.stack : t.message + "\n" + t.stack);
17042 return e;
17043 }
17044 /**
17045 * @license
17046 * Copyright 2017 Google LLC
17047 *
17048 * Licensed under the Apache License, Version 2.0 (the "License");
17049 * you may not use this file except in compliance with the License.
17050 * You may obtain a copy of the License at
17051 *
17052 * http://www.apache.org/licenses/LICENSE-2.0
17053 *
17054 * Unless required by applicable law or agreed to in writing, software
17055 * distributed under the License is distributed on an "AS IS" BASIS,
17056 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17057 * See the License for the specific language governing permissions and
17058 * limitations under the License.
17059 */ (t);
17060 // Re-throw the error so that this.tail becomes a rejected Promise and
17061 // all further attempts to chain (via .then) will just short-circuit
17062 // and return the rejected Promise.
17063 throw N("INTERNAL UNHANDLED ERROR: ", e), t;
17064 })).then((t => (this.Gc = !1, t))))));
17065 return this.Bc = e, e;
17066 }
17067 enqueueAfterDelay(t, e, n) {
17068 this.zc(),
17069 // Fast-forward delays for timerIds that have been overridden.
17070 this.jc.indexOf(t) > -1 && (e = 0);
17071 const s = $u.createAndSchedule(this, t, e, n, (t => this.Yc(t)));
17072 return this.qc.push(s), s;
17073 }
17074 zc() {
17075 this.Kc && M();
17076 }
17077 verifyOperationInProgress() {}
17078 /**
17079 * Waits until all currently queued tasks are finished executing. Delayed
17080 * operations are not run.
17081 */ async Xc() {
17082 // Operations in the queue prior to draining may have enqueued additional
17083 // operations. Keep draining the queue until the tail is no longer advanced,
17084 // which indicates that no more new operations were enqueued and that all
17085 // operations were executed.
17086 let t;
17087 do {
17088 t = this.Bc, await t;
17089 } while (t !== this.Bc);
17090 }
17091 /**
17092 * For Tests: Determine if a delayed operation with a particular TimerId
17093 * exists.
17094 */ Zc(t) {
17095 for (const e of this.qc) if (e.timerId === t) return !0;
17096 return !1;
17097 }
17098 /**
17099 * For Tests: Runs some or all delayed operations early.
17100 *
17101 * @param lastTimerId - Delayed operations up to and including this TimerId
17102 * will be drained. Pass TimerId.All to run all delayed operations.
17103 * @returns a Promise that resolves once all operations have been run.
17104 */ ta(t) {
17105 // Note that draining may generate more delayed ops, so we do that first.
17106 return this.Xc().then((() => {
17107 // Run ops in the same order they'd run if they ran naturally.
17108 this.qc.sort(((t, e) => t.targetTimeMs - e.targetTimeMs));
17109 for (const e of this.qc) if (e.skipDelay(), "all" /* All */ !== t && e.timerId === t) break;
17110 return this.Xc();
17111 }));
17112 }
17113 /**
17114 * For Tests: Skip all subsequent delays for a timer id.
17115 */ ea(t) {
17116 this.jc.push(t);
17117 }
17118 /** Called once a DelayedOperation is run or canceled. */ Yc(t) {
17119 // NOTE: indexOf / slice are O(n), but delayedOperations is expected to be small.
17120 const e = this.qc.indexOf(t);
17121 this.qc.splice(e, 1);
17122 }
17123}
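/*
 * Editorial sketch: the core of the queue above is a promise "tail" that every
 * enqueued operation chains onto, so operations always run one at a time and
 * in order. Illustrative names only.
 *
 *   class SerialQueue {
 *     constructor() {
 *       this.tail = Promise.resolve();
 *     }
 *     enqueue(op) {
 *       const result = this.tail.then(() => op());
 *       // Keep the chain alive even if op() rejects.
 *       this.tail = result.catch(() => {});
 *       return result;
 *     }
 *   }
 */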
17124
17125function Fa(t) {
17126 /**
17127 * Returns true if obj is an object and contains at least one of the specified
17128 * methods.
17129 */
17130 return function(t, e) {
17131 if ("object" != typeof t || null === t) return !1;
17132 const n = t;
17133 for (const t of e) if (t in n && "function" == typeof n[t]) return !0;
17134 return !1;
17135 }
17136 /**
17137 * @license
17138 * Copyright 2020 Google LLC
17139 *
17140 * Licensed under the Apache License, Version 2.0 (the "License");
17141 * you may not use this file except in compliance with the License.
17142 * You may obtain a copy of the License at
17143 *
17144 * http://www.apache.org/licenses/LICENSE-2.0
17145 *
17146 * Unless required by applicable law or agreed to in writing, software
17147 * distributed under the License is distributed on an "AS IS" BASIS,
17148 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17149 * See the License for the specific language governing permissions and
17150 * limitations under the License.
17151 */
17152 /**
17153 * Represents the task of loading a Firestore bundle. It provides progress of bundle
17154 * loading, as well as task completion and error events.
17155 *
17156 * The API is compatible with `Promise<LoadBundleTaskProgress>`.
17157 */ (t, [ "next", "error", "complete" ]);
17158}
17159
17160class $a {
17161 constructor() {
17162 this._progressObserver = {}, this._taskCompletionResolver = new q, this._lastProgress = {
17163 taskState: "Running",
17164 totalBytes: 0,
17165 totalDocuments: 0,
17166 bytesLoaded: 0,
17167 documentsLoaded: 0
17168 };
17169 }
17170 /**
17171 * Registers functions to listen to bundle loading progress events.
17172 * @param next - Called when there is a progress update from bundle loading. Typically `next` calls occur
17173 * each time a Firestore document is loaded from the bundle.
17174 * @param error - Called when an error occurs during bundle loading. The task aborts after reporting the
17175 * error, and there should be no more updates after this.
17176 * @param complete - Called when the loading task is complete.
17177 */ onProgress(t, e, n) {
17178 this._progressObserver = {
17179 next: t,
17180 error: e,
17181 complete: n
17182 };
17183 }
17184 /**
17185 * Implements the `Promise<LoadBundleTaskProgress>.catch` interface.
17186 *
17187 * @param onRejected - Called when an error occurs during bundle loading.
17188 */ catch(t) {
17189 return this._taskCompletionResolver.promise.catch(t);
17190 }
17191 /**
17192 * Implements the `Promise<LoadBundleTaskProgress>.then` interface.
17193 *
17194 * @param onFulfilled - Called on the completion of the loading task with a final `LoadBundleTaskProgress` update.
17195 * The update will always have its `taskState` set to `"Success"`.
17196 * @param onRejected - Called when an error occurs during bundle loading.
17197 */ then(t, e) {
17198 return this._taskCompletionResolver.promise.then(t, e);
17199 }
17200 /**
17201 * Notifies all observers that bundle loading has completed, with a provided
17202 * `LoadBundleTaskProgress` object.
17203 *
17204 * @private
17205 */ _completeWith(t) {
17206 this._updateProgress(t), this._progressObserver.complete && this._progressObserver.complete(),
17207 this._taskCompletionResolver.resolve(t);
17208 }
17209 /**
17210 * Notifies all observers that bundle loading has failed, with a provided
17211 * `Error` as the reason.
17212 *
17213 * @private
17214 */ _failWith(t) {
17215 this._lastProgress.taskState = "Error", this._progressObserver.next && this._progressObserver.next(this._lastProgress),
17216 this._progressObserver.error && this._progressObserver.error(t), this._taskCompletionResolver.reject(t);
17217 }
17218 /**
17219 * Notifies a progress update of loading a bundle.
17220 * @param progress - The new progress.
17221 *
17222 * @private
17223 */ _updateProgress(t) {
17224 this._lastProgress = t, this._progressObserver.next && this._progressObserver.next(t);
17225 }
17226}
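/*
 * Usage sketch: observing progress through the task returned by loadBundle(),
 * which can also be awaited like a Promise of the final progress update.
 * Assumes an initialized `db` and bundle data obtained elsewhere.
 *
 *   import { loadBundle } from "firebase/firestore";
 *
 *   function loadWithProgress(db, bundleData) {
 *     const task = loadBundle(db, bundleData);
 *     task.onProgress(
 *       (p) => console.log(`${p.documentsLoaded}/${p.totalDocuments} documents`),
 *       (err) => console.error("bundle load failed", err),
 *       () => console.log("bundle load complete")
 *     );
 *     return task;
 *   }
 */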
17227
17228/**
17229 * @license
17230 * Copyright 2020 Google LLC
17231 *
17232 * Licensed under the Apache License, Version 2.0 (the "License");
17233 * you may not use this file except in compliance with the License.
17234 * You may obtain a copy of the License at
17235 *
17236 * http://www.apache.org/licenses/LICENSE-2.0
17237 *
17238 * Unless required by applicable law or agreed to in writing, software
17239 * distributed under the License is distributed on an "AS IS" BASIS,
17240 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17241 * See the License for the specific language governing permissions and
17242 * limitations under the License.
17243 */
17244/** DOMException error code constants. */ const Ba = -1;
17245
17246/**
17247 * The Cloud Firestore service interface.
17248 *
17249 * Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
17250 */
17251class La extends Xc {
17252 /** @hideconstructor */
17253 constructor(t, e, n, s) {
17254 super(t, e, n, s),
17255 /**
17256 * Whether it's a {@link Firestore} or Firestore Lite instance.
17257 */
17258 this.type = "firestore", this._queue = new Ma, this._persistenceKey = (null == s ? void 0 : s.name) || "[DEFAULT]";
17259 }
17260 _terminate() {
17261 return this._firestoreClient ||
17262 // The client must be initialized to ensure that all subsequent API
17263 // usage throws an exception.
17264 Ga(this), this._firestoreClient.terminate();
17265 }
17266}
17267
17268/**
17269 * Initializes a new instance of {@link Firestore} with the provided settings.
17270 * Can only be called before any other function, including
17271 * {@link (getFirestore:1)}. If the custom settings are empty, this function is
17272 * equivalent to calling {@link (getFirestore:1)}.
17273 *
17274 * @param app - The {@link @firebase/app#FirebaseApp} with which the {@link Firestore} instance will
17275 * be associated.
17276 * @param settings - A settings object to configure the {@link Firestore} instance.
17277 * @param databaseId - The name of the database.
17278 * @returns A newly initialized {@link Firestore} instance.
17279 */ function Ua(t, e, n) {
17280 n || (n = "(default)");
17281 const s = _getProvider(t, "firestore");
17282 if (s.isInitialized(n)) {
17283 const t = s.getImmediate({
17284 identifier: n
17285 }), i = s.getOptions(n);
17286 if (w(i, e)) return t;
17287 throw new U(L.FAILED_PRECONDITION, "initializeFirestore() has already been called with different options. To avoid this error, call initializeFirestore() with the same options as when it was originally called, or call getFirestore() to return the already initialized instance.");
17288 }
17289 if (void 0 !== e.cacheSizeBytes && -1 !== e.cacheSizeBytes && e.cacheSizeBytes < 1048576) throw new U(L.INVALID_ARGUMENT, "cacheSizeBytes must be at least 1048576");
17290 return s.initialize({
17291 options: e,
17292 instanceIdentifier: n
17293 });
17294}
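/*
 * Usage sketch: initializing Firestore with explicit settings before any other
 * call. cacheSizeBytes must be at least 1048576 bytes (or -1 for unlimited),
 * as validated above; the app config is a placeholder.
 *
 *   import { initializeApp } from "firebase/app";
 *   import { initializeFirestore, CACHE_SIZE_UNLIMITED } from "firebase/firestore";
 *
 *   const app = initializeApp({ projectId: "my-project" });
 *   const db = initializeFirestore(app, {
 *     cacheSizeBytes: CACHE_SIZE_UNLIMITED,
 *   });
 */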
17295
17296function qa(e, n) {
17297 const s = "object" == typeof e ? e : t(), i = "string" == typeof e ? e : n || "(default)", r = _getProvider(s, "firestore").getImmediate({
17298 identifier: i
17299 });
17300 if (!r._initialized) {
17301 const t = m("firestore");
17302 t && Zc(r, ...t);
17303 }
17304 return r;
17305}
17306
17307/**
17308 * @internal
17309 */ function Ka(t) {
17310 return t._firestoreClient || Ga(t), t._firestoreClient.verifyNotTerminated(), t._firestoreClient;
17311}
17312
17313function Ga(t) {
17314 var e;
17315 const n = t._freezeSettings(), s = function(t, e, n, s) {
17316 return new se(t, e, n, s.host, s.ssl, s.experimentalForceLongPolling, s.experimentalAutoDetectLongPolling, s.useFetchStreams);
17317 }
17318 /**
17319 * @license
17320 * Copyright 2020 Google LLC
17321 *
17322 * Licensed under the Apache License, Version 2.0 (the "License");
17323 * you may not use this file except in compliance with the License.
17324 * You may obtain a copy of the License at
17325 *
17326 * http://www.apache.org/licenses/LICENSE-2.0
17327 *
17328 * Unless required by applicable law or agreed to in writing, software
17329 * distributed under the License is distributed on an "AS IS" BASIS,
17330 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17331 * See the License for the specific language governing permissions and
17332 * limitations under the License.
17333 */
17334 // settings() defaults:
17335 (t._databaseId, (null === (e = t._app) || void 0 === e ? void 0 : e.options.appId) || "", t._persistenceKey, n);
17336 t._firestoreClient = new ma(t._authCredentials, t._appCheckCredentials, t._queue, s);
17337}
17338
17339/**
17340 * Attempts to enable persistent storage, if possible.
17341 *
17342 * Must be called before any other functions (other than
17343 * {@link initializeFirestore}, {@link (getFirestore:1)} or
17344 * {@link clearIndexedDbPersistence}).
17345 *
17346 * If this fails, `enableIndexedDbPersistence()` will reject the promise it
17347 * returns. Note that even after this failure, the {@link Firestore} instance will
17348 * remain usable; however, offline persistence will be disabled.
17349 *
17350 * There are several reasons why this can fail, which can be identified by
17351 * the `code` on the error.
17352 *
17353 * * failed-precondition: The app is already open in another browser tab.
17354 * * unimplemented: The browser is incompatible with the offline
17355 * persistence implementation.
17356 *
17357 * @param firestore - The {@link Firestore} instance to enable persistence for.
17358 * @param persistenceSettings - Optional settings object to configure
17359 * persistence.
17360 * @returns A `Promise` that represents successfully enabling persistent storage.
17361 */ function Qa(t, e) {
17362 eh(t = zc(t, La));
17363 const n = Ka(t), s = t._freezeSettings(), i = new qc;
17364 return Wa(n, i, new Lc(i, s.cacheSizeBytes, null == e ? void 0 : e.forceOwnership));
17365}
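/*
 * Usage sketch: enabling offline persistence and handling the two failure
 * codes documented above.
 *
 *   import { enableIndexedDbPersistence } from "firebase/firestore";
 *
 *   enableIndexedDbPersistence(db).catch((err) => {
 *     if (err.code === "failed-precondition") {
 *       // Another tab already has persistence enabled.
 *     } else if (err.code === "unimplemented") {
 *       // The browser lacks the required IndexedDB support.
 *     }
 *   });
 */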
17366
17367/**
17368 * Attempts to enable multi-tab persistent storage, if possible. If enabled
17369 * across all tabs, all operations share access to local persistence, including
17370 * shared execution of queries and latency-compensated local document updates
17371 * across all connected instances.
17372 *
17373 * If this fails, `enableMultiTabIndexedDbPersistence()` will reject the promise
17374 * it returns. Note that even after this failure, the {@link Firestore} instance will
17375 * remain usable; however, offline persistence will be disabled.
17376 *
17377 * There are several reasons why this can fail, which can be identified by
17378 * the `code` on the error.
17379 *
17380 * * failed-precondition: The app is already open in another browser tab and
17381 * multi-tab is not enabled.
17382 * * unimplemented: The browser is incompatible with the offline
17383 * persistence implementation.
17384 *
17385 * @param firestore - The {@link Firestore} instance to enable persistence for.
17386 * @returns A `Promise` that represents successfully enabling persistent
17387 * storage.
17388 */ function ja(t) {
17389 eh(t = zc(t, La));
17390 const e = Ka(t), n = t._freezeSettings(), s = new qc;
17391 return Wa(e, s, new Uc(s, n.cacheSizeBytes));
17392}
17393
17394/**
17395 * Registers both the `OfflineComponentProvider` and `OnlineComponentProvider`.
17396 * If the operation fails with a recoverable error (see
17397 * `canRecoverFromIndexedDbError()` below), the returned Promise is rejected
17398 * but the client remains usable.
17399 */ function Wa(t, e, n) {
17400 const s = new q;
17401 return t.asyncQueue.enqueue((async () => {
17402 try {
17403 await ga(t, n), await ya(t, e), s.resolve();
17404 } catch (t) {
17405 const e = t;
17406 if (!
17407 /**
17408 * Decides whether the provided error allows us to gracefully disable
17409 * persistence (as opposed to crashing the client).
17410 */
17411 function(t) {
17412 if ("FirebaseError" === t.name) return t.code === L.FAILED_PRECONDITION || t.code === L.UNIMPLEMENTED;
17413 if ("undefined" != typeof DOMException && t instanceof DOMException)
17414 // There are a few known circumstances where we can open IndexedDb but
17415 // trying to read/write will fail (e.g. quota exceeded). For
17416 // well-understood cases, we attempt to detect these and then gracefully
17417 // fall back to memory persistence.
17418 // NOTE: Rather than continue to add to this list, we could decide to
17419 // always fall back, with the risk that we might accidentally hide errors
17420 // representing actual SDK bugs.
17421 // When the browser is out of quota we could get either quota exceeded
17422 // or an aborted error depending on whether the error happened during
17423 // schema migration.
17424 return 22 === t.code || 20 === t.code ||
17425 // Firefox Private Browsing mode disables IndexedDb and returns
17426 // INVALID_STATE for any usage.
17427 11 === t.code;
17428 return !0;
17429 }
17430 /**
17431 * Clears the persistent storage. This includes pending writes and cached
17432 * documents.
17433 *
17434 * Must be called while the {@link Firestore} instance is not started (after the app is
17435 * terminated or when the app is first initialized). On startup, this function
17436 * must be called before other functions (other than {@link
17437 * initializeFirestore} or {@link (getFirestore:1)}). If the {@link Firestore}
17438 * instance is still running, the promise will be rejected with the error code
17439 * of `failed-precondition`.
17440 *
17441 * Note: `clearIndexedDbPersistence()` is primarily intended to help write
17442 * reliable tests that use Cloud Firestore. It uses an efficient mechanism for
17443 * dropping existing data but does not attempt to securely overwrite or
17444 * otherwise make cached data unrecoverable. For applications that are sensitive
17445 * to the disclosure of cached data in between user sessions, we strongly
17446 * recommend not enabling persistence at all.
17447 *
17448 * @param firestore - The {@link Firestore} instance to clear persistence for.
17449 * @returns A `Promise` that is resolved when the persistent storage is
17450 * cleared. Otherwise, the promise is rejected with an error.
17451 */ (e)) throw e;
17452 k("Error enabling offline persistence. Falling back to persistence disabled: " + e),
17453 s.reject(e);
17454 }
17455 })).then((() => s.promise));
17456}
17457
17458function za(t) {
17459 if (t._initialized && !t._terminated) throw new U(L.FAILED_PRECONDITION, "Persistence can only be cleared before a Firestore instance is initialized or after it is terminated.");
17460 const e = new q;
17461 return t._queue.enqueueAndForgetEvenWhileRestricted((async () => {
17462 try {
17463 await async function(t) {
17464 if (!Pt.C()) return Promise.resolve();
17465 const e = t + "main";
17466 await Pt.delete(e);
17467 }
17468 /**
17469 * @license
17470 * Copyright 2017 Google LLC
17471 *
17472 * Licensed under the Apache License, Version 2.0 (the "License");
17473 * you may not use this file except in compliance with the License.
17474 * You may obtain a copy of the License at
17475 *
17476 * http://www.apache.org/licenses/LICENSE-2.0
17477 *
17478 * Unless required by applicable law or agreed to in writing, software
17479 * distributed under the License is distributed on an "AS IS" BASIS,
17480 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17481 * See the License for the specific language governing permissions and
17482 * limitations under the License.
17483 */
17484 /**
17485 * Compares two arrays for equality using a comparator. The method computes the
17486 * intersection and invokes `onAdd` for every element that is in `after` but not
17487 * `before`. `onRemove` is invoked for every element in `before` but missing
17488 * from `after`.
17489 *
17490 * The method creates a copy of both `before` and `after` and runs in O(n log
17491 * n), where n is the size of the two lists.
17492 *
17493 * @param before - The elements that exist in the original array.
17494 * @param after - The elements to diff against the original array.
17495 * @param comparator - The comparator for the elements in before and after.
17496 * @param onAdd - A function to invoke for every element that is part of `
17497 * after` but not `before`.
17498 * @param onRemove - A function to invoke for every element that is part of
17499 * `before` but not `after`.
17500 */ (po(t._databaseId, t._persistenceKey)), e.resolve();
17501 } catch (t) {
17502 e.reject(t);
17503 }
17504 })), e.promise;
17505}
17506
17507/**
17508 * Waits until all currently pending writes for the active user have been
17509 * acknowledged by the backend.
17510 *
17511 * The returned promise resolves immediately if there are no outstanding writes.
17512 * Otherwise, the promise waits for all previously issued writes (including
17513 * those written in a previous app session), but it does not wait for writes
17514 * that were added after the function is called. If you want to wait for
17515 * additional writes, call `waitForPendingWrites()` again.
17516 *
17517 * Any outstanding `waitForPendingWrites()` promises are rejected during user
17518 * changes.
17519 *
17520 * @returns A `Promise` which resolves when all currently pending writes have been
17521 * acknowledged by the backend.
17522 */ function Ha(t) {
17523 return function(t) {
17524 const e = new q;
17525 return t.asyncQueue.enqueueAndForget((async () => mc(await Ra(t), e))), e.promise;
17526 }(Ka(t = zc(t, La)));
17527}
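/*
 * Usage sketch: waiting for all writes issued so far to be acknowledged by the
 * backend before continuing.
 *
 *   import { waitForPendingWrites } from "firebase/firestore";
 *
 *   async function flushWrites(db) {
 *     await waitForPendingWrites(db);
 *     console.log("all pending writes acknowledged");
 *   }
 */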
17528
17529/**
17530 * Re-enables use of the network for this {@link Firestore} instance after a prior
17531 * call to {@link disableNetwork}.
17532 *
17533 * @returns A `Promise` that is resolved once the network has been enabled.
17534 */ function Ja(t) {
17535 return va(Ka(t = zc(t, La)));
17536}
17537
17538/**
17539 * Disables network usage for this instance. It can be re-enabled via {@link
17540 * enableNetwork}. While the network is disabled, any snapshot listeners,
17541 * `getDoc()` or `getDocs()` calls will return results from cache, and any write
17542 * operations will be queued until the network is restored.
17543 *
17544 * @returns A `Promise` that is resolved once the network has been disabled.
17545 */ function Ya(t) {
17546 return Va(Ka(t = zc(t, La)));
17547}
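/*
 * Usage sketch: forcing cache-only operation and later restoring connectivity.
 *
 *   import { disableNetwork, enableNetwork } from "firebase/firestore";
 *
 *   async function goOffline(db) {
 *     await disableNetwork(db); // reads are served from cache, writes are queued
 *   }
 *
 *   async function goOnline(db) {
 *     await enableNetwork(db);  // queued writes are sent to the backend
 *   }
 */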
17548
17549/**
17550 * Terminates the provided {@link Firestore} instance.
17551 *
17552 * After calling `terminate()` only the `clearIndexedDbPersistence()` function
17553 * may be used. Any other function will throw a `FirestoreError`.
17554 *
17555 * To restart after termination, create a new instance of FirebaseFirestore with
17556 * {@link (getFirestore:1)}.
17557 *
17558 * Termination does not cancel any pending writes, and any promises that are
17559 * awaiting a response from the server will not be resolved. If you have
17560 * persistence enabled, the next time you start this instance, it will resume
17561 * sending these writes to the server.
17562 *
17563 * Note: Under normal circumstances, calling `terminate()` is not required. This
17564 * function is useful only when you want to force this instance to release all
17565 * of its resources or in combination with `clearIndexedDbPersistence()` to
17566 * ensure that all local state is destroyed between test runs.
17567 *
17568 * @returns A `Promise` that is resolved when the instance has been successfully
17569 * terminated.
17570 */ function Xa(t) {
17571 return e(t.app, "firestore", t._databaseId.database), t._delete();
17572}
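/*
 * Usage sketch: the teardown pattern described above, e.g. between test runs:
 * terminate the instance first, then clear its persisted state.
 *
 *   import { terminate, clearIndexedDbPersistence } from "firebase/firestore";
 *
 *   async function resetLocalState(db) {
 *     await terminate(db);
 *     await clearIndexedDbPersistence(db);
 *   }
 */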
17573
17574/**
17575 * Loads a Firestore bundle into the local cache.
17576 *
17577 * @param firestore - The {@link Firestore} instance to load bundles for.
17578 * @param bundleData - An object representing the bundle to be loaded. Valid
17579 * objects are `ArrayBuffer`, `ReadableStream<Uint8Array>` or `string`.
17580 *
17581 * @returns A `LoadBundleTask` object, which notifies callers with progress
17582 * updates, and completion or error events. It can be used as a
17583 * `Promise<LoadBundleTaskProgress>`.
17584 */ function Za(t, e) {
17585 const n = Ka(t = zc(t, La)), s = new $a;
17586 return ka(n, t._databaseId, e, s), s;
17587}
17588
17589/**
17590 * Reads a Firestore {@link Query} from local cache, identified by the given
17591 * name.
17592 *
17593 * The named queries are packaged into bundles on the server side (along
17594 * with resulting documents), and loaded to local cache using `loadBundle`. Once
17595 * in local cache, use this method to extract a {@link Query} by name.
17596 *
17597 * @param firestore - The {@link Firestore} instance to read the query from.
17598 * @param name - The name of the query.
17599 * @returns A `Promise` that is resolved with the Query or `null`.
17600 */ function th(t, e) {
17601 return Oa(Ka(t = zc(t, La)), e).then((e => e ? new ea(t, null, e.query) : null));
17602}
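/*
 * Usage sketch: loading a bundle and then reading one of its named queries
 * from the local cache. "latest-stories" is a placeholder query name.
 *
 *   import { loadBundle, namedQuery, getDocsFromCache } from "firebase/firestore";
 *
 *   async function loadStories(db, bundleData) {
 *     await loadBundle(db, bundleData);
 *     const storiesQuery = await namedQuery(db, "latest-stories");
 *     if (storiesQuery) {
 *       const snapshot = await getDocsFromCache(storiesQuery);
 *       console.log("stories in cache:", snapshot.size);
 *     }
 *   }
 */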
17603
17604function eh(t) {
17605 if (t._initialized || t._terminated) throw new U(L.FAILED_PRECONDITION, "Firestore has already been started and persistence can no longer be enabled. You can only enable persistence before calling any other methods on a Firestore object.");
17606}
17607
17608/**
17609 * @license
17610 * Copyright 2020 Google LLC
17611 *
17612 * Licensed under the Apache License, Version 2.0 (the "License");
17613 * you may not use this file except in compliance with the License.
17614 * You may obtain a copy of the License at
17615 *
17616 * http://www.apache.org/licenses/LICENSE-2.0
17617 *
17618 * Unless required by applicable law or agreed to in writing, software
17619 * distributed under the License is distributed on an "AS IS" BASIS,
17620 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17621 * See the License for the specific language governing permissions and
17622 * limitations under the License.
17623 */
17640/**
17641 * An immutable object representing an array of bytes.
17642 */
17643class nh {
17644 /** @hideconstructor */
17645 constructor(t) {
17646 this._byteString = t;
17647 }
17648 /**
17649 * Creates a new `Bytes` object from the given Base64 string, converting it to
17650 * bytes.
17651 *
17652 * @param base64 - The Base64 string used to create the `Bytes` object.
17653 */ static fromBase64String(t) {
17654 try {
17655 return new nh(Ht.fromBase64String(t));
17656 } catch (t) {
17657 throw new U(L.INVALID_ARGUMENT, "Failed to construct data from Base64 string: " + t);
17658 }
17659 }
17660 /**
17661 * Creates a new `Bytes` object from the given Uint8Array.
17662 *
17663 * @param array - The Uint8Array used to create the `Bytes` object.
17664 */ static fromUint8Array(t) {
17665 return new nh(Ht.fromUint8Array(t));
17666 }
17667 /**
17668 * Returns the underlying bytes as a Base64-encoded string.
17669 *
17670 * @returns The Base64-encoded string created from the `Bytes` object.
17671 */ toBase64() {
17672 return this._byteString.toBase64();
17673 }
17674 /**
17675 * Returns the underlying bytes in a new `Uint8Array`.
17676 *
17677 * @returns The Uint8Array created from the `Bytes` object.
17678 */ toUint8Array() {
17679 return this._byteString.toUint8Array();
17680 }
17681 /**
17682 * Returns a string representation of the `Bytes` object.
17683 *
17684 * @returns A string representation of the `Bytes` object.
17685 */ toString() {
17686 return "Bytes(base64: " + this.toBase64() + ")";
17687 }
17688 /**
17689 * Returns true if this `Bytes` object is equal to the provided one.
17690 *
17691 * @param other - The `Bytes` object to compare against.
17692 * @returns true if this `Bytes` object is equal to the provided one.
17693 */ isEqual(t) {
17694 return this._byteString.isEqual(t._byteString);
17695 }
17696}
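/*
 * Illustrative usage sketch (not part of the bundle's executable code):
 * round-tripping binary data through the public `Bytes` wrapper (the class
 * above under its minified name `nh`).
 *
 *   import { Bytes } from "firebase/firestore";
 *
 *   const b = Bytes.fromUint8Array(new Uint8Array([1, 2, 3]));
 *   console.log(b.toBase64());                        // "AQID"
 *   const same = Bytes.fromBase64String(b.toBase64());
 *   console.log(b.isEqual(same));                     // true
 */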
17697
17698/**
17699 * @license
17700 * Copyright 2020 Google LLC
17701 *
17702 * Licensed under the Apache License, Version 2.0 (the "License");
17703 * you may not use this file except in compliance with the License.
17704 * You may obtain a copy of the License at
17705 *
17706 * http://www.apache.org/licenses/LICENSE-2.0
17707 *
17708 * Unless required by applicable law or agreed to in writing, software
17709 * distributed under the License is distributed on an "AS IS" BASIS,
17710 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17711 * See the License for the specific language governing permissions and
17712 * limitations under the License.
17713 */
17714/**
17715 * A `FieldPath` refers to a field in a document. The path may consist of a
17716 * single field name (referring to a top-level field in the document), or a
17717 * list of field names (referring to a nested field in the document).
17718 *
17719 * Create a `FieldPath` by providing field names. If more than one field
17720 * name is provided, the path will point to a nested field in a document.
17721 */ class sh {
17722 /**
17723 * Creates a `FieldPath` from the provided field names. If more than one field
17724 * name is provided, the path will point to a nested field in a document.
17725 *
17726 * @param fieldNames - A list of field names.
17727 */
17728 constructor(...t) {
17729 for (let e = 0; e < t.length; ++e) if (0 === t[e].length) throw new U(L.INVALID_ARGUMENT, `Invalid field name at argument ${e + 1}. Field names must not be empty.`);
17730 this._internalPath = new ct(t);
17731 }
17732 /**
17733 * Returns true if this `FieldPath` is equal to the provided one.
17734 *
17735 * @param other - The `FieldPath` to compare against.
17736 * @returns true if this `FieldPath` is equal to the provided one.
17737 */ isEqual(t) {
17738 return this._internalPath.isEqual(t._internalPath);
17739 }
17740}
17741
17742/**
17743 * Returns a special sentinel `FieldPath` to refer to the ID of a document.
17744 * It can be used in queries to sort or filter by the document ID.
17745 */ function ih() {
17746 return new sh("__name__");
17747}
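/*
 * Illustrative usage sketch (not part of the bundle's executable code):
 * addressing a nested field with `FieldPath` (the `sh` class above) and
 * filtering on the document ID with the `documentId()` sentinel (`ih`).
 * Collection and field names are hypothetical.
 *
 *   const path = new FieldPath("address", "city");   // nested field "address.city"
 *   await updateDoc(doc(db, "users/alice"), path, "Zurich");
 *
 *   const q = query(collection(db, "users"),
 *                   where(documentId(), ">=", "alice"));
 */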
17748
17749/**
17750 * @license
17751 * Copyright 2020 Google LLC
17752 *
17753 * Licensed under the Apache License, Version 2.0 (the "License");
17754 * you may not use this file except in compliance with the License.
17755 * You may obtain a copy of the License at
17756 *
17757 * http://www.apache.org/licenses/LICENSE-2.0
17758 *
17759 * Unless required by applicable law or agreed to in writing, software
17760 * distributed under the License is distributed on an "AS IS" BASIS,
17761 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17762 * See the License for the specific language governing permissions and
17763 * limitations under the License.
17764 */
17765/**
17766 * Sentinel values that can be used when writing document fields with `set()`
17767 * or `update()`.
17768 */ class rh {
17769 /**
17770 * @param _methodName - The public API endpoint that returns this class.
17771 * @hideconstructor
17772 */
17773 constructor(t) {
17774 this._methodName = t;
17775 }
17776}
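/*
 * Illustrative usage sketch (not part of the bundle's executable code): the
 * concrete sentinel subclasses defined further below (delete, server timestamp,
 * array union/remove, numeric increment) extend this base class and are
 * consumed by `set()`/`update()` via the public helpers. Field names are
 * hypothetical.
 *
 *   await updateDoc(doc(db, "counters/page"), {
 *     hits: increment(1),
 *     lastVisit: serverTimestamp(),
 *     tags: arrayUnion("home"),
 *     obsoleteField: deleteField()
 *   });
 */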
17777
17778/**
17779 * @license
17780 * Copyright 2017 Google LLC
17781 *
17782 * Licensed under the Apache License, Version 2.0 (the "License");
17783 * you may not use this file except in compliance with the License.
17784 * You may obtain a copy of the License at
17785 *
17786 * http://www.apache.org/licenses/LICENSE-2.0
17787 *
17788 * Unless required by applicable law or agreed to in writing, software
17789 * distributed under the License is distributed on an "AS IS" BASIS,
17790 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17791 * See the License for the specific language governing permissions and
17792 * limitations under the License.
17793 */
17794/**
17795 * An immutable object representing a geographic location in Firestore. The
17796 * location is represented as latitude/longitude pair.
17797 *
17798 * Latitude values are in the range of [-90, 90].
17799 * Longitude values are in the range of [-180, 180].
17800 */ class oh {
17801 /**
17802 * Creates a new immutable `GeoPoint` object with the provided latitude and
17803 * longitude values.
17804 * @param latitude - The latitude as number between -90 and 90.
17805 * @param longitude - The longitude as number between -180 and 180.
17806 */
17807 constructor(t, e) {
17808 if (!isFinite(t) || t < -90 || t > 90) throw new U(L.INVALID_ARGUMENT, "Latitude must be a number between -90 and 90, but was: " + t);
17809 if (!isFinite(e) || e < -180 || e > 180) throw new U(L.INVALID_ARGUMENT, "Longitude must be a number between -180 and 180, but was: " + e);
17810 this._lat = t, this._long = e;
17811 }
17812 /**
17813 * The latitude of this `GeoPoint` instance.
17814 */ get latitude() {
17815 return this._lat;
17816 }
17817 /**
17818 * The longitude of this `GeoPoint` instance.
17819 */ get longitude() {
17820 return this._long;
17821 }
17822 /**
17823 * Returns true if this `GeoPoint` is equal to the provided one.
17824 *
17825 * @param other - The `GeoPoint` to compare against.
17826 * @returns true if this `GeoPoint` is equal to the provided one.
17827 */ isEqual(t) {
17828 return this._lat === t._lat && this._long === t._long;
17829 }
17830 /** Returns a JSON-serializable representation of this GeoPoint. */ toJSON() {
17831 return {
17832 latitude: this._lat,
17833 longitude: this._long
17834 };
17835 }
17836 /**
17837 * Actually private to JS consumers of our API, so this function is prefixed
17838 * with an underscore.
17839 */ _compareTo(t) {
17840 return tt(this._lat, t._lat) || tt(this._long, t._long);
17841 }
17842}
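/*
 * Illustrative usage sketch (not part of the bundle's executable code):
 * constructing and comparing `GeoPoint` values (the `oh` class above).
 * Latitude/longitude outside the documented ranges throws INVALID_ARGUMENT.
 *
 *   const zurich = new GeoPoint(47.3769, 8.5417);
 *   console.log(zurich.latitude, zurich.longitude);
 *   console.log(zurich.isEqual(new GeoPoint(47.3769, 8.5417)));  // true
 *   // new GeoPoint(123, 0)  -> throws: latitude must be between -90 and 90
 */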
17843
17844/**
17845 * @license
17846 * Copyright 2017 Google LLC
17847 *
17848 * Licensed under the Apache License, Version 2.0 (the "License");
17849 * you may not use this file except in compliance with the License.
17850 * You may obtain a copy of the License at
17851 *
17852 * http://www.apache.org/licenses/LICENSE-2.0
17853 *
17854 * Unless required by applicable law or agreed to in writing, software
17855 * distributed under the License is distributed on an "AS IS" BASIS,
17856 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17857 * See the License for the specific language governing permissions and
17858 * limitations under the License.
17859 */ const uh = /^__.*__$/;
17860
17861/** The result of parsing document data (e.g. for a setData call). */ class ch {
17862 constructor(t, e, n) {
17863 this.data = t, this.fieldMask = e, this.fieldTransforms = n;
17864 }
17865 toMutation(t, e) {
17866 return null !== this.fieldMask ? new jn(t, this.data, this.fieldMask, e, this.fieldTransforms) : new Qn(t, this.data, e, this.fieldTransforms);
17867 }
17868}
17869
17870/** The result of parsing "update" data (i.e. for an updateData call). */ class ah {
17871 constructor(t,
17872 // The fieldMask does not include document transforms.
17873 e, n) {
17874 this.data = t, this.fieldMask = e, this.fieldTransforms = n;
17875 }
17876 toMutation(t, e) {
17877 return new jn(t, this.data, this.fieldMask, e, this.fieldTransforms);
17878 }
17879}
17880
17881function hh(t) {
17882 switch (t) {
17883 case 0 /* Set */ :
17884 // fall through
17885 case 2 /* MergeSet */ :
17886 // fall through
17887 case 1 /* Update */ :
17888 return !0;
17889
17890 case 3 /* Argument */ :
17891 case 4 /* ArrayArgument */ :
17892 return !1;
17893
17894 default:
17895 throw M();
17896 }
17897}
17898
17899/** A "context" object passed around while parsing user data. */ class lh {
17900 /**
17901 * Initializes a ParseContext with the given source and path.
17902 *
17903 * @param settings - The settings for the parser.
17904 * @param databaseId - The database ID of the Firestore instance.
17905 * @param serializer - The serializer to use to generate the Value proto.
17906 * @param ignoreUndefinedProperties - Whether to ignore undefined properties
17907 * rather than throw.
17908 * @param fieldTransforms - A mutable list of field transforms encountered
17909 * while parsing the data.
17910 * @param fieldMask - A mutable list of field paths encountered while parsing
17911 * the data.
17912 *
17913 * TODO(b/34871131): We don't support array paths right now, so path can be
17914 * null to indicate the context represents any location within an array (in
17915 * which case certain features will not work and errors will be somewhat
17916 * compromised).
17917 */
17918 constructor(t, e, n, s, i, r) {
17919 this.settings = t, this.databaseId = e, this.It = n, this.ignoreUndefinedProperties = s,
17920 // Minor hack: If fieldTransforms is undefined, we assume this is an
17921 // external call and we need to validate the entire path.
17922 void 0 === i && this.na(), this.fieldTransforms = i || [], this.fieldMask = r || [];
17923 }
17924 get path() {
17925 return this.settings.path;
17926 }
17927 get sa() {
17928 return this.settings.sa;
17929 }
17930 /** Returns a new context with the specified settings overwritten. */ ia(t) {
17931 return new lh(Object.assign(Object.assign({}, this.settings), t), this.databaseId, this.It, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
17932 }
17933 ra(t) {
17934 var e;
17935 const n = null === (e = this.path) || void 0 === e ? void 0 : e.child(t), s = this.ia({
17936 path: n,
17937 oa: !1
17938 });
17939 return s.ua(t), s;
17940 }
17941 ca(t) {
17942 var e;
17943 const n = null === (e = this.path) || void 0 === e ? void 0 : e.child(t), s = this.ia({
17944 path: n,
17945 oa: !1
17946 });
17947 return s.na(), s;
17948 }
17949 aa(t) {
17950 // TODO(b/34871131): We don't support array paths right now; so make path
17951 // undefined.
17952 return this.ia({
17953 path: void 0,
17954 oa: !0
17955 });
17956 }
17957 ha(t) {
17958 return Ch(t, this.settings.methodName, this.settings.la || !1, this.path, this.settings.fa);
17959 }
17960 /** Returns 'true' if 'fieldPath' was traversed when creating this context. */ contains(t) {
17961 return void 0 !== this.fieldMask.find((e => t.isPrefixOf(e))) || void 0 !== this.fieldTransforms.find((e => t.isPrefixOf(e.field)));
17962 }
17963 na() {
17964 // TODO(b/34871131): Remove null check once we have proper paths for fields
17965 // within arrays.
17966 if (this.path) for (let t = 0; t < this.path.length; t++) this.ua(this.path.get(t));
17967 }
17968 ua(t) {
17969 if (0 === t.length) throw this.ha("Document fields must not be empty");
17970 if (hh(this.sa) && uh.test(t)) throw this.ha('Document fields cannot begin and end with "__"');
17971 }
17972}
17973
17974/**
17975 * Helper for parsing raw user input (provided via the API) into internal model
17976 * classes.
17977 */ class fh {
17978 constructor(t, e, n) {
17979 this.databaseId = t, this.ignoreUndefinedProperties = e, this.It = n || eu(t);
17980 }
17981 /** Creates a new top-level parse context. */ da(t, e, n, s = !1) {
17982 return new lh({
17983 sa: t,
17984 methodName: e,
17985 fa: n,
17986 path: ct.emptyPath(),
17987 oa: !1,
17988 la: s
17989 }, this.databaseId, this.It, this.ignoreUndefinedProperties);
17990 }
17991}
17992
17993function dh(t) {
17994 const e = t._freezeSettings(), n = eu(t._databaseId);
17995 return new fh(t._databaseId, !!e.ignoreUndefinedProperties, n);
17996}
17997
17998/** Parse document data from a set() call. */ function _h(t, e, n, s, i, r = {}) {
17999 const o = t.da(r.merge || r.mergeFields ? 2 /* MergeSet */ : 0 /* Set */ , e, n, i);
18000 vh("Data must be an object, but it was:", o, s);
18001 const u = bh(s, o);
18002 let c, a;
18003 if (r.merge) c = new jt(o.fieldMask), a = o.fieldTransforms; else if (r.mergeFields) {
18004 const t = [];
18005 for (const s of r.mergeFields) {
18006 const i = Vh(e, s, n);
18007 if (!o.contains(i)) throw new U(L.INVALID_ARGUMENT, `Field '${i}' is specified in your field mask but missing from your input data.`);
18008 xh(t, i) || t.push(i);
18009 }
18010 c = new jt(t), a = o.fieldTransforms.filter((t => c.covers(t.field)));
18011 } else c = null, a = o.fieldTransforms;
18012 return new ch(new Se(u), c, a);
18013}
18014
18015class wh extends rh {
18016 _toFieldTransform(t) {
18017 if (2 /* MergeSet */ !== t.sa) throw 1 /* Update */ === t.sa ? t.ha(`${this._methodName}() can only appear at the top level of your update data`) : t.ha(`${this._methodName}() cannot be used with set() unless you pass {merge:true}`);
18018 // No transform to add for a delete, but we need to add it to our
18019 // fieldMask so it gets deleted.
18020 return t.fieldMask.push(t.path), null;
18021 }
18022 isEqual(t) {
18023 return t instanceof wh;
18024 }
18025}
18026
18027/**
18028 * Creates a child context for parsing SerializableFieldValues.
18029 *
18030 * This is different than calling `ParseContext.contextWith` because it keeps
18031 * the fieldTransforms and fieldMask separate.
18032 *
18033 * The created context has its `dataSource` set to `UserDataSource.Argument`.
18034 * Although these values are used with writes, any elements in these FieldValues
18035 * are not considered writes since they cannot contain any FieldValue sentinels,
18036 * etc.
18037 *
18038 * @param fieldValue - The sentinel FieldValue for which to create a child
18039 * context.
18040 * @param context - The parent context.
18041 * @param arrayElement - Whether or not the FieldValue's operands are parsed as array elements.
18042 */ function mh(t, e, n) {
18043 return new lh({
18044 sa: 3 /* Argument */ ,
18045 fa: e.settings.fa,
18046 methodName: t._methodName,
18047 oa: n
18048 }, e.databaseId, e.It, e.ignoreUndefinedProperties);
18049}
18050
18051class gh extends rh {
18052 _toFieldTransform(t) {
18053 return new kn(t.path, new Pn);
18054 }
18055 isEqual(t) {
18056 return t instanceof gh;
18057 }
18058}
18059
18060class yh extends rh {
18061 constructor(t, e) {
18062 super(t), this._a = e;
18063 }
18064 _toFieldTransform(t) {
18065 const e = mh(this, t,
18066 /*array=*/ !0), n = this._a.map((t => Rh(t, e))), s = new vn(n);
18067 return new kn(t.path, s);
18068 }
18069 isEqual(t) {
18070 // TODO(mrschmidt): Implement isEquals
18071 return this === t;
18072 }
18073}
18074
18075class ph extends rh {
18076 constructor(t, e) {
18077 super(t), this._a = e;
18078 }
18079 _toFieldTransform(t) {
18080 const e = mh(this, t,
18081 /*array=*/ !0), n = this._a.map((t => Rh(t, e))), s = new Sn(n);
18082 return new kn(t.path, s);
18083 }
18084 isEqual(t) {
18085 // TODO(mrschmidt): Implement isEquals
18086 return this === t;
18087 }
18088}
18089
18090class Ih extends rh {
18091 constructor(t, e) {
18092 super(t), this.wa = e;
18093 }
18094 _toFieldTransform(t) {
18095 const e = new Cn(t.It, Tn(t.It, this.wa));
18096 return new kn(t.path, e);
18097 }
18098 isEqual(t) {
18099 // TODO(mrschmidt): Implement isEquals
18100 return this === t;
18101 }
18102}
18103
18104/** Parse update data from an update() call. */ function Th(t, e, n, s) {
18105 const i = t.da(1 /* Update */ , e, n);
18106 vh("Data must be an object, but it was:", i, s);
18107 const r = [], o = Se.empty();
18108 $t(s, ((t, s) => {
18109 const u = Dh(e, t, n);
18110 // For Compat types, we have to "extract" the underlying types before
18111 // performing validation.
18112 s = _(s);
18113 const c = i.ca(u);
18114 if (s instanceof wh)
18115 // Add it to the field mask, but don't add anything to updateData.
18116 r.push(u); else {
18117 const t = Rh(s, c);
18118 null != t && (r.push(u), o.set(u, t));
18119 }
18120 }));
18121 const u = new jt(r);
18122 return new ah(o, u, i.fieldTransforms);
18123}
18124
18125/** Parse update data from a list of field/value arguments. */ function Eh(t, e, n, s, i, r) {
18126 const o = t.da(1 /* Update */ , e, n), u = [ Vh(e, s, n) ], c = [ i ];
18127 if (r.length % 2 != 0) throw new U(L.INVALID_ARGUMENT, `Function ${e}() needs to be called with an even number of arguments that alternate between field names and values.`);
18128 for (let t = 0; t < r.length; t += 2) u.push(Vh(e, r[t])), c.push(r[t + 1]);
18129 const a = [], h = Se.empty();
18130 // We iterate in reverse order to pick the last value for a field if the
18131 // user specified the field multiple times.
18132 for (let t = u.length - 1; t >= 0; --t) if (!xh(a, u[t])) {
18133 const e = u[t];
18134 let n = c[t];
18135 // For Compat types, we have to "extract" the underlying types before
18136 // performing validation.
18137 n = _(n);
18138 const s = o.ca(e);
18139 if (n instanceof wh)
18140 // Add it to the field mask, but don't add anything to updateData.
18141 a.push(e); else {
18142 const t = Rh(n, s);
18143 null != t && (a.push(e), h.set(e, t));
18144 }
18145 }
18146 const l = new jt(a);
18147 return new ah(h, l, o.fieldTransforms);
18148}
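/*
 * Illustrative usage sketch (not part of the bundle's executable code): the
 * alternating field/value form parsed by the helper above. An odd number of
 * trailing arguments triggers the INVALID_ARGUMENT error, and when a field is
 * repeated the last value wins. Collection and field names are hypothetical.
 *
 *   await updateDoc(doc(db, "users/alice"),
 *                   "age", 30,
 *                   new FieldPath("address", "city"), "Zurich");
 */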
18149
18150/**
18151 * Parse a "query value" (e.g. value in a where filter or a value in a cursor
18152 * bound).
18153 *
18154 * @param allowArrays - Whether the query value is an array that may directly
18155 * contain additional arrays (e.g. the operand of an `in` query).
18156 */ function Ah(t, e, n, s = !1) {
18157 return Rh(n, t.da(s ? 4 /* ArrayArgument */ : 3 /* Argument */ , e));
18158}
18159
18160/**
18161 * Parses user data to Protobuf Values.
18162 *
18163 * @param input - Data to be parsed.
18164 * @param context - A context object representing the current path being parsed,
18165 * the source of the data being parsed, etc.
18166 * @returns The parsed value, or null if the value was a FieldValue sentinel
18167 * that should not be included in the resulting parsed data.
18168 */ function Rh(t, e) {
18169 if (Ph(
18170 // Unwrap the API type from the Compat SDK. This will return the API type
18171 // from firestore-exp.
18172 t = _(t))) return vh("Unsupported field value:", e, t), bh(t, e);
18173 if (t instanceof rh)
18174 // FieldValues usually parse into transforms (except deleteField())
18175 // in which case we do not want to include this field in our parsed data
18176 // (as doing so will overwrite the field directly prior to the transform
18177 // trying to transform it). So we don't add this location to
18178 // context.fieldMask and we return null as our parsing result.
18179 /**
18180 * "Parses" the provided FieldValueImpl, adding any necessary transforms to
18181 * context.fieldTransforms.
18182 */
18183 return function(t, e) {
18184 // Sentinels are only supported with writes, and not within arrays.
18185 if (!hh(e.sa)) throw e.ha(`${t._methodName}() can only be used with update() and set()`);
18186 if (!e.path) throw e.ha(`${t._methodName}() is not currently supported inside arrays`);
18187 const n = t._toFieldTransform(e);
18188 n && e.fieldTransforms.push(n);
18189 }
18190 /**
18191 * Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue)
18192 *
18193 * @returns The parsed value
18194 */ (t, e), null;
18195 if (void 0 === t && e.ignoreUndefinedProperties)
18196 // If the input is undefined it can never participate in the fieldMask, so
18197 // don't handle this below. If `ignoreUndefinedProperties` is false,
18198 // `parseScalarValue` will reject an undefined value.
18199 return null;
18200 if (
18201 // If context.path is null we are inside an array and we don't support
18202 // field mask paths more granular than the top-level array.
18203 e.path && e.fieldMask.push(e.path), t instanceof Array) {
18204 // TODO(b/34871131): Include the path containing the array in the error
18205 // message.
18206 // In the case of IN queries, the parsed data is an array (representing
18207 // the set of values to be included for the IN query) that may directly
18208 // contain additional arrays (each representing an individual field
18209 // value), so we disable this validation.
18210 if (e.settings.oa && 4 /* ArrayArgument */ !== e.sa) throw e.ha("Nested arrays are not supported");
18211 return function(t, e) {
18212 const n = [];
18213 let s = 0;
18214 for (const i of t) {
18215 let t = Rh(i, e.aa(s));
18216 null == t && (
18217 // Just include nulls in the array for fields being replaced with a
18218 // sentinel.
18219 t = {
18220 nullValue: "NULL_VALUE"
18221 }), n.push(t), s++;
18222 }
18223 return {
18224 arrayValue: {
18225 values: n
18226 }
18227 };
18228 }(t, e);
18229 }
18230 return function(t, e) {
18231 if (null === (t = _(t))) return {
18232 nullValue: "NULL_VALUE"
18233 };
18234 if ("number" == typeof t) return Tn(e.It, t);
18235 if ("boolean" == typeof t) return {
18236 booleanValue: t
18237 };
18238 if ("string" == typeof t) return {
18239 stringValue: t
18240 };
18241 if (t instanceof Date) {
18242 const n = st.fromDate(t);
18243 return {
18244 timestampValue: Ss(e.It, n)
18245 };
18246 }
18247 if (t instanceof st) {
18248 // Firestore backend truncates precision down to microseconds. To ensure
18249 // offline mode works the same with regards to truncation, perform the
18250 // truncation immediately without waiting for the backend to do that.
18251 const n = new st(t.seconds, 1e3 * Math.floor(t.nanoseconds / 1e3));
18252 return {
18253 timestampValue: Ss(e.It, n)
18254 };
18255 }
18256 if (t instanceof oh) return {
18257 geoPointValue: {
18258 latitude: t.latitude,
18259 longitude: t.longitude
18260 }
18261 };
18262 if (t instanceof nh) return {
18263 bytesValue: Ds(e.It, t._byteString)
18264 };
18265 if (t instanceof ta) {
18266 const n = e.databaseId, s = t.firestore._databaseId;
18267 if (!s.isEqual(n)) throw e.ha(`Document reference is for database ${s.projectId}/${s.database} but should be for database ${n.projectId}/${n.database}`);
18268 return {
18269 referenceValue: Ns(t.firestore._databaseId || e.databaseId, t._key.path)
18270 };
18271 }
18272 throw e.ha(`Unsupported field value: ${Wc(t)}`);
18273 }
18274 /**
18275 * Checks whether an object looks like a JSON object that should be converted
18276 * into a struct. Normal class/prototype instances are considered to look like
18277 * JSON objects since they should be converted to a struct value. Arrays, Dates,
18278 * GeoPoints, etc. are not considered to look like JSON objects since they map
18279 * to specific FieldValue types other than ObjectValue.
18280 */ (t, e);
18281}
18282
18283function bh(t, e) {
18284 const n = {};
18285 return Bt(t) ?
18286 // If we encounter an empty object, we explicitly add it to the update
18287 // mask to ensure that the server creates a map entry.
18288 e.path && e.path.length > 0 && e.fieldMask.push(e.path) : $t(t, ((t, s) => {
18289 const i = Rh(s, e.ra(t));
18290 null != i && (n[t] = i);
18291 })), {
18292 mapValue: {
18293 fields: n
18294 }
18295 };
18296}
18297
18298function Ph(t) {
18299 return !("object" != typeof t || null === t || t instanceof Array || t instanceof Date || t instanceof st || t instanceof oh || t instanceof nh || t instanceof ta || t instanceof rh);
18300}
18301
18302function vh(t, e, n) {
18303 if (!Ph(n) || !function(t) {
18304 return "object" == typeof t && null !== t && (Object.getPrototypeOf(t) === Object.prototype || null === Object.getPrototypeOf(t));
18305 }(n)) {
18306 const s = Wc(n);
18307 throw "an object" === s ? e.ha(t + " a custom object") : e.ha(t + " " + s);
18308 }
18309}
18310
18311/**
18312 * Helper that calls fromDotSeparatedString() but wraps any error thrown.
18313 */ function Vh(t, e, n) {
18314 if ((
18315 // If required, replace the FieldPath Compat class with with the firestore-exp
18316 // FieldPath.
18317 e = _(e)) instanceof sh) return e._internalPath;
18318 if ("string" == typeof e) return Dh(t, e);
18319 throw Ch("Field path arguments must be of type string or ", t,
18320 /* hasConverter= */ !1,
18321 /* path= */ void 0, n);
18322}
18323
18324/**
18325 * Matches any characters in a field path string that are reserved.
18326 */ const Sh = new RegExp("[~\\*/\\[\\]]");
18327
18328/**
18329 * Wraps fromDotSeparatedString(), rethrowing any error with a message that
18330 * names the calling method.
18331 * @param methodName - The publicly visible method name
18332 * @param path - The dot-separated string form of a field path which will be
18333 * split on dots.
18334 * @param targetDoc - The document against which the field path will be
18335 * evaluated.
18336 */ function Dh(t, e, n) {
18337 if (e.search(Sh) >= 0) throw Ch(`Invalid field path (${e}). Paths must not contain '~', '*', '/', '[', or ']'`, t,
18338 /* hasConverter= */ !1,
18339 /* path= */ void 0, n);
18340 try {
18341 return new sh(...e.split("."))._internalPath;
18342 } catch (s) {
18343 throw Ch(`Invalid field path (${e}). Paths must not be empty, begin with '.', end with '.', or contain '..'`, t,
18344 /* hasConverter= */ !1,
18345 /* path= */ void 0, n);
18346 }
18347}
18348
18349function Ch(t, e, n, s, i) {
18350 const r = s && !s.isEmpty(), o = void 0 !== i;
18351 let u = `Function ${e}() called with invalid data`;
18352 n && (u += " (via `toFirestore()`)"), u += ". ";
18353 let c = "";
18354 return (r || o) && (c += " (found", r && (c += ` in field ${s}`), o && (c += ` in document ${i}`),
18355 c += ")"), new U(L.INVALID_ARGUMENT, u + t + c);
18356}
18357
18358/** Checks `haystack` if FieldPath `needle` is present. Runs in O(n). */ function xh(t, e) {
18359 return t.some((t => t.isEqual(e)));
18360}
18361
18362/**
18363 * @license
18364 * Copyright 2020 Google LLC
18365 *
18366 * Licensed under the Apache License, Version 2.0 (the "License");
18367 * you may not use this file except in compliance with the License.
18368 * You may obtain a copy of the License at
18369 *
18370 * http://www.apache.org/licenses/LICENSE-2.0
18371 *
18372 * Unless required by applicable law or agreed to in writing, software
18373 * distributed under the License is distributed on an "AS IS" BASIS,
18374 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18375 * See the License for the specific language governing permissions and
18376 * limitations under the License.
18377 */
18378/**
18379 * A `DocumentSnapshot` contains data read from a document in your Firestore
18380 * database. The data can be extracted with `.data()` or `.get(<field>)` to
18381 * get a specific field.
18382 *
18383 * For a `DocumentSnapshot` that points to a non-existing document, any data
18384 * access will return 'undefined'. You can use the `exists()` method to
18385 * explicitly verify a document's existence.
18386 */ class Nh {
18387 // Note: This class is stripped down version of the DocumentSnapshot in
18388 // the legacy SDK. The changes are:
18389 // - No support for SnapshotMetadata.
18390 // - No support for SnapshotOptions.
18391 /** @hideconstructor protected */
18392 constructor(t, e, n, s, i) {
18393 this._firestore = t, this._userDataWriter = e, this._key = n, this._document = s,
18394 this._converter = i;
18395 }
18396 /** Property of the `DocumentSnapshot` that provides the document's ID. */ get id() {
18397 return this._key.path.lastSegment();
18398 }
18399 /**
18400 * The `DocumentReference` for the document included in the `DocumentSnapshot`.
18401 */ get ref() {
18402 return new ta(this._firestore, this._converter, this._key);
18403 }
18404 /**
18405 * Signals whether or not the document at the snapshot's location exists.
18406 *
18407 * @returns true if the document exists.
18408 */ exists() {
18409 return null !== this._document;
18410 }
18411 /**
18412 * Retrieves all fields in the document as an `Object`. Returns `undefined` if
18413 * the document doesn't exist.
18414 *
18415 * @returns An `Object` containing all fields in the document or `undefined`
18416 * if the document doesn't exist.
18417 */ data() {
18418 if (this._document) {
18419 if (this._converter) {
18420 // We only want to use the converter and create a new DocumentSnapshot
18421 // if a converter has been provided.
18422 const t = new kh(this._firestore, this._userDataWriter, this._key, this._document,
18423 /* converter= */ null);
18424 return this._converter.fromFirestore(t);
18425 }
18426 return this._userDataWriter.convertValue(this._document.data.value);
18427 }
18428 }
18429 /**
18430 * Retrieves the field specified by `fieldPath`. Returns `undefined` if the
18431 * document or field doesn't exist.
18432 *
18433 * @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
18434 * field.
18435 * @returns The data at the specified field location or undefined if no such
18436 * field exists in the document.
18437 */
18438 // We are using `any` here to avoid an explicit cast by our users.
18439 // eslint-disable-next-line @typescript-eslint/no-explicit-any
18440 get(t) {
18441 if (this._document) {
18442 const e = this._document.data.field(Oh("DocumentSnapshot.get", t));
18443 if (null !== e) return this._userDataWriter.convertValue(e);
18444 }
18445 }
18446}
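/*
 * Illustrative usage sketch (not part of the bundle's executable code): the
 * base `DocumentSnapshot` surface implemented above (a metadata-aware subclass
 * appears later in the file), as seen through `getDoc()`. Collection and field
 * names are hypothetical.
 *
 *   const snap = await getDoc(doc(db, "users/alice"));
 *   if (snap.exists()) {
 *     console.log(snap.id);                   // "alice"
 *     console.log(snap.data());               // all fields as a plain object
 *     console.log(snap.get("address.city"));  // a single (possibly nested) field
 *   }
 */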
18447
18448/**
18449 * A `QueryDocumentSnapshot` contains data read from a document in your
18450 * Firestore database as part of a query. The document is guaranteed to exist
18451 * and its data can be extracted with `.data()` or `.get(<field>)` to get a
18452 * specific field.
18453 *
18454 * A `QueryDocumentSnapshot` offers the same API surface as a
18455 * `DocumentSnapshot`. Since query results contain only existing documents, the
18456 * `exists` property will always be true and `data()` will never return
18457 * 'undefined'.
18458 */ class kh extends Nh {
18459 /**
18460 * Retrieves all fields in the document as an `Object`.
18461 *
18462 * @override
18463 * @returns An `Object` containing all fields in the document.
18464 */
18465 data() {
18466 return super.data();
18467 }
18468}
18469
18470/**
18471 * Helper that calls `fromDotSeparatedString()` but wraps any error thrown.
18472 */ function Oh(t, e) {
18473 return "string" == typeof e ? Dh(t, e) : e instanceof sh ? e._internalPath : e._delegate._internalPath;
18474}
18475
18476/**
18477 * @license
18478 * Copyright 2020 Google LLC
18479 *
18480 * Licensed under the Apache License, Version 2.0 (the "License");
18481 * you may not use this file except in compliance with the License.
18482 * You may obtain a copy of the License at
18483 *
18484 * http://www.apache.org/licenses/LICENSE-2.0
18485 *
18486 * Unless required by applicable law or agreed to in writing, software
18487 * distributed under the License is distributed on an "AS IS" BASIS,
18488 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18489 * See the License for the specific language governing permissions and
18490 * limitations under the License.
18491 */ function Mh(t) {
18492 if ("L" /* Last */ === t.limitType && 0 === t.explicitOrderBy.length) throw new U(L.UNIMPLEMENTED, "limitToLast() queries require specifying at least one orderBy() clause");
18493}
18494
18495/**
18496 * A `QueryConstraint` is used to narrow the set of documents returned by a
18497 * Firestore query. `QueryConstraint`s are created by invoking {@link where},
18498 * {@link orderBy}, {@link (startAt:1)}, {@link (startAfter:1)},
18499 * {@link (endBefore:1)}, {@link (endAt:1)}, {@link limit} or {@link limitToLast} and
18500 * can then be passed to {@link query} to create a new query instance that
18501 * also contains this `QueryConstraint`.
18502 */ class Fh {}
18503
18504/**
18505 * Creates a new immutable instance of {@link Query} that is extended to also include
18506 * additional query constraints.
18507 *
18508 * @param query - The {@link Query} instance to use as a base for the new constraints.
18509 * @param queryConstraints - The list of {@link QueryConstraint}s to apply.
18510 * @throws if any of the provided query constraints cannot be combined with the
18511 * existing or new constraints.
18512 */ function $h(t, ...e) {
18513 for (const n of e) t = n._apply(t);
18514 return t;
18515}
18516
18517class Bh extends Fh {
18518 constructor(t, e, n) {
18519 super(), this.ma = t, this.ga = e, this.ya = n, this.type = "where";
18520 }
18521 _apply(t) {
18522 const e = dh(t.firestore), n = function(t, e, n, s, i, r, o) {
18523 let u;
18524 if (i.isKeyField()) {
18525 if ("array-contains" /* ARRAY_CONTAINS */ === r || "array-contains-any" /* ARRAY_CONTAINS_ANY */ === r) throw new U(L.INVALID_ARGUMENT, `Invalid Query. You can't perform '${r}' queries on documentId().`);
18526 if ("in" /* IN */ === r || "not-in" /* NOT_IN */ === r) {
18527 tl(o, r);
18528 const e = [];
18529 for (const n of o) e.push(Zh(s, t, n));
18530 u = {
18531 arrayValue: {
18532 values: e
18533 }
18534 };
18535 } else u = Zh(s, t, o);
18536 } else "in" /* IN */ !== r && "not-in" /* NOT_IN */ !== r && "array-contains-any" /* ARRAY_CONTAINS_ANY */ !== r || tl(o, r),
18537 u = Ah(n, e, o,
18538 /* allowArrays= */ "in" /* IN */ === r || "not-in" /* NOT_IN */ === r);
18539 const c = Ue.create(i, r, u);
18540 return function(t, e) {
18541 if (e.dt()) {
18542 const n = un(t);
18543 if (null !== n && !n.isEqual(e.field)) throw new U(L.INVALID_ARGUMENT, `Invalid query. All where filters with an inequality (<, <=, !=, not-in, >, or >=) must be on the same field. But you have inequality filters on '${n.toString()}' and '${e.field.toString()}'`);
18544 const s = on(t);
18545 null !== s && el(t, e.field, s);
18546 }
18547 const n = function(t, e) {
18548 for (const n of t.filters) if (e.indexOf(n.op) >= 0) return n.op;
18549 return null;
18550 }(t,
18551 /**
18552 * Given an operator, returns the set of operators that cannot be used with it.
18553 *
18554 * Operators in a query must adhere to the following set of rules:
18555 * 1. Only one array operator is allowed.
18556 * 2. Only one disjunctive operator is allowed.
18557 * 3. `NOT_EQUAL` cannot be used with another `NOT_EQUAL` operator.
18558 * 4. `NOT_IN` cannot be used with array, disjunctive, or `NOT_EQUAL` operators.
18559 *
18560 * Array operators: `ARRAY_CONTAINS`, `ARRAY_CONTAINS_ANY`
18561 * Disjunctive operators: `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`
18562 */
18563 function(t) {
18564 switch (t) {
18565 case "!=" /* NOT_EQUAL */ :
18566 return [ "!=" /* NOT_EQUAL */ , "not-in" /* NOT_IN */ ];
18567
18568 case "array-contains" /* ARRAY_CONTAINS */ :
18569 return [ "array-contains" /* ARRAY_CONTAINS */ , "array-contains-any" /* ARRAY_CONTAINS_ANY */ , "not-in" /* NOT_IN */ ];
18570
18571 case "in" /* IN */ :
18572 return [ "array-contains-any" /* ARRAY_CONTAINS_ANY */ , "in" /* IN */ , "not-in" /* NOT_IN */ ];
18573
18574 case "array-contains-any" /* ARRAY_CONTAINS_ANY */ :
18575 return [ "array-contains" /* ARRAY_CONTAINS */ , "array-contains-any" /* ARRAY_CONTAINS_ANY */ , "in" /* IN */ , "not-in" /* NOT_IN */ ];
18576
18577 case "not-in" /* NOT_IN */ :
18578 return [ "array-contains" /* ARRAY_CONTAINS */ , "array-contains-any" /* ARRAY_CONTAINS_ANY */ , "in" /* IN */ , "not-in" /* NOT_IN */ , "!=" /* NOT_EQUAL */ ];
18579
18580 default:
18581 return [];
18582 }
18583 }(e.op));
18584 if (null !== n)
18585 // Special case when it's a duplicate op to give a slightly clearer error message.
18586 throw n === e.op ? new U(L.INVALID_ARGUMENT, `Invalid query. You cannot use more than one '${e.op.toString()}' filter.`) : new U(L.INVALID_ARGUMENT, `Invalid query. You cannot use '${e.op.toString()}' filters with '${n.toString()}' filters.`);
18587 }(t, c), c;
18588 }(t._query, "where", e, t.firestore._databaseId, this.ma, this.ga, this.ya);
18589 return new ea(t.firestore, t.converter, function(t, e) {
18590 const n = t.filters.concat([ e ]);
18591 return new en(t.path, t.collectionGroup, t.explicitOrderBy.slice(), n, t.limit, t.limitType, t.startAt, t.endAt);
18592 }(t._query, n));
18593 }
18594}
18595
18596/**
18597 * Creates a {@link QueryConstraint} that enforces that documents must contain the
18598 * specified field and that the value should satisfy the relation constraint
18599 * provided.
18600 *
18601 * @param fieldPath - The path to compare
18602 * @param opStr - The operation string (e.g "&lt;", "&lt;=", "==", "&gt;",
18603 * "&gt;=", "!=").
18604 * @param value - The value for comparison
18605 * @returns The created {@link Query}.
18606 */ function Lh(t, e, n) {
18607 const s = e, i = Oh("where", t);
18608 return new Bh(i, s, n);
18609}
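/*
 * Illustrative usage sketch (not part of the bundle's executable code):
 * combining `where()` constraints with `query()`. Note the operator rules
 * enforced above: at most one array operator, at most one disjunctive
 * operator, and all inequality filters on the same field. Collection and
 * field names are hypothetical.
 *
 *   const q = query(collection(db, "cities"),
 *                   where("population", ">", 100000),
 *                   where("regions", "array-contains", "west_coast"));
 *   const snap = await getDocs(q);
 */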
18610
18611class Uh extends Fh {
18612 constructor(t, e) {
18613 super(), this.ma = t, this.pa = e, this.type = "orderBy";
18614 }
18615 _apply(t) {
18616 const e = function(t, e, n) {
18617 if (null !== t.startAt) throw new U(L.INVALID_ARGUMENT, "Invalid query. You must not call startAt() or startAfter() before calling orderBy().");
18618 if (null !== t.endAt) throw new U(L.INVALID_ARGUMENT, "Invalid query. You must not call endAt() or endBefore() before calling orderBy().");
18619 const s = new Ye(e, n);
18620 return function(t, e) {
18621 if (null === on(t)) {
18622 // This is the first order by. It must match any inequality.
18623 const n = un(t);
18624 null !== n && el(t, n, e.field);
18625 }
18626 }(t, s), s;
18627 }
18628 /**
18629 * Create a `Bound` from a query and a document.
18630 *
18631 * Note that the `Bound` will always include the key of the document
18632 * and so only the provided document will compare equal to the returned
18633 * position.
18634 *
18635 * Will throw if the document does not contain all fields of the order by
18636 * of the query or if any of the fields in the order by are an uncommitted
18637 * server timestamp.
18638 */ (t._query, this.ma, this.pa);
18639 return new ea(t.firestore, t.converter, function(t, e) {
18640 // TODO(dimond): validate that orderBy does not list the same key twice.
18641 const n = t.explicitOrderBy.concat([ e ]);
18642 return new en(t.path, t.collectionGroup, n, t.filters.slice(), t.limit, t.limitType, t.startAt, t.endAt);
18643 }(t._query, e));
18644 }
18645}
18646
18647/**
18648 * Creates a {@link QueryConstraint} that sorts the query result by the
18649 * specified field, optionally in descending order instead of ascending.
18650 *
18651 * @param fieldPath - The field to sort by.
18652 * @param directionStr - Optional direction to sort by ('asc' or 'desc'). If
18653 * not specified, order will be ascending.
18654 * @returns The created {@link Query}.
18655 */ function qh(t, e = "asc") {
18656 const n = e, s = Oh("orderBy", t);
18657 return new Uh(s, n);
18658}
18659
18660class Kh extends Fh {
18661 constructor(t, e, n) {
18662 super(), this.type = t, this.Ia = e, this.Ta = n;
18663 }
18664 _apply(t) {
18665 return new ea(t.firestore, t.converter, ln(t._query, this.Ia, this.Ta));
18666 }
18667}
18668
18669/**
18670 * Creates a {@link QueryConstraint} that only returns the first matching documents.
18671 *
18672 * @param limit - The maximum number of items to return.
18673 * @returns The created {@link Query}.
18674 */ function Gh(t) {
18675 return Hc("limit", t), new Kh("limit", t, "F" /* First */);
18676}
18677
18678/**
18679 * Creates a {@link QueryConstraint} that only returns the last matching documents.
18680 *
18681 * You must specify at least one `orderBy` clause for `limitToLast` queries,
18682 * otherwise an exception will be thrown during execution.
18683 *
18684 * @param limit - The maximum number of items to return.
18685 * @returns The created {@link Query}.
18686 */ function Qh(t) {
18687 return Hc("limitToLast", t), new Kh("limitToLast", t, "L" /* Last */);
18688}
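/*
 * Illustrative usage sketch (not part of the bundle's executable code):
 * `limit()` returns the first N results, while `limitToLast()` returns the
 * last N and therefore requires at least one `orderBy()` clause (see the
 * validation in `Mh` above). Collection and field names are hypothetical.
 *
 *   const firstTen = query(collection(db, "scores"), orderBy("points"), limit(10));
 *   const lastTen  = query(collection(db, "scores"), orderBy("points"), limitToLast(10));
 */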
18689
18690class jh extends Fh {
18691 constructor(t, e, n) {
18692 super(), this.type = t, this.Ea = e, this.Aa = n;
18693 }
18694 _apply(t) {
18695 const e = Xh(t, this.type, this.Ea, this.Aa);
18696 return new ea(t.firestore, t.converter, function(t, e) {
18697 return new en(t.path, t.collectionGroup, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, e, t.endAt);
18698 }(t._query, e));
18699 }
18700}
18701
18702function Wh(...t) {
18703 return new jh("startAt", t,
18704 /*inclusive=*/ !0);
18705}
18706
18707function zh(...t) {
18708 return new jh("startAfter", t,
18709 /*inclusive=*/ !1);
18710}
18711
18712class Hh extends Fh {
18713 constructor(t, e, n) {
18714 super(), this.type = t, this.Ea = e, this.Aa = n;
18715 }
18716 _apply(t) {
18717 const e = Xh(t, this.type, this.Ea, this.Aa);
18718 return new ea(t.firestore, t.converter, function(t, e) {
18719 return new en(t.path, t.collectionGroup, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, t.startAt, e);
18720 }(t._query, e));
18721 }
18722}
18723
18724function Jh(...t) {
18725 return new Hh("endBefore", t,
18726 /*inclusive=*/ !1);
18727}
18728
18729function Yh(...t) {
18730 return new Hh("endAt", t, /*inclusive=*/ !0);
18731}
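/*
 * Illustrative usage sketch (not part of the bundle's executable code): cursor
 * constraints built from either raw field values or a `DocumentSnapshot`
 * (both cases are handled by the `Xh` helper below). Collection and field
 * names are hypothetical.
 *
 *   const pageOne = query(collection(db, "cities"), orderBy("population"), limit(25));
 *   const firstSnap = await getDocs(pageOne);
 *   const lastDoc = firstSnap.docs[firstSnap.docs.length - 1];
 *
 *   // Start the next page after the last document of the previous one.
 *   const pageTwo = query(collection(db, "cities"), orderBy("population"),
 *                         startAfter(lastDoc), limit(25));
 *
 *   // Or bound by a field value instead of a snapshot.
 *   const upTo1M = query(collection(db, "cities"), orderBy("population"), endAt(1000000));
 */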
18732
18733/** Helper function to create a bound from a document or fields */ function Xh(t, e, n, s) {
18734 if (n[0] = _(n[0]), n[0] instanceof Nh) return function(t, e, n, s, i) {
18735 if (!s) throw new U(L.NOT_FOUND, `Can't use a DocumentSnapshot that doesn't exist for ${n}().`);
18736 const r = [];
18737 // Because people expect to continue/end a query at the exact document
18738 // provided, we need to use the implicit sort order rather than the explicit
18739 // sort order, because it's guaranteed to contain the document key. That way
18740 // the position becomes unambiguous and the query continues/ends exactly at
18741 // the provided document. Without the key (by using the explicit sort
18742 // orders), multiple documents could match the position, yielding duplicate
18743 // results.
18744 for (const n of an(t)) if (n.field.isKeyField()) r.push(ge(e, s.key)); else {
18745 const t = s.data.field(n.field);
18746 if (te(t)) throw new U(L.INVALID_ARGUMENT, 'Invalid query. You are trying to start or end a query using a document for which the field "' + n.field + '" is an uncommitted server timestamp. (Since the value of this field is unknown, you cannot start/end a query with it.)');
18747 if (null === t) {
18748 const t = n.field.canonicalString();
18749 throw new U(L.INVALID_ARGUMENT, `Invalid query. You are trying to start or end a query using a document for which the field '${t}' (used as the orderBy) does not exist.`);
18750 }
18751 r.push(t);
18752 }
18753 return new Je(r, i);
18754 }
18755 /**
18756 * Converts a list of field values to a `Bound` for the given query.
18757 */ (t._query, t.firestore._databaseId, e, n[0]._document, s);
18758 {
18759 const i = dh(t.firestore);
18760 return function(t, e, n, s, i, r) {
18761 // Use explicit order by's because it has to match the query the user made
18762 const o = t.explicitOrderBy;
18763 if (i.length > o.length) throw new U(L.INVALID_ARGUMENT, `Too many arguments provided to ${s}(). The number of arguments must be less than or equal to the number of orderBy() clauses`);
18764 const u = [];
18765 for (let r = 0; r < i.length; r++) {
18766 const c = i[r];
18767 if (o[r].field.isKeyField()) {
18768 if ("string" != typeof c) throw new U(L.INVALID_ARGUMENT, `Invalid query. Expected a string for document ID in ${s}(), but got a ${typeof c}`);
18769 if (!cn(t) && -1 !== c.indexOf("/")) throw new U(L.INVALID_ARGUMENT, `Invalid query. When querying a collection and ordering by documentId(), the value passed to ${s}() must be a plain document ID, but '${c}' contains a slash.`);
18770 const n = t.path.child(ot.fromString(c));
18771 if (!at.isDocumentKey(n)) throw new U(L.INVALID_ARGUMENT, `Invalid query. When querying a collection group and ordering by documentId(), the value passed to ${s}() must result in a valid document path, but '${n}' is not because it contains an odd number of segments.`);
18772 const i = new at(n);
18773 u.push(ge(e, i));
18774 } else {
18775 const t = Ah(n, s, c);
18776 u.push(t);
18777 }
18778 }
18779 return new Je(u, r);
18780 }
18781 /**
18782 * Parses the given `documentIdValue` into a `ReferenceValue`, throwing
18783 * appropriate errors if the value is anything other than a `DocumentReference`
18784 * or `string`, or if the string is malformed.
18785 */ (t._query, t.firestore._databaseId, i, e, n, s);
18786 }
18787}
18788
18789function Zh(t, e, n) {
18790 if ("string" == typeof (n = _(n))) {
18791 if ("" === n) throw new U(L.INVALID_ARGUMENT, "Invalid query. When querying with documentId(), you must provide a valid document ID, but it was an empty string.");
18792 if (!cn(e) && -1 !== n.indexOf("/")) throw new U(L.INVALID_ARGUMENT, `Invalid query. When querying a collection by documentId(), you must provide a plain document ID, but '${n}' contains a '/' character.`);
18793 const s = e.path.child(ot.fromString(n));
18794 if (!at.isDocumentKey(s)) throw new U(L.INVALID_ARGUMENT, `Invalid query. When querying a collection group by documentId(), the value provided must result in a valid document path, but '${s}' is not because it has an odd number of segments (${s.length}).`);
18795 return ge(t, new at(s));
18796 }
18797 if (n instanceof ta) return ge(t, n._key);
18798 throw new U(L.INVALID_ARGUMENT, `Invalid query. When querying with documentId(), you must provide a valid string or a DocumentReference, but it was: ${Wc(n)}.`);
18799}
18800
18801/**
18802 * Validates that the value passed into a disjunctive filter satisfies all
18803 * array requirements.
18804 */ function tl(t, e) {
18805 if (!Array.isArray(t) || 0 === t.length) throw new U(L.INVALID_ARGUMENT, `Invalid Query. A non-empty array is required for '${e.toString()}' filters.`);
18806 if (t.length > 10) throw new U(L.INVALID_ARGUMENT, `Invalid Query. '${e.toString()}' filters support a maximum of 10 elements in the value array.`);
18807}
18808
18809function el(t, e, n) {
18810 if (!n.isEqual(e)) throw new U(L.INVALID_ARGUMENT, `Invalid query. You have a where filter with an inequality (<, <=, !=, not-in, >, or >=) on field '${e.toString()}' and so you must also use '${e.toString()}' as your first argument to orderBy(), but your first orderBy() is on field '${n.toString()}' instead.`);
18811}
18812
18813/**
18814 * @license
18815 * Copyright 2020 Google LLC
18816 *
18817 * Licensed under the Apache License, Version 2.0 (the "License");
18818 * you may not use this file except in compliance with the License.
18819 * You may obtain a copy of the License at
18820 *
18821 * http://www.apache.org/licenses/LICENSE-2.0
18822 *
18823 * Unless required by applicable law or agreed to in writing, software
18824 * distributed under the License is distributed on an "AS IS" BASIS,
18825 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18826 * See the License for the specific language governing permissions and
18827 * limitations under the License.
18828 */
18829/**
18830 * Converts Firestore's internal types to the JavaScript types that we expose
18831 * to the user.
18832 *
18833 * @internal
18834 */ class nl {
18835 convertValue(t, e = "none") {
18836 switch (he(t)) {
18837 case 0 /* NullValue */ :
18838 return null;
18839
18840 case 1 /* BooleanValue */ :
18841 return t.booleanValue;
18842
18843 case 2 /* NumberValue */ :
18844 return Xt(t.integerValue || t.doubleValue);
18845
18846 case 3 /* TimestampValue */ :
18847 return this.convertTimestamp(t.timestampValue);
18848
18849 case 4 /* ServerTimestampValue */ :
18850 return this.convertServerTimestamp(t, e);
18851
18852 case 5 /* StringValue */ :
18853 return t.stringValue;
18854
18855 case 6 /* BlobValue */ :
18856 return this.convertBytes(Zt(t.bytesValue));
18857
18858 case 7 /* RefValue */ :
18859 return this.convertReference(t.referenceValue);
18860
18861 case 8 /* GeoPointValue */ :
18862 return this.convertGeoPoint(t.geoPointValue);
18863
18864 case 9 /* ArrayValue */ :
18865 return this.convertArray(t.arrayValue, e);
18866
18867 case 10 /* ObjectValue */ :
18868 return this.convertObject(t.mapValue, e);
18869
18870 default:
18871 throw M();
18872 }
18873 }
18874 convertObject(t, e) {
18875 const n = {};
18876 return $t(t.fields, ((t, s) => {
18877 n[t] = this.convertValue(s, e);
18878 })), n;
18879 }
18880 convertGeoPoint(t) {
18881 return new oh(Xt(t.latitude), Xt(t.longitude));
18882 }
18883 convertArray(t, e) {
18884 return (t.values || []).map((t => this.convertValue(t, e)));
18885 }
18886 convertServerTimestamp(t, e) {
18887 switch (e) {
18888 case "previous":
18889 const n = ee(t);
18890 return null == n ? null : this.convertValue(n, e);
18891
18892 case "estimate":
18893 return this.convertTimestamp(ne(t));
18894
18895 default:
18896 return null;
18897 }
18898 }
18899 convertTimestamp(t) {
18900 const e = Yt(t);
18901 return new st(e.seconds, e.nanos);
18902 }
18903 convertDocumentKey(t, e) {
18904 const n = ot.fromString(t);
18905 F(oi(n));
18906 const s = new ie(n.get(1), n.get(3)), i = new at(n.popFirst(5));
18907 return s.isEqual(e) ||
18908 // TODO(b/64130202): Somehow support foreign references.
18909 N(`Document ${i} contains a document reference within a different database (${s.projectId}/${s.database}) which is not supported. It will be treated as a reference in the current database (${e.projectId}/${e.database}) instead.`),
18910 i;
18911 }
18912}
18913
18914/**
18915 * @license
18916 * Copyright 2020 Google LLC
18917 *
18918 * Licensed under the Apache License, Version 2.0 (the "License");
18919 * you may not use this file except in compliance with the License.
18920 * You may obtain a copy of the License at
18921 *
18922 * http://www.apache.org/licenses/LICENSE-2.0
18923 *
18924 * Unless required by applicable law or agreed to in writing, software
18925 * distributed under the License is distributed on an "AS IS" BASIS,
18926 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18927 * See the License for the specific language governing permissions and
18928 * limitations under the License.
18929 */
18930/**
18931 * Converts custom model object of type T into `DocumentData` by applying the
18932 * converter if it exists.
18933 *
18934 * This function is used when converting user objects to `DocumentData`
18935 * because we want to provide the user with a more specific error message if
18936 * their `set()` call fails due to invalid data originating from a `toFirestore()`
18937 * call.
18938 */ function sl(t, e, n) {
18939 let s;
18940 // Cast to `any` in order to satisfy the union type constraint on
18941 // toFirestore().
18942 // eslint-disable-next-line @typescript-eslint/no-explicit-any
18943 return s = t ? n && (n.merge || n.mergeFields) ? t.toFirestore(e, n) : t.toFirestore(e) : e,
18944 s;
18945}
18946
18947class il extends nl {
18948 constructor(t) {
18949 super(), this.firestore = t;
18950 }
18951 convertBytes(t) {
18952 return new nh(t);
18953 }
18954 convertReference(t) {
18955 const e = this.convertDocumentKey(t, this.firestore._databaseId);
18956 return new ta(this.firestore, /* converter= */ null, e);
18957 }
18958}
18959
18960/**
18961 * @license
18962 * Copyright 2020 Google LLC
18963 *
18964 * Licensed under the Apache License, Version 2.0 (the "License");
18965 * you may not use this file except in compliance with the License.
18966 * You may obtain a copy of the License at
18967 *
18968 * http://www.apache.org/licenses/LICENSE-2.0
18969 *
18970 * Unless required by applicable law or agreed to in writing, software
18971 * distributed under the License is distributed on an "AS IS" BASIS,
18972 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18973 * See the License for the specific language governing permissions and
18974 * limitations under the License.
18975 */
18976/**
18977 * Metadata about a snapshot, describing the state of the snapshot.
18978 */ class rl {
18979 /** @hideconstructor */
18980 constructor(t, e) {
18981 this.hasPendingWrites = t, this.fromCache = e;
18982 }
18983 /**
18984 * Returns true if this `SnapshotMetadata` is equal to the provided one.
18985 *
18986 * @param other - The `SnapshotMetadata` to compare against.
18987 * @returns true if this `SnapshotMetadata` is equal to the provided one.
18988 */ isEqual(t) {
18989 return this.hasPendingWrites === t.hasPendingWrites && this.fromCache === t.fromCache;
18990 }
18991}
18992
18993/**
18994 * A `DocumentSnapshot` contains data read from a document in your Firestore
18995 * database. The data can be extracted with `.data()` or `.get(<field>)` to
18996 * get a specific field.
18997 *
18998 * For a `DocumentSnapshot` that points to a non-existing document, any data
18999 * access will return 'undefined'. You can use the `exists()` method to
19000 * explicitly verify a document's existence.
19001 */ class ol extends Nh {
19002 /** @hideconstructor protected */
19003 constructor(t, e, n, s, i, r) {
19004 super(t, e, n, s, r), this._firestore = t, this._firestoreImpl = t, this.metadata = i;
19005 }
19006 /**
19007 * Returns whether or not the data exists. True if the document exists.
19008 */ exists() {
19009 return super.exists();
19010 }
19011 /**
19012 * Retrieves all fields in the document as an `Object`. Returns `undefined` if
19013 * the document doesn't exist.
19014 *
19015 * By default, `serverTimestamp()` values that have not yet been
19016 * set to their final value will be returned as `null`. You can override
19017 * this by passing an options object.
19018 *
19019 * @param options - An options object to configure how data is retrieved from
19020 * the snapshot (for example the desired behavior for server timestamps that
19021 * have not yet been set to their final value).
19022 * @returns An `Object` containing all fields in the document or `undefined` if
19023 * the document doesn't exist.
19024 */ data(t = {}) {
19025 if (this._document) {
19026 if (this._converter) {
19027 // We only want to use the converter and create a new DocumentSnapshot
19028 // if a converter has been provided.
19029 const e = new ul(this._firestore, this._userDataWriter, this._key, this._document, this.metadata,
19030 /* converter= */ null);
19031 return this._converter.fromFirestore(e, t);
19032 }
19033 return this._userDataWriter.convertValue(this._document.data.value, t.serverTimestamps);
19034 }
19035 }
19036 /**
19037 * Retrieves the field specified by `fieldPath`. Returns `undefined` if the
19038 * document or field doesn't exist.
19039 *
19040 * By default, a `serverTimestamp()` that has not yet been set to
19041 * its final value will be returned as `null`. You can override this by
19042 * passing an options object.
19043 *
19044 * @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
19045 * field.
19046 * @param options - An options object to configure how the field is retrieved
19047 * from the snapshot (for example the desired behavior for server timestamps
19048 * that have not yet been set to their final value).
19049 * @returns The data at the specified field location or undefined if no such
19050 * field exists in the document.
19051 */
19052 // We are using `any` here to avoid an explicit cast by our users.
19053 // eslint-disable-next-line @typescript-eslint/no-explicit-any
19054 get(t, e = {}) {
19055 if (this._document) {
19056 const n = this._document.data.field(Oh("DocumentSnapshot.get", t));
19057 if (null !== n) return this._userDataWriter.convertValue(n, e.serverTimestamps);
19058 }
19059 }
19060}
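/*
 * Usage sketch (editor's addition, not part of the generated bundle): reading
 * fields off a `DocumentSnapshot` via the public `firebase/firestore` entry
 * point. The "cities/SF" path and field names are illustrative.
 *
 *   import { getFirestore, doc, getDoc } from "firebase/firestore";
 *
 *   const db = getFirestore();
 *   const snapshot = await getDoc(doc(db, "cities", "SF"));
 *   if (snapshot.exists()) {
 *     const all = snapshot.data();                    // every field
 *     const population = snapshot.get("population");  // one field by path
 *     const estimated = snapshot.data({ serverTimestamps: "estimate" });
 *     console.log(all, population, estimated);
 *   }
 */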
19061
19062/**
19063 * A `QueryDocumentSnapshot` contains data read from a document in your
19064 * Firestore database as part of a query. The document is guaranteed to exist
19065 * and its data can be extracted with `.data()` or `.get(<field>)` to get a
19066 * specific field.
19067 *
19068 * A `QueryDocumentSnapshot` offers the same API surface as a
19069 * `DocumentSnapshot`. Since query results contain only existing documents, the
19070 * `exists` property will always be true and `data()` will never return
19071 * 'undefined'.
19072 */ class ul extends ol {
19073 /**
19074 * Retrieves all fields in the document as an `Object`.
19075 *
19076 * By default, `serverTimestamp()` values that have not yet been
19077 * set to their final value will be returned as `null`. You can override
19078 * this by passing an options object.
19079 *
19080 * @override
19081 * @param options - An options object to configure how data is retrieved from
19082 * the snapshot (for example the desired behavior for server timestamps that
19083 * have not yet been set to their final value).
19084 * @returns An `Object` containing all fields in the document.
19085 */
19086 data(t = {}) {
19087 return super.data(t);
19088 }
19089}
19090
19091/**
19092 * A `QuerySnapshot` contains zero or more `DocumentSnapshot` objects
19093 * representing the results of a query. The documents can be accessed as an
19094 * array via the `docs` property or enumerated using the `forEach` method. The
19095 * number of documents can be determined via the `empty` and `size`
19096 * properties.
19097 */ class cl {
19098 /** @hideconstructor */
19099 constructor(t, e, n, s) {
19100 this._firestore = t, this._userDataWriter = e, this._snapshot = s, this.metadata = new rl(s.hasPendingWrites, s.fromCache),
19101 this.query = n;
19102 }
19103 /** An array of all the documents in the `QuerySnapshot`. */ get docs() {
19104 const t = [];
19105 return this.forEach((e => t.push(e))), t;
19106 }
19107 /** The number of documents in the `QuerySnapshot`. */ get size() {
19108 return this._snapshot.docs.size;
19109 }
19110 /** True if there are no documents in the `QuerySnapshot`. */ get empty() {
19111 return 0 === this.size;
19112 }
19113 /**
19114 * Enumerates all of the documents in the `QuerySnapshot`.
19115 *
19116 * @param callback - A callback to be called with a `QueryDocumentSnapshot` for
19117 * each document in the snapshot.
19118 * @param thisArg - The `this` binding for the callback.
19119 */ forEach(t, e) {
19120 this._snapshot.docs.forEach((n => {
19121 t.call(e, new ul(this._firestore, this._userDataWriter, n.key, n, new rl(this._snapshot.mutatedKeys.has(n.key), this._snapshot.fromCache), this.query.converter));
19122 }));
19123 }
19124 /**
19125 * Returns an array of the document changes since the last snapshot. If this
19126 * is the first snapshot, all documents will be in the list as 'added'
19127 * changes.
19128 *
19129 * @param options - `SnapshotListenOptions` that control whether metadata-only
19130 * changes (i.e. only `DocumentSnapshot.metadata` changed) should trigger
19131 * snapshot events.
19132 */ docChanges(t = {}) {
19133 const e = !!t.includeMetadataChanges;
19134 if (e && this._snapshot.excludesMetadataChanges) throw new U(L.INVALID_ARGUMENT, "To include metadata changes with your document changes, you must also pass { includeMetadataChanges:true } to onSnapshot().");
19135 return this._cachedChanges && this._cachedChangesIncludeMetadataChanges === e || (this._cachedChanges =
19136 /** Calculates the array of `DocumentChange`s for a given `ViewSnapshot`. */
19137 function(t, e) {
19138 if (t._snapshot.oldDocs.isEmpty()) {
19139 let e = 0;
19140 return t._snapshot.docChanges.map((n => {
19141 const s = new ul(t._firestore, t._userDataWriter, n.doc.key, n.doc, new rl(t._snapshot.mutatedKeys.has(n.doc.key), t._snapshot.fromCache), t.query.converter);
19142 return n.doc, {
19143 type: "added",
19144 doc: s,
19145 oldIndex: -1,
19146 newIndex: e++
19147 };
19148 }));
19149 }
19150 {
19151 // A `DocumentSet` that is updated incrementally as changes are applied to use
19152 // to lookup the index of a document.
19153 let n = t._snapshot.oldDocs;
19154 return t._snapshot.docChanges.filter((t => e || 3 /* Metadata */ !== t.type)).map((e => {
19155 const s = new ul(t._firestore, t._userDataWriter, e.doc.key, e.doc, new rl(t._snapshot.mutatedKeys.has(e.doc.key), t._snapshot.fromCache), t.query.converter);
19156 let i = -1, r = -1;
19157 return 0 /* Added */ !== e.type && (i = n.indexOf(e.doc.key), n = n.delete(e.doc.key)),
19158 1 /* Removed */ !== e.type && (n = n.add(e.doc), r = n.indexOf(e.doc.key)), {
19159 type: al(e.type),
19160 doc: s,
19161 oldIndex: i,
19162 newIndex: r
19163 };
19164 }));
19165 }
19166 }(this, e), this._cachedChangesIncludeMetadataChanges = e), this._cachedChanges;
19167 }
19168}
19169
19170function al(t) {
19171 switch (t) {
19172 case 0 /* Added */ :
19173 return "added";
19174
19175 case 2 /* Modified */ :
19176 case 3 /* Metadata */ :
19177 return "modified";
19178
19179 case 1 /* Removed */ :
19180 return "removed";
19181
19182 default:
19183 return M();
19184 }
19185}
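/*
 * Usage sketch (editor's addition, not part of the generated bundle): consuming
 * a `QuerySnapshot` and its `docChanges()` inside an `onSnapshot` listener,
 * using the public API names. The "cities" collection is illustrative.
 *
 *   import { getFirestore, collection, onSnapshot } from "firebase/firestore";
 *
 *   const db = getFirestore();
 *   onSnapshot(collection(db, "cities"), (snapshot) => {
 *     console.log(`size=${snapshot.size} empty=${snapshot.empty}`);
 *     snapshot.forEach((docSnap) => console.log(docSnap.id, docSnap.data()));
 *     for (const change of snapshot.docChanges()) {
 *       // change.type is "added" | "modified" | "removed" (see the mapping above)
 *       console.log(change.type, change.oldIndex, change.newIndex);
 *     }
 *   });
 */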
19186
19187// TODO(firestoreexp): Add tests for snapshotEqual with different snapshot
19188// metadata
19189/**
19190 * Returns true if the provided snapshots are equal.
19191 *
19192 * @param left - A snapshot to compare.
19193 * @param right - A snapshot to compare.
19194 * @returns true if the snapshots are equal.
19195 */ function hl(t, e) {
19196 return t instanceof ol && e instanceof ol ? t._firestore === e._firestore && t._key.isEqual(e._key) && (null === t._document ? null === e._document : t._document.isEqual(e._document)) && t._converter === e._converter : t instanceof cl && e instanceof cl && (t._firestore === e._firestore && ua(t.query, e.query) && t.metadata.isEqual(e.metadata) && t._snapshot.isEqual(e._snapshot));
19197}
19198
19199/**
19200 * @license
19201 * Copyright 2020 Google LLC
19202 *
19203 * Licensed under the Apache License, Version 2.0 (the "License");
19204 * you may not use this file except in compliance with the License.
19205 * You may obtain a copy of the License at
19206 *
19207 * http://www.apache.org/licenses/LICENSE-2.0
19208 *
19209 * Unless required by applicable law or agreed to in writing, software
19210 * distributed under the License is distributed on an "AS IS" BASIS,
19211 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19212 * See the License for the specific language governing permissions and
19213 * limitations under the License.
19214 */
19215/**
19216 * Reads the document referred to by this `DocumentReference`.
19217 *
19218 * Note: `getDoc()` attempts to provide up-to-date data when possible by waiting
19219 * for data from the server, but it may return cached data or fail if you are
19220 * offline and the server cannot be reached. To specify this behavior, invoke
19221 * {@link getDocFromCache} or {@link getDocFromServer}.
19222 *
19223 * @param reference - The reference of the document to fetch.
19224 * @returns A Promise resolved with a `DocumentSnapshot` containing the
19225 * current document contents.
19226 */ function ll(t) {
19227 t = zc(t, ta);
19228 const e = zc(t.firestore, La);
19229 return Da(Ka(e), t._key).then((n => bl(e, t, n)));
19230}
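/*
 * Usage sketch (editor's addition, not part of the bundle): `getDoc` resolves
 * with server data when possible and may fall back to cached data when
 * offline; `metadata.fromCache` tells you which. Assumes the public
 * `firebase/firestore` entry point and an illustrative document path.
 *
 *   import { getFirestore, doc, getDoc } from "firebase/firestore";
 *
 *   const snap = await getDoc(doc(getFirestore(), "cities", "SF"));
 *   console.log(snap.metadata.fromCache ? "served from cache" : "served from server");
 */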
19231
19232class fl extends nl {
19233 constructor(t) {
19234 super(), this.firestore = t;
19235 }
19236 convertBytes(t) {
19237 return new nh(t);
19238 }
19239 convertReference(t) {
19240 const e = this.convertDocumentKey(t, this.firestore._databaseId);
19241 return new ta(this.firestore, /* converter= */ null, e);
19242 }
19243}
19244
19245/**
19246 * Reads the document referred to by this `DocumentReference` from cache.
19247 * Returns an error if the document is not currently cached.
19248 *
19249 * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
19250 * current document contents.
19251 */ function dl(t) {
19252 t = zc(t, ta);
19253 const e = zc(t.firestore, La), n = Ka(e), s = new fl(e);
19254 return Sa(n, t._key).then((n => new ol(e, s, t._key, n, new rl(null !== n && n.hasLocalMutations,
19255 /* fromCache= */ !0), t.converter)));
19256}
19257
19258/**
19259 * Reads the document referred to by this `DocumentReference` from the server.
19260 * Returns an error if the network is not available.
19261 *
19262 * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
19263 * current document contents.
19264 */ function _l(t) {
19265 t = zc(t, ta);
19266 const e = zc(t.firestore, La);
19267 return Da(Ka(e), t._key, {
19268 source: "server"
19269 }).then((n => bl(e, t, n)));
19270}
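/*
 * Usage sketch (editor's addition, not part of the bundle): forcing the data
 * source explicitly. `getDocFromCache` rejects when the document is not
 * cached, so falling back to the server variant is a common pattern
 * (illustrative path).
 *
 *   import { getFirestore, doc, getDocFromCache, getDocFromServer } from "firebase/firestore";
 *
 *   const ref = doc(getFirestore(), "cities", "SF");
 *   let snap;
 *   try {
 *     snap = await getDocFromCache(ref);
 *   } catch (e) {
 *     snap = await getDocFromServer(ref);
 *   }
 *   console.log(snap.data());
 */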
19271
19272/**
19273 * Executes the query and returns the results as a `QuerySnapshot`.
19274 *
19275 * Note: `getDocs()` attempts to provide up-to-date data when possible by
19276 * waiting for data from the server, but it may return cached data or fail if
19277 * you are offline and the server cannot be reached. To specify this behavior,
19278 * invoke {@link getDocsFromCache} or {@link getDocsFromServer}.
19279 *
19280 * @returns A `Promise` that will be resolved with the results of the query.
19281 */ function wl(t) {
19282 t = zc(t, ea);
19283 const e = zc(t.firestore, La), n = Ka(e), s = new fl(e);
19284 return Mh(t._query), xa(n, t._query).then((n => new cl(e, s, t, n)));
19285}
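/*
 * Usage sketch (editor's addition, not part of the bundle): running a filtered
 * query with `getDocs`. Every snapshot in the result is a
 * `QueryDocumentSnapshot`, so `data()` never returns undefined. The collection
 * and filter are illustrative.
 *
 *   import { getFirestore, collection, query, where, getDocs } from "firebase/firestore";
 *
 *   const q = query(collection(getFirestore(), "cities"), where("state", "==", "CA"));
 *   const querySnapshot = await getDocs(q);
 *   querySnapshot.forEach((docSnap) => console.log(docSnap.id, "=>", docSnap.data()));
 */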
19286
19287/**
19288 * Executes the query and returns the results as a `QuerySnapshot` from cache.
19289 * Returns an empty result set if no documents matching the query are currently cached.
19290 *
19291 * @returns A `Promise` that will be resolved with the results of the query.
19292 */ function ml(t) {
19293 t = zc(t, ea);
19294 const e = zc(t.firestore, La), n = Ka(e), s = new fl(e);
19295 return Ca(n, t._query).then((n => new cl(e, s, t, n)));
19296}
19297
19298/**
19299 * Executes the query and returns the results as a `QuerySnapshot` from the
19300 * server. Returns an error if the network is not available.
19301 *
19302 * @returns A `Promise` that will be resolved with the results of the query.
19303 */ function gl(t) {
19304 t = zc(t, ea);
19305 const e = zc(t.firestore, La), n = Ka(e), s = new fl(e);
19306 return xa(n, t._query, {
19307 source: "server"
19308 }).then((n => new cl(e, s, t, n)));
19309}
19310
19311function yl(t, e, n) {
19312 t = zc(t, ta);
19313 const s = zc(t.firestore, La), i = sl(t.converter, e, n);
19314 return Rl(s, [ _h(dh(s), "setDoc", t._key, i, null !== t.converter, n).toMutation(t._key, Fn.none()) ]);
19315}
19316
19317function pl(t, e, n, ...s) {
19318 t = zc(t, ta);
19319 const i = zc(t.firestore, La), r = dh(i);
19320 let o;
19321 o = "string" == typeof (
19322 // For Compat types, we have to "extract" the underlying types before
19323 // performing validation.
19324 e = _(e)) || e instanceof sh ? Eh(r, "updateDoc", t._key, e, n, s) : Th(r, "updateDoc", t._key, e);
19325 return Rl(i, [ o.toMutation(t._key, Fn.exists(!0)) ]);
19326}
19327
19328/**
19329 * Deletes the document referred to by the specified `DocumentReference`.
19330 *
19331 * @param reference - A reference to the document to delete.
19332 * @returns A Promise resolved once the document has been successfully
19333 * deleted from the backend (note that it won't resolve while you're offline).
19334 */ function Il(t) {
19335 return Rl(zc(t.firestore, La), [ new Jn(t._key, Fn.none()) ]);
19336}
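/*
 * Usage sketch (editor's addition, not part of the bundle): deleting a
 * document by reference; the promise resolves once the delete reaches the
 * backend (illustrative path).
 *
 *   import { getFirestore, doc, deleteDoc } from "firebase/firestore";
 *
 *   await deleteDoc(doc(getFirestore(), "cities", "SF"));
 */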
19337
19338/**
19339 * Add a new document to the specified `CollectionReference` with the given data,
19340 * assigning it a document ID automatically.
19341 *
19342 * @param reference - A reference to the collection to add this document to.
19343 * @param data - An Object containing the data for the new document.
19344 * @returns A `Promise` resolved with a `DocumentReference` pointing to the
19345 * newly created document after it has been written to the backend (note that it
19346 * won't resolve while you're offline).
19347 */ function Tl(t, e) {
19348 const n = zc(t.firestore, La), s = ra(t), i = sl(t.converter, e);
19349 return Rl(n, [ _h(dh(t.firestore), "addDoc", s._key, i, null !== t.converter, {}).toMutation(s._key, Fn.exists(!1)) ]).then((() => s));
19350}
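/*
 * Usage sketch (editor's addition, not part of the bundle): `addDoc` generates
 * the document ID for you and resolves with a reference to the new document
 * (illustrative collection and fields).
 *
 *   import { getFirestore, collection, addDoc } from "firebase/firestore";
 *
 *   const ref = await addDoc(collection(getFirestore(), "cities"), {
 *     name: "Tokyo",
 *     country: "Japan"
 *   });
 *   console.log("New document ID:", ref.id);
 */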
19351
19352function El(t, ...e) {
19353 var n, s, i;
19354 t = _(t);
19355 let r = {
19356 includeMetadataChanges: !1
19357 }, o = 0;
19358 "object" != typeof e[o] || Fa(e[o]) || (r = e[o], o++);
19359 const u = {
19360 includeMetadataChanges: r.includeMetadataChanges
19361 };
19362 if (Fa(e[o])) {
19363 const t = e[o];
19364 e[o] = null === (n = t.next) || void 0 === n ? void 0 : n.bind(t), e[o + 1] = null === (s = t.error) || void 0 === s ? void 0 : s.bind(t),
19365 e[o + 2] = null === (i = t.complete) || void 0 === i ? void 0 : i.bind(t);
19366 }
19367 let c, a, h;
19368 if (t instanceof ta) a = zc(t.firestore, La), h = sn(t._key.path), c = {
19369 next: n => {
19370 e[o] && e[o](bl(a, t, n));
19371 },
19372 error: e[o + 1],
19373 complete: e[o + 2]
19374 }; else {
19375 const n = zc(t, ea);
19376 a = zc(n.firestore, La), h = n._query;
19377 const s = new fl(a);
19378 c = {
19379 next: t => {
19380 e[o] && e[o](new cl(a, s, n, t));
19381 },
19382 error: e[o + 1],
19383 complete: e[o + 2]
19384 }, Mh(t._query);
19385 }
19386 return function(t, e, n, s) {
19387 const i = new aa(s), r = new Ju(e, i, n);
19388 return t.asyncQueue.enqueueAndForget((async () => Qu(await Pa(t), r))), () => {
19389 i.bc(), t.asyncQueue.enqueueAndForget((async () => ju(await Pa(t), r)));
19390 };
19391 }(Ka(a), h, u, c);
19392}
19393
19394function Al(t, e) {
19395 return Na(Ka(t = zc(t, La)), Fa(e) ? e : {
19396 next: e
19397 });
19398}
19399
19400/**
19401 * Locally writes `mutations` on the async queue.
19402 * @internal
19403 */ function Rl(t, e) {
19404 return function(t, e) {
19405 const n = new q;
19406 return t.asyncQueue.enqueueAndForget((async () => hc(await Ra(t), e, n))), n.promise;
19407 }(Ka(t), e);
19408}
19409
19410/**
19411 * Converts a {@link ViewSnapshot} that contains the single document specified by `ref`
19412 * to a {@link DocumentSnapshot}.
19413 */ function bl(t, e, n) {
19414 const s = n.docs.get(e._key), i = new fl(t);
19415 return new ol(t, i, e._key, s, new rl(n.hasPendingWrites, n.fromCache), e.converter);
19416}
19417
19418/**
19419 * @license
19420 * Copyright 2022 Google LLC
19421 *
19422 * Licensed under the Apache License, Version 2.0 (the "License");
19423 * you may not use this file except in compliance with the License.
19424 * You may obtain a copy of the License at
19425 *
19426 * http://www.apache.org/licenses/LICENSE-2.0
19427 *
19428 * Unless required by applicable law or agreed to in writing, software
19429 * distributed under the License is distributed on an "AS IS" BASIS,
19430 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19431 * See the License for the specific language governing permissions and
19432 * limitations under the License.
19433 */
19434/**
19435 * Compares two `AggregateQuerySnapshot` instances for equality.
19436 *
19437 * Two `AggregateQuerySnapshot` instances are considered "equal" if they have
19438 * underlying queries that compare equal, and the same data.
19439 *
19440 * @param left - The first `AggregateQuerySnapshot` to compare.
19441 * @param right - The second `AggregateQuerySnapshot` to compare.
19442 *
19443 * @returns `true` if the objects are "equal", as defined above, or `false`
19444 * otherwise.
19445 */ function Pl(t, e) {
19446 return ua(t.query, e.query) && w(t.data(), e.data());
19447}
19448
19449/**
19450 * @license
19451 * Copyright 2022 Google LLC
19452 *
19453 * Licensed under the Apache License, Version 2.0 (the "License");
19454 * you may not use this file except in compliance with the License.
19455 * You may obtain a copy of the License at
19456 *
19457 * http://www.apache.org/licenses/LICENSE-2.0
19458 *
19459 * Unless required by applicable law or agreed to in writing, software
19460 * distributed under the License is distributed on an "AS IS" BASIS,
19461 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19462 * See the License for the specific language governing permissions and
19463 * limitations under the License.
19464 */
19465/**
19466 * Calculates the number of documents in the result set of the given query,
19467 * without actually downloading the documents.
19468 *
19469 * Using this function to count the documents is efficient because only the
19470 * final count, not the documents' data, is downloaded. This function can even
19471 * count the documents if the result set would be prohibitively large to
19472 * download entirely (e.g. thousands of documents).
19473 *
19474 * The result received from the server is presented, unaltered, without
19475 * considering any local state. That is, documents in the local cache are not
19476 * taken into consideration, nor are local modifications that have not yet been
19477 * synchronized with the server. Previously-downloaded results, if any, are not
19478 * used: every request using this source necessarily involves a round trip to
19479 * the server.
19480 *
19481 * @param query - The query whose result set size to calculate.
19482 * @returns A Promise that will be resolved with the count; the count can be
19483 * retrieved from `snapshot.data().count`, where `snapshot` is the
19484 * `AggregateQuerySnapshot` to which the returned Promise resolves.
19485 */ function vl(t) {
19486 const e = zc(t.firestore, La);
19487 return function(t, e, n) {
19488 const s = new q;
19489 return t.asyncQueue.enqueueAndForget((async () => {
19490 try {
19491 if (yu(await Aa(t))) {
19492 const i = await ba(t), r = new da(e, i, n).run();
19493 s.resolve(r);
19494 } else s.reject(new U(L.UNAVAILABLE, "Failed to get count result because the client is offline."));
19495 } catch (t) {
19496 s.reject(t);
19497 }
19498 })), s.promise;
19499 }(Ka(e), t, new fl(e));
19500}
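/*
 * Usage sketch (editor's addition, not part of the bundle): counting matching
 * documents without downloading them; the count is read from
 * `snapshot.data().count` (illustrative collection and filter).
 *
 *   import { getFirestore, collection, query, where, getCountFromServer } from "firebase/firestore";
 *
 *   const q = query(collection(getFirestore(), "cities"), where("state", "==", "CA"));
 *   const snapshot = await getCountFromServer(q);
 *   console.log("count:", snapshot.data().count);
 */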
19501
19502/**
19503 * @license
19504 * Copyright 2022 Google LLC
19505 *
19506 * Licensed under the Apache License, Version 2.0 (the "License");
19507 * you may not use this file except in compliance with the License.
19508 * You may obtain a copy of the License at
19509 *
19510 * http://www.apache.org/licenses/LICENSE-2.0
19511 *
19512 * Unless required by applicable law or agreed to in writing, software
19513 * distributed under the License is distributed on an "AS IS" BASIS,
19514 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19515 * See the License for the specific language governing permissions and
19516 * limitations under the License.
19517 */ const Vl = {
19518 maxAttempts: 5
19519};
19520
19521/**
19522 * @license
19523 * Copyright 2020 Google LLC
19524 *
19525 * Licensed under the Apache License, Version 2.0 (the "License");
19526 * you may not use this file except in compliance with the License.
19527 * You may obtain a copy of the License at
19528 *
19529 * http://www.apache.org/licenses/LICENSE-2.0
19530 *
19531 * Unless required by applicable law or agreed to in writing, software
19532 * distributed under the License is distributed on an "AS IS" BASIS,
19533 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19534 * See the License for the specific language governing permissions and
19535 * limitations under the License.
19536 */
19537/**
19538 * A write batch, used to perform multiple writes as a single atomic unit.
19539 *
19540 * A `WriteBatch` object can be acquired by calling {@link writeBatch}. It
19541 * provides methods for adding writes to the write batch. None of the writes
19542 * will be committed (or visible locally) until {@link WriteBatch.commit} is
19543 * called.
19544 */
19545class Sl {
19546 /** @hideconstructor */
19547 constructor(t, e) {
19548 this._firestore = t, this._commitHandler = e, this._mutations = [], this._committed = !1,
19549 this._dataReader = dh(t);
19550 }
19551 set(t, e, n) {
19552 this._verifyNotCommitted();
19553 const s = Dl(t, this._firestore), i = sl(s.converter, e, n), r = _h(this._dataReader, "WriteBatch.set", s._key, i, null !== s.converter, n);
19554 return this._mutations.push(r.toMutation(s._key, Fn.none())), this;
19555 }
19556 update(t, e, n, ...s) {
19557 this._verifyNotCommitted();
19558 const i = Dl(t, this._firestore);
19559 // For Compat types, we have to "extract" the underlying types before
19560 // performing validation.
19561 let r;
19562 return r = "string" == typeof (e = _(e)) || e instanceof sh ? Eh(this._dataReader, "WriteBatch.update", i._key, e, n, s) : Th(this._dataReader, "WriteBatch.update", i._key, e),
19563 this._mutations.push(r.toMutation(i._key, Fn.exists(!0))), this;
19564 }
19565 /**
19566 * Deletes the document referred to by the provided {@link DocumentReference}.
19567 *
19568 * @param documentRef - A reference to the document to be deleted.
19569 * @returns This `WriteBatch` instance. Used for chaining method calls.
19570 */ delete(t) {
19571 this._verifyNotCommitted();
19572 const e = Dl(t, this._firestore);
19573 return this._mutations = this._mutations.concat(new Jn(e._key, Fn.none())), this;
19574 }
19575 /**
19576 * Commits all of the writes in this write batch as a single atomic unit.
19577 *
19578 * The result of these writes will only be reflected in document reads that
19579 * occur after the returned promise resolves. If the client is offline, the
19580 * write fails. If you would like to see local modifications or buffer writes
19581 * until the client is online, use the full Firestore SDK.
19582 *
19583 * @returns A `Promise` resolved once all of the writes in the batch have been
19584 * successfully written to the backend as an atomic unit (note that it won't
19585 * resolve while you're offline).
19586 */ commit() {
19587 return this._verifyNotCommitted(), this._committed = !0, this._mutations.length > 0 ? this._commitHandler(this._mutations) : Promise.resolve();
19588 }
19589 _verifyNotCommitted() {
19590 if (this._committed) throw new U(L.FAILED_PRECONDITION, "A write batch can no longer be used after commit() has been called.");
19591 }
19592}
19593
19594function Dl(t, e) {
19595 if ((t = _(t)).firestore !== e) throw new U(L.INVALID_ARGUMENT, "Provided document reference is from a different Firestore instance.");
19596 return t;
19597}
19598
19599/**
19600 * @license
19601 * Copyright 2020 Google LLC
19602 *
19603 * Licensed under the Apache License, Version 2.0 (the "License");
19604 * you may not use this file except in compliance with the License.
19605 * You may obtain a copy of the License at
19606 *
19607 * http://www.apache.org/licenses/LICENSE-2.0
19608 *
19609 * Unless required by applicable law or agreed to in writing, software
19610 * distributed under the License is distributed on an "AS IS" BASIS,
19611 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19612 * See the License for the specific language governing permissions and
19613 * limitations under the License.
19614 */
19615// TODO(mrschmidt) Consider using `BaseTransaction` as the base class in the
19616// legacy SDK.
19617/**
19618 * A reference to a transaction.
19619 *
19620 * The `Transaction` object passed to a transaction's `updateFunction` provides
19621 * the methods to read and write data within the transaction context. See
19622 * {@link runTransaction}.
19623 */
19624/**
19625 * @license
19626 * Copyright 2020 Google LLC
19627 *
19628 * Licensed under the Apache License, Version 2.0 (the "License");
19629 * you may not use this file except in compliance with the License.
19630 * You may obtain a copy of the License at
19631 *
19632 * http://www.apache.org/licenses/LICENSE-2.0
19633 *
19634 * Unless required by applicable law or agreed to in writing, software
19635 * distributed under the License is distributed on an "AS IS" BASIS,
19636 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19637 * See the License for the specific language governing permissions and
19638 * limitations under the License.
19639 */
19640/**
19641 * A reference to a transaction.
19642 *
19643 * The `Transaction` object passed to a transaction's `updateFunction` provides
19644 * the methods to read and write data within the transaction context. See
19645 * {@link runTransaction}.
19646 */
19647class Cl extends class {
19648 /** @hideconstructor */
19649 constructor(t, e) {
19650 this._firestore = t, this._transaction = e, this._dataReader = dh(t);
19651 }
19652 /**
19653 * Reads the document referenced by the provided {@link DocumentReference}.
19654 *
19655 * @param documentRef - A reference to the document to be read.
19656 * @returns A `DocumentSnapshot` with the read data.
19657 */ get(t) {
19658 const e = Dl(t, this._firestore), n = new il(this._firestore);
19659 return this._transaction.lookup([ e._key ]).then((t => {
19660 if (!t || 1 !== t.length) return M();
19661 const s = t[0];
19662 if (s.isFoundDocument()) return new Nh(this._firestore, n, s.key, s, e.converter);
19663 if (s.isNoDocument()) return new Nh(this._firestore, n, e._key, null, e.converter);
19664 throw M();
19665 }));
19666 }
19667 set(t, e, n) {
19668 const s = Dl(t, this._firestore), i = sl(s.converter, e, n), r = _h(this._dataReader, "Transaction.set", s._key, i, null !== s.converter, n);
19669 return this._transaction.set(s._key, r), this;
19670 }
19671 update(t, e, n, ...s) {
19672 const i = Dl(t, this._firestore);
19673 // For Compat types, we have to "extract" the underlying types before
19674 // performing validation.
19675 let r;
19676 return r = "string" == typeof (e = _(e)) || e instanceof sh ? Eh(this._dataReader, "Transaction.update", i._key, e, n, s) : Th(this._dataReader, "Transaction.update", i._key, e),
19677 this._transaction.update(i._key, r), this;
19678 }
19679 /**
19680 * Deletes the document referred to by the provided {@link DocumentReference}.
19681 *
19682 * @param documentRef - A reference to the document to be deleted.
19683 * @returns This `Transaction` instance. Used for chaining method calls.
19684 */ delete(t) {
19685 const e = Dl(t, this._firestore);
19686 return this._transaction.delete(e._key), this;
19687 }
19688} {
19689 // This class implements the same logic as the Transaction API in the Lite SDK
19690 // but is subclassed in order to return its own DocumentSnapshot types.
19691 /** @hideconstructor */
19692 constructor(t, e) {
19693 super(t, e), this._firestore = t;
19694 }
19695 /**
19696 * Reads the document referenced by the provided {@link DocumentReference}.
19697 *
19698 * @param documentRef - A reference to the document to be read.
19699 * @returns A `DocumentSnapshot` with the read data.
19700 */ get(t) {
19701 const e = Dl(t, this._firestore), n = new fl(this._firestore);
19702 return super.get(t).then((t => new ol(this._firestore, n, e._key, t._document, new rl(
19703 /* hasPendingWrites= */ !1,
19704 /* fromCache= */ !1), e.converter)));
19705 }
19706}
19707
19708/**
19709 * Executes the given `updateFunction` and then attempts to commit the changes
19710 * applied within the transaction. If any document read within the transaction
19711 * has changed, Cloud Firestore retries the `updateFunction`. If it fails to
19712 * commit after 5 attempts, the transaction fails.
19713 *
19714 * The maximum number of writes allowed in a single transaction is 500.
19715 *
19716 * @param firestore - A reference to the Firestore database to run this
19717 * transaction against.
19718 * @param updateFunction - The function to execute within the transaction
19719 * context.
19720 * @param options - An options object to configure the maximum number of attempts to
19721 * commit.
19722 * @returns If the transaction completed successfully or was explicitly aborted
19723 * (the `updateFunction` returned a failed promise), the promise returned by the
19724 * `updateFunction` is returned here. Otherwise, if the transaction failed, a
19725 * rejected promise with the corresponding failure error is returned.
19726 */ function xl(t, e, n) {
19727 t = zc(t, La);
19728 const s = Object.assign(Object.assign({}, Vl), n);
19729 !function(t) {
19730 if (t.maxAttempts < 1) throw new U(L.INVALID_ARGUMENT, "Max attempts must be at least 1");
19731 }(s);
19732 return function(t, e, n) {
19733 const s = new q;
19734 return t.asyncQueue.enqueueAndForget((async () => {
19735 const i = await ba(t);
19736 new wa(t.asyncQueue, i, n, e, s).run();
19737 })), s.promise;
19738 }(Ka(t), (n => e(new Cl(t, n))), s);
19739}
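/*
 * Usage sketch (editor's addition, not part of the bundle): a read-modify-write
 * inside `runTransaction`, with the optional `maxAttempts` setting shown
 * (illustrative document and field).
 *
 *   import { getFirestore, doc, runTransaction } from "firebase/firestore";
 *
 *   const db = getFirestore();
 *   const ref = doc(db, "cities", "SF");
 *   const newPopulation = await runTransaction(db, async (transaction) => {
 *     const snap = await transaction.get(ref);
 *     if (!snap.exists()) throw new Error("Document does not exist");
 *     const next = snap.data().population + 1;
 *     transaction.update(ref, { population: next });
 *     return next;
 *   }, { maxAttempts: 3 });
 *   console.log("population is now", newPopulation);
 */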
19740
19741/**
19742 * @license
19743 * Copyright 2020 Google LLC
19744 *
19745 * Licensed under the Apache License, Version 2.0 (the "License");
19746 * you may not use this file except in compliance with the License.
19747 * You may obtain a copy of the License at
19748 *
19749 * http://www.apache.org/licenses/LICENSE-2.0
19750 *
19751 * Unless required by applicable law or agreed to in writing, software
19752 * distributed under the License is distributed on an "AS IS" BASIS,
19753 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19754 * See the License for the specific language governing permissions and
19755 * limitations under the License.
19756 */
19757/**
19758 * Returns a sentinel for use with {@link @firebase/firestore/lite#(updateDoc:1)} or
19759 * {@link @firebase/firestore/lite#(setDoc:1)} with `{merge: true}` to mark a field for deletion.
19760 */ function Nl() {
19761 return new wh("deleteField");
19762}
19763
19764/**
19765 * Returns a sentinel used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link @firebase/firestore/lite#(updateDoc:1)} to
19766 * include a server-generated timestamp in the written data.
19767 */ function kl() {
19768 return new gh("serverTimestamp");
19769}
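/*
 * Usage sketch (editor's addition, not part of the bundle): using the
 * `serverTimestamp()` and `deleteField()` sentinels with `updateDoc`
 * (illustrative document and fields).
 *
 *   import { getFirestore, doc, updateDoc, serverTimestamp, deleteField } from "firebase/firestore";
 *
 *   await updateDoc(doc(getFirestore(), "cities", "SF"), {
 *     lastUpdated: serverTimestamp(),   // filled in by the backend
 *     obsoleteField: deleteField()      // removed from the document
 *   });
 */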
19770
19771/**
19772 * Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
19773 * @firebase/firestore/lite#(updateDoc:1)} that tells the server to union the given elements with any array
19774 * value that already exists on the server. Each specified element that doesn't
19775 * already exist in the array will be added to the end. If the field being
19776 * modified is not already an array it will be overwritten with an array
19777 * containing exactly the specified elements.
19778 *
19779 * @param elements - The elements to union into the array.
19780 * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
19781 * `updateDoc()`.
19782 */ function Ol(...t) {
19783 // NOTE: We don't actually parse the data until it's used in set() or
19784 // update() since we'd need the Firestore instance to do this.
19785 return new yh("arrayUnion", t);
19786}
19787
19788/**
19789 * Returns a special value that can be used with {@link (setDoc:1)} or {@link
19790 * updateDoc:1} that tells the server to remove the given elements from any
19791 * array value that already exists on the server. All instances of each element
19792 * specified will be removed from the array. If the field being modified is not
19793 * already an array, it will be overwritten with an empty array.
19794 *
19795 * @param elements - The elements to remove from the array.
19796 * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
19797 * `updateDoc()`
19798 */ function Ml(...t) {
19799 // NOTE: We don't actually parse the data until it's used in set() or
19800 // update() since we'd need the Firestore instance to do this.
19801 return new ph("arrayRemove", t);
19802}
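/*
 * Usage sketch (editor's addition, not part of the bundle): adding and removing
 * array elements atomically with the `arrayUnion()` and `arrayRemove()`
 * sentinels (illustrative document and field).
 *
 *   import { getFirestore, doc, updateDoc, arrayUnion, arrayRemove } from "firebase/firestore";
 *
 *   const ref = doc(getFirestore(), "cities", "SF");
 *   await updateDoc(ref, { regions: arrayUnion("north_bay") });   // added only if absent
 *   await updateDoc(ref, { regions: arrayRemove("east_bay") });   // removes all instances
 */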
19803
19804/**
19805 * Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
19806 * @firebase/firestore/lite#(updateDoc:1)} that tells the server to increment the field's current value by
19807 * the given value.
19808 *
19809 * If either the operand or the current field value uses floating point
19810 * precision, all arithmetic follows IEEE 754 semantics. If both values are
19811 * integers, values outside of JavaScript's safe number range
19812 * (`Number.MIN_SAFE_INTEGER` to `Number.MAX_SAFE_INTEGER`) are also subject to
19813 * precision loss. Furthermore, once processed by the Firestore backend, all
19814 * integer operations are capped between -2^63 and 2^63-1.
19815 *
19816 * If the current field value is not of type `number`, or if the field does not
19817 * yet exist, the transformation sets the field to the given value.
19818 *
19819 * @param n - The value to increment by.
19820 * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
19821 * `updateDoc()`
19822 */ function Fl(t) {
19823 return new Ih("increment", t);
19824}
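/*
 * Usage sketch (editor's addition, not part of the bundle): incrementing a
 * numeric field without reading it first; a negative argument decrements
 * (illustrative document and field).
 *
 *   import { getFirestore, doc, updateDoc, increment } from "firebase/firestore";
 *
 *   await updateDoc(doc(getFirestore(), "cities", "SF"), {
 *     population: increment(50)
 *   });
 */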
19825
19826/**
19827 * @license
19828 * Copyright 2020 Google LLC
19829 *
19830 * Licensed under the Apache License, Version 2.0 (the "License");
19831 * you may not use this file except in compliance with the License.
19832 * You may obtain a copy of the License at
19833 *
19834 * http://www.apache.org/licenses/LICENSE-2.0
19835 *
19836 * Unless required by applicable law or agreed to in writing, software
19837 * distributed under the License is distributed on an "AS IS" BASIS,
19838 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19839 * See the License for the specific language governing permissions and
19840 * limitations under the License.
19841 */
19842/**
19843 * Creates a write batch, used for performing multiple writes as a single
19844 * atomic operation. The maximum number of writes allowed in a single {@link WriteBatch}
19845 * is 500.
19846 *
19847 * Unlike transactions, write batches are persisted offline and therefore are
19848 * preferable when you don't need to condition your writes on read data.
19849 *
19850 * @returns A {@link WriteBatch} that can be used to atomically execute multiple
19851 * writes.
19852 */ function $l(t) {
19853 return Ka(t = zc(t, La)), new Sl(t, (e => Rl(t, e)));
19854}
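/*
 * Usage sketch (editor's addition, not part of the bundle): batching several
 * writes into one atomic commit with `writeBatch` (illustrative documents).
 *
 *   import { getFirestore, doc, writeBatch } from "firebase/firestore";
 *
 *   const db = getFirestore();
 *   const batch = writeBatch(db);
 *   batch.set(doc(db, "cities", "NYC"), { name: "New York City" });
 *   batch.update(doc(db, "cities", "SF"), { population: 873965 });
 *   batch.delete(doc(db, "cities", "LA"));
 *   await batch.commit();
 */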
19855
19856/**
19857 * @license
19858 * Copyright 2021 Google LLC
19859 *
19860 * Licensed under the Apache License, Version 2.0 (the "License");
19861 * you may not use this file except in compliance with the License.
19862 * You may obtain a copy of the License at
19863 *
19864 * http://www.apache.org/licenses/LICENSE-2.0
19865 *
19866 * Unless required by applicable law or agreed to in writing, software
19867 * distributed under the License is distributed on an "AS IS" BASIS,
19868 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19869 * See the License for the specific language governing permissions and
19870 * limitations under the License.
19871 */ function Bl(t, e) {
19872 var n;
19873 const s = Ka(t = zc(t, La));
19874 // PORTING NOTE: We don't return an error if the user has not enabled
19875 // persistence since `enableIndexeddbPersistence()` can fail on the Web.
19876 if (!(null === (n = s.offlineComponents) || void 0 === n ? void 0 : n.indexBackfillerScheduler)) return k("Cannot enable indexes when persistence is disabled"),
19877 Promise.resolve();
19878 const i = function(t) {
19879 const e = "string" == typeof t ? function(t) {
19880 var e;
19881 try {
19882 return JSON.parse(t);
19883 } catch (t) {
19884 throw new U(L.INVALID_ARGUMENT, "Failed to parse JSON: " + (null === (e = t) || void 0 === e ? void 0 : e.message));
19885 }
19886 }(t) : t, n = [];
19887 if (Array.isArray(e.indexes)) for (const t of e.indexes) {
19888 const e = Ll(t, "collectionGroup"), s = [];
19889 if (Array.isArray(t.fields)) for (const e of t.fields) {
19890 const t = Dh("setIndexConfiguration", Ll(e, "fieldPath"));
19891 "CONTAINS" === e.arrayConfig ? s.push(new _t(t, 2 /* CONTAINS */)) : "ASCENDING" === e.order ? s.push(new _t(t, 0 /* ASCENDING */)) : "DESCENDING" === e.order && s.push(new _t(t, 1 /* DESCENDING */));
19892 }
19893 n.push(new ht(ht.UNKNOWN_ID, e, s, mt.empty()));
19894 }
19895 return n;
19896 }(e);
19897 return Ea(s).then((t => async function(t, e) {
19898 const n = B(t), s = n.indexManager, i = [];
19899 return n.persistence.runTransaction("Configure indexes", "readwrite", (t => s.getFieldIndexes(t).next((n => function(t, e, n, s, i) {
19900 t = [ ...t ], e = [ ...e ], t.sort(n), e.sort(n);
19901 const r = t.length, o = e.length;
19902 let u = 0, c = 0;
19903 for (;u < o && c < r; ) {
19904 const r = n(t[c], e[u]);
19905 r < 0 ?
19906 // The element was removed if the next element in our ordered
19907 // walkthrough is only in `before`.
19908 i(t[c++]) : r > 0 ?
19909 // The element was added if the next element in our ordered walkthrough
19910 // is only in `after`.
19911 s(e[u++]) : (u++, c++);
19912 }
19913 for (;u < o; ) s(e[u++]);
19914 for (;c < r; ) i(t[c++]);
19915 }(n, e, dt, (e => {
19916 i.push(s.addFieldIndex(t, e));
19917 }), (e => {
19918 i.push(s.deleteFieldIndex(t, e));
19919 })))).next((() => Rt.waitFor(i)))));
19920 }
19921 /**
19922 * @license
19923 * Copyright 2019 Google LLC
19924 *
19925 * Licensed under the Apache License, Version 2.0 (the "License");
19926 * you may not use this file except in compliance with the License.
19927 * You may obtain a copy of the License at
19928 *
19929 * http://www.apache.org/licenses/LICENSE-2.0
19930 *
19931 * Unless required by applicable law or agreed to in writing, software
19932 * distributed under the License is distributed on an "AS IS" BASIS,
19933 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19934 * See the License for the specific language governing permissions and
19935 * limitations under the License.
19936 */
19937 // The format of the LocalStorage key that stores the client state is:
19938 // firestore_clients_<persistence_prefix>_<instance_key>
19939 (t, i)));
19940}
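/*
 * Usage sketch (editor's addition, not part of the bundle): the configuration
 * shape accepted by `setIndexConfiguration`, as parsed by the function above.
 * It takes either a JSON string or an object with an `indexes` array; each
 * entry carries a `collectionGroup` and `fields` whose members use either
 * `order` ("ASCENDING"/"DESCENDING") or `arrayConfig` ("CONTAINS"). When
 * IndexedDB persistence is not enabled, the call resolves after logging a
 * warning, as the guard above shows. The index definitions are illustrative.
 *
 *   import { getFirestore, setIndexConfiguration } from "firebase/firestore";
 *
 *   await setIndexConfiguration(getFirestore(), {
 *     indexes: [
 *       {
 *         collectionGroup: "cities",
 *         fields: [
 *           { fieldPath: "state", order: "ASCENDING" },
 *           { fieldPath: "population", order: "DESCENDING" }
 *         ]
 *       },
 *       {
 *         collectionGroup: "cities",
 *         fields: [{ fieldPath: "regions", arrayConfig: "CONTAINS" }]
 *       }
 *     ]
 *   });
 */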
19941
19942function Ll(t, e) {
19943 if ("string" != typeof t[e]) throw new U(L.INVALID_ARGUMENT, "Missing string value for: " + e);
19944 return t[e];
19945}
19946
19947/**
19948 * @license
19949 * Copyright 2021 Google LLC
19950 *
19951 * Licensed under the Apache License, Version 2.0 (the "License");
19952 * you may not use this file except in compliance with the License.
19953 * You may obtain a copy of the License at
19954 *
19955 * http://www.apache.org/licenses/LICENSE-2.0
19956 *
19957 * Unless required by applicable law or agreed to in writing, software
19958 * distributed under the License is distributed on an "AS IS" BASIS,
19959 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19960 * See the License for the specific language governing permissions and
19961 * limitations under the License.
19962 */ !function(t, e = !0) {
19963 !function(t) {
19964 V = t;
19965 }(i), n(new r("firestore", ((t, {instanceIdentifier: n, options: s}) => {
19966 const i = t.getProvider("app").getImmediate(), r = new La(new j(t.getProvider("auth-internal")), new J(t.getProvider("app-check-internal")), function(t, e) {
19967 if (!Object.prototype.hasOwnProperty.apply(t.options, [ "projectId" ])) throw new U(L.INVALID_ARGUMENT, '"projectId" not provided in firebase.initializeApp.');
19968 return new ie(t.options.projectId, e);
19969 }
19970 /**
19971 * @license
19972 * Copyright 2017 Google LLC
19973 *
19974 * Licensed under the Apache License, Version 2.0 (the "License");
19975 * you may not use this file except in compliance with the License.
19976 * You may obtain a copy of the License at
19977 *
19978 * http://www.apache.org/licenses/LICENSE-2.0
19979 *
19980 * Unless required by applicable law or agreed to in writing, software
19981 * distributed under the License is distributed on an "AS IS" BASIS,
19982 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19983 * See the License for the specific language governing permissions and
19984 * limitations under the License.
19985 */
19986 /** Sentinel value that sorts before any Mutation Batch ID. */ (i, n), i);
19987 return s = Object.assign({
19988 useFetchStreams: e
19989 }, s), r._setSettings(s), r;
19990 }), "PUBLIC").setMultipleInstances(!0)), s(P, "3.7.3", t),
19991 // BUILD_TARGET will be replaced by values like esm5, esm2017, cjs5, etc during the compilation
19992 s(P, "3.7.3", "esm2017");
19993}("rn", /* useFetchStreams= */ !1);
19994
19995export { nl as AbstractUserDataWriter, la as AggregateField, fa as AggregateQuerySnapshot, nh as Bytes, Ba as CACHE_SIZE_UNLIMITED, na as CollectionReference, ta as DocumentReference, ol as DocumentSnapshot, sh as FieldPath, rh as FieldValue, La as Firestore, U as FirestoreError, oh as GeoPoint, $a as LoadBundleTask, ea as Query, Fh as QueryConstraint, ul as QueryDocumentSnapshot, cl as QuerySnapshot, rl as SnapshotMetadata, st as Timestamp, Cl as Transaction, Sl as WriteBatch, ie as _DatabaseId, at as _DocumentKey, Y as _EmptyAppCheckTokenProvider, G as _EmptyAuthCredentialsProvider, ct as _FieldPath, zc as _cast, $ as _debugAssert, zt as _isBase64Available, k as _logWarn, Gc as _validateIsNotUsedTogether, Tl as addDoc, Pl as aggregateQuerySnapshotEqual, Ml as arrayRemove, Ol as arrayUnion, za as clearIndexedDbPersistence, sa as collection, ia as collectionGroup, Zc as connectFirestoreEmulator, Il as deleteDoc, Nl as deleteField, Ya as disableNetwork, ra as doc, ih as documentId, Qa as enableIndexedDbPersistence, ja as enableMultiTabIndexedDbPersistence, Ja as enableNetwork, Yh as endAt, Jh as endBefore, Ka as ensureFirestoreConfigured, Rl as executeWrite, vl as getCountFromServer, ll as getDoc, dl as getDocFromCache, _l as getDocFromServer, wl as getDocs, ml as getDocsFromCache, gl as getDocsFromServer, qa as getFirestore, Fl as increment, Ua as initializeFirestore, Gh as limit, Qh as limitToLast, Za as loadBundle, th as namedQuery, El as onSnapshot, Al as onSnapshotsInSync, qh as orderBy, $h as query, ua as queryEqual, oa as refEqual, xl as runTransaction, kl as serverTimestamp, yl as setDoc, Bl as setIndexConfiguration, C as setLogLevel, hl as snapshotEqual, zh as startAfter, Wh as startAt, Xa as terminate, pl as updateDoc, Ha as waitForPendingWrites, Lh as where, $l as writeBatch };
19996//# sourceMappingURL=index.rn.js.map