} struct.\n */\nfunction Si(t, e) {\n return t.useProto3Json || Ft(e) ? e : {\n value: e\n };\n}\n\n/**\n * Returns a number (or null) from a google.protobuf.Int32Value proto.\n */\n/**\n * Returns a value for a Date that's appropriate to put into a proto.\n */\nfunction Di(t, e) {\n if (t.useProto3Json) {\n return `${new Date(1e3 * e.seconds).toISOString().replace(/\\.\\d*/, \"\").replace(\"Z\", \"\")}.${(\"000000000\" + e.nanoseconds).slice(-9)}Z`;\n }\n return {\n seconds: \"\" + e.seconds,\n nanos: e.nanoseconds\n };\n}\n\n/**\n * Returns a value for bytes that's appropriate to put in a proto.\n *\n * Visible for testing.\n */\nfunction Ci(t, e) {\n return t.useProto3Json ? e.toBase64() : e.toUint8Array();\n}\n\n/**\n * Returns a ByteString based on the proto string value.\n */ function xi(t, e) {\n return Di(t, e.toTimestamp());\n}\n\nfunction Ni(t) {\n return F(!!t), rt.fromTimestamp(function(t) {\n const e = De(t);\n return new it(e.seconds, e.nanos);\n }(t));\n}\n\nfunction ki(t, e) {\n return function(t) {\n return new ut([ \"projects\", t.projectId, \"databases\", t.database ]);\n }(t).child(\"documents\").child(e).canonicalString();\n}\n\nfunction Mi(t) {\n const e = ut.fromString(t);\n return F(ur(e)), e;\n}\n\nfunction $i(t, e) {\n return ki(t.databaseId, e.path);\n}\n\nfunction Oi(t, e) {\n const n = Mi(e);\n if (n.get(1) !== t.databaseId.projectId) throw new U(q.INVALID_ARGUMENT, \"Tried to deserialize key from different project: \" + n.get(1) + \" vs \" + t.databaseId.projectId);\n if (n.get(3) !== t.databaseId.database) throw new U(q.INVALID_ARGUMENT, \"Tried to deserialize key from different database: \" + n.get(3) + \" vs \" + t.databaseId.database);\n return new ht(qi(n));\n}\n\nfunction Fi(t, e) {\n return ki(t.databaseId, e);\n}\n\nfunction Bi(t) {\n const e = Mi(t);\n // In v1beta1 queries for collections at the root did not have a trailing\n // \"/documents\". In v1 all resource paths contain \"/documents\". Preserve the\n // ability to read the v1beta1 form for compatibility with queries persisted\n // in the local target cache.\n return 4 === e.length ? ut.emptyPath() : qi(e);\n}\n\nfunction Li(t) {\n return new ut([ \"projects\", t.databaseId.projectId, \"databases\", t.databaseId.database ]).canonicalString();\n}\n\nfunction qi(t) {\n return F(t.length > 4 && \"documents\" === t.get(4)), t.popFirst(5);\n}\n\n/** Creates a Document proto from key and fields (but no create/update time) */ function Ui(t, e, n) {\n return {\n name: $i(t, e),\n fields: n.value.mapValue.fields\n };\n}\n\nfunction Ki(t, e, n) {\n const s = Oi(t, e.name), i = Ni(e.updateTime), r = e.createTime ? Ni(e.createTime) : rt.min(), o = new un({\n mapValue: {\n fields: e.fields\n }\n }), u = an.newFoundDocument(s, i, r, o);\n return n && u.setHasCommittedMutations(), n ? u.setHasCommittedMutations() : u;\n}\n\nfunction Gi(t, e) {\n return \"found\" in e ? function(t, e) {\n F(!!e.found), e.found.name, e.found.updateTime;\n const n = Oi(t, e.found.name), s = Ni(e.found.updateTime), i = e.found.createTime ? Ni(e.found.createTime) : rt.min(), r = new un({\n mapValue: {\n fields: e.found.fields\n }\n });\n return an.newFoundDocument(n, s, i, r);\n }(t, e) : \"missing\" in e ? 
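    // "missing" branch of a BatchGetDocuments-style response: the document
    // does not exist, so a no-document tombstone is built at the reported readTime.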
function(t, e) {\n F(!!e.missing), F(!!e.readTime);\n const n = Oi(t, e.missing), s = Ni(e.readTime);\n return an.newNoDocument(n, s);\n }(t, e) : O();\n}\n\nfunction Qi(t, e) {\n let n;\n if (\"targetChange\" in e) {\n e.targetChange;\n // proto3 default value is unset in JSON (undefined), so use 'NO_CHANGE'\n // if unset\n const s = function(t) {\n return \"NO_CHANGE\" === t ? 0 /* WatchTargetChangeState.NoChange */ : \"ADD\" === t ? 1 /* WatchTargetChangeState.Added */ : \"REMOVE\" === t ? 2 /* WatchTargetChangeState.Removed */ : \"CURRENT\" === t ? 3 /* WatchTargetChangeState.Current */ : \"RESET\" === t ? 4 /* WatchTargetChangeState.Reset */ : O();\n }(e.targetChange.targetChangeType || \"NO_CHANGE\"), i = e.targetChange.targetIds || [], r = function(t, e) {\n return t.useProto3Json ? (F(void 0 === e || \"string\" == typeof e), Ve.fromBase64String(e || \"\")) : (F(void 0 === e || e instanceof Uint8Array), \n Ve.fromUint8Array(e || new Uint8Array));\n }(t, e.targetChange.resumeToken), o = e.targetChange.cause, u = o && function(t) {\n const e = void 0 === t.code ? q.UNKNOWN : ui(t.code);\n return new U(e, t.message || \"\");\n }(o);\n n = new Ii(s, i, r, u || null);\n } else if (\"documentChange\" in e) {\n e.documentChange;\n const s = e.documentChange;\n s.document, s.document.name, s.document.updateTime;\n const i = Oi(t, s.document.name), r = Ni(s.document.updateTime), o = s.document.createTime ? Ni(s.document.createTime) : rt.min(), u = new un({\n mapValue: {\n fields: s.document.fields\n }\n }), c = an.newFoundDocument(i, r, o, u), a = s.targetIds || [], h = s.removedTargetIds || [];\n n = new yi(a, h, c.key, c);\n } else if (\"documentDelete\" in e) {\n e.documentDelete;\n const s = e.documentDelete;\n s.document;\n const i = Oi(t, s.document), r = s.readTime ? Ni(s.readTime) : rt.min(), o = an.newNoDocument(i, r), u = s.removedTargetIds || [];\n n = new yi([], u, o.key, o);\n } else if (\"documentRemove\" in e) {\n e.documentRemove;\n const s = e.documentRemove;\n s.document;\n const i = Oi(t, s.document), r = s.removedTargetIds || [];\n n = new yi([], r, i, null);\n } else {\n if (!(\"filter\" in e)) return O();\n {\n e.filter;\n const t = e.filter;\n t.targetId;\n const {count: s = 0, unchangedNames: i} = t, r = new si(s, i), o = t.targetId;\n n = new pi(o, r);\n }\n }\n return n;\n}\n\nfunction ji(t, e) {\n let n;\n if (e instanceof js) n = {\n update: Ui(t, e.key, e.value)\n }; else if (e instanceof Ys) n = {\n delete: $i(t, e.key)\n }; else if (e instanceof zs) n = {\n update: Ui(t, e.key, e.data),\n updateMask: or(e.fieldMask)\n }; else {\n if (!(e instanceof Xs)) return O();\n n = {\n verify: $i(t, e.key)\n };\n }\n return e.fieldTransforms.length > 0 && (n.updateTransforms = e.fieldTransforms.map((t => function(t, e) {\n const n = e.transform;\n if (n instanceof bs) return {\n fieldPath: e.field.canonicalString(),\n setToServerValue: \"REQUEST_TIME\"\n };\n if (n instanceof Vs) return {\n fieldPath: e.field.canonicalString(),\n appendMissingElements: {\n values: n.elements\n }\n };\n if (n instanceof Ds) return {\n fieldPath: e.field.canonicalString(),\n removeAllFromArray: {\n values: n.elements\n }\n };\n if (n instanceof xs) return {\n fieldPath: e.field.canonicalString(),\n increment: n.gt\n };\n throw O();\n }(0, t)))), e.precondition.isNone || (n.currentDocument = function(t, e) {\n return void 0 !== e.updateTime ? {\n updateTime: xi(t, e.updateTime)\n } : void 0 !== e.exists ? 
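    // An `exists` precondition asserts whether the document must (or must not)
    // already exist on the server.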
{\n exists: e.exists\n } : O();\n }(t, e.precondition)), n;\n}\n\nfunction zi(t, e) {\n const n = e.currentDocument ? function(t) {\n return void 0 !== t.updateTime ? Fs.updateTime(Ni(t.updateTime)) : void 0 !== t.exists ? Fs.exists(t.exists) : Fs.none();\n }(e.currentDocument) : Fs.none(), s = e.updateTransforms ? e.updateTransforms.map((e => function(t, e) {\n let n = null;\n if (\"setToServerValue\" in e) F(\"REQUEST_TIME\" === e.setToServerValue), n = new bs; else if (\"appendMissingElements\" in e) {\n const t = e.appendMissingElements.values || [];\n n = new Vs(t);\n } else if (\"removeAllFromArray\" in e) {\n const t = e.removeAllFromArray.values || [];\n n = new Ds(t);\n } else \"increment\" in e ? n = new xs(t, e.increment) : O();\n const s = at.fromServerFormat(e.fieldPath);\n return new Ms(s, n);\n }(t, e))) : [];\n if (e.update) {\n e.update.name;\n const i = Oi(t, e.update.name), r = new un({\n mapValue: {\n fields: e.update.fields\n }\n });\n if (e.updateMask) {\n const t = function(t) {\n const e = t.fieldPaths || [];\n return new Re(e.map((t => at.fromServerFormat(t))));\n }(e.updateMask);\n return new zs(i, r, t, n, s);\n }\n return new js(i, r, n, s);\n }\n if (e.delete) {\n const s = Oi(t, e.delete);\n return new Ys(s, n);\n }\n if (e.verify) {\n const s = Oi(t, e.verify);\n return new Xs(s, n);\n }\n return O();\n}\n\nfunction Wi(t, e) {\n return t && t.length > 0 ? (F(void 0 !== e), t.map((t => function(t, e) {\n // NOTE: Deletes don't have an updateTime.\n let n = t.updateTime ? Ni(t.updateTime) : Ni(e);\n return n.isEqual(rt.min()) && (\n // The Firestore Emulator currently returns an update time of 0 for\n // deletes of non-existing documents (rather than null). This breaks the\n // test \"get deleted doc while offline with source=cache\" as NoDocuments\n // with version 0 are filtered by IndexedDb's RemoteDocumentCache.\n // TODO(#2149): Remove this when Emulator is fixed\n n = Ni(e)), new Os(n, t.transformResults || []);\n }(t, e)))) : [];\n}\n\nfunction Hi(t, e) {\n return {\n documents: [ Fi(t, e.path) ]\n };\n}\n\nfunction Ji(t, e) {\n // Dissect the path into parent, collectionId, and optional key filter.\n const n = {\n structuredQuery: {}\n }, s = e.path;\n null !== e.collectionGroup ? (n.parent = Fi(t, s), n.structuredQuery.from = [ {\n collectionId: e.collectionGroup,\n allDescendants: !0\n } ]) : (n.parent = Fi(t, s.popLast()), n.structuredQuery.from = [ {\n collectionId: s.lastSegment()\n } ]);\n const i = function(t) {\n if (0 === t.length) return;\n return rr(gn.create(t, \"and\" /* CompositeOperator.AND */));\n }(e.filters);\n i && (n.structuredQuery.where = i);\n const r = function(t) {\n if (0 === t.length) return;\n return t.map((t => \n // visible for testing\n function(t) {\n return {\n field: sr(t.field),\n direction: tr(t.dir)\n };\n }(t)));\n }(e.orderBy);\n r && (n.structuredQuery.orderBy = r);\n const o = Si(t, e.limit);\n var u;\n return null !== o && (n.structuredQuery.limit = o), e.startAt && (n.structuredQuery.startAt = {\n before: (u = e.startAt).inclusive,\n values: u.position\n }), e.endAt && (n.structuredQuery.endAt = function(t) {\n return {\n before: !t.inclusive,\n values: t.position\n };\n }(e.endAt)), n;\n}\n\nfunction Yi(t) {\n let e = Bi(t.parent);\n const n = t.structuredQuery, s = n.from ? n.from.length : 0;\n let i = null;\n if (s > 0) {\n F(1 === s);\n const t = n.from[0];\n t.allDescendants ? 
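    // `allDescendants` marks a collection-group query; otherwise the from clause
    // names a single child collection of the parent path.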
i = t.collectionId : e = e.child(t.collectionId);\n }\n let r = [];\n n.where && (r = function(t) {\n const e = Zi(t);\n if (e instanceof gn && In(e)) return e.getFilters();\n return [ e ];\n }(n.where));\n let o = [];\n n.orderBy && (o = n.orderBy.map((t => function(t) {\n return new dn(ir(t.field), \n // visible for testing\n function(t) {\n switch (t) {\n case \"ASCENDING\":\n return \"asc\" /* Direction.ASCENDING */;\n\n case \"DESCENDING\":\n return \"desc\" /* Direction.DESCENDING */;\n\n default:\n return;\n }\n }\n // visible for testing\n (t.direction));\n }\n // visible for testing\n (t))));\n let u = null;\n n.limit && (u = function(t) {\n let e;\n return e = \"object\" == typeof t ? t.value : t, Ft(e) ? null : e;\n }(n.limit));\n let c = null;\n n.startAt && (c = function(t) {\n const e = !!t.before, n = t.values || [];\n return new hn(n, e);\n }(n.startAt));\n let a = null;\n return n.endAt && (a = function(t) {\n const e = !t.before, n = t.values || [];\n return new hn(n, e);\n }\n // visible for testing\n (n.endAt)), Kn(e, i, o, r, u, \"F\" /* LimitType.First */ , c, a);\n}\n\nfunction Xi(t, e) {\n const n = function(t) {\n switch (t) {\n case \"TargetPurposeListen\" /* TargetPurpose.Listen */ :\n return null;\n\n case \"TargetPurposeExistenceFilterMismatch\" /* TargetPurpose.ExistenceFilterMismatch */ :\n return \"existence-filter-mismatch\";\n\n case \"TargetPurposeExistenceFilterMismatchBloom\" /* TargetPurpose.ExistenceFilterMismatchBloom */ :\n return \"existence-filter-mismatch-bloom\";\n\n case \"TargetPurposeLimboResolution\" /* TargetPurpose.LimboResolution */ :\n return \"limbo-document\";\n\n default:\n return O();\n }\n }(e.purpose);\n return null == n ? null : {\n \"goog-listen-tags\": n\n };\n}\n\nfunction Zi(t) {\n return void 0 !== t.unaryFilter ? function(t) {\n switch (t.unaryFilter.op) {\n case \"IS_NAN\":\n const e = ir(t.unaryFilter.field);\n return mn.create(e, \"==\" /* Operator.EQUAL */ , {\n doubleValue: NaN\n });\n\n case \"IS_NULL\":\n const n = ir(t.unaryFilter.field);\n return mn.create(n, \"==\" /* Operator.EQUAL */ , {\n nullValue: \"NULL_VALUE\"\n });\n\n case \"IS_NOT_NAN\":\n const s = ir(t.unaryFilter.field);\n return mn.create(s, \"!=\" /* Operator.NOT_EQUAL */ , {\n doubleValue: NaN\n });\n\n case \"IS_NOT_NULL\":\n const i = ir(t.unaryFilter.field);\n return mn.create(i, \"!=\" /* Operator.NOT_EQUAL */ , {\n nullValue: \"NULL_VALUE\"\n });\n\n default:\n return O();\n }\n }(t) : void 0 !== t.fieldFilter ? function(t) {\n return mn.create(ir(t.fieldFilter.field), function(t) {\n switch (t) {\n case \"EQUAL\":\n return \"==\" /* Operator.EQUAL */;\n\n case \"NOT_EQUAL\":\n return \"!=\" /* Operator.NOT_EQUAL */;\n\n case \"GREATER_THAN\":\n return \">\" /* Operator.GREATER_THAN */;\n\n case \"GREATER_THAN_OR_EQUAL\":\n return \">=\" /* Operator.GREATER_THAN_OR_EQUAL */;\n\n case \"LESS_THAN\":\n return \"<\" /* Operator.LESS_THAN */;\n\n case \"LESS_THAN_OR_EQUAL\":\n return \"<=\" /* Operator.LESS_THAN_OR_EQUAL */;\n\n case \"ARRAY_CONTAINS\":\n return \"array-contains\" /* Operator.ARRAY_CONTAINS */;\n\n case \"IN\":\n return \"in\" /* Operator.IN */;\n\n case \"NOT_IN\":\n return \"not-in\" /* Operator.NOT_IN */;\n\n case \"ARRAY_CONTAINS_ANY\":\n return \"array-contains-any\" /* Operator.ARRAY_CONTAINS_ANY */;\n\n default:\n return O();\n }\n }(t.fieldFilter.op), t.fieldFilter.value);\n }(t) : void 0 !== t.compositeFilter ? 
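    // Composite filter: convert each sub-filter recursively and map the AND/OR
    // operator back to its model form.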
function(t) {\n return gn.create(t.compositeFilter.filters.map((t => Zi(t))), function(t) {\n switch (t) {\n case \"AND\":\n return \"and\" /* CompositeOperator.AND */;\n\n case \"OR\":\n return \"or\" /* CompositeOperator.OR */;\n\n default:\n return O();\n }\n }(t.compositeFilter.op));\n }(t) : O();\n}\n\nfunction tr(t) {\n return Ri[t];\n}\n\nfunction er(t) {\n return Pi[t];\n}\n\nfunction nr(t) {\n return bi[t];\n}\n\nfunction sr(t) {\n return {\n fieldPath: t.canonicalString()\n };\n}\n\nfunction ir(t) {\n return at.fromServerFormat(t.fieldPath);\n}\n\nfunction rr(t) {\n return t instanceof mn ? function(t) {\n if (\"==\" /* Operator.EQUAL */ === t.op) {\n if (Xe(t.value)) return {\n unaryFilter: {\n field: sr(t.field),\n op: \"IS_NAN\"\n }\n };\n if (Ye(t.value)) return {\n unaryFilter: {\n field: sr(t.field),\n op: \"IS_NULL\"\n }\n };\n } else if (\"!=\" /* Operator.NOT_EQUAL */ === t.op) {\n if (Xe(t.value)) return {\n unaryFilter: {\n field: sr(t.field),\n op: \"IS_NOT_NAN\"\n }\n };\n if (Ye(t.value)) return {\n unaryFilter: {\n field: sr(t.field),\n op: \"IS_NOT_NULL\"\n }\n };\n }\n return {\n fieldFilter: {\n field: sr(t.field),\n op: er(t.op),\n value: t.value\n }\n };\n }(t) : t instanceof gn ? function(t) {\n const e = t.getFilters().map((t => rr(t)));\n if (1 === e.length) return e[0];\n return {\n compositeFilter: {\n op: nr(t.op),\n filters: e\n }\n };\n }(t) : O();\n}\n\nfunction or(t) {\n const e = [];\n return t.fields.forEach((t => e.push(t.canonicalString()))), {\n fieldPaths: e\n };\n}\n\nfunction ur(t) {\n // Resource names have at least 4 components (project ID, database ID)\n return t.length >= 4 && \"projects\" === t.get(0) && \"databases\" === t.get(2);\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * An immutable set of metadata that the local store tracks for each target.\n */ class cr {\n constructor(\n /** The target being listened to. */\n t, \n /**\n * The target ID to which the target corresponds; Assigned by the\n * LocalStore for user listens and by the SyncEngine for limbo watches.\n */\n e, \n /** The purpose of the target. */\n n, \n /**\n * The sequence number of the last transaction during which this target data\n * was modified.\n */\n s, \n /** The latest snapshot version seen for this target. */\n i = rt.min()\n /**\n * The maximum snapshot version at which the associated view\n * contained no limbo documents.\n */ , r = rt.min()\n /**\n * An opaque, server-assigned token that allows watching a target to be\n * resumed after disconnecting without retransmitting all the data that\n * matches the target. The resume token essentially identifies a point in\n * time from which the server should resume sending results.\n */ , o = Ve.EMPTY_BYTE_STRING\n /**\n * The number of documents that last matched the query at the resume token or\n * read time. 
Documents are counted only when making a listen request with\n * resume token or read time, otherwise, keep it null.\n */ , u = null) {\n this.target = t, this.targetId = e, this.purpose = n, this.sequenceNumber = s, this.snapshotVersion = i, \n this.lastLimboFreeSnapshotVersion = r, this.resumeToken = o, this.expectedCount = u;\n }\n /** Creates a new target data instance with an updated sequence number. */ withSequenceNumber(t) {\n return new cr(this.target, this.targetId, this.purpose, t, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken, this.expectedCount);\n }\n /**\n * Creates a new target data instance with an updated resume token and\n * snapshot version.\n */ withResumeToken(t, e) {\n return new cr(this.target, this.targetId, this.purpose, this.sequenceNumber, e, this.lastLimboFreeSnapshotVersion, t, \n /* expectedCount= */ null);\n }\n /**\n * Creates a new target data instance with an updated expected count.\n */ withExpectedCount(t) {\n return new cr(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken, t);\n }\n /**\n * Creates a new target data instance with an updated last limbo free\n * snapshot version number.\n */ withLastLimboFreeSnapshotVersion(t) {\n return new cr(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, t, this.resumeToken, this.expectedCount);\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/** Serializer for values stored in the LocalStore. */ class ar {\n constructor(t) {\n this.fe = t;\n }\n}\n\n/** Decodes a remote document from storage locally to a Document. */ function hr(t, e) {\n let n;\n if (e.document) n = Ki(t.fe, e.document, !!e.hasCommittedMutations); else if (e.noDocument) {\n const t = ht.fromSegments(e.noDocument.path), s = wr(e.noDocument.readTime);\n n = an.newNoDocument(t, s), e.hasCommittedMutations && n.setHasCommittedMutations();\n } else {\n if (!e.unknownDocument) return O();\n {\n const t = ht.fromSegments(e.unknownDocument.path), s = wr(e.unknownDocument.version);\n n = an.newUnknownDocument(t, s);\n }\n }\n return e.readTime && n.setReadTime(function(t) {\n const e = new it(t[0], t[1]);\n return rt.fromTimestamp(e);\n }(e.readTime)), n;\n}\n\n/** Encodes a document for storage locally. 
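 * The stored form splits the document key into prefixPath, collectionGroup and
 * documentId, and records the read time and the committed-mutations flag.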
*/ function lr(t, e) {\n const n = e.key, s = {\n prefixPath: n.getCollectionPath().popLast().toArray(),\n collectionGroup: n.collectionGroup,\n documentId: n.path.lastSegment(),\n readTime: fr(e.readTime),\n hasCommittedMutations: e.hasCommittedMutations\n };\n if (e.isFoundDocument()) s.document = function(t, e) {\n return {\n name: $i(t, e.key),\n fields: e.data.value.mapValue.fields,\n updateTime: Di(t, e.version.toTimestamp()),\n createTime: Di(t, e.createTime.toTimestamp())\n };\n }(t.fe, e); else if (e.isNoDocument()) s.noDocument = {\n path: n.path.toArray(),\n readTime: dr(e.version)\n }; else {\n if (!e.isUnknownDocument()) return O();\n s.unknownDocument = {\n path: n.path.toArray(),\n version: dr(e.version)\n };\n }\n return s;\n}\n\nfunction fr(t) {\n const e = t.toTimestamp();\n return [ e.seconds, e.nanoseconds ];\n}\n\nfunction dr(t) {\n const e = t.toTimestamp();\n return {\n seconds: e.seconds,\n nanoseconds: e.nanoseconds\n };\n}\n\nfunction wr(t) {\n const e = new it(t.seconds, t.nanoseconds);\n return rt.fromTimestamp(e);\n}\n\n/** Encodes a batch of mutations into a DbMutationBatch for local storage. */\n/** Decodes a DbMutationBatch into a MutationBatch */\nfunction _r(t, e) {\n const n = (e.baseMutations || []).map((e => zi(t.fe, e)));\n // Squash old transform mutations into existing patch or set mutations.\n // The replacement of representing `transforms` with `update_transforms`\n // on the SDK means that old `transform` mutations stored in IndexedDB need\n // to be updated to `update_transforms`.\n // TODO(b/174608374): Remove this code once we perform a schema migration.\n for (let t = 0; t < e.mutations.length - 1; ++t) {\n const n = e.mutations[t];\n if (t + 1 < e.mutations.length && void 0 !== e.mutations[t + 1].transform) {\n const s = e.mutations[t + 1];\n n.updateTransforms = s.transform.fieldTransforms, e.mutations.splice(t + 1, 1), \n ++t;\n }\n }\n const s = e.mutations.map((e => zi(t.fe, e))), i = it.fromMillis(e.localWriteTimeMs);\n return new Zs(e.batchId, i, n, s);\n}\n\n/** Decodes a DbTarget into TargetData */ function mr(t) {\n const e = wr(t.readTime), n = void 0 !== t.lastLimboFreeSnapshotVersion ? wr(t.lastLimboFreeSnapshotVersion) : rt.min();\n let s;\n var i;\n return void 0 !== t.query.documents ? (F(1 === (i = t.query).documents.length), \n s = Jn(Gn(Bi(i.documents[0])))) : s = function(t) {\n return Jn(Yi(t));\n }(t.query), new cr(s, t.targetId, \"TargetPurposeListen\" /* TargetPurpose.Listen */ , t.lastListenSequenceNumber, e, n, Ve.fromBase64String(t.resumeToken));\n}\n\n/** Encodes TargetData into a DbTarget for storage locally. */ function gr(t, e) {\n const n = dr(e.snapshotVersion), s = dr(e.lastLimboFreeSnapshotVersion);\n let i;\n i = Fn(e.target) ? 
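    // A single-document target is stored as a `documents` target; everything else
    // is stored as a structured query.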
Hi(t.fe, e.target) : Ji(t.fe, e.target);\n // We can't store the resumeToken as a ByteString in IndexedDb, so we\n // convert it to a base64 string for storage.\n const r = e.resumeToken.toBase64();\n // lastListenSequenceNumber is always 0 until we do real GC.\n return {\n targetId: e.targetId,\n canonicalId: $n(e.target),\n readTime: n,\n resumeToken: r,\n lastListenSequenceNumber: e.sequenceNumber,\n lastLimboFreeSnapshotVersion: s,\n query: i\n };\n}\n\n/**\n * A helper function for figuring out what kind of query has been stored.\n */\n/**\n * Encodes a `BundledQuery` from bundle proto to a Query object.\n *\n * This reconstructs the original query used to build the bundle being loaded,\n * including features exists only in SDKs (for example: limit-to-last).\n */\nfunction yr(t) {\n const e = Yi({\n parent: t.parent,\n structuredQuery: t.structuredQuery\n });\n return \"LAST\" === t.limitType ? Xn(e, e.limit, \"L\" /* LimitType.Last */) : e;\n}\n\n/** Encodes a NamedQuery proto object to a NamedQuery model object. */\n/** Encodes a DbDocumentOverlay object to an Overlay model object. */\nfunction pr(t, e) {\n return new ei(e.largestBatchId, zi(t.fe, e.overlayMutation));\n}\n\n/** Decodes an Overlay model object into a DbDocumentOverlay object. */\n/**\n * Returns the DbDocumentOverlayKey corresponding to the given user and\n * document key.\n */\nfunction Ir(t, e) {\n const n = e.path.lastSegment();\n return [ t, qt(e.path.popLast()), n ];\n}\n\nfunction Tr(t, e, n, s) {\n return {\n indexId: t,\n uid: e.uid || \"\",\n sequenceNumber: n,\n readTime: dr(s.readTime),\n documentKey: qt(s.documentKey.path),\n largestBatchId: s.largestBatchId\n };\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ class Er {\n getBundleMetadata(t, e) {\n return Ar(t).get(e).next((t => {\n if (t) return {\n id: (e = t).bundleId,\n createTime: wr(e.createTime),\n version: e.version\n };\n /** Encodes a DbBundle to a BundleMetadata object. */\n var e;\n /** Encodes a BundleMetadata to a DbBundle. */ }));\n }\n saveBundleMetadata(t, e) {\n return Ar(t).put({\n bundleId: (n = e).id,\n createTime: dr(Ni(n.createTime)),\n version: n.version\n });\n var n;\n /** Encodes a DbNamedQuery to a NamedQuery. */ }\n getNamedQuery(t, e) {\n return vr(t).get(e).next((t => {\n if (t) return {\n name: (e = t).name,\n query: yr(e.bundledQuery),\n readTime: wr(e.readTime)\n };\n var e;\n /** Encodes a NamedQuery from a bundle proto to a DbNamedQuery. 
*/ }));\n }\n saveNamedQuery(t, e) {\n return vr(t).put(function(t) {\n return {\n name: t.name,\n readTime: dr(Ni(t.readTime)),\n bundledQuery: t.bundledQuery\n };\n }(e));\n }\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the bundles object store.\n */ function Ar(t) {\n return _e(t, \"bundles\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the namedQueries object store.\n */ function vr(t) {\n return _e(t, \"namedQueries\");\n}\n\n/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Implementation of DocumentOverlayCache using IndexedDb.\n */ class Rr {\n /**\n * @param serializer - The document serializer.\n * @param userId - The userId for which we are accessing overlays.\n */\n constructor(t, e) {\n this.serializer = t, this.userId = e;\n }\n static de(t, e) {\n const n = e.uid || \"\";\n return new Rr(t, n);\n }\n getOverlay(t, e) {\n return Pr(t).get(Ir(this.userId, e)).next((t => t ? pr(this.serializer, t) : null));\n }\n getOverlays(t, e) {\n const n = fs();\n return Rt.forEach(e, (e => this.getOverlay(t, e).next((t => {\n null !== t && n.set(e, t);\n })))).next((() => n));\n }\n saveOverlays(t, e, n) {\n const s = [];\n return n.forEach(((n, i) => {\n const r = new ei(e, i);\n s.push(this.we(t, r));\n })), Rt.waitFor(s);\n }\n removeOverlaysForBatchId(t, e, n) {\n const s = new Set;\n // Get the set of unique collection paths.\n e.forEach((t => s.add(qt(t.getCollectionPath()))));\n const i = [];\n return s.forEach((e => {\n const s = IDBKeyRange.bound([ this.userId, e, n ], [ this.userId, e, n + 1 ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0);\n i.push(Pr(t).J(\"collectionPathOverlayIndex\", s));\n })), Rt.waitFor(i);\n }\n getOverlaysForCollection(t, e, n) {\n const s = fs(), i = qt(e), r = IDBKeyRange.bound([ this.userId, i, n ], [ this.userId, i, Number.POSITIVE_INFINITY ], \n /*lowerOpen=*/ !0);\n return Pr(t).j(\"collectionPathOverlayIndex\", r).next((t => {\n for (const e of t) {\n const t = pr(this.serializer, e);\n s.set(t.getKey(), t);\n }\n return s;\n }));\n }\n getOverlaysForCollectionGroup(t, e, n, s) {\n const i = fs();\n let r;\n // We want batch IDs larger than `sinceBatchId`, and so the lower bound\n // is not inclusive.\n const o = IDBKeyRange.bound([ this.userId, e, n ], [ this.userId, e, Number.POSITIVE_INFINITY ], \n /*lowerOpen=*/ !0);\n return Pr(t).X({\n index: \"collectionGroupOverlayIndex\",\n range: o\n }, ((t, e, n) => {\n // We do not want to return partial batch overlays, even if the size\n // of the result set exceeds the given `count` argument. Therefore, we\n // continue to aggregate results even after the result size exceeds\n // `count` if there are more overlays from the `currentBatchId`.\n const o = pr(this.serializer, e);\n i.size() < s || o.largestBatchId === r ? 
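    // Either the result set is still under `count`, or this overlay belongs to the
    // batch currently being aggregated; keep collecting in both cases.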
(i.set(o.getKey(), o), r = o.largestBatchId) : n.done();\n })).next((() => i));\n }\n we(t, e) {\n return Pr(t).put(function(t, e, n) {\n const [s, i, r] = Ir(e, n.mutation.key);\n return {\n userId: e,\n collectionPath: i,\n documentId: r,\n collectionGroup: n.mutation.key.getCollectionGroup(),\n largestBatchId: n.largestBatchId,\n overlayMutation: ji(t.fe, n.mutation)\n };\n }(this.serializer, this.userId, e));\n }\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the document overlay object store.\n */ function Pr(t) {\n return _e(t, \"documentOverlays\");\n}\n\n/**\n * @license\n * Copyright 2021 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n// Note: This code is copied from the backend. Code that is not used by\n// Firestore was removed.\n/** Firestore index value writer. */\nclass br {\n constructor() {}\n // The write methods below short-circuit writing terminators for values\n // containing a (terminating) truncated value.\n // As an example, consider the resulting encoding for:\n // [\"bar\", [2, \"foo\"]] -> (STRING, \"bar\", TERM, ARRAY, NUMBER, 2, STRING, \"foo\", TERM, TERM, TERM)\n // [\"bar\", [2, truncated(\"foo\")]] -> (STRING, \"bar\", TERM, ARRAY, NUMBER, 2, STRING, \"foo\", TRUNC)\n // [\"bar\", truncated([\"foo\"])] -> (STRING, \"bar\", TERM, ARRAY. STRING, \"foo\", TERM, TRUNC)\n /** Writes an index value. */\n _e(t, e) {\n this.me(t, e), \n // Write separator to split index values\n // (see go/firestore-storage-format#encodings).\n e.ge();\n }\n me(t, e) {\n if (\"nullValue\" in t) this.ye(e, 5); else if (\"booleanValue\" in t) this.ye(e, 10), \n e.pe(t.booleanValue ? 1 : 0); else if (\"integerValue\" in t) this.ye(e, 15), e.pe(Ce(t.integerValue)); else if (\"doubleValue\" in t) {\n const n = Ce(t.doubleValue);\n isNaN(n) ? this.ye(e, 13) : (this.ye(e, 15), Bt(n) ? \n // -0.0, 0 and 0.0 are all considered the same\n e.pe(0) : e.pe(n));\n } else if (\"timestampValue\" in t) {\n const n = t.timestampValue;\n this.ye(e, 20), \"string\" == typeof n ? e.Ie(n) : (e.Ie(`${n.seconds || \"\"}`), e.pe(n.nanos || 0));\n } else if (\"stringValue\" in t) this.Te(t.stringValue, e), this.Ee(e); else if (\"bytesValue\" in t) this.ye(e, 30), \n e.Ae(xe(t.bytesValue)), this.Ee(e); else if (\"referenceValue\" in t) this.ve(t.referenceValue, e); else if (\"geoPointValue\" in t) {\n const n = t.geoPointValue;\n this.ye(e, 45), e.pe(n.latitude || 0), e.pe(n.longitude || 0);\n } else \"mapValue\" in t ? en(t) ? this.ye(e, Number.MAX_SAFE_INTEGER) : (this.Re(t.mapValue, e), \n this.Ee(e)) : \"arrayValue\" in t ? 
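    // Arrays: write each element, then the truncation marker that terminates all
    // variable-length values.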
(this.Pe(t.arrayValue, e), this.Ee(e)) : O();\n }\n Te(t, e) {\n this.ye(e, 25), this.be(t, e);\n }\n be(t, e) {\n e.Ie(t);\n }\n Re(t, e) {\n const n = t.fields || {};\n this.ye(e, 55);\n for (const t of Object.keys(n)) this.Te(t, e), this.me(n[t], e);\n }\n Pe(t, e) {\n const n = t.values || [];\n this.ye(e, 50);\n for (const t of n) this.me(t, e);\n }\n ve(t, e) {\n this.ye(e, 37);\n ht.fromName(t).path.forEach((t => {\n this.ye(e, 60), this.be(t, e);\n }));\n }\n ye(t, e) {\n t.pe(e);\n }\n Ee(t) {\n // While the SDK does not implement truncation, the truncation marker is\n // used to terminate all variable length values (which are strings, bytes,\n // references, arrays and maps).\n t.pe(2);\n }\n}\n\nbr.Ve = new br;\n\n/**\n * Counts the number of zeros in a byte.\n *\n * Visible for testing.\n */\nfunction Vr(t) {\n if (0 === t) return 8;\n let e = 0;\n return t >> 4 == 0 && (\n // Test if the first four bits are zero.\n e += 4, t <<= 4), t >> 6 == 0 && (\n // Test if the first two (or next two) bits are zero.\n e += 2, t <<= 2), t >> 7 == 0 && (\n // Test if the remaining bit is zero.\n e += 1), e;\n}\n\n/** Counts the number of leading zeros in the given byte array. */\n/**\n * Returns the number of bytes required to store \"value\". Leading zero bytes\n * are skipped.\n */\nfunction Sr(t) {\n // This is just the number of bytes for the unsigned representation of the number.\n const e = 64 - function(t) {\n let e = 0;\n for (let n = 0; n < 8; ++n) {\n const s = Vr(255 & t[n]);\n if (e += s, 8 !== s) break;\n }\n return e;\n }(t);\n return Math.ceil(e / 8);\n}\n\n/**\n * OrderedCodeWriter is a minimal-allocation implementation of the writing\n * behavior defined by the backend.\n *\n * The code is ported from its Java counterpart.\n */ class Dr {\n constructor() {\n this.buffer = new Uint8Array(1024), this.position = 0;\n }\n Se(t) {\n const e = t[Symbol.iterator]();\n let n = e.next();\n for (;!n.done; ) this.De(n.value), n = e.next();\n this.Ce();\n }\n xe(t) {\n const e = t[Symbol.iterator]();\n let n = e.next();\n for (;!n.done; ) this.Ne(n.value), n = e.next();\n this.ke();\n }\n /** Writes utf8 bytes into this byte sequence, ascending. 
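 * Code points outside the basic multilingual plane are read via codePointAt and
 * written as four-byte sequences.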
*/ Me(t) {\n for (const e of t) {\n const t = e.charCodeAt(0);\n if (t < 128) this.De(t); else if (t < 2048) this.De(960 | t >>> 6), this.De(128 | 63 & t); else if (e < \"\\ud800\" || \"\\udbff\" < e) this.De(480 | t >>> 12), \n this.De(128 | 63 & t >>> 6), this.De(128 | 63 & t); else {\n const t = e.codePointAt(0);\n this.De(240 | t >>> 18), this.De(128 | 63 & t >>> 12), this.De(128 | 63 & t >>> 6), \n this.De(128 | 63 & t);\n }\n }\n this.Ce();\n }\n /** Writes utf8 bytes into this byte sequence, descending */ $e(t) {\n for (const e of t) {\n const t = e.charCodeAt(0);\n if (t < 128) this.Ne(t); else if (t < 2048) this.Ne(960 | t >>> 6), this.Ne(128 | 63 & t); else if (e < \"\\ud800\" || \"\\udbff\" < e) this.Ne(480 | t >>> 12), \n this.Ne(128 | 63 & t >>> 6), this.Ne(128 | 63 & t); else {\n const t = e.codePointAt(0);\n this.Ne(240 | t >>> 18), this.Ne(128 | 63 & t >>> 12), this.Ne(128 | 63 & t >>> 6), \n this.Ne(128 | 63 & t);\n }\n }\n this.ke();\n }\n Oe(t) {\n // Values are encoded with a single byte length prefix, followed by the\n // actual value in big-endian format with leading 0 bytes dropped.\n const e = this.Fe(t), n = Sr(e);\n this.Be(1 + n), this.buffer[this.position++] = 255 & n;\n // Write the length\n for (let t = e.length - n; t < e.length; ++t) this.buffer[this.position++] = 255 & e[t];\n }\n Le(t) {\n // Values are encoded with a single byte length prefix, followed by the\n // inverted value in big-endian format with leading 0 bytes dropped.\n const e = this.Fe(t), n = Sr(e);\n this.Be(1 + n), this.buffer[this.position++] = ~(255 & n);\n // Write the length\n for (let t = e.length - n; t < e.length; ++t) this.buffer[this.position++] = ~(255 & e[t]);\n }\n /**\n * Writes the \"infinity\" byte sequence that sorts after all other byte\n * sequences written in ascending order.\n */ qe() {\n this.Ue(255), this.Ue(255);\n }\n /**\n * Writes the \"infinity\" byte sequence that sorts before all other byte\n * sequences written in descending order.\n */ Ke() {\n this.Ge(255), this.Ge(255);\n }\n /**\n * Resets the buffer such that it is the same as when it was newly\n * constructed.\n */ reset() {\n this.position = 0;\n }\n seed(t) {\n this.Be(t.length), this.buffer.set(t, this.position), this.position += t.length;\n }\n /** Makes a copy of the encoded bytes in this buffer. */ Qe() {\n return this.buffer.slice(0, this.position);\n }\n /**\n * Encodes `val` into an encoding so that the order matches the IEEE 754\n * floating-point comparison results with the following exceptions:\n * -0.0 < 0.0\n * all non-NaN < NaN\n * NaN = NaN\n */ Fe(t) {\n const e = \n /** Converts a JavaScript number to a byte array (using big endian encoding). */\n function(t) {\n const e = new DataView(new ArrayBuffer(8));\n return e.setFloat64(0, t, /* littleEndian= */ !1), new Uint8Array(e.buffer);\n }(t), n = 0 != (128 & e[0]);\n // Check if the first bit is set. We use a bit mask since value[0] is\n // encoded as a number from 0 to 255.\n // Revert the two complement to get natural ordering\n e[0] ^= n ? 255 : 128;\n for (let t = 1; t < e.length; ++t) e[t] ^= n ? 255 : 0;\n return e;\n }\n /** Writes a single byte ascending to the buffer. */ De(t) {\n const e = 255 & t;\n 0 === e ? (this.Ue(0), this.Ue(255)) : 255 === e ? (this.Ue(255), this.Ue(0)) : this.Ue(e);\n }\n /** Writes a single byte descending to the buffer. */ Ne(t) {\n const e = 255 & t;\n 0 === e ? (this.Ge(0), this.Ge(255)) : 255 === e ? 
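    // Like the ascending variant, 0x00 and 0xff are escaped so that content bytes
    // cannot collide with the separator and "infinity" markers; Ge writes the
    // inverted byte for descending order.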
(this.Ge(255), this.Ge(0)) : this.Ge(t);\n }\n Ce() {\n this.Ue(0), this.Ue(1);\n }\n ke() {\n this.Ge(0), this.Ge(1);\n }\n Ue(t) {\n this.Be(1), this.buffer[this.position++] = t;\n }\n Ge(t) {\n this.Be(1), this.buffer[this.position++] = ~t;\n }\n Be(t) {\n const e = t + this.position;\n if (e <= this.buffer.length) return;\n // Try doubling.\n let n = 2 * this.buffer.length;\n // Still not big enough? Just allocate the right size.\n n < e && (n = e);\n // Create the new buffer.\n const s = new Uint8Array(n);\n s.set(this.buffer), // copy old data\n this.buffer = s;\n }\n}\n\nclass Cr {\n constructor(t) {\n this.je = t;\n }\n Ae(t) {\n this.je.Se(t);\n }\n Ie(t) {\n this.je.Me(t);\n }\n pe(t) {\n this.je.Oe(t);\n }\n ge() {\n this.je.qe();\n }\n}\n\nclass xr {\n constructor(t) {\n this.je = t;\n }\n Ae(t) {\n this.je.xe(t);\n }\n Ie(t) {\n this.je.$e(t);\n }\n pe(t) {\n this.je.Le(t);\n }\n ge() {\n this.je.Ke();\n }\n}\n\n/**\n * Implements `DirectionalIndexByteEncoder` using `OrderedCodeWriter` for the\n * actual encoding.\n */ class Nr {\n constructor() {\n this.je = new Dr, this.ze = new Cr(this.je), this.We = new xr(this.je);\n }\n seed(t) {\n this.je.seed(t);\n }\n He(t) {\n return 0 /* IndexKind.ASCENDING */ === t ? this.ze : this.We;\n }\n Qe() {\n return this.je.Qe();\n }\n reset() {\n this.je.reset();\n }\n}\n\n/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/** Represents an index entry saved by the SDK in persisted storage. */ class kr {\n constructor(t, e, n, s) {\n this.indexId = t, this.documentKey = e, this.arrayValue = n, this.directionalValue = s;\n }\n /**\n * Returns an IndexEntry entry that sorts immediately after the current\n * directional value.\n */ Je() {\n const t = this.directionalValue.length, e = 0 === t || 255 === this.directionalValue[t - 1] ? t + 1 : t, n = new Uint8Array(e);\n return n.set(this.directionalValue, 0), e !== t ? n.set([ 0 ], this.directionalValue.length) : ++n[n.length - 1], \n new kr(this.indexId, this.documentKey, this.arrayValue, n);\n }\n}\n\nfunction Mr(t, e) {\n let n = t.indexId - e.indexId;\n return 0 !== n ? n : (n = $r(t.arrayValue, e.arrayValue), 0 !== n ? n : (n = $r(t.directionalValue, e.directionalValue), \n 0 !== n ? 
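    // Index values are identical: fall back to the document key ordering.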
n : ht.comparator(t.documentKey, e.documentKey)));\n}\n\nfunction $r(t, e) {\n for (let n = 0; n < t.length && n < e.length; ++n) {\n const s = t[n] - e[n];\n if (0 !== s) return s;\n }\n return t.length - e.length;\n}\n\n/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * A light query planner for Firestore.\n *\n * This class matches a `FieldIndex` against a Firestore Query `Target`. It\n * determines whether a given index can be used to serve the specified target.\n *\n * The following table showcases some possible index configurations:\n *\n * Query | Index\n * -----------------------------------------------------------------------------\n * where('a', '==', 'a').where('b', '==', 'b') | a ASC, b DESC\n * where('a', '==', 'a').where('b', '==', 'b') | a ASC\n * where('a', '==', 'a').where('b', '==', 'b') | b DESC\n * where('a', '>=', 'a').orderBy('a') | a ASC\n * where('a', '>=', 'a').orderBy('a', 'desc') | a DESC\n * where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC, b ASC\n * where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC\n * where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS, b ASCENDING\n * where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS\n */ class Or {\n constructor(t) {\n this.collectionId = null != t.collectionGroup ? t.collectionGroup : t.path.lastSegment(), \n this.Ye = t.orderBy, this.Xe = [];\n for (const e of t.filters) {\n const t = e;\n t.isInequality() ? this.Ze = t : this.Xe.push(t);\n }\n }\n /**\n * Returns whether the index can be used to serve the TargetIndexMatcher's\n * target.\n *\n * An index is considered capable of serving the target when:\n * - The target uses all index segments for its filters and orderBy clauses.\n * The target can have additional filter and orderBy clauses, but not\n * fewer.\n * - If an ArrayContains/ArrayContainsAnyfilter is used, the index must also\n * have a corresponding `CONTAINS` segment.\n * - All directional index segments can be mapped to the target as a series of\n * equality filters, a single inequality filter and a series of orderBy\n * clauses.\n * - The segments that represent the equality filters may appear out of order.\n * - The optional segment for the inequality filter must appear after all\n * equality segments.\n * - The segments that represent that orderBy clause of the target must appear\n * in order after all equality and inequality segments. Single orderBy\n * clauses cannot be skipped, but a continuous orderBy suffix may be\n * omitted.\n */ tn(t) {\n F(t.collectionGroup === this.collectionId);\n // If there is an array element, find a matching filter.\n const e = ft(t);\n if (void 0 !== e && !this.en(e)) return !1;\n const n = dt(t);\n let s = new Set, i = 0, r = 0;\n // Process all equalities first. 
Equalities can appear out of order.\n for (;i < n.length && this.en(n[i]); ++i) s = s.add(n[i].fieldPath.canonicalString());\n // If we already have processed all segments, all segments are used to serve\n // the equality filters and we do not need to map any segments to the\n // target's inequality and orderBy clauses.\n if (i === n.length) return !0;\n if (void 0 !== this.Ze) {\n // If there is an inequality filter and the field was not in one of the\n // equality filters above, the next segment must match both the filter\n // and the first orderBy clause.\n if (!s.has(this.Ze.field.canonicalString())) {\n const t = n[i];\n if (!this.nn(this.Ze, t) || !this.sn(this.Ye[r++], t)) return !1;\n }\n ++i;\n }\n // All remaining segments need to represent the prefix of the target's\n // orderBy.\n for (;i < n.length; ++i) {\n const t = n[i];\n if (r >= this.Ye.length || !this.sn(this.Ye[r++], t)) return !1;\n }\n return !0;\n }\n en(t) {\n for (const e of this.Xe) if (this.nn(e, t)) return !0;\n return !1;\n }\n nn(t, e) {\n if (void 0 === t || !t.field.isEqual(e.fieldPath)) return !1;\n const n = \"array-contains\" /* Operator.ARRAY_CONTAINS */ === t.op || \"array-contains-any\" /* Operator.ARRAY_CONTAINS_ANY */ === t.op;\n return 2 /* IndexKind.CONTAINS */ === e.kind === n;\n }\n sn(t, e) {\n return !!t.field.isEqual(e.fieldPath) && (0 /* IndexKind.ASCENDING */ === e.kind && \"asc\" /* Direction.ASCENDING */ === t.dir || 1 /* IndexKind.DESCENDING */ === e.kind && \"desc\" /* Direction.DESCENDING */ === t.dir);\n }\n}\n\n/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Provides utility functions that help with boolean logic transformations needed for handling\n * complex filters used in queries.\n */\n/**\n * The `in` filter is only a syntactic sugar over a disjunction of equalities. For instance: `a in\n * [1,2,3]` is in fact `a==1 || a==2 || a==3`. This method expands any `in` filter in the given\n * input into a disjunction of equality filters and returns the expanded filter.\n */ function Fr(t) {\n var e, n;\n if (F(t instanceof mn || t instanceof gn), t instanceof mn) {\n if (t instanceof Cn) {\n const s = (null === (n = null === (e = t.value.arrayValue) || void 0 === e ? void 0 : e.values) || void 0 === n ? void 0 : n.map((e => mn.create(t.field, \"==\" /* Operator.EQUAL */ , e)))) || [];\n return gn.create(s, \"or\" /* CompositeOperator.OR */);\n }\n // We have reached other kinds of field filters.\n return t;\n }\n // We have a composite filter.\n const s = t.filters.map((t => Fr(t)));\n return gn.create(s, t.op);\n}\n\n/**\n * Given a composite filter, returns the list of terms in its disjunctive normal form.\n *\n * Each element in the return value is one term of the resulting DNF. 
For instance: For the\n * input: (A || B) && C, the DNF form is: (A && C) || (B && C), and the return value is a list\n * with two elements: a composite filter that performs (A && C), and a composite filter that\n * performs (B && C).\n *\n * @param filter the composite filter to calculate DNF transform for.\n * @return the terms in the DNF transform.\n */ function Br(t) {\n if (0 === t.getFilters().length) return [];\n const e = Kr(Fr(t));\n return F(Ur(e)), Lr(e) || qr(e) ? [ e ] : e.getFilters();\n}\n\n/** Returns true if the given filter is a single field filter. e.g. (a == 10). */ function Lr(t) {\n return t instanceof mn;\n}\n\n/**\n * Returns true if the given filter is the conjunction of one or more field filters. e.g. (a == 10\n * && b == 20)\n */ function qr(t) {\n return t instanceof gn && In(t);\n}\n\n/**\n * Returns whether or not the given filter is in disjunctive normal form (DNF).\n *\n *
In boolean logic, a disjunctive normal form (DNF) is a canonical normal form of a logical\n * formula consisting of a disjunction of conjunctions; it can also be described as an OR of ANDs.\n *\n *
For more info, visit: https://en.wikipedia.org/wiki/Disjunctive_normal_form\n */ function Ur(t) {\n return Lr(t) || qr(t) || \n /**\n * Returns true if the given filter is the disjunction of one or more \"flat conjunctions\" and\n * field filters. e.g. (a == 10) || (b==20 && c==30)\n */\n function(t) {\n if (t instanceof gn && pn(t)) {\n for (const e of t.getFilters()) if (!Lr(e) && !qr(e)) return !1;\n return !0;\n }\n return !1;\n }(t);\n}\n\nfunction Kr(t) {\n if (F(t instanceof mn || t instanceof gn), t instanceof mn) return t;\n if (1 === t.filters.length) return Kr(t.filters[0]);\n // Compute DNF for each of the subfilters first\n const e = t.filters.map((t => Kr(t)));\n let n = gn.create(e, t.op);\n return n = jr(n), Ur(n) ? n : (F(n instanceof gn), F(yn(n)), F(n.filters.length > 1), \n n.filters.reduce(((t, e) => Gr(t, e))));\n}\n\nfunction Gr(t, e) {\n let n;\n return F(t instanceof mn || t instanceof gn), F(e instanceof mn || e instanceof gn), \n // FieldFilter FieldFilter\n n = t instanceof mn ? e instanceof mn ? function(t, e) {\n // Conjunction distribution for two field filters is the conjunction of them.\n return gn.create([ t, e ], \"and\" /* CompositeOperator.AND */);\n }(t, e) : Qr(t, e) : e instanceof mn ? Qr(e, t) : function(t, e) {\n // There are four cases:\n // (A & B) & (C & D) --> (A & B & C & D)\n // (A & B) & (C | D) --> (A & B & C) | (A & B & D)\n // (A | B) & (C & D) --> (C & D & A) | (C & D & B)\n // (A | B) & (C | D) --> (A & C) | (A & D) | (B & C) | (B & D)\n // Case 1 is a merge.\n if (F(t.filters.length > 0 && e.filters.length > 0), yn(t) && yn(e)) return vn(t, e.getFilters());\n // Case 2,3,4 all have at least one side (lhs or rhs) that is a disjunction. In all three cases\n // we should take each element of the disjunction and distribute it over the other side, and\n // return the disjunction of the distribution results.\n const n = pn(t) ? t : e, s = pn(t) ? e : t, i = n.filters.map((t => Gr(t, s)));\n return gn.create(i, \"or\" /* CompositeOperator.OR */);\n }(t, e), jr(n);\n}\n\nfunction Qr(t, e) {\n // There are two cases:\n // A & (B & C) --> (A & B & C)\n // A & (B | C) --> (A & B) | (A & C)\n if (yn(e)) \n // Case 1\n return vn(e, t.getFilters());\n {\n // Case 2\n const n = e.filters.map((e => Gr(t, e)));\n return gn.create(n, \"or\" /* CompositeOperator.OR */);\n }\n}\n\n/**\n * Applies the associativity property to the given filter and returns the resulting filter.\n *\n *
\n * - A | (B | C) == (A | B) | C == (A | B | C)\n *
- A & (B & C) == (A & B) & C == (A & B & C)\n *
\n *\n * For more info, visit: https://en.wikipedia.org/wiki/Associative_property#Propositional_logic\n */ function jr(t) {\n if (F(t instanceof mn || t instanceof gn), t instanceof mn) return t;\n const e = t.getFilters();\n // If the composite filter only contains 1 filter, apply associativity to it.\n if (1 === e.length) return jr(e[0]);\n // Associativity applied to a flat composite filter results is itself.\n if (Tn(t)) return t;\n // First apply associativity to all subfilters. This will in turn recursively apply\n // associativity to all nested composite filters and field filters.\n const n = e.map((t => jr(t))), s = [];\n // For composite subfilters that perform the same kind of logical operation as `compositeFilter`\n // take out their filters and add them to `compositeFilter`. For example:\n // compositeFilter = (A | (B | C | D))\n // compositeSubfilter = (B | C | D)\n // Result: (A | B | C | D)\n // Note that the `compositeSubfilter` has been eliminated, and its filters (B, C, D) have been\n // added to the top-level \"compositeFilter\".\n return n.forEach((e => {\n e instanceof mn ? s.push(e) : e instanceof gn && (e.op === t.op ? \n // compositeFilter: (A | (B | C))\n // compositeSubfilter: (B | C)\n // Result: (A | B | C)\n s.push(...e.filters) : \n // compositeFilter: (A | (B & C))\n // compositeSubfilter: (B & C)\n // Result: (A | (B & C))\n s.push(e));\n })), 1 === s.length ? s[0] : gn.create(s, t.op);\n}\n\n/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * An in-memory implementation of IndexManager.\n */ class zr {\n constructor() {\n this.rn = new Wr;\n }\n addToCollectionParentIndex(t, e) {\n return this.rn.add(e), Rt.resolve();\n }\n getCollectionParents(t, e) {\n return Rt.resolve(this.rn.getEntries(e));\n }\n addFieldIndex(t, e) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve();\n }\n deleteFieldIndex(t, e) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve();\n }\n getDocumentsMatchingTarget(t, e) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve(null);\n }\n getIndexType(t, e) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve(0 /* IndexType.NONE */);\n }\n getFieldIndexes(t, e) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve([]);\n }\n getNextCollectionGroupToUpdate(t) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve(null);\n }\n getMinOffset(t, e) {\n return Rt.resolve(It.min());\n }\n getMinOffsetFromCollectionGroup(t, e) {\n return Rt.resolve(It.min());\n }\n updateCollectionGroup(t, e, n) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve();\n }\n updateIndexEntries(t, e) {\n // Field indices are not supported with memory persistence.\n return Rt.resolve();\n }\n}\n\n/**\n * Internal implementation of the collection-parent index exposed by 
MemoryIndexManager.\n * Also used for in-memory caching by IndexedDbIndexManager and initial index population\n * in indexeddb_schema.ts\n */ class Wr {\n constructor() {\n this.index = {};\n }\n // Returns false if the entry already existed.\n add(t) {\n const e = t.lastSegment(), n = t.popLast(), s = this.index[e] || new Ee(ut.comparator), i = !s.has(n);\n return this.index[e] = s.add(n), i;\n }\n has(t) {\n const e = t.lastSegment(), n = t.popLast(), s = this.index[e];\n return s && s.has(n);\n }\n getEntries(t) {\n return (this.index[t] || new Ee(ut.comparator)).toArray();\n }\n}\n\n/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ const Hr = new Uint8Array(0);\n\n/**\n * A persisted implementation of IndexManager.\n *\n * PORTING NOTE: Unlike iOS and Android, the Web SDK does not memoize index\n * data as it supports multi-tab access.\n */\nclass Jr {\n constructor(t, e) {\n this.user = t, this.databaseId = e, \n /**\n * An in-memory copy of the index entries we've already written since the SDK\n * launched. Used to avoid re-writing the same entry repeatedly.\n *\n * This is *NOT* a complete cache of what's in persistence and so can never be\n * used to satisfy reads.\n */\n this.on = new Wr, \n /**\n * Maps from a target to its equivalent list of sub-targets. 
Each sub-target\n * contains only one term from the target's disjunctive normal form (DNF).\n */\n this.un = new os((t => $n(t)), ((t, e) => On(t, e))), this.uid = t.uid || \"\";\n }\n /**\n * Adds a new entry to the collection parent index.\n *\n * Repeated calls for the same collectionPath should be avoided within a\n * transaction as IndexedDbIndexManager only caches writes once a transaction\n * has been committed.\n */ addToCollectionParentIndex(t, e) {\n if (!this.on.has(e)) {\n const n = e.lastSegment(), s = e.popLast();\n t.addOnCommittedListener((() => {\n // Add the collection to the in memory cache only if the transaction was\n // successfully committed.\n this.on.add(e);\n }));\n const i = {\n collectionId: n,\n parent: qt(s)\n };\n return Yr(t).put(i);\n }\n return Rt.resolve();\n }\n getCollectionParents(t, e) {\n const n = [], s = IDBKeyRange.bound([ e, \"\" ], [ st(e), \"\" ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0);\n return Yr(t).j(s).next((t => {\n for (const s of t) {\n // This collectionId guard shouldn't be necessary (and isn't as long\n // as we're running in a real browser), but there's a bug in\n // indexeddbshim that breaks our range in our tests running in node:\n // https://github.com/axemclion/IndexedDBShim/issues/334\n if (s.collectionId !== e) break;\n n.push(Gt(s.parent));\n }\n return n;\n }));\n }\n addFieldIndex(t, e) {\n // TODO(indexing): Verify that the auto-incrementing index ID works in\n // Safari & Firefox.\n const n = Zr(t), s = function(t) {\n return {\n indexId: t.indexId,\n collectionGroup: t.collectionGroup,\n fields: t.fields.map((t => [ t.fieldPath.canonicalString(), t.kind ]))\n };\n }(e);\n delete s.indexId;\n // `indexId` is auto-populated by IndexedDb\n const i = n.add(s);\n if (e.indexState) {\n const n = to(t);\n return i.next((t => {\n n.put(Tr(t, this.user, e.indexState.sequenceNumber, e.indexState.offset));\n }));\n }\n return i.next();\n }\n deleteFieldIndex(t, e) {\n const n = Zr(t), s = to(t), i = Xr(t);\n return n.delete(e.indexId).next((() => s.delete(IDBKeyRange.bound([ e.indexId ], [ e.indexId + 1 ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0)))).next((() => i.delete(IDBKeyRange.bound([ e.indexId ], [ e.indexId + 1 ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0))));\n }\n getDocumentsMatchingTarget(t, e) {\n const n = Xr(t);\n let s = !0;\n const i = new Map;\n return Rt.forEach(this.cn(e), (e => this.an(t, e).next((t => {\n s && (s = !!t), i.set(e, t);\n })))).next((() => {\n if (s) {\n let t = gs();\n const s = [];\n return Rt.forEach(i, ((i, r) => {\n var o;\n N(\"IndexedDbIndexManager\", `Using index ${o = i, `id=${o.indexId}|cg=${o.collectionGroup}|f=${o.fields.map((t => `${t.fieldPath}:${t.kind}`)).join(\",\")}`} to execute ${$n(e)}`);\n const u = function(t, e) {\n const n = ft(e);\n if (void 0 === n) return null;\n for (const e of Bn(t, n.fieldPath)) switch (e.op) {\n case \"array-contains-any\" /* Operator.ARRAY_CONTAINS_ANY */ :\n return e.value.arrayValue.values || [];\n\n case \"array-contains\" /* Operator.ARRAY_CONTAINS */ :\n return [ e.value ];\n // Remaining filters are not array filters.\n }\n return null;\n }\n /**\n * Returns the list of values that are used in != or NOT_IN filters. 
Returns\n * `null` if there are no such filters.\n */ (r, i), c = function(t, e) {\n const n = new Map;\n for (const s of dt(e)) for (const e of Bn(t, s.fieldPath)) switch (e.op) {\n case \"==\" /* Operator.EQUAL */ :\n case \"in\" /* Operator.IN */ :\n // Encode equality prefix, which is encoded in the index value before\n // the inequality (e.g. `a == 'a' && b != 'b'` is encoded to\n // `value != 'ab'`).\n n.set(s.fieldPath.canonicalString(), e.value);\n break;\n\n case \"not-in\" /* Operator.NOT_IN */ :\n case \"!=\" /* Operator.NOT_EQUAL */ :\n // NotIn/NotEqual is always a suffix. There cannot be any remaining\n // segments and hence we can return early here.\n return n.set(s.fieldPath.canonicalString(), e.value), Array.from(n.values());\n // Remaining filters cannot be used as notIn bounds.\n }\n return null;\n }\n /**\n * Returns a lower bound of field values that can be used as a starting point to\n * scan the index defined by `fieldIndex`. Returns `MIN_VALUE` if no lower bound\n * exists.\n */ (r, i), a = function(t, e) {\n const n = [];\n let s = !0;\n // For each segment, retrieve a lower bound if there is a suitable filter or\n // startAt.\n for (const i of dt(e)) {\n const e = 0 /* IndexKind.ASCENDING */ === i.kind ? Ln(t, i.fieldPath, t.startAt) : qn(t, i.fieldPath, t.startAt);\n n.push(e.value), s && (s = e.inclusive);\n }\n return new hn(n, s);\n }\n /**\n * Returns an upper bound of field values that can be used as an ending point\n * when scanning the index defined by `fieldIndex`. Returns `MAX_VALUE` if no\n * upper bound exists.\n */ (r, i), h = function(t, e) {\n const n = [];\n let s = !0;\n // For each segment, retrieve an upper bound if there is a suitable filter or\n // endAt.\n for (const i of dt(e)) {\n const e = 0 /* IndexKind.ASCENDING */ === i.kind ? qn(t, i.fieldPath, t.endAt) : Ln(t, i.fieldPath, t.endAt);\n n.push(e.value), s && (s = e.inclusive);\n }\n return new hn(n, s);\n }(r, i), l = this.hn(i, r, a), f = this.hn(i, r, h), d = this.ln(i, r, c), w = this.fn(i.indexId, u, l, a.inclusive, f, h.inclusive, d);\n return Rt.forEach(w, (i => n.H(i, e.limit).next((e => {\n e.forEach((e => {\n const n = ht.fromSegments(e.documentKey);\n t.has(n) || (t = t.add(n), s.push(n));\n }));\n }))));\n })).next((() => s));\n }\n return Rt.resolve(null);\n }));\n }\n cn(t) {\n let e = this.un.get(t);\n if (e) return e;\n if (0 === t.filters.length) e = [ t ]; else {\n e = Br(gn.create(t.filters, \"and\" /* CompositeOperator.AND */)).map((e => Mn(t.path, t.collectionGroup, t.orderBy, e.getFilters(), t.limit, t.startAt, t.endAt)));\n }\n return this.un.set(t, e), e;\n }\n /**\n * Constructs a key range query on `DbIndexEntryStore` that unions all\n * bounds.\n */ fn(t, e, n, s, i, r, o) {\n // The number of total index scans we union together. This is similar to a\n // distributed normal form, but adapted for array values. We create a single\n // index range per value in an ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filter\n // combined with the values from the query bounds.\n const u = (null != e ? e.length : 1) * Math.max(n.length, i.length), c = u / (null != e ? e.length : 1), a = [];\n for (let h = 0; h < u; ++h) {\n const u = e ? this.dn(e[h / c]) : Hr, l = this.wn(t, u, n[h % c], s), f = this._n(t, u, i[h % c], r), d = o.map((e => this.wn(t, u, e, \n /* inclusive= */ !0)));\n a.push(...this.createRange(l, f, d));\n }\n return a;\n }\n /** Generates the lower bound for `arrayValue` and `directionalValue`. 
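If the bound is exclusive, the entry is advanced to its immediate successor (via `Je()`) so that only strictly greater index entries match. 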
*/ wn(t, e, n, s) {\n const i = new kr(t, ht.empty(), e, n);\n return s ? i : i.Je();\n }\n /** Generates the upper bound for `arrayValue` and `directionalValue`. */ _n(t, e, n, s) {\n const i = new kr(t, ht.empty(), e, n);\n return s ? i.Je() : i;\n }\n an(t, e) {\n const n = new Or(e), s = null != e.collectionGroup ? e.collectionGroup : e.path.lastSegment();\n return this.getFieldIndexes(t, s).next((t => {\n // Return the index with the most number of segments.\n let e = null;\n for (const s of t) {\n n.tn(s) && (!e || s.fields.length > e.fields.length) && (e = s);\n }\n return e;\n }));\n }\n getIndexType(t, e) {\n let n = 2 /* IndexType.FULL */;\n const s = this.cn(e);\n return Rt.forEach(s, (e => this.an(t, e).next((t => {\n t ? 0 /* IndexType.NONE */ !== n && t.fields.length < function(t) {\n let e = new Ee(at.comparator), n = !1;\n for (const s of t.filters) for (const t of s.getFlattenedFilters()) \n // __name__ is not an explicit segment of any index, so we don't need to\n // count it.\n t.field.isKeyField() || (\n // ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filters must be counted separately.\n // For instance, it is possible to have an index for \"a ARRAY a ASC\". Even\n // though these are on the same field, they should be counted as two\n // separate segments in an index.\n \"array-contains\" /* Operator.ARRAY_CONTAINS */ === t.op || \"array-contains-any\" /* Operator.ARRAY_CONTAINS_ANY */ === t.op ? n = !0 : e = e.add(t.field));\n for (const n of t.orderBy) \n // __name__ is not an explicit segment of any index, so we don't need to\n // count it.\n n.field.isKeyField() || (e = e.add(n.field));\n return e.size + (n ? 1 : 0);\n }(e) && (n = 1 /* IndexType.PARTIAL */) : n = 0 /* IndexType.NONE */;\n })))).next((() => \n // OR queries have more than one sub-target (one sub-target per DNF term). We currently consider\n // OR queries that have a `limit` to have a partial index. For such queries we perform sorting\n // and apply the limit in memory as a post-processing step.\n function(t) {\n return null !== t.limit;\n }(e) && s.length > 1 && 2 /* IndexType.FULL */ === n ? 1 /* IndexType.PARTIAL */ : n));\n }\n /**\n * Returns the byte encoded form of the directional values in the field index.\n * Returns `null` if the document does not have all fields specified in the\n * index.\n */ mn(t, e) {\n const n = new Nr;\n for (const s of dt(t)) {\n const t = e.data.field(s.fieldPath);\n if (null == t) return null;\n const i = n.He(s.kind);\n br.Ve._e(t, i);\n }\n return n.Qe();\n }\n /** Encodes a single value to the ascending index format. */ dn(t) {\n const e = new Nr;\n return br.Ve._e(t, e.He(0 /* IndexKind.ASCENDING */)), e.Qe();\n }\n /**\n * Returns an encoded form of the document key that sorts based on the key\n * ordering of the field index.\n */ gn(t, e) {\n const n = new Nr;\n return br.Ve._e(We(this.databaseId, e), n.He(function(t) {\n const e = dt(t);\n return 0 === e.length ? 0 /* IndexKind.ASCENDING */ : e[e.length - 1].kind;\n }(t))), n.Qe();\n }\n /**\n * Encodes the given field values according to the specification in `target`.\n * For IN queries, a list of possible values is returned.\n */ ln(t, e, n) {\n if (null === n) return [];\n let s = [];\n s.push(new Nr);\n let i = 0;\n for (const r of dt(t)) {\n const t = n[i++];\n for (const n of s) if (this.yn(e, r.fieldPath) && Je(t)) s = this.pn(s, r, t); else {\n const e = n.He(r.kind);\n br.Ve._e(t, e);\n }\n }\n return this.In(s);\n }\n /**\n * Encodes the given bounds according to the specification in `target`. 
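(The bound is reduced to its `position` values, which are then encoded exactly like filter values via `ln()`.) 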
For IN\n * queries, a list of possible values is returned.\n */ hn(t, e, n) {\n return this.ln(t, e, n.position);\n }\n /** Returns the byte representation for the provided encoders. */ In(t) {\n const e = [];\n for (let n = 0; n < t.length; ++n) e[n] = t[n].Qe();\n return e;\n }\n /**\n * Creates a separate encoder for each element of an array.\n *\n * The method appends each value to all existing encoders (e.g. filter(\"a\",\n * \"==\", \"a1\").filter(\"b\", \"in\", [\"b1\", \"b2\"]) becomes [\"a1,b1\", \"a1,b2\"]). A\n * list of new encoders is returned.\n */ pn(t, e, n) {\n const s = [ ...t ], i = [];\n for (const t of n.arrayValue.values || []) for (const n of s) {\n const s = new Nr;\n s.seed(n.Qe()), br.Ve._e(t, s.He(e.kind)), i.push(s);\n }\n return i;\n }\n yn(t, e) {\n return !!t.filters.find((t => t instanceof mn && t.field.isEqual(e) && (\"in\" /* Operator.IN */ === t.op || \"not-in\" /* Operator.NOT_IN */ === t.op)));\n }\n getFieldIndexes(t, e) {\n const n = Zr(t), s = to(t);\n return (e ? n.j(\"collectionGroupIndex\", IDBKeyRange.bound(e, e)) : n.j()).next((t => {\n const e = [];\n return Rt.forEach(t, (t => s.get([ t.indexId, this.uid ]).next((n => {\n e.push(function(t, e) {\n const n = e ? new gt(e.sequenceNumber, new It(wr(e.readTime), new ht(Gt(e.documentKey)), e.largestBatchId)) : gt.empty(), s = t.fields.map((([t, e]) => new _t(at.fromServerFormat(t), e)));\n return new lt(t.indexId, t.collectionGroup, s, n);\n }(t, n));\n })))).next((() => e));\n }));\n }\n getNextCollectionGroupToUpdate(t) {\n return this.getFieldIndexes(t).next((t => 0 === t.length ? null : (t.sort(((t, e) => {\n const n = t.indexState.sequenceNumber - e.indexState.sequenceNumber;\n return 0 !== n ? n : et(t.collectionGroup, e.collectionGroup);\n })), t[0].collectionGroup)));\n }\n updateCollectionGroup(t, e, n) {\n const s = Zr(t), i = to(t);\n return this.Tn(t).next((t => s.j(\"collectionGroupIndex\", IDBKeyRange.bound(e, e)).next((e => Rt.forEach(e, (e => i.put(Tr(e.indexId, this.user, t, n))))))));\n }\n updateIndexEntries(t, e) {\n // Porting Note: `getFieldIndexes()` on Web does not cache index lookups as\n // it could be used across different IndexedDB transactions. As any cached\n // data might be invalidated by other multi-tab clients, we can only trust\n // data within a single IndexedDB transaction. We therefore add a cache\n // here.\n const n = new Map;\n return Rt.forEach(e, ((e, s) => {\n const i = n.get(e.collectionGroup);\n return (i ? Rt.resolve(i) : this.getFieldIndexes(t, e.collectionGroup)).next((i => (n.set(e.collectionGroup, i), \n Rt.forEach(i, (n => this.En(t, e, n).next((e => {\n const i = this.An(s, n);\n return e.isEqual(i) ? Rt.resolve() : this.vn(t, s, n, e, i);\n })))))));\n }));\n }\n Rn(t, e, n, s) {\n return Xr(t).put({\n indexId: s.indexId,\n uid: this.uid,\n arrayValue: s.arrayValue,\n directionalValue: s.directionalValue,\n orderedDocumentKey: this.gn(n, e.key),\n documentKey: e.key.path.toArray()\n });\n }\n Pn(t, e, n, s) {\n return Xr(t).delete([ s.indexId, this.uid, s.arrayValue, s.directionalValue, this.gn(n, e.key), e.key.path.toArray() ]);\n }\n En(t, e, n) {\n const s = Xr(t);\n let i = new Ee(Mr);\n return s.X({\n index: \"documentKeyIndex\",\n range: IDBKeyRange.only([ n.indexId, this.uid, this.gn(n, e) ])\n }, ((t, s) => {\n i = i.add(new kr(n.indexId, e, s.arrayValue, s.directionalValue));\n })).next((() => i));\n }\n /** Creates the index entries for the given document. 
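If the index has an array segment, one entry is written per element of the document's array value; otherwise a single entry with an empty array component is written. 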
*/ An(t, e) {\n let n = new Ee(Mr);\n const s = this.mn(e, t);\n if (null == s) return n;\n const i = ft(e);\n if (null != i) {\n const r = t.data.field(i.fieldPath);\n if (Je(r)) for (const i of r.arrayValue.values || []) n = n.add(new kr(e.indexId, t.key, this.dn(i), s));\n } else n = n.add(new kr(e.indexId, t.key, Hr, s));\n return n;\n }\n /**\n * Updates the index entries for the provided document by deleting entries\n * that are no longer referenced in `newEntries` and adding all newly added\n * entries.\n */ vn(t, e, n, s, i) {\n N(\"IndexedDbIndexManager\", \"Updating index entries for document '%s'\", e.key);\n const r = [];\n return function(t, e, n, s, i) {\n const r = t.getIterator(), o = e.getIterator();\n let u = ve(r), c = ve(o);\n // Walk through the two sets at the same time, using the ordering defined by\n // `comparator`.\n for (;u || c; ) {\n let t = !1, e = !1;\n if (u && c) {\n const s = n(u, c);\n s < 0 ? \n // The element was removed if the next element in our ordered\n // walkthrough is only in `before`.\n e = !0 : s > 0 && (\n // The element was added if the next element in our ordered walkthrough\n // is only in `after`.\n t = !0);\n } else null != u ? e = !0 : t = !0;\n t ? (s(c), c = ve(o)) : e ? (i(u), u = ve(r)) : (u = ve(r), c = ve(o));\n }\n }(s, i, Mr, (\n /* onAdd= */ s => {\n r.push(this.Rn(t, e, n, s));\n }), (\n /* onRemove= */ s => {\n r.push(this.Pn(t, e, n, s));\n })), Rt.waitFor(r);\n }\n Tn(t) {\n let e = 1;\n return to(t).X({\n index: \"sequenceNumberIndex\",\n reverse: !0,\n range: IDBKeyRange.upperBound([ this.uid, Number.MAX_SAFE_INTEGER ])\n }, ((t, n, s) => {\n s.done(), e = n.sequenceNumber + 1;\n })).next((() => e));\n }\n /**\n * Returns a new set of IDB ranges that splits the existing range and excludes\n * any values that match the `notInValue` from these ranges. As an example,\n * '[foo > 2 && foo != 3]` becomes `[foo > 2 && < 3, foo > 3]`.\n */ createRange(t, e, n) {\n // The notIn values need to be sorted and unique so that we can return a\n // sorted set of non-overlapping ranges.\n n = n.sort(((t, e) => Mr(t, e))).filter(((t, e, n) => !e || 0 !== Mr(t, n[e - 1])));\n const s = [];\n s.push(t);\n for (const i of n) {\n const n = Mr(i, t), r = Mr(i, e);\n if (0 === n) \n // `notInValue` is the lower bound. 
We therefore need to raise the bound\n // to the next value.\n s[0] = t.Je(); else if (n > 0 && r < 0) \n // `notInValue` is in the middle of the range\n s.push(i), s.push(i.Je()); else if (r > 0) \n // `notInValue` (and all following values) are out of the range\n break;\n }\n s.push(e);\n const i = [];\n for (let t = 0; t < s.length; t += 2) {\n // If we encounter two bounds that will create an unmatchable key range,\n // then we return an empty set of key ranges.\n if (this.bn(s[t], s[t + 1])) return [];\n const e = [ s[t].indexId, this.uid, s[t].arrayValue, s[t].directionalValue, Hr, [] ], n = [ s[t + 1].indexId, this.uid, s[t + 1].arrayValue, s[t + 1].directionalValue, Hr, [] ];\n i.push(IDBKeyRange.bound(e, n));\n }\n return i;\n }\n bn(t, e) {\n // If lower bound is greater than the upper bound, then the key\n // range can never be matched.\n return Mr(t, e) > 0;\n }\n getMinOffsetFromCollectionGroup(t, e) {\n return this.getFieldIndexes(t, e).next(eo);\n }\n getMinOffset(t, e) {\n return Rt.mapArray(this.cn(e), (e => this.an(t, e).next((t => t || O())))).next(eo);\n }\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the collectionParents\n * document store.\n */ function Yr(t) {\n return _e(t, \"collectionParents\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the index entry object store.\n */ function Xr(t) {\n return _e(t, \"indexEntries\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the index configuration object store.\n */ function Zr(t) {\n return _e(t, \"indexConfiguration\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the index state object store.\n */ function to(t) {\n return _e(t, \"indexState\");\n}\n\nfunction eo(t) {\n F(0 !== t.length);\n let e = t[0].indexState.offset, n = e.largestBatchId;\n for (let s = 1; s < t.length; s++) {\n const i = t[s].indexState.offset;\n Tt(i, e) < 0 && (e = i), n < i.largestBatchId && (n = i.largestBatchId);\n }\n return new It(e.readTime, e.documentKey, n);\n}\n\n/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ const no = {\n didRun: !1,\n sequenceNumbersCollected: 0,\n targetsRemoved: 0,\n documentsRemoved: 0\n};\n\nclass so {\n constructor(\n // When we attempt to collect, we will only do so if the cache size is greater than this\n // threshold. Passing `COLLECTION_DISABLED` here will cause collection to always be skipped.\n t, \n // The percentage of sequence numbers that we will attempt to collect\n e, \n // A cap on the total number of sequence numbers that will be collected. 
This prevents\n // us from collecting a huge number of sequence numbers if the cache has grown very large.\n n) {\n this.cacheSizeCollectionThreshold = t, this.percentileToCollect = e, this.maximumSequenceNumbersToCollect = n;\n }\n static withCacheSize(t) {\n return new so(t, so.DEFAULT_COLLECTION_PERCENTILE, so.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);\n }\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Delete a mutation batch and the associated document mutations.\n * @returns A PersistencePromise of the document keys of the mutations that\n * were removed.\n */\nfunction io(t, e, n) {\n const s = t.store(\"mutations\"), i = t.store(\"documentMutations\"), r = [], o = IDBKeyRange.only(n.batchId);\n let u = 0;\n const c = s.X({\n range: o\n }, ((t, e, n) => (u++, n.delete())));\n r.push(c.next((() => {\n F(1 === u);\n })));\n const a = [];\n for (const t of n.mutations) {\n const s = zt(e, t.key.path, n.batchId);\n r.push(i.delete(s)), a.push(t.key);\n }\n return Rt.waitFor(r).next((() => a));\n}\n\n/**\n * Returns an approximate size for the given document.\n */ function ro(t) {\n if (!t) return 0;\n let e;\n if (t.document) e = t.document; else if (t.unknownDocument) e = t.unknownDocument; else {\n if (!t.noDocument) throw O();\n e = t.noDocument;\n }\n return JSON.stringify(e).length;\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ so.DEFAULT_COLLECTION_PERCENTILE = 10, \nso.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1e3, so.DEFAULT = new so(41943040, so.DEFAULT_COLLECTION_PERCENTILE, so.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT), \nso.DISABLED = new so(-1, 0, 0);\n\n/** A mutation queue for a specific user, backed by IndexedDB. */\nclass oo {\n constructor(\n /**\n * The normalized userId (e.g. null UID => \"\" userId) used to store /\n * retrieve mutations.\n */\n t, e, n, s) {\n this.userId = t, this.serializer = e, this.indexManager = n, this.referenceDelegate = s, \n /**\n * Caches the document keys for pending mutation batches. If the mutation\n * has been removed from IndexedDb, the cached value may continue to\n * be used to retrieve the batch's document keys. 
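(The cache is keyed by batch ID; `Sn()` below consults it before falling back to a store lookup.) 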
To remove a cached value\n * locally, `removeCachedMutationKeys()` should be invoked either directly\n * or through `removeMutationBatches()`.\n *\n * With multi-tab, when the primary client acknowledges or rejects a mutation,\n * this cache is used by secondary clients to invalidate the local\n * view of the documents that were previously affected by the mutation.\n */\n // PORTING NOTE: Multi-tab only.\n this.Vn = {};\n }\n /**\n * Creates a new mutation queue for the given user.\n * @param user - The user for which to create a mutation queue.\n * @param serializer - The serializer to use when persisting to IndexedDb.\n */ static de(t, e, n, s) {\n // TODO(mcg): Figure out what constraints there are on userIDs\n // In particular, are there any reserved characters? are empty ids allowed?\n // For the moment store these together in the same mutations table assuming\n // that empty userIDs aren't allowed.\n F(\"\" !== t.uid);\n const i = t.isAuthenticated() ? t.uid : \"\";\n return new oo(i, e, n, s);\n }\n checkEmpty(t) {\n let e = !0;\n const n = IDBKeyRange.bound([ this.userId, Number.NEGATIVE_INFINITY ], [ this.userId, Number.POSITIVE_INFINITY ]);\n return co(t).X({\n index: \"userMutationsIndex\",\n range: n\n }, ((t, n, s) => {\n e = !1, s.done();\n })).next((() => e));\n }\n addMutationBatch(t, e, n, s) {\n const i = ao(t), r = co(t);\n // The IndexedDb implementation in Chrome (and Firefox) does not handle\n // compound indices that include auto-generated keys correctly. To ensure\n // that the index entry is added correctly in all browsers, we perform two\n // writes: The first write is used to retrieve the next auto-generated Batch\n // ID, and the second write populates the index and stores the actual\n // mutation batch.\n // See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972\n // We write an empty object to obtain key\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n return r.add({}).next((o => {\n F(\"number\" == typeof o);\n const u = new Zs(o, e, n, s), c = function(t, e, n) {\n const s = n.baseMutations.map((e => ji(t.fe, e))), i = n.mutations.map((e => ji(t.fe, e)));\n return {\n userId: e,\n batchId: n.batchId,\n localWriteTimeMs: n.localWriteTime.toMillis(),\n baseMutations: s,\n mutations: i\n };\n }(this.serializer, this.userId, u), a = [];\n let h = new Ee(((t, e) => et(t.canonicalString(), e.canonicalString())));\n for (const t of s) {\n const e = zt(this.userId, t.key.path, o);\n h = h.add(t.key.path.popLast()), a.push(r.put(c)), a.push(i.put(e, Wt));\n }\n return h.forEach((e => {\n a.push(this.indexManager.addToCollectionParentIndex(t, e));\n })), t.addOnCommittedListener((() => {\n this.Vn[o] = u.keys();\n })), Rt.waitFor(a).next((() => u));\n }));\n }\n lookupMutationBatch(t, e) {\n return co(t).get(e).next((t => t ? (F(t.userId === this.userId), _r(this.serializer, t)) : null));\n }\n /**\n * Returns the document keys for the mutation batch with the given batchId.\n * For primary clients, this method returns `null` after\n * `removeMutationBatches()` has been called. Secondary clients return a\n * cached result until `removeCachedMutationKeys()` is invoked.\n */\n // PORTING NOTE: Multi-tab only.\n Sn(t, e) {\n return this.Vn[e] ? 
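// Serve the cached document keys if we still have them in memory.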
Rt.resolve(this.Vn[e]) : this.lookupMutationBatch(t, e).next((t => {\n if (t) {\n const n = t.keys();\n return this.Vn[e] = n, n;\n }\n return null;\n }));\n }\n getNextMutationBatchAfterBatchId(t, e) {\n const n = e + 1, s = IDBKeyRange.lowerBound([ this.userId, n ]);\n let i = null;\n return co(t).X({\n index: \"userMutationsIndex\",\n range: s\n }, ((t, e, s) => {\n e.userId === this.userId && (F(e.batchId >= n), i = _r(this.serializer, e)), s.done();\n })).next((() => i));\n }\n getHighestUnacknowledgedBatchId(t) {\n const e = IDBKeyRange.upperBound([ this.userId, Number.POSITIVE_INFINITY ]);\n let n = -1;\n return co(t).X({\n index: \"userMutationsIndex\",\n range: e,\n reverse: !0\n }, ((t, e, s) => {\n n = e.batchId, s.done();\n })).next((() => n));\n }\n getAllMutationBatches(t) {\n const e = IDBKeyRange.bound([ this.userId, -1 ], [ this.userId, Number.POSITIVE_INFINITY ]);\n return co(t).j(\"userMutationsIndex\", e).next((t => t.map((t => _r(this.serializer, t)))));\n }\n getAllMutationBatchesAffectingDocumentKey(t, e) {\n // Scan the document-mutation index starting with a prefix starting with\n // the given documentKey.\n const n = jt(this.userId, e.path), s = IDBKeyRange.lowerBound(n), i = [];\n return ao(t).X({\n range: s\n }, ((n, s, r) => {\n const [o, u, c] = n, a = Gt(u);\n // Only consider rows matching exactly the specific key of\n // interest. Note that because we order by path first, and we\n // order terminators before path separators, we'll encounter all\n // the index rows for documentKey contiguously. In particular, all\n // the rows for documentKey will occur before any rows for\n // documents nested in a subcollection beneath documentKey so we\n // can stop as soon as we hit any such row.\n if (o === this.userId && e.path.isEqual(a)) \n // Look up the mutation batch in the store.\n return co(t).get(c).next((t => {\n if (!t) throw O();\n F(t.userId === this.userId), i.push(_r(this.serializer, t));\n }));\n r.done();\n })).next((() => i));\n }\n getAllMutationBatchesAffectingDocumentKeys(t, e) {\n let n = new Ee(et);\n const s = [];\n return e.forEach((e => {\n const i = jt(this.userId, e.path), r = IDBKeyRange.lowerBound(i), o = ao(t).X({\n range: r\n }, ((t, s, i) => {\n const [r, o, u] = t, c = Gt(o);\n // Only consider rows matching exactly the specific key of\n // interest. Note that because we order by path first, and we\n // order terminators before path separators, we'll encounter all\n // the index rows for documentKey contiguously. In particular, all\n // the rows for documentKey will occur before any rows for\n // documents nested in a subcollection beneath documentKey so we\n // can stop as soon as we hit any such row.\n r === this.userId && e.path.isEqual(c) ? n = n.add(u) : i.done();\n }));\n s.push(o);\n })), Rt.waitFor(s).next((() => this.Dn(t, n)));\n }\n getAllMutationBatchesAffectingQuery(t, e) {\n const n = e.path, s = n.length + 1, i = jt(this.userId, n), r = IDBKeyRange.lowerBound(i);\n // Collect up unique batchIDs encountered during a scan of the index. Use a\n // SortedSet to accumulate batch IDs so they can be traversed in order in a\n // scan of the main table.\n let o = new Ee(et);\n return ao(t).X({\n range: r\n }, ((t, e, i) => {\n const [r, u, c] = t, a = Gt(u);\n r === this.userId && n.isPrefixOf(a) ? \n // Rows with document keys more than one segment longer than the\n // query path can't be matches. 
For example, a query on 'rooms'\n // can't match the document /rooms/abc/messages/xyx.\n // TODO(mcg): we'll need a different scanner when we implement\n // ancestor queries.\n a.length === s && (o = o.add(c)) : i.done();\n })).next((() => this.Dn(t, o)));\n }\n Dn(t, e) {\n const n = [], s = [];\n // TODO(rockwood): Implement this using iterate.\n return e.forEach((e => {\n s.push(co(t).get(e).next((t => {\n if (null === t) throw O();\n F(t.userId === this.userId), n.push(_r(this.serializer, t));\n })));\n })), Rt.waitFor(s).next((() => n));\n }\n removeMutationBatch(t, e) {\n return io(t.ht, this.userId, e).next((n => (t.addOnCommittedListener((() => {\n this.Cn(e.batchId);\n })), Rt.forEach(n, (e => this.referenceDelegate.markPotentiallyOrphaned(t, e))))));\n }\n /**\n * Clears the cached keys for a mutation batch. This method should be\n * called by secondary clients after they process mutation updates.\n *\n * Note that this method does not have to be called from primary clients as\n * the corresponding cache entries are cleared when an acknowledged or\n * rejected batch is removed from the mutation queue.\n */\n // PORTING NOTE: Multi-tab only\n Cn(t) {\n delete this.Vn[t];\n }\n performConsistencyCheck(t) {\n return this.checkEmpty(t).next((e => {\n if (!e) return Rt.resolve();\n // Verify that there are no entries in the documentMutations index if\n // the queue is empty.\n const n = IDBKeyRange.lowerBound([ this.userId ]);\n const s = [];\n return ao(t).X({\n range: n\n }, ((t, e, n) => {\n if (t[0] === this.userId) {\n const e = Gt(t[1]);\n s.push(e);\n } else n.done();\n })).next((() => {\n F(0 === s.length);\n }));\n }));\n }\n containsKey(t, e) {\n return uo(t, this.userId, e);\n }\n // PORTING NOTE: Multi-tab only (state is held in memory in other clients).\n /** Returns the mutation queue's metadata from IndexedDb. */\n xn(t) {\n return ho(t).get(this.userId).next((t => t || {\n userId: this.userId,\n lastAcknowledgedBatchId: -1,\n lastStreamToken: \"\"\n }));\n }\n}\n\n/**\n * @returns true if the mutation queue for the given user contains a pending\n * mutation for the given key.\n */ function uo(t, e, n) {\n const s = jt(e, n.path), i = s[1], r = IDBKeyRange.lowerBound(s);\n let o = !1;\n return ao(t).X({\n range: r,\n Y: !0\n }, ((t, n, s) => {\n const [r, u, /*batchID*/ c] = t;\n r === e && u === i && (o = !0), s.done();\n })).next((() => o));\n}\n\n/** Returns true if any mutation queue contains the given document. 
*/\n/**\n * Helper to get a typed SimpleDbStore for the mutations object store.\n */\nfunction co(t) {\n return _e(t, \"mutations\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the documentMutations object store.\n */ function ao(t) {\n return _e(t, \"documentMutations\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the mutationQueues object store.\n */ function ho(t) {\n return _e(t, \"mutationQueues\");\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Generates monotonically increasing target IDs for sending targets to the\n * watch stream.\n *\n * The client constructs two generators, one for the target cache, and one\n * for the sync engine (to generate limbo document targets). These\n * generators produce non-overlapping IDs (by using even and odd IDs\n * respectively).\n *\n * By separating the target ID space, the query cache can generate target IDs\n * that persist across client restarts, while the sync engine can independently\n * generate in-memory target IDs that are transient and can be reused after a\n * restart.\n */\nclass lo {\n constructor(t) {\n this.Nn = t;\n }\n next() {\n return this.Nn += 2, this.Nn;\n }\n static kn() {\n // The target cache generator must return '2' in its first call to `next()`\n // as there is no differentiation in the protocol layer between an unset\n // number and the number '0'. If we were to send a target with target ID\n // '0', the backend would consider it unset and replace it with its own ID.\n return new lo(0);\n }\n static Mn() {\n // Sync engine assigns target IDs for limbo document detection.\n return new lo(-1);\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ class fo {\n constructor(t, e) {\n this.referenceDelegate = t, this.serializer = e;\n }\n // PORTING NOTE: We don't cache global metadata for the target cache, since\n // some of it (in particular `highestTargetId`) can be modified by secondary\n // tabs. We could perhaps be more granular (and e.g. still cache\n // `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go\n // to IndexedDb whenever we need to read metadata. 
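(All such reads funnel through `$n()`, which loads the single `targetGlobalKey` row; writes go back through `On()`.) 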
We can revisit if it turns\n // out to have a meaningful performance impact.\n allocateTargetId(t) {\n return this.$n(t).next((e => {\n const n = new lo(e.highestTargetId);\n return e.highestTargetId = n.next(), this.On(t, e).next((() => e.highestTargetId));\n }));\n }\n getLastRemoteSnapshotVersion(t) {\n return this.$n(t).next((t => rt.fromTimestamp(new it(t.lastRemoteSnapshotVersion.seconds, t.lastRemoteSnapshotVersion.nanoseconds))));\n }\n getHighestSequenceNumber(t) {\n return this.$n(t).next((t => t.highestListenSequenceNumber));\n }\n setTargetsMetadata(t, e, n) {\n return this.$n(t).next((s => (s.highestListenSequenceNumber = e, n && (s.lastRemoteSnapshotVersion = n.toTimestamp()), \n e > s.highestListenSequenceNumber && (s.highestListenSequenceNumber = e), this.On(t, s))));\n }\n addTargetData(t, e) {\n return this.Fn(t, e).next((() => this.$n(t).next((n => (n.targetCount += 1, this.Bn(e, n), \n this.On(t, n))))));\n }\n updateTargetData(t, e) {\n return this.Fn(t, e);\n }\n removeTargetData(t, e) {\n return this.removeMatchingKeysForTargetId(t, e.targetId).next((() => wo(t).delete(e.targetId))).next((() => this.$n(t))).next((e => (F(e.targetCount > 0), \n e.targetCount -= 1, this.On(t, e))));\n }\n /**\n * Drops any targets with sequence number less than or equal to the upper bound, excepting those\n * present in `activeTargetIds`. Document associations for the removed targets are also removed.\n * Returns the number of targets removed.\n */ removeTargets(t, e, n) {\n let s = 0;\n const i = [];\n return wo(t).X(((r, o) => {\n const u = mr(o);\n u.sequenceNumber <= e && null === n.get(u.targetId) && (s++, i.push(this.removeTargetData(t, u)));\n })).next((() => Rt.waitFor(i))).next((() => s));\n }\n /**\n * Call provided function with each `TargetData` that we have cached.\n */ forEachTarget(t, e) {\n return wo(t).X(((t, n) => {\n const s = mr(n);\n e(s);\n }));\n }\n $n(t) {\n return _o(t).get(\"targetGlobalKey\").next((t => (F(null !== t), t)));\n }\n On(t, e) {\n return _o(t).put(\"targetGlobalKey\", e);\n }\n Fn(t, e) {\n return wo(t).put(gr(this.serializer, e));\n }\n /**\n * In-place updates the provided metadata to account for values in the given\n * TargetData. Saving is done separately. Returns true if there were any\n * changes to the metadata.\n */ Bn(t, e) {\n let n = !1;\n return t.targetId > e.highestTargetId && (e.highestTargetId = t.targetId, n = !0), \n t.sequenceNumber > e.highestListenSequenceNumber && (e.highestListenSequenceNumber = t.sequenceNumber, \n n = !0), n;\n }\n getTargetCount(t) {\n return this.$n(t).next((t => t.targetCount));\n }\n getTargetData(t, e) {\n // Iterating by the canonicalId may yield more than one result because\n // canonicalId values are not required to be unique per target. 
This query\n // depends on the queryTargets index to be efficient.\n const n = $n(e), s = IDBKeyRange.bound([ n, Number.NEGATIVE_INFINITY ], [ n, Number.POSITIVE_INFINITY ]);\n let i = null;\n return wo(t).X({\n range: s,\n index: \"queryTargetsIndex\"\n }, ((t, n, s) => {\n const r = mr(n);\n // After finding a potential match, check that the target is\n // actually equal to the requested target.\n On(e, r.target) && (i = r, s.done());\n })).next((() => i));\n }\n addMatchingKeys(t, e, n) {\n // PORTING NOTE: The reverse index (documentsTargets) is maintained by\n // IndexedDb.\n const s = [], i = mo(t);\n return e.forEach((e => {\n const r = qt(e.path);\n s.push(i.put({\n targetId: n,\n path: r\n })), s.push(this.referenceDelegate.addReference(t, n, e));\n })), Rt.waitFor(s);\n }\n removeMatchingKeys(t, e, n) {\n // PORTING NOTE: The reverse index (documentsTargets) is maintained by\n // IndexedDb.\n const s = mo(t);\n return Rt.forEach(e, (e => {\n const i = qt(e.path);\n return Rt.waitFor([ s.delete([ n, i ]), this.referenceDelegate.removeReference(t, n, e) ]);\n }));\n }\n removeMatchingKeysForTargetId(t, e) {\n const n = mo(t), s = IDBKeyRange.bound([ e ], [ e + 1 ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0);\n return n.delete(s);\n }\n getMatchingKeysForTargetId(t, e) {\n const n = IDBKeyRange.bound([ e ], [ e + 1 ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0), s = mo(t);\n let i = gs();\n return s.X({\n range: n,\n Y: !0\n }, ((t, e, n) => {\n const s = Gt(t[1]), r = new ht(s);\n i = i.add(r);\n })).next((() => i));\n }\n containsKey(t, e) {\n const n = qt(e.path), s = IDBKeyRange.bound([ n ], [ st(n) ], \n /*lowerOpen=*/ !1, \n /*upperOpen=*/ !0);\n let i = 0;\n return mo(t).X({\n index: \"documentTargetsIndex\",\n Y: !0,\n range: s\n }, (([t, e], n, s) => {\n // Having a sentinel row for a document does not count as containing that document;\n // For the target cache, containing the document means the document is part of some\n // target.\n 0 !== t && (i++, s.done());\n })).next((() => i > 0));\n }\n /**\n * Looks up a TargetData entry by target ID.\n *\n * @param targetId - The target ID of the TargetData entry to look up.\n * @returns The cached TargetData entry, or null if the cache has no entry for\n * the target.\n */\n // PORTING NOTE: Multi-tab only.\n le(t, e) {\n return wo(t).get(e).next((t => t ? mr(t) : null));\n }\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the queries object store.\n */ function wo(t) {\n return _e(t, \"targets\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the target globals object store.\n */ function _o(t) {\n return _e(t, \"targetGlobal\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the document target object store.\n */ function mo(t) {\n return _e(t, \"targetDocuments\");\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ function go([t, e], [n, s]) {\n const i = et(t, n);\n return 0 === i ? 
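// The sequence numbers are equal; fall back to the insertion-order tie breaker.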
et(e, s) : i;\n}\n\n/**\n * Used to calculate the nth sequence number. Keeps a rolling buffer of the\n * lowest n values passed to `addElement`, and finally reports the largest of\n * them in `maxValue`.\n */ class yo {\n constructor(t) {\n this.Ln = t, this.buffer = new Ee(go), this.qn = 0;\n }\n Un() {\n return ++this.qn;\n }\n Kn(t) {\n const e = [ t, this.Un() ];\n if (this.buffer.size < this.Ln) this.buffer = this.buffer.add(e); else {\n const t = this.buffer.last();\n go(e, t) < 0 && (this.buffer = this.buffer.delete(t).add(e));\n }\n }\n get maxValue() {\n // Guaranteed to be non-empty. If we decide we are not collecting any\n // sequence numbers, nthSequenceNumber below short-circuits. If we have\n // decided that we are collecting n sequence numbers, it's because n is some\n // percentage of the existing sequence numbers. That means we should never\n // be in a situation where we are collecting sequence numbers but don't\n // actually have any.\n return this.buffer.last()[0];\n }\n}\n\n/**\n * This class is responsible for the scheduling of LRU garbage collection. It handles checking\n * whether or not GC is enabled, as well as which delay to use before the next run.\n */ class po {\n constructor(t, e, n) {\n this.garbageCollector = t, this.asyncQueue = e, this.localStore = n, this.Gn = null;\n }\n start() {\n -1 !== this.garbageCollector.params.cacheSizeCollectionThreshold && this.Qn(6e4);\n }\n stop() {\n this.Gn && (this.Gn.cancel(), this.Gn = null);\n }\n get started() {\n return null !== this.Gn;\n }\n Qn(t) {\n N(\"LruGarbageCollector\", `Garbage collection scheduled in ${t}ms`), this.Gn = this.asyncQueue.enqueueAfterDelay(\"lru_garbage_collection\" /* TimerId.LruGarbageCollection */ , t, (async () => {\n this.Gn = null;\n try {\n await this.localStore.collectGarbage(this.garbageCollector);\n } catch (t) {\n Dt(t) ? N(\"LruGarbageCollector\", \"Ignoring IndexedDB error during garbage collection: \", t) : await vt(t);\n }\n await this.Qn(3e5);\n }));\n }\n}\n\n/**\n * Implements the steps for LRU garbage collection.\n */ class Io {\n constructor(t, e) {\n this.jn = t, this.params = e;\n }\n calculateTargetCount(t, e) {\n return this.jn.zn(t).next((t => Math.floor(e / 100 * t)));\n }\n nthSequenceNumber(t, e) {\n if (0 === e) return Rt.resolve(Ot.ct);\n const n = new yo(e);\n return this.jn.forEachTarget(t, (t => n.Kn(t.sequenceNumber))).next((() => this.jn.Wn(t, (t => n.Kn(t))))).next((() => n.maxValue));\n }\n removeTargets(t, e, n) {\n return this.jn.removeTargets(t, e, n);\n }\n removeOrphanedDocuments(t, e) {\n return this.jn.removeOrphanedDocuments(t, e);\n }\n collect(t, e) {\n return -1 === this.params.cacheSizeCollectionThreshold ? (N(\"LruGarbageCollector\", \"Garbage collection skipped; disabled\"), \n Rt.resolve(no)) : this.getCacheSize(t).next((n => n < this.params.cacheSizeCollectionThreshold ? (N(\"LruGarbageCollector\", `Garbage collection skipped; Cache size ${n} is lower than threshold ${this.params.cacheSizeCollectionThreshold}`), \n no) : this.Hn(t, e)));\n }\n getCacheSize(t) {\n return this.jn.getCacheSize(t);\n }\n Hn(t, e) {\n let n, s, i, r, o, c, a;\n const h = Date.now();\n return this.calculateTargetCount(t, this.params.percentileToCollect).next((e => (\n // Cap at the configured max\n e > this.params.maximumSequenceNumbersToCollect ? 
(N(\"LruGarbageCollector\", `Capping sequence numbers to collect down to the maximum of ${this.params.maximumSequenceNumbersToCollect} from ${e}`), \n s = this.params.maximumSequenceNumbersToCollect) : s = e, r = Date.now(), this.nthSequenceNumber(t, s)))).next((s => (n = s, \n o = Date.now(), this.removeTargets(t, n, e)))).next((e => (i = e, c = Date.now(), \n this.removeOrphanedDocuments(t, n)))).next((t => {\n if (a = Date.now(), C() <= LogLevel.DEBUG) {\n N(\"LruGarbageCollector\", `LRU Garbage Collection\\n\\tCounted targets in ${r - h}ms\\n\\tDetermined least recently used ${s} in ` + (o - r) + \"ms\\n\" + `\\tRemoved ${i} targets in ` + (c - o) + \"ms\\n\" + `\\tRemoved ${t} documents in ` + (a - c) + \"ms\\n\" + `Total Duration: ${a - h}ms`);\n }\n return Rt.resolve({\n didRun: !0,\n sequenceNumbersCollected: s,\n targetsRemoved: i,\n documentsRemoved: t\n });\n }));\n }\n}\n\nfunction To(t, e) {\n return new Io(t, e);\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/** Provides LRU functionality for IndexedDB persistence. */ class Eo {\n constructor(t, e) {\n this.db = t, this.garbageCollector = To(this, e);\n }\n zn(t) {\n const e = this.Jn(t);\n return this.db.getTargetCache().getTargetCount(t).next((t => e.next((e => t + e))));\n }\n Jn(t) {\n let e = 0;\n return this.Wn(t, (t => {\n e++;\n })).next((() => e));\n }\n forEachTarget(t, e) {\n return this.db.getTargetCache().forEachTarget(t, e);\n }\n Wn(t, e) {\n return this.Yn(t, ((t, n) => e(n)));\n }\n addReference(t, e, n) {\n return Ao(t, n);\n }\n removeReference(t, e, n) {\n return Ao(t, n);\n }\n removeTargets(t, e, n) {\n return this.db.getTargetCache().removeTargets(t, e, n);\n }\n markPotentiallyOrphaned(t, e) {\n return Ao(t, e);\n }\n /**\n * Returns true if anything would prevent this document from being garbage\n * collected, given that the document in question is not present in any\n * targets and has a sequence number less than or equal to the upper bound for\n * the collection run.\n */ Xn(t, e) {\n return function(t, e) {\n let n = !1;\n return ho(t).Z((s => uo(t, s, e).next((t => (t && (n = !0), Rt.resolve(!t)))))).next((() => n));\n }(t, e);\n }\n removeOrphanedDocuments(t, e) {\n const n = this.db.getRemoteDocumentCache().newChangeBuffer(), s = [];\n let i = 0;\n return this.Yn(t, ((r, o) => {\n if (o <= e) {\n const e = this.Xn(t, r).next((e => {\n if (!e) \n // Our size accounting requires us to read all documents before\n // removing them.\n return i++, n.getEntry(t, r).next((() => (n.removeEntry(r, rt.min()), mo(t).delete([ 0, qt(r.path) ]))));\n }));\n s.push(e);\n }\n })).next((() => Rt.waitFor(s))).next((() => n.apply(t))).next((() => i));\n }\n removeTarget(t, e) {\n const n = e.withSequenceNumber(t.currentSequenceNumber);\n return this.db.getTargetCache().updateTargetData(t, n);\n }\n updateLimboDocument(t, e) {\n return Ao(t, e);\n }\n /**\n * Call provided function for each document in the cache that is 'orphaned'. 
Orphaned\n * means not a part of any target, so the only entry in the target-document index for\n * that document will be the sentinel row (targetId 0), which will also have the sequence\n * number for the last time the document was accessed.\n */ Yn(t, e) {\n const n = mo(t);\n let s, i = Ot.ct;\n return n.X({\n index: \"documentTargetsIndex\"\n }, (([t, n], {path: r, sequenceNumber: o}) => {\n 0 === t ? (\n // if nextToReport is valid, report it, this is a new key so the\n // last one must not be a member of any targets.\n i !== Ot.ct && e(new ht(Gt(s)), i), \n // set nextToReport to be this sequence number. It's the next one we\n // might report, if we don't find any targets for this document.\n // Note that the sequence number must be defined when the targetId\n // is 0.\n i = o, s = r) : \n // set nextToReport to be invalid, we know we don't need to report\n // this one since we found a target for it.\n i = Ot.ct;\n })).next((() => {\n // Since we report sequence numbers after getting to the next key, we\n // need to check if the last key we iterated over was an orphaned\n // document and report it.\n i !== Ot.ct && e(new ht(Gt(s)), i);\n }));\n }\n getCacheSize(t) {\n return this.db.getRemoteDocumentCache().getSize(t);\n }\n}\n\nfunction Ao(t, e) {\n return mo(t).put(\n /**\n * @returns A value suitable for writing a sentinel row in the target-document\n * store.\n */\n function(t, e) {\n return {\n targetId: 0,\n path: qt(t.path),\n sequenceNumber: e\n };\n }(e, t.currentSequenceNumber));\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * An in-memory buffer of entries to be written to a RemoteDocumentCache.\n * It can be used to batch up a set of changes to be written to the cache, but\n * additionally supports reading entries back with the `getEntry()` method,\n * falling back to the underlying RemoteDocumentCache if no entry is\n * buffered.\n *\n * Entries added to the cache *must* be read first. 
This is to facilitate\n * calculating the size delta of the pending changes.\n *\n * PORTING NOTE: This class was implemented then removed from other platforms.\n * If byte-counting ends up being needed on the other platforms, consider\n * porting this class as part of that implementation work.\n */ class vo {\n constructor() {\n // A mapping of document key to the new cache entry that should be written.\n this.changes = new os((t => t.toString()), ((t, e) => t.isEqual(e))), this.changesApplied = !1;\n }\n /**\n * Buffers a `RemoteDocumentCache.addEntry()` call.\n *\n * You can only modify documents that have already been retrieved via\n * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).\n */ addEntry(t) {\n this.assertNotApplied(), this.changes.set(t.key, t);\n }\n /**\n * Buffers a `RemoteDocumentCache.removeEntry()` call.\n *\n * You can only remove documents that have already been retrieved via\n * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).\n */ removeEntry(t, e) {\n this.assertNotApplied(), this.changes.set(t, an.newInvalidDocument(t).setReadTime(e));\n }\n /**\n * Looks up an entry in the cache. The buffered changes will first be checked,\n * and if no buffered change applies, this will forward to\n * `RemoteDocumentCache.getEntry()`.\n *\n * @param transaction - The transaction in which to perform any persistence\n * operations.\n * @param documentKey - The key of the entry to look up.\n * @returns The cached document or an invalid document if we have nothing\n * cached.\n */ getEntry(t, e) {\n this.assertNotApplied();\n const n = this.changes.get(e);\n return void 0 !== n ? Rt.resolve(n) : this.getFromCache(t, e);\n }\n /**\n * Looks up several entries in the cache, forwarding to\n * `RemoteDocumentCache.getEntry()`.\n *\n * @param transaction - The transaction in which to perform any persistence\n * operations.\n * @param documentKeys - The keys of the entries to look up.\n * @returns A map of cached documents, indexed by key. If an entry cannot be\n * found, the corresponding key will be mapped to an invalid document.\n */ getEntries(t, e) {\n return this.getAllFromCache(t, e);\n }\n /**\n * Applies buffered changes to the underlying RemoteDocumentCache, using\n * the provided transaction.\n */ apply(t) {\n return this.assertNotApplied(), this.changesApplied = !0, this.applyChanges(t);\n }\n /** Helper to assert this.changes is not null */ assertNotApplied() {}\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * The RemoteDocumentCache for IndexedDb. 
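Documents live in the `remoteDocumentsV14` object store; the `documentKeyIndex` keys them by prefix path, collection ID and document ID (see `Do()` below). 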
To construct, invoke\n * `newIndexedDbRemoteDocumentCache()`.\n */ class Ro {\n constructor(t) {\n this.serializer = t;\n }\n setIndexManager(t) {\n this.indexManager = t;\n }\n /**\n * Adds the supplied entries to the cache.\n *\n * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer\n * returned by `newChangeBuffer()` to ensure proper accounting of metadata.\n */ addEntry(t, e, n) {\n return So(t).put(n);\n }\n /**\n * Removes a document from the cache.\n *\n * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer\n * returned by `newChangeBuffer()` to ensure proper accounting of metadata.\n */ removeEntry(t, e, n) {\n return So(t).delete(\n /**\n * Returns a key that can be used for document lookups via the primary key of\n * the DbRemoteDocument object store.\n */\n function(t, e) {\n const n = t.path.toArray();\n return [ \n /* prefix path */ n.slice(0, n.length - 2), \n /* collection id */ n[n.length - 2], fr(e), \n /* document id */ n[n.length - 1] ];\n }\n /**\n * Returns a key that can be used for document lookups on the\n * `DbRemoteDocumentDocumentCollectionGroupIndex` index.\n */ (e, n));\n }\n /**\n * Updates the current cache size.\n *\n * Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the\n * cache's metadata.\n */ updateMetadata(t, e) {\n return this.getMetadata(t).next((n => (n.byteSize += e, this.Zn(t, n))));\n }\n getEntry(t, e) {\n let n = an.newInvalidDocument(e);\n return So(t).X({\n index: \"documentKeyIndex\",\n range: IDBKeyRange.only(Do(e))\n }, ((t, s) => {\n n = this.ts(e, s);\n })).next((() => n));\n }\n /**\n * Looks up an entry in the cache.\n *\n * @param documentKey - The key of the entry to look up.\n * @returns The cached document entry and its size.\n */ es(t, e) {\n let n = {\n size: 0,\n document: an.newInvalidDocument(e)\n };\n return So(t).X({\n index: \"documentKeyIndex\",\n range: IDBKeyRange.only(Do(e))\n }, ((t, s) => {\n n = {\n document: this.ts(e, s),\n size: ro(s)\n };\n })).next((() => n));\n }\n getEntries(t, e) {\n let n = cs();\n return this.ns(t, e, ((t, e) => {\n const s = this.ts(t, e);\n n = n.insert(t, s);\n })).next((() => n));\n }\n /**\n * Looks up several entries in the cache.\n *\n * @param documentKeys - The set of keys entries to look up.\n * @returns A map of documents indexed by key and a map of sizes indexed by\n * key (zero if the document does not exist).\n */ ss(t, e) {\n let n = cs(), s = new pe(ht.comparator);\n return this.ns(t, e, ((t, e) => {\n const i = this.ts(t, e);\n n = n.insert(t, i), s = s.insert(t, ro(e));\n })).next((() => ({\n documents: n,\n rs: s\n })));\n }\n ns(t, e, n) {\n if (e.isEmpty()) return Rt.resolve();\n let s = new Ee(xo);\n e.forEach((t => s = s.add(t)));\n const i = IDBKeyRange.bound(Do(s.first()), Do(s.last())), r = s.getIterator();\n let o = r.getNext();\n return So(t).X({\n index: \"documentKeyIndex\",\n range: i\n }, ((t, e, s) => {\n const i = ht.fromSegments([ ...e.prefixPath, e.collectionGroup, e.documentId ]);\n // Go through keys not found in cache.\n for (;o && xo(o, i) < 0; ) n(o, null), o = r.getNext();\n o && o.isEqual(i) && (\n // Key found in cache.\n n(o, e), o = r.hasNext() ? r.getNext() : null), \n // Skip to the next key (if there is one).\n o ? s.G(Do(o)) : s.done();\n })).next((() => {\n // The rest of the keys are not in the cache. One case where `iterate`\n // above won't go through them is when the cache is empty.\n for (;o; ) n(o, null), o = r.hasNext() ? 
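// Advance to the next requested key, if any remain.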
r.getNext() : null;\n }));\n }\n getDocumentsMatchingQuery(t, e, n, s) {\n const i = e.path, r = [ i.popLast().toArray(), i.lastSegment(), fr(n.readTime), n.documentKey.path.isEmpty() ? \"\" : n.documentKey.path.lastSegment() ], o = [ i.popLast().toArray(), i.lastSegment(), [ Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER ], \"\" ];\n return So(t).j(IDBKeyRange.bound(r, o, !0)).next((t => {\n let n = cs();\n for (const i of t) {\n const t = this.ts(ht.fromSegments(i.prefixPath.concat(i.collectionGroup, i.documentId)), i);\n t.isFoundDocument() && (ns(e, t) || s.has(t.key)) && (\n // Either the document matches the given query, or it is mutated.\n n = n.insert(t.key, t));\n }\n return n;\n }));\n }\n getAllFromCollectionGroup(t, e, n, s) {\n let i = cs();\n const r = Co(e, n), o = Co(e, It.max());\n return So(t).X({\n index: \"collectionGroupIndex\",\n range: IDBKeyRange.bound(r, o, !0)\n }, ((t, e, n) => {\n const r = this.ts(ht.fromSegments(e.prefixPath.concat(e.collectionGroup, e.documentId)), e);\n i = i.insert(r.key, r), i.size === s && n.done();\n })).next((() => i));\n }\n newChangeBuffer(t) {\n return new bo(this, !!t && t.trackRemovals);\n }\n getSize(t) {\n return this.getMetadata(t).next((t => t.byteSize));\n }\n getMetadata(t) {\n return Vo(t).get(\"remoteDocumentGlobalKey\").next((t => (F(!!t), t)));\n }\n Zn(t, e) {\n return Vo(t).put(\"remoteDocumentGlobalKey\", e);\n }\n /**\n * Decodes `dbRemoteDoc` and returns the document (or an invalid document if\n * the document corresponds to the format used for sentinel deletes).\n */ ts(t, e) {\n if (e) {\n const t = hr(this.serializer, e);\n // Whether the document is a sentinel removal and should only be used in the\n // `getNewDocumentChanges()`\n if (!(t.isNoDocument() && t.version.isEqual(rt.min()))) return t;\n }\n return an.newInvalidDocument(t);\n }\n}\n\n/** Creates a new IndexedDbRemoteDocumentCache. */ function Po(t) {\n return new Ro(t);\n}\n\n/**\n * Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.\n *\n * Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size\n * delta for all submitted changes. This avoids having to re-read all documents from IndexedDb\n * when we apply the changes.\n */ class bo extends vo {\n /**\n * @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.\n * @param trackRemovals - Whether to create sentinel deletes that can be tracked by\n * `getNewDocumentChanges()`.\n */\n constructor(t, e) {\n super(), this.os = t, this.trackRemovals = e, \n // A map of document sizes and read times prior to applying the changes in\n // this buffer.\n this.us = new os((t => t.toString()), ((t, e) => t.isEqual(e)));\n }\n applyChanges(t) {\n const e = [];\n let n = 0, s = new Ee(((t, e) => et(t.canonicalString(), e.canonicalString())));\n return this.changes.forEach(((i, r) => {\n const o = this.us.get(i);\n if (e.push(this.os.removeEntry(t, i, o.readTime)), r.isValidDocument()) {\n const u = lr(this.os.serializer, r);\n s = s.add(i.path.popLast());\n const c = ro(u);\n n += c - o.size, e.push(this.os.addEntry(t, i, u));\n } else if (n -= o.size, this.trackRemovals) {\n // In order to track removals, we store a \"sentinel delete\" in the\n // RemoteDocumentCache. 
This entry is represented by a NoDocument\n // with a version of 0 and ignored by `maybeDecodeDocument()` but\n // preserved in `getNewDocumentChanges()`.\n const n = lr(this.os.serializer, r.convertToNoDocument(rt.min()));\n e.push(this.os.addEntry(t, i, n));\n }\n })), s.forEach((n => {\n e.push(this.os.indexManager.addToCollectionParentIndex(t, n));\n })), e.push(this.os.updateMetadata(t, n)), Rt.waitFor(e);\n }\n getFromCache(t, e) {\n // Record the size of everything we load from the cache so we can compute a delta later.\n return this.os.es(t, e).next((t => (this.us.set(e, {\n size: t.size,\n readTime: t.document.readTime\n }), t.document)));\n }\n getAllFromCache(t, e) {\n // Record the size of everything we load from the cache so we can compute\n // a delta later.\n return this.os.ss(t, e).next((({documents: t, rs: e}) => (\n // Note: `getAllFromCache` returns two maps instead of a single map from\n // keys to `DocumentSizeEntry`s. This is to allow returning the\n // `MutableDocumentMap` directly, without a conversion.\n e.forEach(((e, n) => {\n this.us.set(e, {\n size: n,\n readTime: t.get(e).readTime\n });\n })), t)));\n }\n}\n\nfunction Vo(t) {\n return _e(t, \"remoteDocumentGlobal\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the remoteDocuments object store.\n */ function So(t) {\n return _e(t, \"remoteDocumentsV14\");\n}\n\n/**\n * Returns a key that can be used for document lookups on the\n * `DbRemoteDocumentDocumentKeyIndex` index.\n */ function Do(t) {\n const e = t.path.toArray();\n return [ \n /* prefix path */ e.slice(0, e.length - 2), \n /* collection id */ e[e.length - 2], \n /* document id */ e[e.length - 1] ];\n}\n\nfunction Co(t, e) {\n const n = e.documentKey.path.toArray();\n return [ \n /* collection id */ t, fr(e.readTime), \n /* prefix path */ n.slice(0, n.length - 2), \n /* document id */ n.length > 0 ? n[n.length - 1] : \"\" ];\n}\n\n/**\n * Comparator that compares document keys according to the primary key sorting\n * used by the `DbRemoteDocumentDocument` store (by prefix path, collection id\n * and then document ID).\n *\n * Visible for testing.\n */ function xo(t, e) {\n const n = t.path.toArray(), s = e.path.toArray();\n // The ordering is based on https://chromium.googlesource.com/chromium/blink/+/fe5c21fef94dae71c1c3344775b8d8a7f7e6d9ec/Source/modules/indexeddb/IDBKey.cpp#74\n let i = 0;\n for (let t = 0; t < n.length - 2 && t < s.length - 2; ++t) if (i = et(n[t], s[t]), \n i) return i;\n return i = et(n.length, s.length), i || (i = et(n[n.length - 2], s[s.length - 2]), \n i || et(n[n.length - 1], s[s.length - 1]));\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Schema Version for the Web client:\n * 1. Initial version including Mutation Queue, Query Cache, and Remote\n * Document Cache\n * 2. Used to ensure a targetGlobal object exists and add targetCount to it. No\n * longer required because migration 3 unconditionally clears it.\n * 3. 
Dropped and re-created Query Cache to deal with cache corruption related\n * to limbo resolution. Addresses\n * https://github.com/firebase/firebase-ios-sdk/issues/1548\n * 4. Multi-Tab Support.\n * 5. Removal of held write acks.\n * 6. Create document global for tracking document cache size.\n * 7. Ensure every cached document has a sentinel row with a sequence number.\n * 8. Add collection-parent index for Collection Group queries.\n * 9. Change RemoteDocumentChanges store to be keyed by readTime rather than\n * an auto-incrementing ID. This is required for Index-Free queries.\n * 10. Rewrite the canonical IDs to the explicit Protobuf-based format.\n * 11. Add bundles and named_queries for bundle support.\n * 12. Add document overlays.\n * 13. Rewrite the keys of the remote document cache to allow for efficient\n * document lookup via `getAll()`.\n * 14. Add overlays.\n * 15. Add indexing support.\n */\n/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Represents a local view (overlay) of a document, and the fields that are\n * locally mutated.\n */\nclass No {\n constructor(t, \n /**\n * The fields that are locally mutated by patch mutations.\n *\n * If the overlayed\tdocument is from set or delete mutations, this is `null`.\n * If there is no overlay (mutation) for the document, this is an empty `FieldMask`.\n */\n e) {\n this.overlayedDocument = t, this.mutatedFields = e;\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * A readonly view of the local state of all documents we're tracking (i.e. we\n * have a cached version in remoteDocumentCache or local mutations for the\n * document). 
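* For illustration, the view of a single document can be sketched as\n * (hypothetical helper names, not the SDK's API):\n *\n * function localView(cachedRemoteDoc, overlayMutation) {\n *   // A set/delete overlay replaces the cached document outright; a patch\n *   // overlay merges its changed fields into the cached copy.\n *   return overlayMutation ? applyOverlay(overlayMutation, cachedRemoteDoc) : cachedRemoteDoc;\n * }\n *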
The view is computed by applying the mutations in the\n * MutationQueue to the RemoteDocumentCache.\n */ class ko {\n constructor(t, e, n, s) {\n this.remoteDocumentCache = t, this.mutationQueue = e, this.documentOverlayCache = n, \n this.indexManager = s;\n }\n /**\n * Get the local view of the document identified by `key`.\n *\n * @returns Local view of the document or null if we don't have any cached\n * state for it.\n */ getDocument(t, e) {\n let n = null;\n return this.documentOverlayCache.getOverlay(t, e).next((s => (n = s, this.remoteDocumentCache.getEntry(t, e)))).next((t => (null !== n && Ks(n.mutation, t, Re.empty(), it.now()), \n t)));\n }\n /**\n * Gets the local view of the documents identified by `keys`.\n *\n * If we don't have cached state for a document in `keys`, a NoDocument will\n * be stored for that key in the resulting set.\n */ getDocuments(t, e) {\n return this.remoteDocumentCache.getEntries(t, e).next((e => this.getLocalViewOfDocuments(t, e, gs()).next((() => e))));\n }\n /**\n * Similar to `getDocuments`, but creates the local view from the given\n * `baseDocs` without retrieving documents from the local store.\n *\n * @param transaction - The transaction this operation is scoped to.\n * @param docs - The documents to apply local mutations to get the local views.\n * @param existenceStateChanged - The set of document keys whose existence state\n * has changed. This is useful to determine if some document's overlay needs\n * to be recalculated.\n */ getLocalViewOfDocuments(t, e, n = gs()) {\n const s = fs();\n return this.populateOverlays(t, s, e).next((() => this.computeViews(t, e, s, n).next((t => {\n let e = hs();\n return t.forEach(((t, n) => {\n e = e.insert(t, n.overlayedDocument);\n })), e;\n }))));\n }\n /**\n * Gets the overlayed documents for the given document map, which will include\n * the local view of those documents and a `FieldMask` indicating which fields\n * are mutated locally, or `null` if the overlay is a Set or Delete mutation.\n */ getOverlayedDocuments(t, e) {\n const n = fs();\n return this.populateOverlays(t, n, e).next((() => this.computeViews(t, e, n, gs())));\n }\n /**\n * Fetches the overlays for {@code docs} and adds them to the provided overlay map\n * if the map does not already contain an entry for the given document key.\n */ populateOverlays(t, e, n) {\n const s = [];\n return n.forEach((t => {\n e.has(t) || s.push(t);\n })), this.documentOverlayCache.getOverlays(t, s).next((t => {\n t.forEach(((t, n) => {\n e.set(t, n);\n }));\n }));\n }\n /**\n * Computes the local view for the given documents.\n *\n * @param docs - The documents to compute views for. It also has the base\n * version of the documents.\n * @param overlays - The overlays that need to be applied to the given base\n * version of the documents.\n * @param existenceStateChanged - A set of documents whose existence states\n * might have changed. This is used to determine if we need to re-calculate\n * overlays from mutation queues.\n * @returns A map representing the local documents view.\n */ computeViews(t, e, n, s) {\n let i = cs();\n const r = ws(), o = ws();\n return e.forEach(((t, e) => {\n const o = n.get(e.key);\n // Recalculate an overlay if the document's existence state changed due to\n // a remote event *and* the overlay is a PatchMutation. 
This is because\n // document existence state can change if some patch mutation's\n // preconditions are met.\n // NOTE: we recalculate when `overlay` is undefined as well, because there\n // might be a patch mutation whose precondition does not match before the\n // change (hence overlay is undefined), but would now match.\n s.has(e.key) && (void 0 === o || o.mutation instanceof zs) ? i = i.insert(e.key, e) : void 0 !== o ? (r.set(e.key, o.mutation.getFieldMask()), \n Ks(o.mutation, e, o.mutation.getFieldMask(), it.now())) : \n // no overlay exists\n // Using EMPTY to indicate there is no overlay for the document.\n r.set(e.key, Re.empty());\n })), this.recalculateAndSaveOverlays(t, i).next((t => (t.forEach(((t, e) => r.set(t, e))), \n e.forEach(((t, e) => {\n var n;\n return o.set(t, new No(e, null !== (n = r.get(t)) && void 0 !== n ? n : null));\n })), o)));\n }\n recalculateAndSaveOverlays(t, e) {\n const n = ws();\n // A reverse lookup map from batch id to the documents within that batch.\n let s = new pe(((t, e) => t - e)), i = gs();\n return this.mutationQueue.getAllMutationBatchesAffectingDocumentKeys(t, e).next((t => {\n for (const i of t) i.keys().forEach((t => {\n const r = e.get(t);\n if (null === r) return;\n let o = n.get(t) || Re.empty();\n o = i.applyToLocalView(r, o), n.set(t, o);\n const u = (s.get(i.batchId) || gs()).add(t);\n s = s.insert(i.batchId, u);\n }));\n })).next((() => {\n const r = [], o = s.getReverseIterator();\n // Iterate in descending order of batch IDs, and skip documents that are\n // already saved.\n for (;o.hasNext(); ) {\n const s = o.getNext(), u = s.key, c = s.value, a = ds();\n c.forEach((t => {\n if (!i.has(t)) {\n const s = qs(e.get(t), n.get(t));\n null !== s && a.set(t, s), i = i.add(t);\n }\n })), r.push(this.documentOverlayCache.saveOverlays(t, u, a));\n }\n return Rt.waitFor(r);\n })).next((() => n));\n }\n /**\n * Recalculates overlays by reading the documents from remote document cache\n * first, and saves them after they are calculated.\n */ recalculateAndSaveOverlaysForDocumentKeys(t, e) {\n return this.remoteDocumentCache.getEntries(t, e).next((e => this.recalculateAndSaveOverlays(t, e)));\n }\n /**\n * Performs a query against the local view of all documents.\n *\n * @param transaction - The persistence transaction.\n * @param query - The query to match documents against.\n * @param offset - Read time and key to start scanning by (exclusive).\n */ getDocumentsMatchingQuery(t, e, n) {\n /**\n * Returns whether the query matches a single document by path (rather than a\n * collection).\n */\n return function(t) {\n return ht.isDocumentKey(t.path) && null === t.collectionGroup && 0 === t.filters.length;\n }(e) ? this.getDocumentsMatchingDocumentQuery(t, e.path) : Wn(e) ? this.getDocumentsMatchingCollectionGroupQuery(t, e, n) : this.getDocumentsMatchingCollectionQuery(t, e, n);\n }\n /**\n * Given a collection group, returns the next documents that follow the provided offset, along\n * with an updated batch ID.\n *\n *
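* For illustration, a caller could page through a collection group like this\n * (hypothetical names; the offset fields mirror the surrounding code):\n *\n * let offset = { readTime: minSnapshotVersion, documentKey: emptyKey, largestBatchId: -1 };\n * for (;;) {\n *   const { batchId, changes } = await getNextDocuments(txn, 'rooms', offset, 50);\n *   if (changes.size === 0) break;\n *   offset = offsetFromLastDocumentOrBatch(changes, batchId); // advance past this page\n * }\n *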
The documents returned by this method are ordered by remote version from the provided\n * offset. If there are no more remote documents after the provided offset, documents with\n * mutations in order of batch id from the offset are returned. Since all documents in a batch are\n * returned together, the total number of documents returned can exceed {@code count}.\n *\n * @param transaction\n * @param collectionGroup The collection group for the documents.\n * @param offset The offset to index into.\n * @param count The number of documents to return\n * @return A LocalWriteResult with the documents that follow the provided offset and the last processed batch id.\n */ getNextDocuments(t, e, n, s) {\n return this.remoteDocumentCache.getAllFromCollectionGroup(t, e, n, s).next((i => {\n const r = s - i.size > 0 ? this.documentOverlayCache.getOverlaysForCollectionGroup(t, e, n.largestBatchId, s - i.size) : Rt.resolve(fs());\n // The callsite will use the largest batch ID together with the latest read time to create\n // a new index offset. Since we only process batch IDs if all remote documents have been read,\n // no overlay will increase the overall read time. This is why we only need to special case\n // the batch id.\n let o = -1, u = i;\n return r.next((e => Rt.forEach(e, ((e, n) => (o < n.largestBatchId && (o = n.largestBatchId), \n i.get(e) ? Rt.resolve() : this.remoteDocumentCache.getEntry(t, e).next((t => {\n u = u.insert(e, t);\n }))))).next((() => this.populateOverlays(t, e, i))).next((() => this.computeViews(t, u, e, gs()))).next((t => ({\n batchId: o,\n changes: ls(t)\n })))));\n }));\n }\n getDocumentsMatchingDocumentQuery(t, e) {\n // Just do a simple document lookup.\n return this.getDocument(t, new ht(e)).next((t => {\n let e = hs();\n return t.isFoundDocument() && (e = e.insert(t.key, t)), e;\n }));\n }\n getDocumentsMatchingCollectionGroupQuery(t, e, n) {\n const s = e.collectionGroup;\n let i = hs();\n return this.indexManager.getCollectionParents(t, s).next((r => Rt.forEach(r, (r => {\n const o = function(t, e) {\n return new Un(e, \n /*collectionGroup=*/ null, t.explicitOrderBy.slice(), t.filters.slice(), t.limit, t.limitType, t.startAt, t.endAt);\n }(e, r.child(s));\n return this.getDocumentsMatchingCollectionQuery(t, o, n).next((t => {\n t.forEach(((t, e) => {\n i = i.insert(t, e);\n }));\n }));\n })).next((() => i))));\n }\n getDocumentsMatchingCollectionQuery(t, e, n) {\n // Query the remote documents and overlay mutations.\n let s;\n return this.documentOverlayCache.getOverlaysForCollection(t, e.path, n.largestBatchId).next((i => (s = i, \n this.remoteDocumentCache.getDocumentsMatchingQuery(t, e, n, s)))).next((t => {\n // As documents might match the query because of their overlay we need to\n // include documents for all overlays in the initial document set.\n s.forEach(((e, n) => {\n const s = n.getKey();\n null === t.get(s) && (t = t.insert(s, an.newInvalidDocument(s)));\n }));\n // Apply the overlays and match against the query.\n let n = hs();\n return t.forEach(((t, i) => {\n const r = s.get(t);\n void 0 !== r && Ks(r.mutation, i, Re.empty(), it.now()), \n // Finally, insert the documents that still match the query\n ns(e, i) && (n = n.insert(t, i));\n })), n;\n }));\n }\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n 
* Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ class Mo {\n constructor(t) {\n this.serializer = t, this.cs = new Map, this.hs = new Map;\n }\n getBundleMetadata(t, e) {\n return Rt.resolve(this.cs.get(e));\n }\n saveBundleMetadata(t, e) {\n /** Decodes a BundleMetadata proto into a BundleMetadata object. */\n var n;\n return this.cs.set(e.id, {\n id: (n = e).id,\n version: n.version,\n createTime: Ni(n.createTime)\n }), Rt.resolve();\n }\n getNamedQuery(t, e) {\n return Rt.resolve(this.hs.get(e));\n }\n saveNamedQuery(t, e) {\n return this.hs.set(e.name, function(t) {\n return {\n name: t.name,\n query: yr(t.bundledQuery),\n readTime: Ni(t.readTime)\n };\n }(e)), Rt.resolve();\n }\n}\n\n/**\n * @license\n * Copyright 2022 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * An in-memory implementation of DocumentOverlayCache.\n */ class $o {\n constructor() {\n // A map sorted by DocumentKey, whose value is a pair of the largest batch id\n // for the overlay and the overlay itself.\n this.overlays = new pe(ht.comparator), this.ls = new Map;\n }\n getOverlay(t, e) {\n return Rt.resolve(this.overlays.get(e));\n }\n getOverlays(t, e) {\n const n = fs();\n return Rt.forEach(e, (e => this.getOverlay(t, e).next((t => {\n null !== t && n.set(e, t);\n })))).next((() => n));\n }\n saveOverlays(t, e, n) {\n return n.forEach(((n, s) => {\n this.we(t, e, s);\n })), Rt.resolve();\n }\n removeOverlaysForBatchId(t, e, n) {\n const s = this.ls.get(n);\n return void 0 !== s && (s.forEach((t => this.overlays = this.overlays.remove(t))), \n this.ls.delete(n)), Rt.resolve();\n }\n getOverlaysForCollection(t, e, n) {\n const s = fs(), i = e.length + 1, r = new ht(e.child(\"\")), o = this.overlays.getIteratorFrom(r);\n for (;o.hasNext(); ) {\n const t = o.getNext().value, r = t.getKey();\n if (!e.isPrefixOf(r.path)) break;\n // Documents from sub-collections\n r.path.length === i && (t.largestBatchId > n && s.set(t.getKey(), t));\n }\n return Rt.resolve(s);\n }\n getOverlaysForCollectionGroup(t, e, n, s) {\n let i = new pe(((t, e) => t - e));\n const r = this.overlays.getIterator();\n for (;r.hasNext(); ) {\n const t = r.getNext().value;\n if (t.getKey().getCollectionGroup() === e && t.largestBatchId > n) {\n let e = i.get(t.largestBatchId);\n null === e && (e = fs(), i = i.insert(t.largestBatchId, e)), e.set(t.getKey(), t);\n }\n }\n const o = fs(), u = i.getIterator();\n for (;u.hasNext(); ) {\n if (u.getNext().value.forEach(((t, e) => o.set(t, e))), o.size() >= s) break;\n }\n return Rt.resolve(o);\n }\n we(t, e, n) {\n // Remove the association of the overlay to its batch id.\n const s = this.overlays.get(n.key);\n if (null !== s) {\n const t = this.ls.get(s.largestBatchId).delete(n.key);\n 
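// Illustrative trace (commentary only, not executable SDK code): saving an\n // overlay for key k at batch 7 leaves `overlays` holding k -> Overlay(7, m)\n // and `ls` holding 7 -> {k}. When k is re-saved at batch 9, the two\n // statements around this comment drop k from ls[7], and the inserts below\n // record k -> Overlay(9, m2) and 9 -> {k}.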
this.ls.set(s.largestBatchId, t);\n }\n this.overlays = this.overlays.insert(n.key, new ei(e, n));\n // Create the association of this overlay to the given largestBatchId.\n let i = this.ls.get(e);\n void 0 === i && (i = gs(), this.ls.set(e, i)), this.ls.set(e, i.add(n.key));\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * A collection of references to a document from some kind of numbered entity\n * (either a target ID or batch ID). As references are added to or removed from\n * the set, corresponding events are emitted to a registered garbage collector.\n *\n * Each reference is represented by a DocumentReference object. Each of them\n * contains enough information to uniquely identify the reference. They are all\n * stored primarily in a set sorted by key. A document is considered garbage if\n * there are no references in that set (this can be efficiently checked thanks to\n * sorting by key).\n *\n * ReferenceSet also keeps a secondary set that contains references sorted by\n * IDs. This one is used to efficiently implement removal of all references by\n * some target ID.\n */ class Oo {\n constructor() {\n // A set of outstanding references to a document sorted by key.\n this.fs = new Ee(Fo.ds), \n // A set of outstanding references to a document sorted by target id.\n this.ws = new Ee(Fo._s);\n }\n /** Returns true if the reference set contains no references. */ isEmpty() {\n return this.fs.isEmpty();\n }\n /** Adds a reference to the given document key for the given ID. */ addReference(t, e) {\n const n = new Fo(t, e);\n this.fs = this.fs.add(n), this.ws = this.ws.add(n);\n }\n /** Adds references to the given document keys for the given ID. */ gs(t, e) {\n t.forEach((t => this.addReference(t, e)));\n }\n /**\n * Removes a reference to the given document key for the given\n * ID.\n */ removeReference(t, e) {\n this.ys(new Fo(t, e));\n }\n ps(t, e) {\n t.forEach((t => this.removeReference(t, e)));\n }\n /**\n * Clears all references with a given ID. 
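* For illustration: because the second set is sorted by ID first, all\n * references for a given target can be collected with a range scan between\n * two sentinel entries built from the empty document key (a sketch with\n * hypothetical names, mirroring `Is` and `Es` below):\n *\n * const from = new DocReference(emptyKey, targetId);\n * const to = new DocReference(emptyKey, targetId + 1);\n * idSortedSet.forEachInRange([from, to], (ref) => keys.push(ref.key));\n *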
Calls removeRef() for each key\n * removed.\n */ Is(t) {\n const e = new ht(new ut([])), n = new Fo(e, t), s = new Fo(e, t + 1), i = [];\n return this.ws.forEachInRange([ n, s ], (t => {\n this.ys(t), i.push(t.key);\n })), i;\n }\n Ts() {\n this.fs.forEach((t => this.ys(t)));\n }\n ys(t) {\n this.fs = this.fs.delete(t), this.ws = this.ws.delete(t);\n }\n Es(t) {\n const e = new ht(new ut([])), n = new Fo(e, t), s = new Fo(e, t + 1);\n let i = gs();\n return this.ws.forEachInRange([ n, s ], (t => {\n i = i.add(t.key);\n })), i;\n }\n containsKey(t) {\n const e = new Fo(t, 0), n = this.fs.firstAfterOrEqual(e);\n return null !== n && t.isEqual(n.key);\n }\n}\n\nclass Fo {\n constructor(t, e) {\n this.key = t, this.As = e;\n }\n /** Compare by key then by ID */ static ds(t, e) {\n return ht.comparator(t.key, e.key) || et(t.As, e.As);\n }\n /** Compare by ID then by key */ static _s(t, e) {\n return et(t.As, e.As) || ht.comparator(t.key, e.key);\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ class Bo {\n constructor(t, e) {\n this.indexManager = t, this.referenceDelegate = e, \n /**\n * The set of all mutations that have been sent but not yet been applied to\n * the backend.\n */\n this.mutationQueue = [], \n /** Next value to use when assigning sequential IDs to each mutation batch. */\n this.vs = 1, \n /** An ordered mapping between documents and the mutations batch IDs. */\n this.Rs = new Ee(Fo.ds);\n }\n checkEmpty(t) {\n return Rt.resolve(0 === this.mutationQueue.length);\n }\n addMutationBatch(t, e, n, s) {\n const i = this.vs;\n this.vs++, this.mutationQueue.length > 0 && this.mutationQueue[this.mutationQueue.length - 1];\n const r = new Zs(i, e, n, s);\n this.mutationQueue.push(r);\n // Track references by document key and index collection parents.\n for (const e of s) this.Rs = this.Rs.add(new Fo(e.key, i)), this.indexManager.addToCollectionParentIndex(t, e.key.path.popLast());\n return Rt.resolve(r);\n }\n lookupMutationBatch(t, e) {\n return Rt.resolve(this.Ps(e));\n }\n getNextMutationBatchAfterBatchId(t, e) {\n const n = e + 1, s = this.bs(n), i = s < 0 ? 0 : s;\n // The requested batchId may still be out of range so normalize it to the\n // start of the queue.\n return Rt.resolve(this.mutationQueue.length > i ? this.mutationQueue[i] : null);\n }\n getHighestUnacknowledgedBatchId() {\n return Rt.resolve(0 === this.mutationQueue.length ? 
-1 : this.vs - 1);\n }\n getAllMutationBatches(t) {\n return Rt.resolve(this.mutationQueue.slice());\n }\n getAllMutationBatchesAffectingDocumentKey(t, e) {\n const n = new Fo(e, 0), s = new Fo(e, Number.POSITIVE_INFINITY), i = [];\n return this.Rs.forEachInRange([ n, s ], (t => {\n const e = this.Ps(t.As);\n i.push(e);\n })), Rt.resolve(i);\n }\n getAllMutationBatchesAffectingDocumentKeys(t, e) {\n let n = new Ee(et);\n return e.forEach((t => {\n const e = new Fo(t, 0), s = new Fo(t, Number.POSITIVE_INFINITY);\n this.Rs.forEachInRange([ e, s ], (t => {\n n = n.add(t.As);\n }));\n })), Rt.resolve(this.Vs(n));\n }\n getAllMutationBatchesAffectingQuery(t, e) {\n // Use the query path as a prefix for testing if a document matches the\n // query.\n const n = e.path, s = n.length + 1;\n // Construct a document reference for actually scanning the index. Unlike\n // the prefix, the document key in this reference must have an even number of\n // segments. The empty segment can be used as a suffix of the query path\n // because it precedes all other segments in an ordered traversal.\n let i = n;\n ht.isDocumentKey(i) || (i = i.child(\"\"));\n const r = new Fo(new ht(i), 0);\n // Find unique batchIDs referenced by all documents potentially matching the\n // query.\n let o = new Ee(et);\n return this.Rs.forEachWhile((t => {\n const e = t.key.path;\n return !!n.isPrefixOf(e) && (\n // Rows with document keys more than one segment longer than the query\n // path can't be matches. For example, a query on 'rooms' can't match\n // the document /rooms/abc/messages/xyx.\n // TODO(mcg): we'll need a different scanner when we implement\n // ancestor queries.\n e.length === s && (o = o.add(t.As)), !0);\n }), r), Rt.resolve(this.Vs(o));\n }\n Vs(t) {\n // Construct an array of matching batches, sorted by batchID to ensure that\n // multiple mutations affecting the same document key are applied in order.\n const e = [];\n return t.forEach((t => {\n const n = this.Ps(t);\n null !== n && e.push(n);\n })), e;\n }\n removeMutationBatch(t, e) {\n F(0 === this.Ss(e.batchId, \"removed\")), this.mutationQueue.shift();\n let n = this.Rs;\n return Rt.forEach(e.mutations, (s => {\n const i = new Fo(s.key, e.batchId);\n return n = n.delete(i), this.referenceDelegate.markPotentiallyOrphaned(t, s.key);\n })).next((() => {\n this.Rs = n;\n }));\n }\n Cn(t) {\n // No-op since the memory mutation queue does not maintain a separate cache.\n }\n containsKey(t, e) {\n const n = new Fo(e, 0), s = this.Rs.firstAfterOrEqual(n);\n return Rt.resolve(e.isEqual(s && s.key));\n }\n performConsistencyCheck(t) {\n return this.mutationQueue.length, Rt.resolve();\n }\n /**\n * Finds the index of the given batchId in the mutation queue and asserts that\n * the resulting index is within the bounds of the queue.\n *\n * @param batchId - The batchId to search for\n * @param action - A description of what the caller is doing, phrased in passive\n * form (e.g. \"acknowledged\" in a routine that acknowledges batches).\n */ Ss(t, e) {\n return this.bs(t);\n }\n /**\n * Finds the index of the given batchId in the mutation queue. This operation\n * is O(1).\n *\n * @returns The computed index of the batch with the given batchId, based on\n * the state of the queue. 
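* For example (illustrative): if the queue currently holds batches with ids\n * [3, 4, 5], then bs(4) = 4 - 3 = 1, bs(1) = -2 (already removed), and\n * bs(9) = 6 (past the end).\n *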
Note this index can be negative if the requested\n * batchId has already been remvoed from the queue or past the end of the\n * queue if the batchId is larger than the last added batch.\n */ bs(t) {\n if (0 === this.mutationQueue.length) \n // As an index this is past the end of the queue\n return 0;\n // Examine the front of the queue to figure out the difference between the\n // batchId and indexes in the array. Note that since the queue is ordered\n // by batchId, if the first batch has a larger batchId then the requested\n // batchId doesn't exist in the queue.\n return t - this.mutationQueue[0].batchId;\n }\n /**\n * A version of lookupMutationBatch that doesn't return a promise, this makes\n * other functions that uses this code easier to read and more efficent.\n */ Ps(t) {\n const e = this.bs(t);\n if (e < 0 || e >= this.mutationQueue.length) return null;\n return this.mutationQueue[e];\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * The memory-only RemoteDocumentCache for IndexedDb. To construct, invoke\n * `newMemoryRemoteDocumentCache()`.\n */\nclass Lo {\n /**\n * @param sizer - Used to assess the size of a document. For eager GC, this is\n * expected to just return 0 to avoid unnecessarily doing the work of\n * calculating the size.\n */\n constructor(t) {\n this.Ds = t, \n /** Underlying cache of documents and their read times. */\n this.docs = new pe(ht.comparator), \n /** Size of all cached documents. */\n this.size = 0;\n }\n setIndexManager(t) {\n this.indexManager = t;\n }\n /**\n * Adds the supplied entry to the cache and updates the cache size as appropriate.\n *\n * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer\n * returned by `newChangeBuffer()`.\n */ addEntry(t, e) {\n const n = e.key, s = this.docs.get(n), i = s ? s.size : 0, r = this.Ds(e);\n return this.docs = this.docs.insert(n, {\n document: e.mutableCopy(),\n size: r\n }), this.size += r - i, this.indexManager.addToCollectionParentIndex(t, n.path.popLast());\n }\n /**\n * Removes the specified entry from the cache and updates the cache size as appropriate.\n *\n * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer\n * returned by `newChangeBuffer()`.\n */ removeEntry(t) {\n const e = this.docs.get(t);\n e && (this.docs = this.docs.remove(t), this.size -= e.size);\n }\n getEntry(t, e) {\n const n = this.docs.get(e);\n return Rt.resolve(n ? n.document.mutableCopy() : an.newInvalidDocument(e));\n }\n getEntries(t, e) {\n let n = cs();\n return e.forEach((t => {\n const e = this.docs.get(t);\n n = n.insert(t, e ? 
e.document.mutableCopy() : an.newInvalidDocument(t));\n })), Rt.resolve(n);\n }\n getDocumentsMatchingQuery(t, e, n, s) {\n let i = cs();\n // Documents are ordered by key, so we can use a prefix scan to narrow down\n // the documents we need to match the query against.\n const r = e.path, o = new ht(r.child(\"\")), u = this.docs.getIteratorFrom(o);\n for (;u.hasNext(); ) {\n const {key: t, value: {document: o}} = u.getNext();\n if (!r.isPrefixOf(t.path)) break;\n t.path.length > r.length + 1 || (Tt(pt(o), n) <= 0 || (s.has(o.key) || ns(e, o)) && (i = i.insert(o.key, o.mutableCopy())));\n }\n return Rt.resolve(i);\n }\n getAllFromCollectionGroup(t, e, n, s) {\n // This method should only be called from the IndexBackfiller if persistence\n // is enabled.\n O();\n }\n Cs(t, e) {\n return Rt.forEach(this.docs, (t => e(t)));\n }\n newChangeBuffer(t) {\n // `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps\n // a separate changelog and does not need special handling for removals.\n return new qo(this);\n }\n getSize(t) {\n return Rt.resolve(this.size);\n }\n}\n\n/**\n * Creates a new memory-only RemoteDocumentCache.\n *\n * @param sizer - Used to assess the size of a document. For eager GC, this is\n * expected to just return 0 to avoid unnecessarily doing the work of\n * calculating the size.\n */\n/**\n * Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.\n */\nclass qo extends vo {\n constructor(t) {\n super(), this.os = t;\n }\n applyChanges(t) {\n const e = [];\n return this.changes.forEach(((n, s) => {\n s.isValidDocument() ? e.push(this.os.addEntry(t, s)) : this.os.removeEntry(n);\n })), Rt.waitFor(e);\n }\n getFromCache(t, e) {\n return this.os.getEntry(t, e);\n }\n getAllFromCache(t, e) {\n return this.os.getEntries(t, e);\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ class Uo {\n constructor(t) {\n this.persistence = t, \n /**\n * Maps a target to the data about that target.\n */\n this.xs = new os((t => $n(t)), On), \n /** The last received snapshot version. */\n this.lastRemoteSnapshotVersion = rt.min(), \n /** The highest numbered target ID encountered. */\n this.highestTargetId = 0, \n /** The highest sequence number encountered. 
*/\n this.Ns = 0, \n /**\n * An ordered bidirectional mapping between documents and the remote target\n * IDs.\n */\n this.ks = new Oo, this.targetCount = 0, this.Ms = lo.kn();\n }\n forEachTarget(t, e) {\n return this.xs.forEach(((t, n) => e(n))), Rt.resolve();\n }\n getLastRemoteSnapshotVersion(t) {\n return Rt.resolve(this.lastRemoteSnapshotVersion);\n }\n getHighestSequenceNumber(t) {\n return Rt.resolve(this.Ns);\n }\n allocateTargetId(t) {\n return this.highestTargetId = this.Ms.next(), Rt.resolve(this.highestTargetId);\n }\n setTargetsMetadata(t, e, n) {\n return n && (this.lastRemoteSnapshotVersion = n), e > this.Ns && (this.Ns = e), \n Rt.resolve();\n }\n Fn(t) {\n this.xs.set(t.target, t);\n const e = t.targetId;\n e > this.highestTargetId && (this.Ms = new lo(e), this.highestTargetId = e), t.sequenceNumber > this.Ns && (this.Ns = t.sequenceNumber);\n }\n addTargetData(t, e) {\n return this.Fn(e), this.targetCount += 1, Rt.resolve();\n }\n updateTargetData(t, e) {\n return this.Fn(e), Rt.resolve();\n }\n removeTargetData(t, e) {\n return this.xs.delete(e.target), this.ks.Is(e.targetId), this.targetCount -= 1, \n Rt.resolve();\n }\n removeTargets(t, e, n) {\n let s = 0;\n const i = [];\n return this.xs.forEach(((r, o) => {\n o.sequenceNumber <= e && null === n.get(o.targetId) && (this.xs.delete(r), i.push(this.removeMatchingKeysForTargetId(t, o.targetId)), \n s++);\n })), Rt.waitFor(i).next((() => s));\n }\n getTargetCount(t) {\n return Rt.resolve(this.targetCount);\n }\n getTargetData(t, e) {\n const n = this.xs.get(e) || null;\n return Rt.resolve(n);\n }\n addMatchingKeys(t, e, n) {\n return this.ks.gs(e, n), Rt.resolve();\n }\n removeMatchingKeys(t, e, n) {\n this.ks.ps(e, n);\n const s = this.persistence.referenceDelegate, i = [];\n return s && e.forEach((e => {\n i.push(s.markPotentiallyOrphaned(t, e));\n })), Rt.waitFor(i);\n }\n removeMatchingKeysForTargetId(t, e) {\n return this.ks.Is(e), Rt.resolve();\n }\n getMatchingKeysForTargetId(t, e) {\n const n = this.ks.Es(e);\n return Rt.resolve(n);\n }\n containsKey(t, e) {\n return Rt.resolve(this.ks.containsKey(e));\n }\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * A memory-backed instance of Persistence. Data is stored only in RAM and\n * not persisted across sessions.\n */\nclass Ko {\n /**\n * The constructor accepts a factory for creating a reference delegate. 
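* (For illustration: a call site constructs this as `new Ko(Qo.zs, databaseId)`,\n * where `Qo.zs` is the eager reference-delegate factory defined below and\n * `databaseId` is an illustrative argument name; the factory receives this\n * persistence instance and returns a delegate bound to it.)\n *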
This\n * allows both the delegate and this instance to have strong references to\n * each other without having nullable fields that would then need to be\n * checked or asserted on every access.\n */\n constructor(t, e) {\n this.$s = {}, this.overlays = {}, this.Os = new Ot(0), this.Fs = !1, this.Fs = !0, \n this.referenceDelegate = t(this), this.Bs = new Uo(this);\n this.indexManager = new zr, this.remoteDocumentCache = function(t) {\n return new Lo(t);\n }((t => this.referenceDelegate.Ls(t))), this.serializer = new ar(e), this.qs = new Mo(this.serializer);\n }\n start() {\n return Promise.resolve();\n }\n shutdown() {\n // No durable state to ensure is closed on shutdown.\n return this.Fs = !1, Promise.resolve();\n }\n get started() {\n return this.Fs;\n }\n setDatabaseDeletedListener() {\n // No op.\n }\n setNetworkEnabled() {\n // No op.\n }\n getIndexManager(t) {\n // We do not currently support indices for memory persistence, so we can\n // return the same shared instance of the memory index manager.\n return this.indexManager;\n }\n getDocumentOverlayCache(t) {\n let e = this.overlays[t.toKey()];\n return e || (e = new $o, this.overlays[t.toKey()] = e), e;\n }\n getMutationQueue(t, e) {\n let n = this.$s[t.toKey()];\n return n || (n = new Bo(e, this.referenceDelegate), this.$s[t.toKey()] = n), n;\n }\n getTargetCache() {\n return this.Bs;\n }\n getRemoteDocumentCache() {\n return this.remoteDocumentCache;\n }\n getBundleCache() {\n return this.qs;\n }\n runTransaction(t, e, n) {\n N(\"MemoryPersistence\", \"Starting transaction:\", t);\n const s = new Go(this.Os.next());\n return this.referenceDelegate.Us(), n(s).next((t => this.referenceDelegate.Ks(s).next((() => t)))).toPromise().then((t => (s.raiseOnCommittedEvent(), \n t)));\n }\n Gs(t, e) {\n return Rt.or(Object.values(this.$s).map((n => () => n.containsKey(t, e))));\n }\n}\n\n/**\n * Memory persistence is not actually transactional, but future implementations\n * may have transaction-scoped state.\n */ class Go extends At {\n constructor(t) {\n super(), this.currentSequenceNumber = t;\n }\n}\n\nclass Qo {\n constructor(t) {\n this.persistence = t, \n /** Tracks all documents that are active in Query views. */\n this.Qs = new Oo, \n /** The list of documents that are potentially GCed after each transaction. */\n this.js = null;\n }\n static zs(t) {\n return new Qo(t);\n }\n get Ws() {\n if (this.js) return this.js;\n throw O();\n }\n addReference(t, e, n) {\n return this.Qs.addReference(n, e), this.Ws.delete(n.toString()), Rt.resolve();\n }\n removeReference(t, e, n) {\n return this.Qs.removeReference(n, e), this.Ws.add(n.toString()), Rt.resolve();\n }\n markPotentiallyOrphaned(t, e) {\n return this.Ws.add(e.toString()), Rt.resolve();\n }\n removeTarget(t, e) {\n this.Qs.Is(e.targetId).forEach((t => this.Ws.add(t.toString())));\n const n = this.persistence.getTargetCache();\n return n.getMatchingKeysForTargetId(t, e.targetId).next((t => {\n t.forEach((t => this.Ws.add(t.toString())));\n })).next((() => n.removeTargetData(t, e)));\n }\n Us() {\n this.js = new Set;\n }\n Ks(t) {\n // Remove newly orphaned documents.\n const e = this.persistence.getRemoteDocumentCache().newChangeBuffer();\n return Rt.forEach(this.Ws, (n => {\n const s = ht.fromPath(n);\n return this.Hs(t, s).next((t => {\n t || e.removeEntry(s, rt.min());\n }));\n })).next((() => (this.js = null, e.apply(t))));\n }\n updateLimboDocument(t, e) {\n return this.Hs(t, e).next((t => {\n t ? 
this.Ws.delete(e.toString()) : this.Ws.add(e.toString());\n }));\n }\n Ls(t) {\n // For eager GC, we don't care about the document size, there are no size thresholds.\n return 0;\n }\n Hs(t, e) {\n return Rt.or([ () => Rt.resolve(this.Qs.containsKey(e)), () => this.persistence.getTargetCache().containsKey(t, e), () => this.persistence.Gs(t, e) ]);\n }\n}\n\nclass jo {\n constructor(t, e) {\n this.persistence = t, this.Js = new os((t => qt(t.path)), ((t, e) => t.isEqual(e))), \n this.garbageCollector = To(this, e);\n }\n static zs(t, e) {\n return new jo(t, e);\n }\n // No-ops, present so memory persistence doesn't have to care which delegate\n // it has.\n Us() {}\n Ks(t) {\n return Rt.resolve();\n }\n forEachTarget(t, e) {\n return this.persistence.getTargetCache().forEachTarget(t, e);\n }\n zn(t) {\n const e = this.Jn(t);\n return this.persistence.getTargetCache().getTargetCount(t).next((t => e.next((e => t + e))));\n }\n Jn(t) {\n let e = 0;\n return this.Wn(t, (t => {\n e++;\n })).next((() => e));\n }\n Wn(t, e) {\n return Rt.forEach(this.Js, ((n, s) => this.Xn(t, n, s).next((t => t ? Rt.resolve() : e(s)))));\n }\n removeTargets(t, e, n) {\n return this.persistence.getTargetCache().removeTargets(t, e, n);\n }\n removeOrphanedDocuments(t, e) {\n let n = 0;\n const s = this.persistence.getRemoteDocumentCache(), i = s.newChangeBuffer();\n return s.Cs(t, (s => this.Xn(t, s, e).next((t => {\n t || (n++, i.removeEntry(s, rt.min()));\n })))).next((() => i.apply(t))).next((() => n));\n }\n markPotentiallyOrphaned(t, e) {\n return this.Js.set(e, t.currentSequenceNumber), Rt.resolve();\n }\n removeTarget(t, e) {\n const n = e.withSequenceNumber(t.currentSequenceNumber);\n return this.persistence.getTargetCache().updateTargetData(t, n);\n }\n addReference(t, e, n) {\n return this.Js.set(n, t.currentSequenceNumber), Rt.resolve();\n }\n removeReference(t, e, n) {\n return this.Js.set(n, t.currentSequenceNumber), Rt.resolve();\n }\n updateLimboDocument(t, e) {\n return this.Js.set(e, t.currentSequenceNumber), Rt.resolve();\n }\n Ls(t) {\n let e = t.key.toString().length;\n return t.isFoundDocument() && (e += ze(t.data.value)), e;\n }\n Xn(t, e, n) {\n return Rt.or([ () => this.persistence.Gs(t, e), () => this.persistence.getTargetCache().containsKey(t, e), () => {\n const t = this.Js.get(e);\n return Rt.resolve(void 0 !== t && t > n);\n } ]);\n }\n getCacheSize(t) {\n return this.persistence.getRemoteDocumentCache().getSize(t);\n }\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/** Performs database creation and schema upgrades. */ class zo {\n constructor(t) {\n this.serializer = t;\n }\n /**\n * Performs database creation and schema upgrades.\n *\n * Note that in production, this method is only ever used to upgrade the schema\n * to SCHEMA_VERSION. 
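* For illustration, every migration step in the method body follows the same\n * guard (a sketch, with hypothetical names):\n *\n * if (fromVersion < K && toVersion >= K) {\n *   p = p.next(() => runMigrationK(txn)); // hypothetical step K\n * }\n *\n * so steps compose: a client at schema 3 upgrading to 15 runs each\n * applicable migration in order.\n *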
Different values of toVersion are only used for testing\n * and local feature development.\n */ O(t, e, n, s) {\n const i = new Pt(\"createOrUpgrade\", e);\n n < 1 && s >= 1 && (function(t) {\n t.createObjectStore(\"owner\");\n }(t), function(t) {\n t.createObjectStore(\"mutationQueues\", {\n keyPath: \"userId\"\n });\n t.createObjectStore(\"mutations\", {\n keyPath: \"batchId\",\n autoIncrement: !0\n }).createIndex(\"userMutationsIndex\", Qt, {\n unique: !0\n }), t.createObjectStore(\"documentMutations\");\n }\n /**\n * Upgrade function to migrate the 'mutations' store from V1 to V3. Loads\n * and rewrites all data.\n */ (t), Wo(t), function(t) {\n t.createObjectStore(\"remoteDocuments\");\n }(t));\n // Migration 2 to populate the targetGlobal object no longer needed since\n // migration 3 unconditionally clears it.\n let r = Rt.resolve();\n return n < 3 && s >= 3 && (\n // Brand new clients don't need to drop and recreate--only clients that\n // potentially have corrupt data.\n 0 !== n && (!function(t) {\n t.deleteObjectStore(\"targetDocuments\"), t.deleteObjectStore(\"targets\"), t.deleteObjectStore(\"targetGlobal\");\n }(t), Wo(t)), r = r.next((() => \n /**\n * Creates the target global singleton row.\n *\n * @param txn - The version upgrade transaction for indexeddb\n */\n function(t) {\n const e = t.store(\"targetGlobal\"), n = {\n highestTargetId: 0,\n highestListenSequenceNumber: 0,\n lastRemoteSnapshotVersion: rt.min().toTimestamp(),\n targetCount: 0\n };\n return e.put(\"targetGlobalKey\", n);\n }(i)))), n < 4 && s >= 4 && (0 !== n && (\n // Schema version 3 uses auto-generated keys to generate globally unique\n // mutation batch IDs (this was previously ensured internally by the\n // client). To migrate to the new schema, we have to read all mutations\n // and write them back out. We preserve the existing batch IDs to guarantee\n // consistency with other object stores. Any further mutation batch IDs will\n // be auto-generated.\n r = r.next((() => function(t, e) {\n return e.store(\"mutations\").j().next((n => {\n t.deleteObjectStore(\"mutations\");\n t.createObjectStore(\"mutations\", {\n keyPath: \"batchId\",\n autoIncrement: !0\n }).createIndex(\"userMutationsIndex\", Qt, {\n unique: !0\n });\n const s = e.store(\"mutations\"), i = n.map((t => s.put(t)));\n return Rt.waitFor(i);\n }));\n }(t, i)))), r = r.next((() => {\n !function(t) {\n t.createObjectStore(\"clientMetadata\", {\n keyPath: \"clientId\"\n });\n }(t);\n }))), n < 5 && s >= 5 && (r = r.next((() => this.Ys(i)))), n < 6 && s >= 6 && (r = r.next((() => (function(t) {\n t.createObjectStore(\"remoteDocumentGlobal\");\n }(t), this.Xs(i))))), n < 7 && s >= 7 && (r = r.next((() => this.Zs(i)))), n < 8 && s >= 8 && (r = r.next((() => this.ti(t, i)))), \n n < 9 && s >= 9 && (r = r.next((() => {\n // Multi-Tab used to manage its own changelog, but this has been moved\n // to the DbRemoteDocument object store itself. Since the previous change\n // log only contained transient data, we can drop its object store.\n !function(t) {\n t.objectStoreNames.contains(\"remoteDocumentChanges\") && t.deleteObjectStore(\"remoteDocumentChanges\");\n }(t);\n // Note: Schema version 9 used to create a read time index for the\n // RemoteDocumentCache. 
This is now done with schema version 13.\n }))), n < 10 && s >= 10 && (r = r.next((() => this.ei(i)))), n < 11 && s >= 11 && (r = r.next((() => {\n !function(t) {\n t.createObjectStore(\"bundles\", {\n keyPath: \"bundleId\"\n });\n }(t), function(t) {\n t.createObjectStore(\"namedQueries\", {\n keyPath: \"name\"\n });\n }(t);\n }))), n < 12 && s >= 12 && (r = r.next((() => {\n !function(t) {\n const e = t.createObjectStore(\"documentOverlays\", {\n keyPath: oe\n });\n e.createIndex(\"collectionPathOverlayIndex\", ue, {\n unique: !1\n }), e.createIndex(\"collectionGroupOverlayIndex\", ce, {\n unique: !1\n });\n }(t);\n }))), n < 13 && s >= 13 && (r = r.next((() => function(t) {\n const e = t.createObjectStore(\"remoteDocumentsV14\", {\n keyPath: Ht\n });\n e.createIndex(\"documentKeyIndex\", Jt), e.createIndex(\"collectionGroupIndex\", Yt);\n }(t))).next((() => this.ni(t, i))).next((() => t.deleteObjectStore(\"remoteDocuments\")))), \n n < 14 && s >= 14 && (r = r.next((() => this.si(t, i)))), n < 15 && s >= 15 && (r = r.next((() => function(t) {\n t.createObjectStore(\"indexConfiguration\", {\n keyPath: \"indexId\",\n autoIncrement: !0\n }).createIndex(\"collectionGroupIndex\", \"collectionGroup\", {\n unique: !1\n });\n t.createObjectStore(\"indexState\", {\n keyPath: ne\n }).createIndex(\"sequenceNumberIndex\", se, {\n unique: !1\n });\n t.createObjectStore(\"indexEntries\", {\n keyPath: ie\n }).createIndex(\"documentKeyIndex\", re, {\n unique: !1\n });\n }(t)))), r;\n }\n Xs(t) {\n let e = 0;\n return t.store(\"remoteDocuments\").X(((t, n) => {\n e += ro(n);\n })).next((() => {\n const n = {\n byteSize: e\n };\n return t.store(\"remoteDocumentGlobal\").put(\"remoteDocumentGlobalKey\", n);\n }));\n }\n Ys(t) {\n const e = t.store(\"mutationQueues\"), n = t.store(\"mutations\");\n return e.j().next((e => Rt.forEach(e, (e => {\n const s = IDBKeyRange.bound([ e.userId, -1 ], [ e.userId, e.lastAcknowledgedBatchId ]);\n return n.j(\"userMutationsIndex\", s).next((n => Rt.forEach(n, (n => {\n F(n.userId === e.userId);\n const s = _r(this.serializer, n);\n return io(t, e.userId, s).next((() => {}));\n }))));\n }))));\n }\n /**\n * Ensures that every document in the remote document cache has a corresponding sentinel row\n * with a sequence number. Missing rows are given the most recently used sequence number.\n */ Zs(t) {\n const e = t.store(\"targetDocuments\"), n = t.store(\"remoteDocuments\");\n return t.store(\"targetGlobal\").get(\"targetGlobalKey\").next((t => {\n const s = [];\n return n.X(((n, i) => {\n const r = new ut(n), o = function(t) {\n return [ 0, qt(t) ];\n }(r);\n s.push(e.get(o).next((n => n ? 
Rt.resolve() : (n => e.put({\n targetId: 0,\n path: qt(n),\n sequenceNumber: t.highestListenSequenceNumber\n }))(r))));\n })).next((() => Rt.waitFor(s)));\n }));\n }\n ti(t, e) {\n // Create the index.\n t.createObjectStore(\"collectionParents\", {\n keyPath: ee\n });\n const n = e.store(\"collectionParents\"), s = new Wr, i = t => {\n if (s.add(t)) {\n const e = t.lastSegment(), s = t.popLast();\n return n.put({\n collectionId: e,\n parent: qt(s)\n });\n }\n };\n // Helper to add an index entry iff we haven't already written it.\n // Index existing remote documents.\n return e.store(\"remoteDocuments\").X({\n Y: !0\n }, ((t, e) => {\n const n = new ut(t);\n return i(n.popLast());\n })).next((() => e.store(\"documentMutations\").X({\n Y: !0\n }, (([t, e, n], s) => {\n const r = Gt(e);\n return i(r.popLast());\n }))));\n }\n ei(t) {\n const e = t.store(\"targets\");\n return e.X(((t, n) => {\n const s = mr(n), i = gr(this.serializer, s);\n return e.put(i);\n }));\n }\n ni(t, e) {\n const n = e.store(\"remoteDocuments\"), s = [];\n return n.X(((t, n) => {\n const i = e.store(\"remoteDocumentsV14\"), r = (o = n, o.document ? new ht(ut.fromString(o.document.name).popFirst(5)) : o.noDocument ? ht.fromSegments(o.noDocument.path) : o.unknownDocument ? ht.fromSegments(o.unknownDocument.path) : O()).path.toArray();\n var o;\n /**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */ const u = {\n prefixPath: r.slice(0, r.length - 2),\n collectionGroup: r[r.length - 2],\n documentId: r[r.length - 1],\n readTime: n.readTime || [ 0, 0 ],\n unknownDocument: n.unknownDocument,\n noDocument: n.noDocument,\n document: n.document,\n hasCommittedMutations: !!n.hasCommittedMutations\n };\n s.push(i.put(u));\n })).next((() => Rt.waitFor(s)));\n }\n si(t, e) {\n const n = e.store(\"mutations\"), s = Po(this.serializer), i = new Ko(Qo.zs, this.serializer.fe);\n return n.j().next((t => {\n const n = new Map;\n return t.forEach((t => {\n var e;\n let s = null !== (e = n.get(t.userId)) && void 0 !== e ? e : gs();\n _r(this.serializer, t).keys().forEach((t => s = s.add(t))), n.set(t.userId, s);\n })), Rt.forEach(n, ((t, n) => {\n const r = new V(n), o = Rr.de(this.serializer, r), u = i.getIndexManager(r), c = oo.de(r, this.serializer, u, i.referenceDelegate);\n return new ko(s, c, o, u).recalculateAndSaveOverlaysForDocumentKeys(new we(e, Ot.ct), t).next();\n }));\n }));\n }\n}\n\nfunction Wo(t) {\n t.createObjectStore(\"targetDocuments\", {\n keyPath: Zt\n }).createIndex(\"documentTargetsIndex\", te, {\n unique: !0\n });\n // NOTE: This is unique only because the TargetId is the suffix.\n t.createObjectStore(\"targets\", {\n keyPath: \"targetId\"\n }).createIndex(\"queryTargetsIndex\", Xt, {\n unique: !0\n }), t.createObjectStore(\"targetGlobal\");\n}\n\nconst Ho = \"Failed to obtain exclusive access to the persistence layer. To allow shared access, multi-tab synchronization has to be enabled in all tabs. 
If you are using `experimentalForceOwningTab:true`, make sure that only one tab has persistence enabled at any given time.\";\n\n/**\n * Oldest acceptable age in milliseconds for client metadata before the client\n * is considered inactive and its associated data is garbage collected.\n */\n/**\n * An IndexedDB-backed instance of Persistence. Data is stored persistently\n * across sessions.\n *\n * On Web only, the Firestore SDKs support shared access to its persistence\n * layer. This allows multiple browser tabs to read and write to IndexedDb and\n * to synchronize state even without network connectivity. Shared access is\n * currently optional and not enabled unless all clients invoke\n * `enablePersistence()` with `{synchronizeTabs:true}`.\n *\n * In multi-tab mode, if multiple clients are active at the same time, the SDK\n * will designate one client as the “primary client”. An effort is made to pick\n * a visible, network-connected and active client, and this client is\n * responsible for letting other clients know about its presence. The primary\n * client writes a unique client-generated identifier (the client ID) to\n * IndexedDb’s “owner” store every 4 seconds. If the primary client fails to\n * update this entry, another client can acquire the lease and take over as\n * primary.\n *\n * Some persistence operations in the SDK are designated as primary-client only\n * operations. This includes the acknowledgment of mutations and all updates of\n * remote documents. The effects of these operations are written to persistence\n * and then broadcast to other tabs via LocalStorage (see\n * `WebStorageSharedClientState`), which then refresh their state from\n * persistence.\n *\n * Similarly, the primary client listens to notifications sent by secondary\n * clients to discover persistence changes written by secondary clients, such as\n * the addition of new mutations and query targets.\n *\n * If multi-tab is not enabled and another tab already obtained the primary\n * lease, IndexedDbPersistence enters a failed state and all subsequent\n * operations will automatically fail.\n *\n * Additionally, there is an optimization so that when a tab is closed, the\n * primary lease is released immediately (this is especially important to make\n * sure that a refreshed tab is able to immediately re-acquire the primary\n * lease). Unfortunately, IndexedDB cannot be reliably used in window.unload\n * since it is an asynchronous API. So in addition to attempting to give up the\n * lease, the leaseholder writes its client ID to a \"zombiedClient\" entry in\n * LocalStorage which acts as an indicator that another tab should go ahead and\n * take the primary lease immediately regardless of the current lease timestamp.\n *\n * TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no\n * longer optional.\n */\nclass Jo {\n constructor(\n /**\n * Whether to synchronize the in-memory state of multiple tabs and share\n * access to local persistence.\n */\n t, e, n, s, i, r, o, u, c, \n /**\n * If set to true, forcefully obtains database access. Existing tabs will\n * no longer be able to access IndexedDB.\n */\n a, h = 15) {\n if (this.allowTabSynchronization = t, this.persistenceKey = e, this.clientId = n, \n this.ii = i, this.window = r, this.document = o, this.ri = c, this.oi = a, this.ui = h, \n this.Os = null, this.Fs = !1, this.isPrimary = !1, this.networkEnabled = !0, \n /** Our window.unload handler, if registered. 
*/\n this.ci = null, this.inForeground = !1, \n /** Our 'visibilitychange' listener if registered. */\n this.ai = null, \n /** The client metadata refresh task. */\n this.hi = null, \n /** The last time we garbage collected the client metadata object store. */\n this.li = Number.NEGATIVE_INFINITY, \n /** A listener to notify on primary state changes. */\n this.fi = t => Promise.resolve(), !Jo.D()) throw new U(q.UNIMPLEMENTED, \"This platform is either missing IndexedDB or is known to have an incomplete implementation. Offline persistence has been disabled.\");\n this.referenceDelegate = new Eo(this, s), this.di = e + \"main\", this.serializer = new ar(u), \n this.wi = new bt(this.di, this.ui, new zo(this.serializer)), this.Bs = new fo(this.referenceDelegate, this.serializer), \n this.remoteDocumentCache = Po(this.serializer), this.qs = new Er, this.window && this.window.localStorage ? this._i = this.window.localStorage : (this._i = null, \n !1 === a && k(\"IndexedDbPersistence\", \"LocalStorage is unavailable. As a result, persistence may not work reliably. In particular, enablePersistence() could fail immediately after refreshing the page.\"));\n }\n /**\n * Attempt to start IndexedDb persistence.\n *\n * @returns Whether persistence was enabled.\n */ start() {\n // NOTE: This is expected to fail sometimes (in the case of another tab\n // already having the persistence lock), so it's the first thing we should\n // do.\n return this.mi().then((() => {\n if (!this.isPrimary && !this.allowTabSynchronization) \n // Fail `start()` if `synchronizeTabs` is disabled and we cannot\n // obtain the primary lease.\n throw new U(q.FAILED_PRECONDITION, Ho);\n return this.gi(), this.yi(), this.pi(), this.runTransaction(\"getHighestListenSequenceNumber\", \"readonly\", (t => this.Bs.getHighestSequenceNumber(t)));\n })).then((t => {\n this.Os = new Ot(t, this.ri);\n })).then((() => {\n this.Fs = !0;\n })).catch((t => (this.wi && this.wi.close(), Promise.reject(t))));\n }\n /**\n * Registers a listener that gets called when the primary state of the\n * instance changes. Upon registering, this listener is invoked immediately\n * with the current primary state.\n *\n * PORTING NOTE: This is only used for Web multi-tab.\n */ Ii(t) {\n return this.fi = async e => {\n if (this.started) return t(e);\n }, t(this.isPrimary);\n }\n /**\n * Registers a listener that gets called when the database receives a\n * version change event indicating that it has been deleted.\n *\n * PORTING NOTE: This is only used for Web multi-tab.\n */ setDatabaseDeletedListener(t) {\n this.wi.B((async e => {\n // Check if an attempt is made to delete IndexedDB.\n null === e.newVersion && await t();\n }));\n }\n /**\n * Adjusts the current network state in the client's metadata, potentially\n * affecting the primary lease.\n *\n * PORTING NOTE: This is only used for Web multi-tab.\n */ setNetworkEnabled(t) {\n this.networkEnabled !== t && (this.networkEnabled = t, \n // Schedule a primary lease refresh for immediate execution. The eventual\n // lease update will be propagated via `primaryStateListener`.\n this.ii.enqueueAndForget((async () => {\n this.started && await this.mi();\n })));\n }\n /**\n * Updates the client metadata in IndexedDb and attempts to either obtain or\n * extend the primary lease for the local client. 
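Each refresh writes a record of\n * roughly the shape `{ clientId, updateTimeMs: Date.now(), networkEnabled,\n * inForeground }` (field names taken from the put() call below). 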
Asynchronously notifies the\n * primary state listener if the client either newly obtained or released its\n * primary lease.\n */ mi() {\n return this.runTransaction(\"updateClientMetadataAndTryBecomePrimary\", \"readwrite\", (t => Xo(t).put({\n clientId: this.clientId,\n updateTimeMs: Date.now(),\n networkEnabled: this.networkEnabled,\n inForeground: this.inForeground\n }).next((() => {\n if (this.isPrimary) return this.Ti(t).next((t => {\n t || (this.isPrimary = !1, this.ii.enqueueRetryable((() => this.fi(!1))));\n }));\n })).next((() => this.Ei(t))).next((e => this.isPrimary && !e ? this.Ai(t).next((() => !1)) : !!e && this.vi(t).next((() => !0)))))).catch((t => {\n if (Dt(t)) \n // Proceed with the existing state. Any subsequent access to\n // IndexedDB will verify the lease.\n return N(\"IndexedDbPersistence\", \"Failed to extend owner lease: \", t), this.isPrimary;\n if (!this.allowTabSynchronization) throw t;\n return N(\"IndexedDbPersistence\", \"Releasing owner lease after error during lease refresh\", t), \n /* isPrimary= */ !1;\n })).then((t => {\n this.isPrimary !== t && this.ii.enqueueRetryable((() => this.fi(t))), this.isPrimary = t;\n }));\n }\n Ti(t) {\n return Yo(t).get(\"owner\").next((t => Rt.resolve(this.Ri(t))));\n }\n Pi(t) {\n return Xo(t).delete(this.clientId);\n }\n /**\n * If the garbage collection threshold has passed, prunes the\n * RemoteDocumentChanges and the ClientMetadata store based on the last update\n * time of all clients.\n */ async bi() {\n if (this.isPrimary && !this.Vi(this.li, 18e5)) {\n this.li = Date.now();\n const t = await this.runTransaction(\"maybeGarbageCollectMultiClientState\", \"readwrite-primary\", (t => {\n const e = _e(t, \"clientMetadata\");\n return e.j().next((t => {\n const n = this.Si(t, 18e5), s = t.filter((t => -1 === n.indexOf(t)));\n // Delete metadata for clients that are no longer considered active.\n return Rt.forEach(s, (t => e.delete(t.clientId))).next((() => s));\n }));\n })).catch((() => []));\n // Delete potential leftover entries that may continue to mark the\n // inactive clients as zombied in LocalStorage.\n // Ideally we'd delete the IndexedDb and LocalStorage zombie entries for\n // the client atomically, but we can't. So we opt to delete the IndexedDb\n // entries first to avoid potentially reviving a zombied client.\n if (this._i) for (const e of t) this._i.removeItem(this.Di(e.clientId));\n }\n }\n /**\n * Schedules a recurring timer to update the client metadata and to either\n * extend or acquire the primary lease if the client is eligible.\n */ pi() {\n this.hi = this.ii.enqueueAfterDelay(\"client_metadata_refresh\" /* TimerId.ClientMetadataRefresh */ , 4e3, (() => this.mi().then((() => this.bi())).then((() => this.pi()))));\n }\n /** Checks whether `client` is the local client. */ Ri(t) {\n return !!t && t.ownerId === this.clientId;\n }\n /**\n * Evaluate the state of all active clients and determine whether the local\n * client is or can act as the holder of the primary lease. 
Returns whether\n * the client is eligible for the lease, but does not actually acquire it.\n * May return 'false' even if there is no active leaseholder and another\n * (foreground) client should become leaseholder instead.\n */ Ei(t) {\n if (this.oi) return Rt.resolve(!0);\n return Yo(t).get(\"owner\").next((e => {\n // A client is eligible for the primary lease if:\n // - its network is enabled and the client's tab is in the foreground.\n // - its network is enabled and no other client's tab is in the\n // foreground.\n // - every client's network is disabled and the client's tab is in the\n // foreground.\n // - every client's network is disabled and no other client's tab is in\n // the foreground.\n // - the `forceOwningTab` setting was passed in.\n if (null !== e && this.Vi(e.leaseTimestampMs, 5e3) && !this.Ci(e.ownerId)) {\n if (this.Ri(e) && this.networkEnabled) return !0;\n if (!this.Ri(e)) {\n if (!e.allowTabSynchronization) \n // Fail the `canActAsPrimary` check if the current leaseholder has\n // not opted into multi-tab synchronization. If this happens at\n // client startup, we reject the Promise returned by\n // `enablePersistence()` and the user can continue to use Firestore\n // with in-memory persistence.\n // If this fails during a lease refresh, we will instead block the\n // AsyncQueue from executing further operations. Note that this is\n // acceptable since mixing & matching different `synchronizeTabs`\n // settings is not supported.\n // TODO(b/114226234): Remove this check when `synchronizeTabs` can\n // no longer be turned off.\n throw new U(q.FAILED_PRECONDITION, Ho);\n return !1;\n }\n }\n return !(!this.networkEnabled || !this.inForeground) || Xo(t).j().next((t => void 0 === this.Si(t, 5e3).find((t => {\n if (this.clientId !== t.clientId) {\n const e = !this.networkEnabled && t.networkEnabled, n = !this.inForeground && t.inForeground, s = this.networkEnabled === t.networkEnabled;\n if (e || n && s) return !0;\n }\n return !1;\n }))));\n })).next((t => (this.isPrimary !== t && N(\"IndexedDbPersistence\", `Client ${t ? \"is\" : \"is not\"} eligible for a primary lease.`), \n t)));\n }\n async shutdown() {\n // The shutdown() operations are idempotent and can be called even when\n // start() aborted (e.g. because it couldn't acquire the persistence lease).\n this.Fs = !1, this.xi(), this.hi && (this.hi.cancel(), this.hi = null), this.Ni(), \n this.ki(), \n // Use `SimpleDb.runTransaction` directly to avoid failing if another tab\n // has obtained the primary lease.\n await this.wi.runTransaction(\"shutdown\", \"readwrite\", [ \"owner\", \"clientMetadata\" ], (t => {\n const e = new we(t, Ot.ct);\n return this.Ai(e).next((() => this.Pi(e)));\n })), this.wi.close(), \n // Remove the entry marking the client as zombied from LocalStorage since\n // we successfully deleted its metadata from IndexedDb.\n this.Mi();\n }\n /**\n * Returns clients that are not zombied and have an updateTime within the\n * provided threshold.\n */ Si(t, e) {\n return t.filter((t => this.Vi(t.updateTimeMs, e) && !this.Ci(t.clientId)));\n }\n /**\n * Returns the IDs of the clients that are currently active. 
If multi-tab\n * is not supported, returns an array that only contains the local client's\n * ID.\n *\n * PORTING NOTE: This is only used for Web multi-tab.\n */ $i() {\n return this.runTransaction(\"getActiveClients\", \"readonly\", (t => Xo(t).j().next((t => this.Si(t, 18e5).map((t => t.clientId))))));\n }\n get started() {\n return this.Fs;\n }\n getMutationQueue(t, e) {\n return oo.de(t, this.serializer, e, this.referenceDelegate);\n }\n getTargetCache() {\n return this.Bs;\n }\n getRemoteDocumentCache() {\n return this.remoteDocumentCache;\n }\n getIndexManager(t) {\n return new Jr(t, this.serializer.fe.databaseId);\n }\n getDocumentOverlayCache(t) {\n return Rr.de(this.serializer, t);\n }\n getBundleCache() {\n return this.qs;\n }\n runTransaction(t, e, n) {\n N(\"IndexedDbPersistence\", \"Starting transaction:\", t);\n /** Returns the object stores for the provided schema. */\n const s = \"readonly\" === e ? \"readonly\" : \"readwrite\", i = 15 === (r = this.ui) ? de : 14 === r ? fe : 13 === r ? le : 12 === r ? he : 11 === r ? ae : void O();\n var r;\n let o;\n // Do all transactions as readwrite against all object stores, since we\n // are the only reader/writer.\n return this.wi.runTransaction(t, s, i, (s => (o = new we(s, this.Os ? this.Os.next() : Ot.ct), \n \"readwrite-primary\" === e ? this.Ti(o).next((t => !!t || this.Ei(o))).next((e => {\n if (!e) throw k(`Failed to obtain primary lease for action '${t}'.`), this.isPrimary = !1, \n this.ii.enqueueRetryable((() => this.fi(!1))), new U(q.FAILED_PRECONDITION, Et);\n return n(o);\n })).next((t => this.vi(o).next((() => t)))) : this.Oi(o).next((() => n(o)))))).then((t => (o.raiseOnCommittedEvent(), \n t)));\n }\n /**\n * Verifies that the current tab is the primary leaseholder or alternatively\n * that the leaseholder has opted into multi-tab synchronization.\n */\n // TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer\n // be turned off.\n Oi(t) {\n return Yo(t).get(\"owner\").next((t => {\n if (null !== t && this.Vi(t.leaseTimestampMs, 5e3) && !this.Ci(t.ownerId) && !this.Ri(t) && !(this.oi || this.allowTabSynchronization && t.allowTabSynchronization)) throw new U(q.FAILED_PRECONDITION, Ho);\n }));\n }\n /**\n * Obtains or extends the primary lease for the local client. This\n * method does not verify that the client is eligible for this lease.\n */ vi(t) {\n const e = {\n ownerId: this.clientId,\n allowTabSynchronization: this.allowTabSynchronization,\n leaseTimestampMs: Date.now()\n };\n return Yo(t).put(\"owner\", e);\n }\n static D() {\n return bt.D();\n }\n /** Checks the primary lease and removes it if we are the current primary. */ Ai(t) {\n const e = Yo(t);\n return e.get(\"owner\").next((t => this.Ri(t) ? (N(\"IndexedDbPersistence\", \"Releasing primary lease.\"), \n e.delete(\"owner\")) : Rt.resolve()));\n }\n /** Verifies that `updateTimeMs` is within `maxAgeMs`. 
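Roughly:\n * returns `Date.now() - maxAgeMs <= updateTimeMs <= Date.now()` (pseudocode,\n * not an identifier from this file); update times from the future are logged\n * and treated as out of range. 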
*/ Vi(t, e) {\n const n = Date.now();\n return !(t < n - e) && (!(t > n) || (k(`Detected an update time that is in the future: ${t} > ${n}`), \n !1));\n }\n gi() {\n null !== this.document && \"function\" == typeof this.document.addEventListener && (this.ai = () => {\n this.ii.enqueueAndForget((() => (this.inForeground = \"visible\" === this.document.visibilityState, \n this.mi())));\n }, this.document.addEventListener(\"visibilitychange\", this.ai), this.inForeground = \"visible\" === this.document.visibilityState);\n }\n Ni() {\n this.ai && (this.document.removeEventListener(\"visibilitychange\", this.ai), this.ai = null);\n }\n /**\n * Attaches a window.unload handler that will synchronously write our\n * clientId to a \"zombie client id\" location in LocalStorage. This can be used\n * by tabs trying to acquire the primary lease to determine that the lease\n * is no longer valid even if the timestamp is recent. This is particularly\n * important for the refresh case (so the tab correctly re-acquires the\n * primary lease). LocalStorage is used for this rather than IndexedDb because\n * it is a synchronous API and so can be used reliably from an unload\n * handler.\n */ yi() {\n var t;\n \"function\" == typeof (null === (t = this.window) || void 0 === t ? void 0 : t.addEventListener) && (this.ci = () => {\n // Note: In theory, this should be scheduled on the AsyncQueue since it\n // accesses internal state. We execute this code directly during shutdown\n // to make sure it gets a chance to run.\n this.xi();\n const t = /(?:Version|Mobile)\\/1[456]/;\n isSafari() && (navigator.appVersion.match(t) || navigator.userAgent.match(t)) && \n // On Safari 14, 15, and 16, we do not run any cleanup actions as it might\n // trigger a bug that prevents Safari from re-opening IndexedDB during\n // the next page load.\n // See https://bugs.webkit.org/show_bug.cgi?id=226547\n this.ii.enterRestrictedMode(/* purgeExistingTasks= */ !0), this.ii.enqueueAndForget((() => this.shutdown()));\n }, this.window.addEventListener(\"pagehide\", this.ci));\n }\n ki() {\n this.ci && (this.window.removeEventListener(\"pagehide\", this.ci), this.ci = null);\n }\n /**\n * Returns whether a client is \"zombied\" based on its LocalStorage entry.\n * Clients become zombied when their tab closes without running all of the\n * cleanup logic in `shutdown()`.\n */ Ci(t) {\n var e;\n try {\n const n = null !== (null === (e = this._i) || void 0 === e ? void 0 : e.getItem(this.Di(t)));\n return N(\"IndexedDbPersistence\", `Client '${t}' ${n ? \"is\" : \"is not\"} zombied in LocalStorage`), \n n;\n } catch (t) {\n // Gracefully handle if LocalStorage isn't working.\n return k(\"IndexedDbPersistence\", \"Failed to get zombied client id.\", t), !1;\n }\n }\n /**\n * Record client as zombied (a client that had its tab closed). Zombied\n * clients are ignored during primary tab selection.\n */ xi() {\n if (this._i) try {\n this._i.setItem(this.Di(this.clientId), String(Date.now()));\n } catch (t) {\n // Gracefully handle if LocalStorage isn't available / working.\n k(\"Failed to set zombie client id.\", t);\n }\n }\n /** Removes the zombied client entry if it exists. 
*/ Mi() {\n if (this._i) try {\n this._i.removeItem(this.Di(this.clientId));\n } catch (t) {\n // Ignore\n }\n }\n Di(t) {\n return `firestore_zombie_${this.persistenceKey}_${t}`;\n }\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the primary client object store.\n */ function Yo(t) {\n return _e(t, \"owner\");\n}\n\n/**\n * Helper to get a typed SimpleDbStore for the client metadata object store.\n */ function Xo(t) {\n return _e(t, \"clientMetadata\");\n}\n\n/**\n * Generates a string used as a prefix when storing data in IndexedDB and\n * LocalStorage.\n */ function Zo(t, e) {\n // Use two different prefix formats:\n // * firestore / persistenceKey / projectID . databaseID / ...\n // * firestore / persistenceKey / projectID / ...\n // projectIDs are DNS-compatible names and cannot contain dots\n // so there's no danger of collisions.\n let n = t.projectId;\n return t.isDefaultDatabase || (n += \".\" + t.database), \"firestore/\" + e + \"/\" + n + \"/\";\n}\n\n/**\n * @license\n * Copyright 2017 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * A set of changes to what documents are currently in view and out of view for\n * a given query. These changes are sent to the LocalStore by the View (via\n * the SyncEngine) and are used to pin / unpin documents as appropriate.\n */\nclass tu {\n constructor(t, e, n, s) {\n this.targetId = t, this.fromCache = e, this.Fi = n, this.Bi = s;\n }\n static Li(t, e) {\n let n = gs(), s = gs();\n for (const t of e.docChanges) switch (t.type) {\n case 0 /* ChangeType.Added */ :\n n = n.add(t.doc.key);\n break;\n\n case 1 /* ChangeType.Removed */ :\n s = s.add(t.doc.key);\n break;\n\n default:\n // do nothing\n }\n return new tu(t, e.fromCache, n, s);\n }\n}\n\n/**\n * @license\n * Copyright 2019 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * The Firestore query engine.\n *\n * Firestore queries can be executed in three modes. The Query Engine determines\n * what mode to use based on what data is persisted. The mode only determines\n * the runtime complexity of the query - the result set is equivalent across all\n * implementations.\n *\n * The Query engine will use index-based execution if a user has configured\n * any index that can be used to execute the query (via `setIndexConfiguration()`).\n * Otherwise, the engine will try to optimize the query by re-using a previously\n * persisted query result. 
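(As a rough sketch, with deobfuscated\n * names that are assumptions, the fallback chain is\n * `indexedResult(query) ?? keyBasedResult(query) ?? fullCollectionScan(query)`.)\n * 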
If that is not possible, the query will be executed\n * via a full collection scan.\n *\n * Index-based execution is the default when available. The query engine\n * supports partial indexed execution and merges the result from the index\n * lookup with documents that have not yet been indexed. The index evaluation\n * matches the backend's format and as such, the SDK can use indexing for all\n * queries that the backend supports.\n *\n * If no index exists, the query engine tries to take advantage of the target\n * document mapping in the TargetCache. These mappings exist for all queries\n * that have been synced with the backend at least once and allow the query\n * engine to only read documents that previously matched a query plus any\n * documents that were edited after the query was last listened to.\n *\n * There are some cases when this optimization is not guaranteed to produce\n * the same results as full collection scans. In these cases, query\n * processing falls back to full scans. These cases are:\n *\n * - Limit queries where a document that matched the query previously no longer\n * matches the query.\n *\n * - Limit queries where a document edit may cause the document to sort below\n * another document that is in the local cache.\n *\n * - Queries that have never been CURRENT or free of limbo documents.\n */ class eu {\n constructor() {\n this.qi = !1;\n }\n /** Sets the document view to query against. */ initialize(t, e) {\n this.Ui = t, this.indexManager = e, this.qi = !0;\n }\n /** Returns all local documents matching the specified query. */ getDocumentsMatchingQuery(t, e, n, s) {\n return this.Ki(t, e).next((i => i || this.Gi(t, e, s, n))).next((n => n || this.Qi(t, e)));\n }\n /**\n * Performs an indexed query that evaluates the query based on a collection's\n * persisted index values. Returns `null` if an index is not available.\n */ Ki(t, e) {\n if (Qn(e)) \n // Queries that match all documents don't benefit from using\n // key-based lookups. It is more efficient to scan all documents in a\n // collection, rather than to perform individual lookups.\n return Rt.resolve(null);\n let n = Jn(e);\n return this.indexManager.getIndexType(t, n).next((s => 0 /* IndexType.NONE */ === s ? null : (null !== e.limit && 1 /* IndexType.PARTIAL */ === s && (\n // We cannot apply a limit for targets that are served using a partial\n // index. If a partial index will be used to serve the target, the\n // query may return a superset of documents that match the target\n // (e.g. if the index doesn't include all the target's filters), or\n // may return the correct set of documents in the wrong order (e.g. if\n // the index doesn't include a segment for one of the orderBys).\n // Therefore, a limit should not be applied in such cases.\n e = Xn(e, null, \"F\" /* LimitType.First */), n = Jn(e)), this.indexManager.getDocumentsMatchingTarget(t, n).next((s => {\n const i = gs(...s);\n return this.Ui.getDocuments(t, i).next((s => this.indexManager.getMinOffset(t, n).next((n => {\n const r = this.ji(e, s);\n return this.zi(e, r, i, n.readTime) ? this.Ki(t, Xn(e, null, \"F\" /* LimitType.First */)) : this.Wi(t, r, e, n);\n }))));\n })))));\n }\n /**\n * Performs a query based on the target's persisted query mapping. Returns\n * `null` if the mapping is not available or cannot be used.\n */ Gi(t, e, n, s) {\n // Queries that have never seen a snapshot without limbo free documents\n // should also be run as a full collection scan.\n return Qn(e) || s.isEqual(rt.min()) ? this.Qi(t, e) : this.Ui.getDocuments(t, n).next((i => {\n const r = this.ji(e, i);\n return this.zi(e, r, n, s) ? 
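/* a refill is required, so fall back to a full collection scan */ 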
this.Qi(t, e) : (C() <= LogLevel.DEBUG && N(\"QueryEngine\", \"Re-using previous result from %s to execute query: %s\", s.toString(), es(e)), \n this.Wi(t, r, e, yt(s, -1)));\n }));\n }\n /** Applies the query filter and sorting to the provided documents. */ ji(t, e) {\n // Sort the documents and re-apply the query filter since previously\n // matching documents do not necessarily still match the query.\n let n = new Ee(is(t));\n return e.forEach(((e, s) => {\n ns(t, s) && (n = n.add(s));\n })), n;\n }\n /**\n * Determines if a limit query needs to be refilled from cache, making it\n * ineligible for index-free execution.\n *\n * @param query - The query.\n * @param sortedPreviousResults - The documents that matched the query when it\n * was last synchronized, sorted by the query's comparator.\n * @param remoteKeys - The document keys that matched the query at the last\n * snapshot.\n * @param limboFreeSnapshotVersion - The version of the snapshot when the\n * query was last synchronized.\n */ zi(t, e, n, s) {\n if (null === t.limit) \n // Queries without limits do not need to be refilled.\n return !1;\n if (n.size !== e.size) \n // The query needs to be refilled if a previously matching document no\n // longer matches.\n return !0;\n // Limit queries are not eligible for index-free query execution if there is\n // a potential that an older document from cache now sorts before a document\n // that was previously part of the limit. This, however, can only happen if\n // the document at the edge of the limit goes out of limit.\n // If a document that is not the limit boundary sorts differently,\n // the boundary of the limit itself did not change and documents from cache\n // will continue to be \"rejected\" by this boundary. Therefore, we can ignore\n // any modifications that don't affect the last document.\n const i = \"F\" /* LimitType.First */ === t.limitType ? 
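/* limit-to-first queries check the last doc, limit-to-last the first */ 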
e.last() : e.first();\n return !!i && (i.hasPendingWrites || i.version.compareTo(s) > 0);\n }\n Qi(t, e) {\n return C() <= LogLevel.DEBUG && N(\"QueryEngine\", \"Using full collection scan to execute query:\", es(e)), \n this.Ui.getDocumentsMatchingQuery(t, e, It.min());\n }\n /**\n * Combines the results from an indexed execution with the remaining documents\n * that have not yet been indexed.\n */ Wi(t, e, n, s) {\n // Retrieve all results for documents that were updated since the offset.\n return this.Ui.getDocumentsMatchingQuery(t, n, s).next((t => (\n // Merge with existing results\n e.forEach((e => {\n t = t.insert(e.key, e);\n })), t)));\n }\n}\n\n/**\n * @license\n * Copyright 2020 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/**\n * Implements `LocalStore` interface.\n *\n * Note: some fields defined in this class might have a public access level, but\n * the class is not exported so they are only accessible from this module.\n * This is useful to implement optional features (like bundles) in free\n * functions, such that they are tree-shakeable.\n */\nclass nu {\n constructor(\n /** Manages our in-memory or durable persistence. */\n t, e, n, s) {\n this.persistence = t, this.Hi = e, this.serializer = s, \n /**\n * Maps a targetID to data about its target.\n *\n * PORTING NOTE: We are using an immutable data structure on Web to make re-runs\n * of `applyRemoteEvent()` idempotent.\n */\n this.Ji = new pe(et), \n /** Maps a target to its targetID. */\n // TODO(wuandy): Evaluate if TargetId can be part of Target.\n this.Yi = new os((t => $n(t)), On), \n /**\n * A per collection group index of the last read time processed by\n * `getNewDocumentChanges()`.\n *\n * PORTING NOTE: This is only used for multi-tab synchronization.\n */\n this.Xi = new Map, this.Zi = t.getRemoteDocumentCache(), this.Bs = t.getTargetCache(), \n this.qs = t.getBundleCache(), this.tr(n);\n }\n tr(t) {\n // TODO(indexing): Add spec tests that test these components change after a\n // user change\n this.documentOverlayCache = this.persistence.getDocumentOverlayCache(t), this.indexManager = this.persistence.getIndexManager(t), \n this.mutationQueue = this.persistence.getMutationQueue(t, this.indexManager), this.localDocuments = new ko(this.Zi, this.mutationQueue, this.documentOverlayCache, this.indexManager), \n this.Zi.setIndexManager(this.indexManager), this.Hi.initialize(this.localDocuments, this.indexManager);\n }\n collectGarbage(t) {\n return this.persistence.runTransaction(\"Collect garbage\", \"readwrite-primary\", (e => t.collect(e, this.Ji)));\n }\n}\n\nfunction su(\n/** Manages our in-memory or durable persistence. 
*/\nt, e, n, s) {\n return new nu(t, e, n, s);\n}\n\n/**\n * Tells the LocalStore that the currently authenticated user has changed.\n *\n * In response the local store switches the mutation queue to the new user and\n * returns any resulting document changes.\n */\n// PORTING NOTE: Android and iOS only return the documents affected by the\n// change.\nasync function iu(t, e) {\n const n = L(t);\n return await n.persistence.runTransaction(\"Handle user change\", \"readonly\", (t => {\n // Swap out the mutation queue, grabbing the pending mutation batches\n // before and after.\n let s;\n return n.mutationQueue.getAllMutationBatches(t).next((i => (s = i, n.tr(e), n.mutationQueue.getAllMutationBatches(t)))).next((e => {\n const i = [], r = [];\n // Union the old/new changed keys.\n let o = gs();\n for (const t of s) {\n i.push(t.batchId);\n for (const e of t.mutations) o = o.add(e.key);\n }\n for (const t of e) {\n r.push(t.batchId);\n for (const e of t.mutations) o = o.add(e.key);\n }\n // Return the set of all (potentially) changed documents and the list\n // of mutation batch IDs that were affected by the change.\n return n.localDocuments.getDocuments(t, o).next((t => ({\n er: t,\n removedBatchIds: i,\n addedBatchIds: r\n })));\n }));\n }));\n}\n\n/* Accepts locally generated Mutations and commits them to storage. */\n/**\n * Acknowledges the given batch.\n *\n * On the happy path when a batch is acknowledged, the local store will\n *\n * + remove the batch from the mutation queue;\n * + apply the changes to the remote document cache;\n * + recalculate the latency compensated view implied by those changes (there\n * may be mutations in the queue that affect the documents but haven't been\n * acknowledged yet); and\n * + give the changed documents back to the sync engine\n *\n * @returns The resulting (modified) documents.\n */\nfunction ru(t, e) {\n const n = L(t);\n return n.persistence.runTransaction(\"Acknowledge batch\", \"readwrite-primary\", (t => {\n const s = e.batch.keys(), i = n.Zi.newChangeBuffer({\n trackRemovals: !0\n });\n return function(t, e, n, s) {\n const i = n.batch, r = i.keys();\n let o = Rt.resolve();\n return r.forEach((t => {\n o = o.next((() => s.getEntry(e, t))).next((e => {\n const r = n.docVersions.get(t);\n F(null !== r), e.version.compareTo(r) < 0 && (i.applyToRemoteDocument(e, n), e.isValidDocument() && (\n // We use the commitVersion as the readTime rather than the\n // document's updateTime since the updateTime is not advanced\n // for updates that do not modify the underlying document.\n e.setReadTime(n.commitVersion), s.addEntry(e)));\n }));\n })), o.next((() => t.mutationQueue.removeMutationBatch(e, i)));\n }\n /** Returns the local view of the documents affected by a mutation batch. 
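Within the\n * helper above, each write result is applied to the cached copy only when the\n * cached version predates the batch's committed version for that document. 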
*/\n // PORTING NOTE: Multi-Tab only.\n (n, t, e, i).next((() => i.apply(t))).next((() => n.mutationQueue.performConsistencyCheck(t))).next((() => n.documentOverlayCache.removeOverlaysForBatchId(t, s, e.batch.batchId))).next((() => n.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(t, function(t) {\n let e = gs();\n for (let n = 0; n < t.mutationResults.length; ++n) {\n t.mutationResults[n].transformResults.length > 0 && (e = e.add(t.batch.mutations[n].key));\n }\n return e;\n }\n /**\n * Removes mutations from the MutationQueue for the specified batch;\n * LocalDocuments will be recalculated.\n *\n * @returns The resulting modified documents.\n */ (e)))).next((() => n.localDocuments.getDocuments(t, s)));\n }));\n}\n\n/**\n * Returns the last consistent snapshot processed (used by the RemoteStore to\n * determine whether to buffer incoming snapshots from the backend).\n */\nfunction ou(t) {\n const e = L(t);\n return e.persistence.runTransaction(\"Get last remote snapshot version\", \"readonly\", (t => e.Bs.getLastRemoteSnapshotVersion(t)));\n}\n\n/**\n * Updates the \"ground-state\" (remote) documents. We assume that the remote\n * event reflects any write batches that have been acknowledged or rejected\n * (i.e. we do not re-apply local mutations to updates from this event).\n *\n * LocalDocuments are re-calculated if there are remaining mutations in the\n * queue.\n */ function uu(t, e) {\n const n = L(t), s = e.snapshotVersion;\n let i = n.Ji;\n return n.persistence.runTransaction(\"Apply remote event\", \"readwrite-primary\", (t => {\n const r = n.Zi.newChangeBuffer({\n trackRemovals: !0\n });\n // Reset newTargetDataByTargetMap in case this transaction gets re-run.\n i = n.Ji;\n const o = [];\n e.targetChanges.forEach(((r, u) => {\n const c = i.get(u);\n if (!c) return;\n // Only update the remote keys if the target is still active. This\n // ensures that we can persist the updated target data along with\n // the updated assignment.\n o.push(n.Bs.removeMatchingKeys(t, r.removedDocuments, u).next((() => n.Bs.addMatchingKeys(t, r.addedDocuments, u))));\n let a = c.withSequenceNumber(t.currentSequenceNumber);\n null !== e.targetMismatches.get(u) ? a = a.withResumeToken(Ve.EMPTY_BYTE_STRING, rt.min()).withLastLimboFreeSnapshotVersion(rt.min()) : r.resumeToken.approximateByteSize() > 0 && (a = a.withResumeToken(r.resumeToken, s)), \n i = i.insert(u, a), \n // Update the target data if there are target changes (or if\n // sufficient time has passed since the last update).\n /**\n * Returns true if the newTargetData should be persisted during an update of\n * an active target. TargetData should always be persisted when a target is\n * being released; this function should not be called in that case.\n *\n * While the target is active, TargetData updates can be omitted when nothing\n * about the target has changed except metadata like the resume token or\n * snapshot version. Occasionally it's worth the extra write to prevent these\n * values from getting too stale after a crash, but this doesn't have to be\n * too frequent.\n */\n function(t, e, n) {\n // Always persist target data if we don't already have a resume token.\n if (0 === t.resumeToken.approximateByteSize()) return !0;\n // Don't allow resume token changes to be buffered indefinitely. This\n // allows us to be reasonably up-to-date after a crash and avoids needing\n // to loop over all active queries on shutdown. 
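(The check below compares\n // snapshot versions against a threshold of 3e8 microseconds, i.e. five minutes.) 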
Especially in the browser\n // we may not get time to do anything interesting while the current tab is\n // closing.\n if (e.snapshotVersion.toMicroseconds() - t.snapshotVersion.toMicroseconds() >= 3e8) return !0;\n // Otherwise if the only thing that has changed about a target is its resume\n // token it's not worth persisting. Note that the RemoteStore keeps an\n // in-memory view of the currently active targets which includes the current\n // resume token, so stream failure or user changes will still use an\n // up-to-date resume token regardless of what we do here.\n return n.addedDocuments.size + n.modifiedDocuments.size + n.removedDocuments.size > 0;\n }\n /**\n * Notifies local store of the changed views to locally pin documents.\n */ (c, a, r) && o.push(n.Bs.updateTargetData(t, a));\n }));\n let u = cs(), c = gs();\n // HACK: The only reason we allow a null snapshot version is so that we\n // can synthesize remote events when we get permission denied errors while\n // trying to resolve the state of a locally cached document that is in\n // limbo.\n if (e.documentUpdates.forEach((s => {\n e.resolvedLimboDocuments.has(s) && o.push(n.persistence.referenceDelegate.updateLimboDocument(t, s));\n })), \n // Each loop iteration only affects its \"own\" doc, so it's safe to get all\n // the remote documents in advance in a single call.\n o.push(cu(t, r, e.documentUpdates).next((t => {\n u = t.nr, c = t.sr;\n }))), !s.isEqual(rt.min())) {\n const e = n.Bs.getLastRemoteSnapshotVersion(t).next((e => n.Bs.setTargetsMetadata(t, t.currentSequenceNumber, s)));\n o.push(e);\n }\n return Rt.waitFor(o).next((() => r.apply(t))).next((() => n.localDocuments.getLocalViewOfDocuments(t, u, c))).next((() => u));\n })).then((t => (n.Ji = i, t)));\n}\n\n/**\n * Populates document change buffer with documents from backend or a bundle.\n * Returns the document changes resulting from applying those documents, and\n * also a set of documents whose existence state changed as a result.\n *\n * @param txn - Transaction to use to read existing documents from storage.\n * @param documentBuffer - Document buffer to collect the resulted changes to be\n * applied to storage.\n * @param documents - Documents to be applied.\n */ function cu(t, e, n) {\n let s = gs(), i = gs();\n return n.forEach((t => s = s.add(t))), e.getEntries(t, s).next((t => {\n let s = cs();\n return n.forEach(((n, r) => {\n const o = t.get(n);\n // Check to see if there is an existence state change for this document.\n r.isFoundDocument() !== o.isFoundDocument() && (i = i.add(n)), \n // Note: The order of the steps below is important, since we want\n // to ensure that rejected limbo resolutions (which fabricate\n // NoDocuments with SnapshotVersion.min()) never add documents to\n // cache.\n r.isNoDocument() && r.version.isEqual(rt.min()) ? (\n // NoDocuments with SnapshotVersion.min() are used in manufactured\n // events. We remove these documents from cache since we lost\n // access.\n e.removeEntry(n, r.readTime), s = s.insert(n, r)) : !o.isValidDocument() || r.version.compareTo(o.version) > 0 || 0 === r.version.compareTo(o.version) && o.hasPendingWrites ? (e.addEntry(r), \n s = s.insert(n, r)) : N(\"LocalStore\", \"Ignoring outdated watch update for \", n, \". 
Current version:\", o.version, \" Watch version:\", r.version);\n })), {\n nr: s,\n sr: i\n };\n }));\n}\n\n/**\n * Gets the mutation batch after the passed in batchId in the mutation queue\n * or null if empty.\n * @param afterBatchId - If provided, the batch to search after.\n * @returns The next mutation or null if there wasn't one.\n */\nfunction au(t, e) {\n const n = L(t);\n return n.persistence.runTransaction(\"Get next mutation batch\", \"readonly\", (t => (void 0 === e && (e = -1), \n n.mutationQueue.getNextMutationBatchAfterBatchId(t, e))));\n}\n\n/**\n * Reads the current value of a Document with a given key or null if not\n * found - used for testing.\n */\n/**\n * Assigns the given target an internal ID so that its results can be pinned so\n * they don't get GC'd. A target must be allocated in the local store before\n * the store can be used to manage its view.\n *\n * Allocating an already allocated `Target` will return the existing `TargetData`\n * for that `Target`.\n */\nfunction hu(t, e) {\n const n = L(t);\n return n.persistence.runTransaction(\"Allocate target\", \"readwrite\", (t => {\n let s;\n return n.Bs.getTargetData(t, e).next((i => i ? (\n // This target has been listened to previously, so reuse the\n // previous targetID.\n // TODO(mcg): freshen last accessed date?\n s = i, Rt.resolve(s)) : n.Bs.allocateTargetId(t).next((i => (s = new cr(e, i, \"TargetPurposeListen\" /* TargetPurpose.Listen */ , t.currentSequenceNumber), \n n.Bs.addTargetData(t, s).next((() => s)))))));\n })).then((t => {\n // If Multi-Tab is enabled, the existing target data may be newer than\n // the in-memory data\n const s = n.Ji.get(t.targetId);\n return (null === s || t.snapshotVersion.compareTo(s.snapshotVersion) > 0) && (n.Ji = n.Ji.insert(t.targetId, t), \n n.Yi.set(e, t.targetId)), t;\n }));\n}\n\n/**\n * Returns the TargetData as seen by the LocalStore, including updates that may\n * have not yet been persisted to the TargetCache.\n */\n// Visible for testing.\n/**\n * Unpins all the documents associated with the given target. If\n * `keepPersistedTargetData` is set to false and Eager GC enabled, the method\n * directly removes the associated target data from the target cache.\n *\n * Releasing a non-existing `Target` is a no-op.\n */\n// PORTING NOTE: `keepPersistedTargetData` is multi-tab only.\nasync function lu(t, e, n) {\n const s = L(t), i = s.Ji.get(e), r = n ? \"readwrite\" : \"readwrite-primary\";\n try {\n n || await s.persistence.runTransaction(\"Release target\", r, (t => s.persistence.referenceDelegate.removeTarget(t, i)));\n } catch (t) {\n if (!Dt(t)) throw t;\n // All `releaseTarget` does is record the final metadata state for the\n // target, but we've been recording this periodically during target\n // activity. 
If we lose this write this could cause a very slight\n // difference in the order of target deletion during GC, but we\n // don't define exact LRU semantics so this is acceptable.\n N(\"LocalStore\", `Failed to update sequence numbers for target ${e}: ${t}`);\n }\n s.Ji = s.Ji.remove(e), s.Yi.delete(i.target);\n}\n\n/**\n * Runs the specified query against the local store and returns the results,\n * potentially taking advantage of query data from previous executions (such\n * as the set of remote keys).\n *\n * @param usePreviousResults - Whether results from previous executions can\n * be used to optimize this query execution.\n */ function fu(t, e, n) {\n const s = L(t);\n let i = rt.min(), r = gs();\n return s.persistence.runTransaction(\"Execute query\", \"readonly\", (t => function(t, e, n) {\n const s = L(t), i = s.Yi.get(n);\n return void 0 !== i ? Rt.resolve(s.Ji.get(i)) : s.Bs.getTargetData(e, n);\n }(s, t, Jn(e)).next((e => {\n if (e) return i = e.lastLimboFreeSnapshotVersion, s.Bs.getMatchingKeysForTargetId(t, e.targetId).next((t => {\n r = t;\n }));\n })).next((() => s.Hi.getDocumentsMatchingQuery(t, e, n ? i : rt.min(), n ? r : gs()))).next((t => (_u(s, ss(e), t), \n {\n documents: t,\n ir: r\n })))));\n}\n\n// PORTING NOTE: Multi-Tab only.\nfunction du(t, e) {\n const n = L(t), s = L(n.Bs), i = n.Ji.get(e);\n return i ? Promise.resolve(i.target) : n.persistence.runTransaction(\"Get target data\", \"readonly\", (t => s.le(t, e).next((t => t ? t.target : null))));\n}\n\n/**\n * Returns the set of documents that have been updated since the last call.\n * If this is the first call, returns the set of changes since client\n * initialization. Further invocations will return documents that have changed\n * since the prior call.\n */\n// PORTING NOTE: Multi-Tab only.\nfunction wu(t, e) {\n const n = L(t), s = n.Xi.get(e) || rt.min();\n // Get the current maximum read time for the collection. This should always\n // exist, but to reduce the chance for regressions we default to\n // SnapshotVersion.Min()\n // TODO(indexing): Consider removing the default value.\n return n.persistence.runTransaction(\"Get new document changes\", \"readonly\", (t => n.Zi.getAllFromCollectionGroup(t, e, yt(s, -1), \n /* limit= */ Number.MAX_SAFE_INTEGER))).then((t => (_u(n, e, t), t)));\n}\n\n/** Sets the collection group's maximum read time from the given documents. 
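In effect:\n * `maxReadTime[collectionGroup] = max(maxReadTime[collectionGroup], doc.readTime)`\n * over the documents passed in (pseudocode, not identifiers from this file). 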
*/\n// PORTING NOTE: Multi-Tab only.\nfunction _u(t, e, n) {\n let s = t.Xi.get(e) || rt.min();\n n.forEach(((t, e) => {\n e.readTime.compareTo(s) > 0 && (s = e.readTime);\n })), t.Xi.set(e, s);\n}\n\n/**\n * Creates a new target using the given bundle name, which will be used to\n * hold the keys of all documents from the bundle in query-document mappings.\n * This ensures that the loaded documents do not get garbage collected\n * right away.\n */\n/**\n * Applies the documents from a bundle to the \"ground-state\" (remote)\n * documents.\n *\n * LocalDocuments are re-calculated if there are remaining mutations in the\n * queue.\n */\nasync function mu(t, e, n, s) {\n const i = L(t);\n let r = gs(), o = cs();\n for (const t of n) {\n const n = e.rr(t.metadata.name);\n t.document && (r = r.add(n));\n const s = e.ur(t);\n s.setReadTime(e.cr(t.metadata.readTime)), o = o.insert(n, s);\n }\n const u = i.Zi.newChangeBuffer({\n trackRemovals: !0\n }), c = await hu(i, function(t) {\n // It is OK that the path used for the query is not valid, because this will\n // not be read and queried.\n return Jn(Gn(ut.fromString(`__bundle__/docs/${t}`)));\n }(s));\n // Allocates a target to hold all document keys from the bundle, such that\n // they will not get garbage collected right away.\n return i.persistence.runTransaction(\"Apply bundle documents\", \"readwrite\", (t => cu(t, u, o).next((e => (u.apply(t), \n e))).next((e => i.Bs.removeMatchingKeysForTargetId(t, c.targetId).next((() => i.Bs.addMatchingKeys(t, r, c.targetId))).next((() => i.localDocuments.getLocalViewOfDocuments(t, e.nr, e.sr))).next((() => e.nr))))));\n}\n\n/**\n * Returns a promise of a boolean to indicate if the given bundle has already\n * been loaded and the create time is newer than the current loading bundle.\n */\n/**\n * Saves the given `NamedQuery` to local persistence.\n */\nasync function gu(t, e, n = gs()) {\n // Allocate a target for the named query such that it can be resumed\n // from associated read time if users use it to listen.\n // NOTE: this also means if no corresponding target exists, the new target\n // will remain active and will not get collected, unless users happen to\n // unlisten the query somehow.\n const s = await hu(t, Jn(yr(e.bundledQuery))), i = L(t);\n return i.persistence.runTransaction(\"Save named query\", \"readwrite\", (t => {\n const r = Ni(e.readTime);\n // Simply save the query itself if it is older than what the SDK already\n // has.\n if (s.snapshotVersion.compareTo(r) >= 0) return i.qs.saveNamedQuery(t, e);\n // Update existing target data because the query from the bundle is newer.\n const o = s.withResumeToken(Ve.EMPTY_BYTE_STRING, r);\n return i.Ji = i.Ji.insert(o.targetId, o), i.Bs.updateTargetData(t, o).next((() => i.Bs.removeMatchingKeysForTargetId(t, s.targetId))).next((() => i.Bs.addMatchingKeys(t, n, s.targetId))).next((() => i.qs.saveNamedQuery(t, e)));\n }));\n}\n\n/**\n * @license\n * Copyright 2018 Google LLC\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http://www.apache.org/licenses/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n */\n/** Assembles the key for a client state in WebStorage */\nfunction yu(t, e) {\n return `firestore_clients_${t}_${e}`;\n}\n\n// The format of the WebStorage key that stores the mutation state is:\n// firestore_mutations_<persistence_prefix>_<batch_id>\n// (for unauthenticated users)\n// or: firestore_mutations_<persistence_prefix>_<batch_id>_<user_uid>\n\n// 'user_uid' is last to avoid needing to escape '_' characters that it might\n// contain.\n/** Assembles the key for a mutation batch in WebStorage */\nfunction pu(t, e, n) {\n let s = `firestore_mutations_${t}_${n}`;\n return e.isAuthenticated() && (s += `_${e.uid}`), s;\n}\n
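// For illustration only, with a hypothetical persistence prefix \"[DEFAULT]\",\n// batch ID 17 and user ID \"alice\" (none of these values come from this file):\n//   firestore_clients_[DEFAULT]_<client_id>\n//   firestore_mutations_[DEFAULT]_17_alice\n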
// The format of the WebStorage key that stores a query target's metadata is:\n// firestore_targets_<persistence_prefix>_<target_id>\n/** Assembles the key for a query state in WebStorage */\nfunction Iu(t, e) {\n return `firestore_targets_${t}_${e}`;\n}\n\n// The WebStorage prefix that stores the primary tab's online state. The\n// format of the key is:\n// firestore_online_state_<persistence_prefix>\n/**\n * Holds the state of a mutation batch, including its user ID, batch ID and\n * whether the batch is 'pending', 'acknowledged' or 'rejected'.\n */\n// Visible for testing\nclass Tu {\n constructor(t, e, n, s) {\n this.user = t, this.batchId = e, this.state = n, this.error = s;\n }\n /**\n * Parses a MutationMetadata from its JSON representation in WebStorage.\n * Logs a warning and returns null if the format of the data is not valid.\n */ static ar(t, e, n) {\n const s = JSON.parse(n);\n let i, r = \"object\" == typeof s && -1 !== [ \"pending\", \"acknowledged\", \"rejected\" ].indexOf(s.state) && (void 0 === s.error || \"object\" == typeof s.error);\n return r && s.error && (r = \"string\" == typeof s.error.message && \"string\" == typeof s.error.code, \n r && (i = new U(s.error.code, s.error.message))), r ? new Tu(t, e, s.state, i) : (k(\"SharedClientState\", `Failed to parse mutation state for ID '${e}': ${n}`), \n null);\n }\n hr() {\n const t = {\n state: this.state,\n updateTimeMs: Date.now()\n };\n return this.error && (t.error = {\n code: this.error.code,\n message: this.error.message\n }), JSON.stringify(t);\n }\n}\n\n/**\n * Holds the state of a query target, including its target ID and whether the\n * target is 'not-current', 'current' or 'rejected'.\n */\n// Visible for testing\nclass Eu {\n constructor(t, e, n) {\n this.targetId = t, this.state = e, this.error = n;\n }\n /**\n * Parses a QueryTargetMetadata from its JSON representation in WebStorage.\n * Logs a warning and returns null if the format of the data is not valid.\n */ static ar(t, e) {\n const n = JSON.parse(e);\n let s, i = \"object\" == typeof n && -1 !== [ \"not-current\", \"current\", \"rejected\" ].indexOf(n.state) && (void 0 === n.error || \"object\" == typeof n.error);\n return i && n.error && (i = \"string\" == typeof n.error.message && \"string\" == typeof n.error.code, \n i && (s = new U(n.error.code, n.error.message))), i ? new Eu(t, n.state, s) : (k(\"SharedClientState\", `Failed to parse target state for ID '${t}': ${e}`), \n null);\n }\n hr() {\n const t = {\n state: this.state,\n updateTimeMs: Date.now()\n };\n return this.error && (t.error = {\n code: this.error.code,\n message: this.error.message\n }), JSON.stringify(t);\n }\n}\n\n/**\n * This class represents the immutable ClientState for a client read from\n * WebStorage, containing the list of active query targets.\n */ class Au {\n constructor(t, e) {\n this.clientId = t, this.activeTargetIds = e;\n }\n /**\n * Parses a RemoteClientState from the JSON representation in WebStorage.\n * Logs a warning and returns null if the format of the data is not valid.\n */ static ar(t, e) {\n const n = JSON.parse(e);\n let s = \"object\" == typeof n && n.activeTargetIds instanceof Array, i = ps();\n for (let t = 0; s && t < n.activeTargetIds.length; ++t) s = Lt(n.activeTargetIds[t]), \n i = i.add(n.activeTargetIds[t]);\n return s ? new Au(t, i) : (k(\"SharedClientState\", `Failed to parse client data for instance '${t}': ${e}`), \n null);\n }\n}\n\n/**\n * This class represents the online state for all clients participating in\n * multi-tab. 
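Its serialized form, per the parser below, is\n * `{ clientId: string, onlineState: 'Unknown' | 'Online' | 'Offline' }`. 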
The online state is only written to by the primary client, and\n * used in secondary clients to update their query views.\n */ class vu {\n constructor(t, e) {\n this.clientId = t, this.onlineState = e;\n }\n /**\n * Parses a SharedOnlineState from its JSON representation in WebStorage.\n * Logs a warning and returns null if the format of the data is not valid.\n */ static ar(t) {\n const e = JSON.parse(t);\n return \"object\" == typeof e && -1 !== [ \"Unknown\", \"Online\", \"Offline\" ].indexOf(e.onlineState) && \"string\" == typeof e.clientId ? new vu(e.clientId, e.onlineState) : (k(\"SharedClientState\", `Failed to parse online state: ${t}`), \n null);\n }\n}\n\n/**\n * Metadata state of the local client. Unlike `RemoteClientState`, this class is\n * mutable and keeps track of all pending mutations, which allows us to\n * update the range of pending mutation batch IDs as new mutations are added or\n * removed.\n *\n * The data in `LocalClientState` is not read from WebStorage and instead\n * updated via its instance methods. The updated state can be serialized via\n * `toWebStorageJSON()`.\n */\n// Visible for testing.\nclass Ru {\n constructor() {\n this.activeTargetIds = ps();\n }\n lr(t) {\n this.activeTargetIds = this.activeTargetIds.add(t);\n }\n dr(t) {\n this.activeTargetIds = this.activeTargetIds.delete(t);\n }\n /**\n * Converts this entry into a JSON-encoded format we can use for WebStorage.\n * Does not encode `clientId` as it is part of the key in WebStorage.\n */ hr() {\n const t = {\n activeTargetIds: this.activeTargetIds.toArray(),\n updateTimeMs: Date.now()\n };\n return JSON.stringify(t);\n }\n}\n\n/**\n * `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the\n * backing store for the SharedClientState. It keeps track of all active\n * clients and supports modifications of the local client's data.\n */ class Pu {\n constructor(t, e, n, s, i) {\n this.window = t, this.ii = e, this.persistenceKey = n, this.wr = s, this.syncEngine = null, \n this.onlineStateHandler = null, this.sequenceNumberHandler = null, this._r = this.mr.bind(this), \n this.gr = new pe(et), this.started = !1, \n /**\n * Captures WebStorage events that occur before `start()` is called. These\n * events are replayed once `WebStorageSharedClientState` is started.\n */\n this.yr = [];\n // Escape the special characters mentioned here:\n // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions\n const r = n.replace(/[.*+?^${}()|[\\]\\\\]/g, \"\\\\$&\");\n this.storage = this.window.localStorage, this.currentUser = i, this.pr = yu(this.persistenceKey, this.wr), \n this.Ir = \n /** Assembles the key for the current sequence number. 
*/\n function(t) {\n return `firestore_sequence_number_${t}`;\n }\n (this.persistenceKey), this.gr = this.gr.insert(this.wr, new Ru), this.Tr = new RegExp(`^firestore_clients_${r}_([^_]*)$`), \n this.Er = new RegExp(`^firestore_mutations_${r}_(\\\\d+)(?:_(.*))?$`), this.Ar = new RegExp(`^firestore_targets_${r}_(\\\\d+)$`), \n this.vr = \n /** Assembles the key for the online state of the primary tab. */\n function(t) {\n return `firestore_online_state_${t}`;\n }\n // The WebStorage prefix that serves as an event to indicate that the remote documents\n // might have changed due to some secondary tabs loading a bundle.\n // The format of the key is:\n // firestore_bundle_loaded_v2_<persistence_prefix>\n // The version ending with \"v2\" stores the list of modified collection groups.\n (this.persistenceKey), this.Rr = function(t) {\n return `firestore_bundle_loaded_v2_${t}`;\n }\n // The WebStorage key prefix for the key that stores the last sequence number allocated. The key\n // looks like 'firestore_sequence_number_