/**
 * @license
 * Copyright 2022 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { DocumentMap } from '../model/collections';
import {
  IndexOffset,
  indexOffsetComparator,
  newIndexOffsetFromDocument
} from '../model/field_index';
import { debugAssert } from '../util/assert';
import { AsyncQueue, DelayedOperation, TimerId } from '../util/async_queue';
import { logDebug } from '../util/log';

import { INDEXING_ENABLED } from './indexeddb_schema';
import { ignoreIfPrimaryLeaseLoss, LocalStore } from './local_store';
import { LocalWriteResult } from './local_store_impl';
import { Persistence, Scheduler } from './persistence';
import { PersistencePromise } from './persistence_promise';
import { PersistenceTransaction } from './persistence_transaction';
import { isIndexedDbTransactionError } from './simple_db';

const LOG_TAG = 'IndexBackfiller';

/** How long we wait to try running index backfill after SDK initialization. */
const INITIAL_BACKFILL_DELAY_MS = 15 * 1000;

/** Minimum amount of time between backfill checks, after the first one. */
const REGULAR_BACKFILL_DELAY_MS = 60 * 1000;

/** The maximum number of documents to process each time backfill() is called. */
const MAX_DOCUMENTS_TO_PROCESS = 50;

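// Taken together these constants mean: the first backfill pass runs roughly
// fifteen seconds after startup, later passes are scheduled at most once per
// minute, and each pass indexes at most 50 documents before rescheduling.
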
/** This class is responsible for scheduling the index backfiller. */
export class IndexBackfillerScheduler implements Scheduler {
  private task: DelayedOperation<void> | null;

  constructor(
    private readonly asyncQueue: AsyncQueue,
    private readonly backfiller: IndexBackfiller
  ) {
    this.task = null;
  }

  start(): void {
    debugAssert(
      this.task === null,
      'Cannot start an already started IndexBackfillerScheduler'
    );
    if (INDEXING_ENABLED) {
      this.schedule(INITIAL_BACKFILL_DELAY_MS);
    }
  }

  stop(): void {
    if (this.task) {
      this.task.cancel();
      this.task = null;
    }
  }

  get started(): boolean {
    return this.task !== null;
  }

  private schedule(delay: number): void {
    debugAssert(
      this.task === null,
      'Cannot schedule IndexBackfiller while a task is pending'
    );
    logDebug(LOG_TAG, `Scheduled in ${delay}ms`);
    this.task = this.asyncQueue.enqueueAfterDelay(
      TimerId.IndexBackfill,
      delay,
      async () => {
        this.task = null;
        try {
          const documentsProcessed = await this.backfiller.backfill();
          logDebug(LOG_TAG, `Documents written: ${documentsProcessed}`);
        } catch (e) {
          if (isIndexedDbTransactionError(e)) {
            logDebug(
              LOG_TAG,
              'Ignoring IndexedDB error during index backfill: ',
              e
            );
          } else {
            await ignoreIfPrimaryLeaseLoss(e);
          }
        }
        this.schedule(REGULAR_BACKFILL_DELAY_MS);
      }
    );
  }
}
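
/*
 * Illustrative wiring (a sketch, not part of this module; assumes `asyncQueue`,
 * `persistence`, and `localStore` were constructed elsewhere in the SDK):
 *
 *   const backfiller = new IndexBackfiller(localStore, persistence);
 *   const scheduler = new IndexBackfillerScheduler(asyncQueue, backfiller);
 *   scheduler.start(); // first pass fires after INITIAL_BACKFILL_DELAY_MS
 *   // ... later, e.g. during shutdown:
 *   scheduler.stop();  // cancels the pending pass, if any
 */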

/** Implements the steps for backfilling indexes. */
export class IndexBackfiller {
  constructor(
    /**
     * LocalStore provides access to IndexManager and LocalDocumentView.
     * These properties will update when the user changes. Consequently,
     * making a local copy of IndexManager and LocalDocumentView would require
     * keeping those copies in sync over time. The simpler solution is to rely
     * on LocalStore to always hold up-to-date references to IndexManager and
     * LocalDocumentView.
     */
    private readonly localStore: LocalStore,
    private readonly persistence: Persistence
  ) {}

  async backfill(
    maxDocumentsToProcess: number = MAX_DOCUMENTS_TO_PROCESS
  ): Promise<number> {
    return this.persistence.runTransaction(
      'Backfill Indexes',
      'readwrite-primary',
      txn => this.writeIndexEntries(txn, maxDocumentsToProcess)
    );
  }

  /** Writes index entries until the cap is reached. Returns the number of documents processed. */
  private writeIndexEntries(
    transaction: PersistenceTransaction,
    maxDocumentsToProcess: number
  ): PersistencePromise<number> {
    const processedCollectionGroups = new Set<string>();
    let documentsRemaining = maxDocumentsToProcess;
    let continueLoop = true;
    return PersistencePromise.doWhile(
      () => continueLoop && documentsRemaining > 0,
      () => {
        return this.localStore.indexManager
          .getNextCollectionGroupToUpdate(transaction)
          .next((collectionGroup: string | null) => {
            if (
              collectionGroup === null ||
              processedCollectionGroups.has(collectionGroup)
            ) {
              continueLoop = false;
            } else {
              logDebug(LOG_TAG, `Processing collection: ${collectionGroup}`);
              return this.writeEntriesForCollectionGroup(
                transaction,
                collectionGroup,
                documentsRemaining
              ).next(documentsProcessed => {
                documentsRemaining -= documentsProcessed;
                processedCollectionGroups.add(collectionGroup);
              });
            }
          });
      }
    ).next(() => maxDocumentsToProcess - documentsRemaining);
  }

  /**
   * Writes entries for the provided collection group. Returns the number of documents processed.
   */
  private writeEntriesForCollectionGroup(
    transaction: PersistenceTransaction,
    collectionGroup: string,
    documentsRemainingUnderCap: number
  ): PersistencePromise<number> {
    // Use the earliest offset of all field indexes to query the local cache.
    return this.localStore.indexManager
      .getMinOffsetFromCollectionGroup(transaction, collectionGroup)
      .next(existingOffset =>
        this.localStore.localDocuments
          .getNextDocuments(
            transaction,
            collectionGroup,
            existingOffset,
            documentsRemainingUnderCap
          )
          .next(nextBatch => {
            const docs: DocumentMap = nextBatch.changes;
            return this.localStore.indexManager
              .updateIndexEntries(transaction, docs)
              .next(() => this.getNewOffset(existingOffset, nextBatch))
              .next(newOffset => {
                logDebug(LOG_TAG, `Updating offset: ${newOffset}`);
                return this.localStore.indexManager.updateCollectionGroup(
                  transaction,
                  collectionGroup,
                  newOffset
                );
              })
              .next(() => docs.size);
          })
      );
  }

  /** Returns the next offset based on the provided documents. */
  private getNewOffset(
    existingOffset: IndexOffset,
    lookupResult: LocalWriteResult
  ): IndexOffset {
    let maxOffset: IndexOffset = existingOffset;
    lookupResult.changes.forEach((key, document) => {
      const newOffset: IndexOffset = newIndexOffsetFromDocument(document);
      if (indexOffsetComparator(newOffset, maxOffset) > 0) {
        maxOffset = newOffset;
      }
    });
    return new IndexOffset(
      maxOffset.readTime,
      maxOffset.documentKey,
      Math.max(lookupResult.batchId, existingOffset.largestBatchId)
    );
  }
}
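
/*
 * A one-off pass can also be driven directly, bypassing the scheduler (a
 * sketch; `backfiller` is assumed to be constructed as in the example above):
 *
 *   const processed = await backfiller.backfill(100); // cap this pass at 100 docs
 *   logDebug(LOG_TAG, `Backfilled ${processed} documents`);
 *
 * Each call runs in a single 'readwrite-primary' transaction, so it only
 * proceeds while this client holds the primary lease.
 */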