diff --git a/packages/firestore/src/core/sync_engine.ts b/packages/firestore/src/core/sync_engine.ts index e4447143178..3a33ce4e3be 100644 --- a/packages/firestore/src/core/sync_engine.ts +++ b/packages/firestore/src/core/sync_engine.ts @@ -36,7 +36,6 @@ import { assert, fail } from '../util/assert'; import { FirestoreError } from '../util/error'; import * as log from '../util/log'; import { AnyJs, primitiveComparator } from '../util/misc'; -import * as objUtils from '../util/obj'; import { ObjectMap } from '../util/obj_map'; import { Deferred } from '../util/promise'; import { SortedMap } from '../util/sorted_map'; @@ -194,13 +193,7 @@ export class SyncEngine implements RemoteSyncer, SharedClientStateSyncer { this.syncEngineListener === null, 'SyncEngine already has a subscriber.' ); -<<<<<<< HEAD this.syncEngineListener = syncEngineListener; - this.limboCollector.addGarbageSource(this.limboDocumentRefs); -======= - this.viewHandler = viewHandler; - this.errorHandler = errorHandler; ->>>>>>> master } /** @@ -501,16 +494,13 @@ export class SyncEngine implements RemoteSyncer, SharedClientStateSyncer { async rejectListen(targetId: TargetId, err: FirestoreError): Promise { this.assertSubscribed('rejectListens()'); -<<<<<<< HEAD // PORTING NOTE: Multi-tab only. this.sharedClientState.trackQueryUpdate(targetId, 'rejected', err); - const limboKey = this.limboKeysByTarget[targetId]; -======= const limboResolution = this.limboResolutionsByTarget[targetId]; const limboKey = limboResolution && limboResolution.key; ->>>>>>> master + if (limboKey) { // Since this query failed, we won't want to manually unlisten to it. // So go ahead and remove it from bookkeeping. 
diff --git a/packages/firestore/src/local/indexeddb_schema.ts b/packages/firestore/src/local/indexeddb_schema.ts index d754d43d4be..a1ef19bea44 100644 --- a/packages/firestore/src/local/indexeddb_schema.ts +++ b/packages/firestore/src/local/indexeddb_schema.ts @@ -29,25 +29,21 @@ import { SnapshotVersion } from '../core/snapshot_version'; * Schema Version for the Web client: * 1. Initial version including Mutation Queue, Query Cache, and Remote Document * Cache -<<<<<<< HEAD - * 2. Added targetCount to targetGlobal row. - * 3. Multi-Tab Support. -======= * 2. Used to ensure a targetGlobal object exists and add targetCount to it. No * longer required because migration 3 unconditionally clears it. * 3. Dropped and re-created Query Cache to deal with cache corruption related * to limbo resolution. Addresses * https://github.com/firebase/firebase-ios-sdk/issues/1548 ->>>>>>> master + * 4. Multi-Tab Support. */ -export const SCHEMA_VERSION = 3; +export const SCHEMA_VERSION = 4; /** * Performs database creation and schema upgrades. * * Note that in production, this method is only ever used to upgrade the schema - * to SCHEMA_VERSION. Different versions are only used for testing and - * local feature development. + * to SCHEMA_VERSION. Different values of toVersion are only used for testing + * and local feature development. */ export function createOrUpgradeDb( db: IDBDatabase, @@ -56,14 +52,9 @@ export function createOrUpgradeDb( toVersion: number ): PersistencePromise<void> { assert( -<<<<<<< HEAD - fromVersion < toVersion && fromVersion >= 0 && toVersion <= 3, -======= fromVersion < toVersion && fromVersion >= 0 && toVersion <= SCHEMA_VERSION, ->>>>>>> master `Unexpected schema upgrade from v${fromVersion} to v${toVersion}.` 
); - let p = PersistencePromise.resolve(); if (fromVersion < 1 && toVersion >= 1) { createOwnerStore(db); @@ -72,29 +63,6 @@ export function createOrUpgradeDb( createRemoteDocumentCache(db); } -<<<<<<< HEAD - if (fromVersion < 2 && toVersion >= 2) { - p = p - .next(() => ensureTargetGlobalExists(txn)) - .next(targetGlobal => saveTargetCount(txn, targetGlobal)); - } - - if (fromVersion > 0 && fromVersion < 3 && toVersion >= 3) { - // Schema version 3 uses auto-generated keys to generate globally unique - // mutation batch IDs (this was previously ensured internally by the - // client). To migrate to the new schema, we have to read all mutations - // and write them back out. We preserve the existing batch IDs to guarantee - // consistency with other object stores. Any further mutation batch IDs will - // be auto-generated. - p = p.next(() => upgradeMutationBatchSchemaAndMigrateData(db, txn)); - } - - if (fromVersion < 3 && toVersion >= 3) { - p = p.next(() => { - createClientMetadataStore(db); - createRemoteDocumentChangesStore(db); - }); -======= // Migration 2 to populate the targetGlobal object no longer needed since // migration 3 unconditionally clears it. @@ -107,7 +75,23 @@ export function createOrUpgradeDb( createQueryCache(db); } p = p.next(() => writeEmptyTargetGlobalEntry(txn)); ->>>>>>> master + } + + if (fromVersion < 4 && toVersion >= 4) { + if (fromVersion !== 0) { + // Schema version 3 uses auto-generated keys to generate globally unique + // mutation batch IDs (this was previously ensured internally by the + // client). To migrate to the new schema, we have to read all mutations + // and write them back out. We preserve the existing batch IDs to guarantee + // consistency with other object stores. Any further mutation batch IDs will + // be auto-generated. 
+ p = p.next(() => upgradeMutationBatchSchemaAndMigrateData(db, txn)); + } + + p = p.next(() => { + createClientMetadataStore(db); + createRemoteDocumentChangesStore(db); + }); } return p; @@ -711,12 +695,14 @@ export const V1_STORES = [ DbTargetDocument.store ]; +// V2 is no longer usable (see comment at top of file) + // Visible for testing -export const V2_STORES = V1_STORES; +export const V3_STORES = V1_STORES; // Visible for testing -export const V3_STORES = [ - ...V2_STORES, +export const V4_STORES = [ + ...V3_STORES, DbClientMetadata.store, DbRemoteDocumentChanges.store ]; @@ -726,4 +712,4 @@ export const V3_STORES = [ * used when creating transactions so that access across all stores is done * atomically. */ -export const ALL_STORES = V3_STORES; +export const ALL_STORES = V4_STORES; diff --git a/packages/firestore/test/unit/local/indexeddb_schema.test.ts b/packages/firestore/test/unit/local/indexeddb_schema.test.ts index eadc83ac35a..76e4686e7ad 100644 --- a/packages/firestore/test/unit/local/indexeddb_schema.test.ts +++ b/packages/firestore/test/unit/local/indexeddb_schema.test.ts @@ -22,16 +22,17 @@ import { createOrUpgradeDb, DbMutationBatch, DbMutationBatchKey, -<<<<<<< HEAD DbOwner, DbOwnerKey, DbTarget, DbTargetGlobal, DbTargetGlobalKey, + DbTargetKey, + DbTimestamp, SCHEMA_VERSION, V1_STORES, - V2_STORES, - V3_STORES + V3_STORES, + V4_STORES } from '../../../src/local/indexeddb_schema'; import { SimpleDb, SimpleDbTransaction } from '../../../src/local/simple_db'; import { PersistencePromise } from '../../../src/local/persistence_promise'; @@ -41,16 +42,7 @@ import { JsonProtoSerializer } from '../../../src/remote/serializer'; import { PlatformSupport } from '../../../src/platform/platform'; import { AsyncQueue } from '../../../src/util/async_queue'; import { SharedFakeWebStorage, TestPlatform } from '../../util/test_platform'; -======= - DbTarget, - DbTargetGlobal, - DbTargetGlobalKey, - DbTargetKey, - DbTimestamp -} from 
'../../../src/local/indexeddb_schema'; -import { SimpleDb, SimpleDbTransaction } from '../../../src/local/simple_db'; import { SnapshotVersion } from '../../../src/core/snapshot_version'; ->>>>>>> master const INDEXEDDB_TEST_DATABASE_PREFIX = 'schemaTest/'; const INDEXEDDB_TEST_DATABASE = @@ -144,31 +136,6 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { }); }); -<<<<<<< HEAD - it('can install schema version 2', () => { - return withDb(2, db => { - expect(db.version).to.equal(2); - // We should have all of the stores, we should have the target global row - // and we should not have any targets counted, because there are none. - expect(getAllObjectStores(db)).to.have.members(V2_STORES); - // Check the target count. We haven't added any targets, so we expect 0. - return getTargetCount(db).then(targetCount => { - expect(targetCount).to.equal(0); - }); - }); - }); - - it('can install schema version 3', () => { - return withDb(3, async db => { - expect(db.version).to.be.equal(3); - expect(getAllObjectStores(db)).to.have.members(V3_STORES); - }); - }); - - it('can upgrade from schema version 1 to 2', () => { - const expectedTargetCount = 5; - return withDb(1, db => { -======= it('drops the query cache from 2 to 3', () => { const userId = 'user'; const batchId = 1; @@ -189,7 +156,6 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { ); return withDb(2, db => { ->>>>>>> master const sdb = new SimpleDb(db); return sdb.runTransaction( 'readwrite', @@ -213,24 +179,11 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { .next(() => mutations.put(expectedMutation)) ); } -<<<<<<< HEAD - return p; - }); - }).then(() => - withDb(2, db => { - expect(db.version).to.equal(2); - expect(getAllObjectStores(db)).to.have.members(V2_STORES); - return getTargetCount(db).then(targetCount => { - expect(targetCount).to.equal(expectedTargetCount); - }); - }) - ); -======= ); }).then(() => { return withDb(3, db => { expect(db.version).to.equal(3); - 
expect(getAllObjectStores(db)).to.have.members(ALL_STORES); + expect(getAllObjectStores(db)).to.have.members(V3_STORES); const sdb = new SimpleDb(db); return sdb.runTransaction( @@ -259,7 +212,7 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { const expected = JSON.parse(JSON.stringify(resetTargetGlobal)); expect(targetGlobalEntry).to.deep.equal(expected); }) - .next(() => mutations.get([userId, batchId])) + .next(() => mutations.get(batchId)) .next(mutation => { // Mutations should be unaffected. expect(mutation.userId).to.equal(userId); @@ -269,10 +222,9 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { ); }); }); ->>>>>>> master }); - it('can upgrade from schema version 2 to 3', () => { + it('can upgrade from schema version 3 to 4', () => { const testWrite = { delete: 'foo' }; const testMutations = [ { @@ -295,7 +247,7 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { } ]; - return withDb(2, db => { + return withDb(3, db => { const sdb = new SimpleDb(db); return sdb.runTransaction('readwrite', [DbMutationBatch.store], txn => { const store = txn.store(DbMutationBatch.store); @@ -306,9 +258,9 @@ describe('IndexedDbSchema: createOrUpgradeDb', () => { return p; }); }).then(() => - withDb(3, db => { - expect(db.version).to.be.equal(3); - expect(getAllObjectStores(db)).to.have.members(V3_STORES); + withDb(4, db => { + expect(db.version).to.be.equal(4); + expect(getAllObjectStores(db)).to.have.members(V4_STORES); const sdb = new SimpleDb(db); return sdb.runTransaction('readwrite', [DbMutationBatch.store], txn => { diff --git a/packages/firestore/test/unit/specs/limbo_spec.test.ts b/packages/firestore/test/unit/specs/limbo_spec.test.ts index 9625b322b29..4c8ffe745e8 100644 --- a/packages/firestore/test/unit/specs/limbo_spec.test.ts +++ b/packages/firestore/test/unit/specs/limbo_spec.test.ts @@ -253,7 +253,74 @@ describeSpec('Limbo Documents:', [], () => { ); }); -<<<<<<< HEAD + // Same as above test, except docB no longer exists so we do not get 
a + // documentUpdate for it during limbo resolution, so a delete should be + // synthesized. + specTest( + 'Limbo resolution handles snapshot before CURRENT [no document update]', + [], + () => { + const fullQuery = Query.atPath(path('collection')); + const limitQuery = Query.atPath(path('collection')) + .addFilter(filter('include', '==', true)) + .withLimit(1); + const docA = doc('collection/a', 1000, { key: 'a', include: true }); + const docB = doc('collection/b', 1000, { key: 'b', include: true }); + const docBQuery = Query.atPath(docB.key.path); + return ( + spec() + // No GC so we can keep the cache populated. + .withGCEnabled(false) + + // Full query to populate the cache with docA and docB + .userListens(fullQuery) + .watchAcksFull(fullQuery, 1000, docA, docB) + .expectEvents(fullQuery, { + added: [docA, docB] + }) + .userUnlistens(fullQuery) + + // Perform limit(1) query. + .userListens(limitQuery) + .expectEvents(limitQuery, { + added: [docA], + fromCache: true + }) + .watchAcksFull(limitQuery, 2000, docA) + .expectEvents(limitQuery, { + fromCache: false + }) + + // Edit docA so it no longer matches the query and we pull in docB + // from cache. + .userPatches('collection/a', { include: false }) + .expectEvents(limitQuery, { + removed: [docA], + added: [docB], + fromCache: true + }) + // docB is in limbo since we haven't gotten the watch update to pull + // it in yet. + .expectLimboDocs(docB.key) + + // Suppose docB was actually deleted server-side and so we receive an + // ack, a snapshot, CURRENT, and then another snapshot. This should + // resolve the limbo resolution and docB should disappear. + .watchAcks(docBQuery) + .watchSnapshots(2000) + .watchCurrents(docBQuery, 'resume-token-3000') + .watchSnapshots(3000) + .expectLimboDocs() + .expectEvents(limitQuery, { removed: [docB], fromCache: false }) + + // Watch catches up to the local write to docA, and broadcasts its + // removal. 
+ .watchSends({ removed: [limitQuery] }, docA) + .watchSnapshots(4000) + ); + } + ); + specTest( 'Limbo docs are resolved by primary client', ['multi-client'], @@ -329,73 +396,4 @@ describeSpec('Limbo Documents:', [], () => { // TODO(multitab): We need a test case that verifies that a primary client // that loses its primary lease while documents are in limbo correctly handles // these documents even when it picks up its lease again. -======= - // Same as above test, except docB no longer exists so we do not get a - // documentUpdate for it during limbo resolution, so a delete should be - // synthesized. - specTest( - 'Limbo resolution handles snapshot before CURRENT [no document update]', - [], - () => { - const fullQuery = Query.atPath(path('collection')); - const limitQuery = Query.atPath(path('collection')) - .addFilter(filter('include', '==', true)) - .withLimit(1); - const docA = doc('collection/a', 1000, { key: 'a', include: true }); - const docB = doc('collection/b', 1000, { key: 'b', include: true }); - const docBQuery = Query.atPath(docB.key.path); - return ( - spec() - // No GC so we can keep the cache populated. - .withGCEnabled(false) - - // Full query to populate the cache with docA and docB - .userListens(fullQuery) - .watchAcksFull(fullQuery, 1000, docA, docB) - .expectEvents(fullQuery, { - added: [docA, docB] - }) - .userUnlistens(fullQuery) - - // Perform limit(1) query. - .userListens(limitQuery) - .expectEvents(limitQuery, { - added: [docA], - fromCache: true - }) - .watchAcksFull(limitQuery, 2000, docA) - .expectEvents(limitQuery, { - fromCache: false - }) - - // Edit docA so it no longer matches the query and we pull in docB - // from cache. - .userPatches('collection/a', { include: false }) - .expectEvents(limitQuery, { - removed: [docA], - added: [docB], - fromCache: true - }) - // docB is in limbo since we haven't gotten the watch update to pull - // it in yet. 
- .expectLimboDocs(docB.key) - - // Suppose docB was actually deleted server-side and so we receive an - // ack, a snapshot, CURRENT, and then another snapshot. This should - // resolve the limbo resolution and docB should disappear. - .watchAcks(docBQuery) - .watchSnapshots(2000) - .watchCurrents(docBQuery, 'resume-token-3000') - .watchSnapshots(3000) - .expectLimboDocs() - .expectEvents(limitQuery, { removed: [docB], fromCache: false }) - - // Watch catches up to the local write to docA, and broadcasts its - // removal. - .watchSends({ removed: [limitQuery] }, docA) - .watchSnapshots(4000) - ); - } - ); ->>>>>>> master });