Skip to content
Merged
Show file tree
Hide file tree
Changes from 23 commits
Commits
Show all changes
48 commits
Select commit Hold shift + click to select a range
c361764
indexeddb: improve allDocs() perf with skip & key ranges
Feb 15, 2023
e0dfb7a
Rename index
Feb 17, 2023
6c9f86d
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Aug 30, 2023
dbe2405
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Sep 13, 2023
44a0a15
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Oct 7, 2023
6eb3972
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Oct 11, 2023
9bc7927
fix eol-last
Oct 11, 2023
42cc853
add migration
Oct 11, 2023
dd6ed4c
Use isLocalId()
Oct 16, 2023
796e1e4
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Oct 18, 2023
f470aa7
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Dec 11, 2023
8d72af6
rename META_STORE to META_LOCAL_STORE
Dec 11, 2023
7f3e980
get(): use isLocalId()
Dec 11, 2023
8b95c7f
Implement getLocal()
Dec 11, 2023
7d468da
Revert "Implement getLocal()"
Dec 11, 2023
e2ea6ee
Implement getLocal()
Dec 11, 2023
1cde95c
Merge branch 'master' into indexeddb-faster-alldocs
alxndrsn Dec 30, 2023
2187822
mocha v10: fix failure handling
Dec 31, 2023
d50645d
mocha v10: fix failure handling
Dec 31, 2023
85fbca8
indexeddb._getLocal(): process attachments
Dec 31, 2023
dec066a
Merge branch 'master' into indexeddb-faster-alldocs
Mar 24, 2024
02a837c
more comment
Mar 24, 2024
ad652f8
Merge remote-tracking branch 'upstream/master' into indexeddb-faster-…
Mar 24, 2024
1ec03cd
Merge remote-tracking branch 'upstream/master' into indexeddb-faster-…
Mar 25, 2024
9d518e8
Resolve REVIEW comment
Mar 25, 2024
81ed8d5
getRevisionTree(): only open DOC_STORE
Mar 25, 2024
06c2b26
fetchResults() batch
Mar 25, 2024
b9992fc
Add a big test
Mar 25, 2024
32ab14a
add some more to the test
Mar 25, 2024
9f3399c
lint
Mar 25, 2024
f86dd92
getLocal(): remove use strict & attachment support
Mar 27, 2024
f889598
lint
Mar 27, 2024
dd62ea2
Revert "getLocal(): remove use strict & attachment support"
Mar 27, 2024
213d8c4
Revert "lint"
Mar 27, 2024
aed6f98
remove use strict
Mar 28, 2024
56a17f4
use const
Mar 28, 2024
18f706f
Merge remote-tracking branch 'upstream/master' into indexeddb-faster-…
Apr 2, 2024
f9a5ba3
delete seq index; don't create other index
Apr 3, 2024
cb90ef3
don't delete seq - seems to be required somewhere
Apr 3, 2024
179cc49
delete seq; maintain _isDeleted_id
Apr 3, 2024
a8b0c63
Merge remote-tracking branch 'upstream/master' into indexeddb-faster-…
Apr 3, 2024
4fd2f51
Merge remote-tracking branch 'upstream/master' into indexeddb-faster-…
Apr 4, 2024
8c0b977
allDocs: don't recurse
Apr 5, 2024
063c725
rename deleted,id db index
Apr 5, 2024
0b06fbc
Merge remote-tracking branch 'upstream/master' into indexeddb-faster-…
Apr 8, 2024
569557d
new test: make more robust in CI
Apr 8, 2024
1412422
Update/add tests for local doc attachments
Apr 9, 2024
d8d9f42
update github issue link
Apr 9, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
214 changes: 117 additions & 97 deletions packages/node_modules/pouchdb-adapter-indexeddb/src/allDocs.js
Original file line number Diff line number Diff line change
Expand Up @@ -28,31 +28,16 @@ function allDocsKeys(keys, docStore, allDocsInner) {

function createKeyRange(start, end, inclusiveEnd, key, descending) {
try {
if (start && end) {
if (descending) {
return IDBKeyRange.bound(end, start, !inclusiveEnd, false);
} else {
return IDBKeyRange.bound(start, end, false, !inclusiveEnd);
}
} else if (start) {
if (descending) {
return IDBKeyRange.upperBound(start);
} else {
return IDBKeyRange.lowerBound(start);
}
} else if (end) {
if (descending) {
return IDBKeyRange.lowerBound(end, !inclusiveEnd);
} else {
return IDBKeyRange.upperBound(end, !inclusiveEnd);
}
} else if (key) {
return IDBKeyRange.only(key);
if (key) {
return IDBKeyRange.only([0, key]);
} else if (descending) {
return IDBKeyRange.bound(end, start, !inclusiveEnd, false);
} else {
return IDBKeyRange.bound(start, end, false, !inclusiveEnd);
}
} catch (e) {
return {error: e};
}
return null;
}

function handleKeyRangeError(opts, metadata, err, callback) {
Expand Down Expand Up @@ -95,31 +80,46 @@ export default function (txn, metadata, opts, callback) {
var results = [];
var processing = [];

var start = 'startkey' in opts ? opts.startkey : false;
var end = 'endkey' in opts ? opts.endkey : false;
var key = 'key' in opts ? opts.key : false;
var keys = 'keys' in opts ? opts.keys : false;
var skip = opts.skip || 0;
var limit = typeof opts.limit === 'number' ? opts.limit : -1;
var limit = typeof opts.limit === 'number' ? opts.limit : undefined;
var inclusiveEnd = opts.inclusive_end !== false;
var descending = 'descending' in opts && opts.descending ? 'prev' : null;

var keyRange;
if (!keys) {
keyRange = createKeyRange(start, end, inclusiveEnd, key, descending);
if (keyRange && keyRange.error) {
return handleKeyRangeError(opts, metadata, keyRange.error, callback);
}
}
var start = 'startkey' in opts ? opts.startkey : (descending ? '\uffff' : '');
var end = 'endkey' in opts ? opts.endkey : (descending ? '' : '\uffff');

var docStore = txn.txn.objectStore(DOC_STORE);

txn.txn.oncomplete = onTxnComplete;

if (keys) {
return allDocsKeys(opts.keys, docStore, allDocsInner);
txn.txn.oncomplete = onTxnComplete;
const allDocsInner = doc => {
if (doc.error) {
return results.push(doc);
}

const row = { id:doc.id, key:doc.id, value:{ rev:doc.rev } };

if (doc.deleted) {
row.value.deleted = true;
row.doc = null;
} else if (opts.include_docs) {
include_doc(row, doc);
}

results.push(row);
};
return allDocsKeys(keys, docStore, allDocsInner);
}

let keyRange = createKeyRange([0, start], [0, end], inclusiveEnd, key, descending);
if (keyRange.error) {
return handleKeyRangeError(opts, metadata, keyRange.error, callback);
}

// txn.oncomplete must be set AFTER key-range-error is generated
txn.txn.oncomplete = onTxnComplete;

function include_doc(row, doc) {
var docData = doc.revs[doc.rev].data;

Expand All @@ -140,79 +140,99 @@ export default function (txn, metadata, opts, callback) {
}
}

function allDocsInner(doc) {
if (doc.error && keys) {
// key was not found with "keys" requests
results.push(doc);
return true;
}

var row = {
id: doc.id,
key: doc.id,
value: {
rev: doc.rev
}
function onTxnComplete() {
const returnVal = {
total_rows: metadata.doc_count,
offset: 0,
rows: results
};
/* istanbul ignore if */
if (opts.update_seq) {
returnVal.update_seq = metadata.seq;
}

var deleted = doc.deleted;
if (deleted) {
if (keys) {
results.push(row);
row.value.deleted = true;
row.doc = null;
}
} else if (skip-- <= 0) {
results.push(row);
if (opts.include_docs) {
include_doc(row, doc);
}
if (--limit === 0) {
return false;
}
if (processing.length) {
Promise.all(processing).then(function () {
callback(null, returnVal);
});
} else {
callback(null, returnVal);
}
return true;
}

function onTxnComplete() {
Promise.all(processing).then(function () {
var returnVal = {
total_rows: metadata.doc_count,
offset: 0,
rows: results
};
const dbIndex = docStore.index('_isDeleted_id');

/* istanbul ignore if */
if (opts.update_seq) {
returnVal.update_seq = metadata.seq;
if (skip) {
dbIndex.openKeyCursor(keyRange, descending || 'next').onsuccess = (e) => {
const cursor = e.target.result;
if (!cursor) {
return txn.txn.commit();
}
callback(null, returnVal);
});
}

var cursor = descending ?
docStore.openCursor(keyRange, descending) :
docStore.openCursor(keyRange);

cursor.onsuccess = function (e) {
if (skip) {
cursor.advance(skip);
skip = 0;
return;
}

var doc = e.target.result && e.target.result.value;
const lastSkipped = cursor.key;
if (lastSkipped === undefined) {
// no results
return txn.txn.commit();
}

// Happens if opts does not have limit,
// because cursor will end normally then,
// when all docs are retrieved.
// Would not be needed, if getAll() optimization was used like in #6059
if (!doc) { return; }
// At this point, existing keyRange bounds are already compound keys.
if (descending) {
keyRange = createKeyRange(lastSkipped, keyRange.lower, inclusiveEnd, key, descending);
} else {
keyRange = createKeyRange(lastSkipped, keyRange.upper, inclusiveEnd, key, descending);
}
if (keyRange.error) {
txn.txn.abort();
return handleKeyRangeError(opts, metadata, keyRange.error, callback);
}

// Skip local docs
if (doc.id.startsWith('_local/')) {
return e.target.result.continue();
}
fetchResults();
};
} else {
fetchResults();
}

var continueCursor = allDocsInner(doc);
if (continueCursor) {
e.target.result.continue();
function fetchResults() {
// REVIEW: there is a risk here with getting all results into memory - if they have multiple
// revs, then we risk loading loads of extra data which is then discarded. This could be
// reduced with batching.
// REVIEW: this also loads a lot of unused data when include_docs is false. The fastest and
// most pragmatic approach here may be batching...
if (descending && limit) {
// getAll() does not support descending, so this can either be implemented with a cursor, or
// by calling getAll(), iterating from the top, and discarding. It is currently using the
// latter approach, but may need thought and optimisation
dbIndex.getAll(keyRange).onsuccess = (e) => {
const values = e.target.result;
for (let i=values.length-1; i>=0 && limit--; --i) {
const doc = values[i];
const row = { id:doc.id, key:doc.id, value:{ rev:doc.rev } };
if (opts.include_docs) {
include_doc(row, doc);
}
results[values.length - i - 1] = row;
}
return txn.txn.commit();
};
} else {
dbIndex.getAll(keyRange, limit).onsuccess = (e) => {
const values = e.target.result;
for (let i=0; i<values.length; ++i) {
const doc = values[descending ? values.length-i-1 : i];
const row = { id:doc.id, key:doc.id, value:{ rev:doc.rev } };
if (opts.include_docs) {
include_doc(row, doc);
}
results[i] = row;
}
return txn.txn.commit();
};
}
};

}
}
24 changes: 14 additions & 10 deletions packages/node_modules/pouchdb-adapter-indexeddb/src/bulkDocs.js
Original file line number Diff line number Diff line change
Expand Up @@ -15,11 +15,11 @@ import {
blobOrBufferToBase64 as blufferToBase64,
} from 'pouchdb-binary-utils';

import { parseDoc } from 'pouchdb-adapter-utils';
import { isLocalId, parseDoc } from 'pouchdb-adapter-utils';
import { binaryMd5 as md5 } from 'pouchdb-md5';
import { winningRev as calculateWinningRev, merge, compactTree } from 'pouchdb-merge';

import { DOC_STORE, META_STORE, idbError } from './util';
import { DOC_STORE, META_LOCAL_STORE, idbError } from './util';

import { rewrite } from './rewrite';

Expand Down Expand Up @@ -62,7 +62,8 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)
}

docs.forEach(function (doc) {
txn.objectStore(DOC_STORE).get(doc.id).onsuccess = readDone;
const docStore = isLocalId(doc.id) ? META_LOCAL_STORE : DOC_STORE;
txn.objectStore(docStore).get(doc.id).onsuccess = readDone;
});
}

Expand Down Expand Up @@ -187,7 +188,7 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)
var winningRev = calculateWinningRev(doc);
// rev of new doc for attachments and to return it
var writtenRev = doc.rev;
var isLocal = doc.id.startsWith('_local/');
const isLocal = isLocalId(doc.id);

var theDoc = doc.revs[winningRev].data;

Expand All @@ -212,8 +213,6 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)
doc.deleted = doc.revs[winningRev].deleted ? 1 : 0;

// Bump the seq for every new (non local) revision written
// TODO: index expects a unique seq, not sure if ignoring local will
// work
if (!isLocal) {
doc.seq = ++metadata.seq;

Expand Down Expand Up @@ -285,7 +284,7 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)

// Local documents have different revision handling
if (isLocal && doc.deleted) {
txn.objectStore(DOC_STORE).delete(doc.id).onsuccess = function () {
txn.objectStore(META_LOCAL_STORE).delete(doc.id).onsuccess = function () {
results[i] = {
ok: true,
id: doc.id,
Expand All @@ -296,7 +295,8 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)
return;
}

txn.objectStore(DOC_STORE).put(doc).onsuccess = function () {
const docStore = isLocal ? META_LOCAL_STORE : DOC_STORE;
txn.objectStore(docStore).put(doc).onsuccess = function () {
results[i] = {
ok: true,
id: doc.id,
Expand All @@ -308,7 +308,7 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)

function updateSeq(i) {
if (i === lastWriteIndex) {
txn.objectStore(META_STORE).put(metadata);
txn.objectStore(META_LOCAL_STORE).put(metadata);
}
}

Expand Down Expand Up @@ -396,7 +396,11 @@ export default function (api, req, opts, metadata, dbOpts, idbChanges, callback)
}

preProcessAttachments().then(function () {
api._openTransactionSafely([DOC_STORE, META_STORE], 'readwrite', function (err, _txn) {
// REVIEW: We _could_ check doc ids here, and only open the META_LOCAL_STORE if all docs are local.
// This will marginally slow things down for non-local docs, as they always cause updates to the
// META_LOCAL_STORE. It seems pragmatic to keep the code simple and optimise for calls to bulkDocs()
// which include non-local docs.
api._openTransactionSafely([DOC_STORE, META_LOCAL_STORE], 'readwrite', function (err, _txn) {
if (err) {
return callback(err);
}
Expand Down
39 changes: 39 additions & 0 deletions packages/node_modules/pouchdb-adapter-indexeddb/src/getLocal.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
'use strict';

import { createError, MISSING_DOC } from 'pouchdb-errors';

import { META_LOCAL_STORE, processAttachment } from './util';

// _getLocal() doesn't know if opts.binary is set or not, so assume it's not.
const BINARY_ATTACHMENTS = false;

// Fetch a local ("_local/...") document by id from META_LOCAL_STORE.
//
// txn      - transaction wrapper; if txn.error is set the lookup is skipped,
//            otherwise txn.txn is the underlying IDBTransaction.
// id       - id of the local document to read.
// api      - adapter instance; api.blobSupport is forwarded to
//            processAttachment().
// callback - node-style callback(err, doc).
export default function (txn, id, api, callback) {
  if (txn.error) {
    return callback(txn.error);
  }

  const req = txn.txn.objectStore(META_LOCAL_STORE).get(id);
  req.onsuccess = function (e) {
    const stored = e.target.result;

    if (!stored) {
      return callback(createError(MISSING_DOC, 'missing'));
    }

    // Unwrap the winning-rev data and stamp the doc identity onto it.
    const result = stored.revs[stored.rev].data;
    result._id = stored.id;
    result._rev = stored.rev;

    const attachments = result._attachments;
    if (!attachments) {
      return callback(null, result);
    }

    // Resolve every attachment before handing the doc back.
    const pending = Object.keys(attachments).map((name) =>
      processAttachment(name, stored, result, BINARY_ATTACHMENTS, api.blobSupport)
    );
    Promise.all(pending)
      .then(() => callback(null, result))
      .catch(callback);
  };
}
Loading