'use strict';

/* jshint ignore:start */

function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; }

var jsExtend = require('js-extend');
var inherits = _interopDefault(require('inherits'));
var vuvuzela = _interopDefault(require('vuvuzela'));
var events = require('events');
var lie = _interopDefault(require('lie'));
var getArguments = _interopDefault(require('argsarray'));
var pouchdbCollections = require('pouchdb-collections');
var crypto = _interopDefault(require('crypto'));
var openDatabase = _interopDefault(require('../'));

function isBinaryObject(object) {
  return object instanceof Buffer;
}

function cloneBinaryObject(object) {
  var copy = new Buffer(object.length);
  object.copy(copy);
  return copy;
}

function clone(object) {
  var newObject;
  var i;
  var len;

  if (!object || typeof object !== 'object') {
    return object;
  }

  if (Array.isArray(object)) {
    newObject = [];
    for (i = 0, len = object.length; i < len; i++) {
      newObject[i] = clone(object[i]);
    }
    return newObject;
  }

  // special case: to avoid inconsistencies between IndexedDB
  // and other backends, we automatically stringify Dates
  if (object instanceof Date) {
    return object.toISOString();
  }

  if (isBinaryObject(object)) {
    return cloneBinaryObject(object);
  }

  newObject = {};
  for (i in object) {
    if (Object.prototype.hasOwnProperty.call(object, i)) {
      var value = clone(object[i]);
      if (typeof value !== 'undefined') {
        newObject[i] = value;
      }
    }
  }
  return newObject;
}
// BEGIN Math.uuid.js

/*!
Math.uuid.js (v1.4)
http://www.broofa.com
mailto:robert@broofa.com

Copyright (c) 2010 Robert Kieffer
Dual licensed under the MIT and GPL licenses.
*/

/*
 * Generate a random uuid.
 *
 * USAGE: Math.uuid(length, radix)
 *   length - the desired number of characters
 *   radix  - the number of allowable values for each character.
 *
 * EXAMPLES:
 *   // No arguments - returns RFC4122, version 4 ID
 *   >>> Math.uuid()
 *   "92329D39-6F5C-4520-ABFC-AAB64544E172"
 *
 *   // One argument - returns ID of the specified length
 *   >>> Math.uuid(15)     // 15 character ID (default base=62)
 *   "VcydxgltxrVZSTV"
 *
 *   // Two arguments - returns ID of the specified length, and radix.
 *   // (Radix must be <= 62)
 *   >>> Math.uuid(8, 2)  // 8 character ID (base=2)
 *   "01001010"
 *   >>> Math.uuid(8, 10) // 8 character ID (base=10)
 *   "47473046"
 *   >>> Math.uuid(8, 16) // 8 character ID (base=16)
 *   "098F4D35"
 */
var chars = (
  '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' +
  'abcdefghijklmnopqrstuvwxyz'
).split('');

function getValue(radix) {
  return 0 | Math.random() * radix;
}

function uuid(len, radix) {
  radix = radix || chars.length;
  var out = '';
  var i = -1;

  if (len) {
    // Compact form
    while (++i < len) {
      out += chars[getValue(radix)];
    }
    return out;
  }
  // rfc4122, version 4 form
  // Fill in random data. At i==19 set the high bits of clock sequence as
  // per rfc4122, sec. 4.1.5
  while (++i < 36) {
    switch (i) {
      case 8:
      case 13:
      case 18:
      case 23:
        out += '-';
        break;
      case 19:
        out += chars[(getValue(16) & 0x3) | 0x8];
        break;
      default:
        out += chars[getValue(16)];
    }
  }

  return out;
}
// like underscore/lodash _.pick()
function pick(obj, arr) {
  var res = {};
  for (var i = 0, len = arr.length; i < len; i++) {
    var prop = arr[i];
    if (prop in obj) {
      res[prop] = obj[prop];
    }
  }
  return res;
}
inherits(PouchError, Error);

function PouchError(opts) {
  Error.call(this, opts.reason);
  this.status = opts.status;
  this.name = opts.error;
  this.message = opts.reason;
  this.error = true;
}

PouchError.prototype.toString = function () {
  return JSON.stringify({
    status: this.status,
    name: this.name,
    message: this.message,
    reason: this.reason
  });
};
var UNAUTHORIZED = new PouchError({
  status: 401,
  error: 'unauthorized',
  reason: "Name or password is incorrect."
});

var MISSING_BULK_DOCS = new PouchError({
  status: 400,
  error: 'bad_request',
  reason: "Missing JSON list of 'docs'"
});

var MISSING_DOC = new PouchError({
  status: 404,
  error: 'not_found',
  reason: 'missing'
});

var REV_CONFLICT = new PouchError({
  status: 409,
  error: 'conflict',
  reason: 'Document update conflict'
});

var INVALID_ID = new PouchError({
  status: 400,
  error: 'invalid_id',
  reason: '_id field must contain a string'
});

var MISSING_ID = new PouchError({
  status: 412,
  error: 'missing_id',
  reason: '_id is required for puts'
});

var RESERVED_ID = new PouchError({
  status: 400,
  error: 'bad_request',
  reason: 'Only reserved document ids may start with underscore.'
});

var NOT_OPEN = new PouchError({
  status: 412,
  error: 'precondition_failed',
  reason: 'Database not open'
});

var UNKNOWN_ERROR = new PouchError({
  status: 500,
  error: 'unknown_error',
  reason: 'Database encountered an unknown error'
});

var BAD_ARG = new PouchError({
  status: 500,
  error: 'badarg',
  reason: 'Some query argument is invalid'
});

var INVALID_REQUEST = new PouchError({
  status: 400,
  error: 'invalid_request',
  reason: 'Request was invalid'
});

var QUERY_PARSE_ERROR = new PouchError({
  status: 400,
  error: 'query_parse_error',
  reason: 'Some query parameter is invalid'
});

var DOC_VALIDATION = new PouchError({
  status: 500,
  error: 'doc_validation',
  reason: 'Bad special document member'
});

var BAD_REQUEST = new PouchError({
  status: 400,
  error: 'bad_request',
  reason: 'Something wrong with the request'
});

var NOT_AN_OBJECT = new PouchError({
  status: 400,
  error: 'bad_request',
  reason: 'Document must be a JSON object'
});

var DB_MISSING = new PouchError({
  status: 404,
  error: 'not_found',
  reason: 'Database not found'
});

var IDB_ERROR = new PouchError({
  status: 500,
  error: 'indexed_db_went_bad',
  reason: 'unknown'
});

var WSQ_ERROR = new PouchError({
  status: 500,
  error: 'web_sql_went_bad',
  reason: 'unknown'
});

var LDB_ERROR = new PouchError({
  status: 500,
  error: 'levelDB_went_bad',
  reason: 'unknown'
});

var FORBIDDEN = new PouchError({
  status: 403,
  error: 'forbidden',
  reason: 'Forbidden by design doc validate_doc_update function'
});

var INVALID_REV = new PouchError({
  status: 400,
  error: 'bad_request',
  reason: 'Invalid rev format'
});

var FILE_EXISTS = new PouchError({
  status: 412,
  error: 'file_exists',
  reason: 'The database could not be created, the file already exists.'
});

var MISSING_STUB = new PouchError({
  status: 412,
  error: 'missing_stub'
});

var INVALID_URL = new PouchError({
  status: 413,
  error: 'invalid_url',
  reason: 'Provided URL is invalid'
});
var allErrors = {
  UNAUTHORIZED: UNAUTHORIZED,
  MISSING_BULK_DOCS: MISSING_BULK_DOCS,
  MISSING_DOC: MISSING_DOC,
  REV_CONFLICT: REV_CONFLICT,
  INVALID_ID: INVALID_ID,
  MISSING_ID: MISSING_ID,
  RESERVED_ID: RESERVED_ID,
  NOT_OPEN: NOT_OPEN,
  UNKNOWN_ERROR: UNKNOWN_ERROR,
  BAD_ARG: BAD_ARG,
  INVALID_REQUEST: INVALID_REQUEST,
  QUERY_PARSE_ERROR: QUERY_PARSE_ERROR,
  DOC_VALIDATION: DOC_VALIDATION,
  BAD_REQUEST: BAD_REQUEST,
  NOT_AN_OBJECT: NOT_AN_OBJECT,
  DB_MISSING: DB_MISSING,
  WSQ_ERROR: WSQ_ERROR,
  LDB_ERROR: LDB_ERROR,
  FORBIDDEN: FORBIDDEN,
  INVALID_REV: INVALID_REV,
  FILE_EXISTS: FILE_EXISTS,
  MISSING_STUB: MISSING_STUB,
  IDB_ERROR: IDB_ERROR,
  INVALID_URL: INVALID_URL
};
function createError(error, reason, name) {
  function CustomPouchError(reason) {
    // inherit error properties from our parent error manually
    // so as to allow proper JSON parsing.
    for (var p in error) {
      if (typeof error[p] !== 'function') {
        this[p] = error[p];
      }
    }
    if (name !== undefined) {
      this.name = name;
    }
    if (reason !== undefined) {
      this.reason = reason;
    }
  }
  CustomPouchError.prototype = PouchError.prototype;
  return new CustomPouchError(reason);
}
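// Illustrative (not part of the original source): createError copies the
// non-function properties of the template error and then overrides reason:
//   createError(MISSING_DOC, 'deleted')
//   // -> {status: 404, name: 'not_found', message: 'missing',
//   //     error: true, reason: 'deleted'}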
function tryFilter(filter, doc, req) {
  try {
    return !filter(doc, req);
  } catch (err) {
    var msg = 'Filter function threw: ' + err.toString();
    return createError(BAD_REQUEST, msg);
  }
}
function filterChange(opts) {
  var req = {};
  var hasFilter = opts.filter && typeof opts.filter === 'function';
  req.query = opts.query_params;

  return function filter(change) {
    if (!change.doc) {
      // CSG sends events on the changes feed that don't have documents,
      // this hack makes a whole lot of existing code robust.
      change.doc = {};
    }

    var filterReturn = hasFilter && tryFilter(opts.filter, change.doc, req);

    if (typeof filterReturn === 'object') {
      return filterReturn;
    }

    if (filterReturn) {
      return false;
    }

    if (!opts.include_docs) {
      delete change.doc;
    } else if (!opts.attachments) {
      for (var att in change.doc._attachments) {
        /* istanbul ignore else */
        if (change.doc._attachments.hasOwnProperty(att)) {
          change.doc._attachments[att].stub = true;
        }
      }
    }
    return true;
  };
}
// We fetch all leafs of the revision tree, and sort them based on tree length
// and whether they were deleted. Undeleted documents with the longest revision
// tree (most edits) win.
// The final sort algorithm is slightly documented in a sidebar here:
// http://guide.couchdb.org/draft/conflicts.html
function winningRev(metadata) {
  var winningId;
  var winningPos;
  var winningDeleted;
  var toVisit = metadata.rev_tree.slice();
  var node;
  while ((node = toVisit.pop())) {
    var tree = node.ids;
    var branches = tree[2];
    var pos = node.pos;
    if (branches.length) { // non-leaf
      for (var i = 0, len = branches.length; i < len; i++) {
        toVisit.push({pos: pos + 1, ids: branches[i]});
      }
      continue;
    }
    var deleted = !!tree[1].deleted;
    var id = tree[0];
    // sort by deleted, then pos, then id
    if (!winningId || (winningDeleted !== deleted ? winningDeleted :
        winningPos !== pos ? winningPos < pos : winningId < id)) {
      winningId = id;
      winningPos = pos;
      winningDeleted = deleted;
    }
  }

  return winningPos + '-' + winningId;
}
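// Illustrative shape of a rev_tree, inferred from how it is built and
// traversed in this file (not a normative spec): each entry is
//   {pos: <depth of the root node>, ids: [revHash, opts, branches]}
// e.g. a linear two-revision history might look like
//   [{pos: 1, ids: ['aaa', {status: 'missing'}, [
//     ['bbb', {status: 'available'}, []]
//   ]]}]
// where 'bbb' is the sole leaf, i.e. the winning revision is '2-bbb'.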
function getTrees(node) {
  return node.ids;
}

// check if a specific revision of a doc has been deleted
// - metadata: the metadata object from the doc store
// - rev: (optional) the revision to check. defaults to winning revision
function isDeleted(metadata, rev) {
  if (!rev) {
    rev = winningRev(metadata);
  }
  var id = rev.substring(rev.indexOf('-') + 1);
  var toVisit = metadata.rev_tree.map(getTrees);

  var tree;
  while ((tree = toVisit.pop())) {
    if (tree[0] === id) {
      return !!tree[1].deleted;
    }
    toVisit = toVisit.concat(tree[2]);
  }
}

function isLocalId(id) {
  return (/^_local/).test(id);
}
//
// Parsing hex strings. Yeah.
//
// So basically we need this because of a bug in WebSQL:
// https://code.google.com/p/chromium/issues/detail?id=422690
// https://bugs.webkit.org/show_bug.cgi?id=137637
//
// UTF-8 and UTF-16 are provided as separate functions
// for meager performance improvements
//

function decodeUtf8(str) {
  return decodeURIComponent(window.escape(str));
}

function hexToInt(charCode) {
  // '0'-'9' is 48-57
  // 'A'-'F' is 65-70
  // SQLite will only give us uppercase hex
  return charCode < 65 ? (charCode - 48) : (charCode - 55);
}

// Example:
// pragma encoding=utf8;
// select hex('A');
// returns '41'
function parseHexUtf8(str, start, end) {
  var result = '';
  while (start < end) {
    result += String.fromCharCode(
      (hexToInt(str.charCodeAt(start++)) << 4) |
      hexToInt(str.charCodeAt(start++)));
  }
  return result;
}

// Example:
// pragma encoding=utf16;
// select hex('A');
// returns '4100'
// notice that the 00 comes after the 41 (i.e. it's swizzled)
function parseHexUtf16(str, start, end) {
  var result = '';
  while (start < end) {
    // UTF-16, so swizzle the bytes
    result += String.fromCharCode(
      (hexToInt(str.charCodeAt(start + 2)) << 12) |
      (hexToInt(str.charCodeAt(start + 3)) << 8) |
      (hexToInt(str.charCodeAt(start)) << 4) |
      hexToInt(str.charCodeAt(start + 1)));
    start += 4;
  }
  return result;
}

function parseHexString(str, encoding) {
  if (encoding === 'UTF-8') {
    return decodeUtf8(parseHexUtf8(str, 0, str.length));
  } else {
    return parseHexUtf16(str, 0, str.length);
  }
}
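// Illustrative (not part of the original source): with the swizzling above,
//   parseHexUtf16('4100', 0, 4) === 'A'   // 0x0041
//   parseHexUtf8('41', 0, 2)    === 'A'   // 0x41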
// this solely exists so we can exclude it in browserify
var buffer = Buffer;

function typedBuffer(binString, buffType, type) {
  // buffType is either 'binary' or 'base64'
  var buff = new buffer(binString, buffType);
  buff.type = type; // non-standard, but used for consistency with the browser
  return buff;
}

function binStringToBluffer(binString, type) {
  return typedBuffer(binString, 'binary', type);
}

// in Node of course this is false
function hasLocalStorage() {
  return false;
}
// Pretty much all below can be combined into a higher order function to
// traverse revisions
// The return value from the callback will be passed as context to all
// children of that node
function traverseRevTree(revs, callback) {
  var toVisit = revs.slice();

  var node;
  while ((node = toVisit.pop())) {
    var pos = node.pos;
    var tree = node.ids;
    var branches = tree[2];
    var newCtx =
      callback(branches.length === 0, pos, tree[0], node.ctx, tree[1]);
    for (var i = 0, len = branches.length; i < len; i++) {
      toVisit.push({pos: pos + 1, ids: branches[i], ctx: newCtx});
    }
  }
}

function sortByPos(a, b) {
  return a.pos - b.pos;
}
function collectLeaves(revs) {
  var leaves = [];
  traverseRevTree(revs, function (isLeaf, pos, id, acc, opts) {
    if (isLeaf) {
      leaves.push({rev: pos + "-" + id, pos: pos, opts: opts});
    }
  });
  leaves.sort(sortByPos).reverse();
  for (var i = 0, len = leaves.length; i < len; i++) {
    delete leaves[i].pos;
  }
  return leaves;
}

// returns revs of all conflicts, i.e. leaves that
// 1. are not deleted and
// 2. are different from the winning revision
function collectConflicts(metadata) {
  var win = winningRev(metadata);
  var leaves = collectLeaves(metadata.rev_tree);
  var conflicts = [];
  for (var i = 0, len = leaves.length; i < len; i++) {
    var leaf = leaves[i];
    if (leaf.rev !== win && !leaf.opts.deleted) {
      conflicts.push(leaf.rev);
    }
  }
  return conflicts;
}
function slowJsonParse(str) {
  try {
    return JSON.parse(str);
  } catch (e) {
    /* istanbul ignore next */
    return vuvuzela.parse(str);
  }
}

function safeJsonParse(str) {
  // try/catch is deoptimized in V8, leading to slower
  // times than we'd like to have. Most documents are _not_
  // huge, and do not require a slower code path just to parse them.
  // We can be pretty sure that a document under 50000 characters
  // will not be so deeply nested as to throw a stack overflow error
  // (depends on the engine and available memory, though, so this is
  // just a hunch). 50000 was chosen based on the average length
  // of this string in our test suite, to try to find a number that covers
  // most of our test cases (26 over this size, 26378 under it).
  if (str.length < 50000) {
    return JSON.parse(str);
  }
  return slowJsonParse(str);
}

function safeJsonStringify(json) {
  try {
    return JSON.stringify(json);
  } catch (e) {
    /* istanbul ignore next */
    return vuvuzela.stringify(json);
  }
}

// in Node of course this is false
function isChromeApp() {
  return false;
}
inherits(Changes, events.EventEmitter);

/* istanbul ignore next */
function attachBrowserEvents(self) {
  if (isChromeApp()) {
    chrome.storage.onChanged.addListener(function (e) {
      // make sure the event is addressed to us
      if (e.db_name != null) {
        // object only has oldValue, newValue members
        self.emit(e.db_name.newValue);
      }
    });
  } else if (hasLocalStorage()) {
    if (typeof addEventListener !== 'undefined') {
      addEventListener("storage", function (e) {
        self.emit(e.key);
      });
    } else { // old IE
      window.attachEvent("storage", function (e) {
        self.emit(e.key);
      });
    }
  }
}
function Changes() {
  events.EventEmitter.call(this);
  this._listeners = {};

  attachBrowserEvents(this);
}

Changes.prototype.addListener = function (dbName, id, db, opts) {
  /* istanbul ignore if */
  if (this._listeners[id]) {
    return;
  }
  var self = this;
  var inprogress = false;
  function eventFunction() {
    /* istanbul ignore if */
    if (!self._listeners[id]) {
      return;
    }
    if (inprogress) {
      inprogress = 'waiting';
      return;
    }
    inprogress = true;
    var changesOpts = pick(opts, [
      'style', 'include_docs', 'attachments', 'conflicts', 'filter',
      'doc_ids', 'view', 'since', 'query_params', 'binary'
    ]);

    /* istanbul ignore next */
    function onError() {
      inprogress = false;
    }

    db.changes(changesOpts).on('change', function (c) {
      if (c.seq > opts.since && !opts.cancelled) {
        opts.since = c.seq;
        opts.onChange(c);
      }
    }).on('complete', function () {
      if (inprogress === 'waiting') {
        setTimeout(function () {
          eventFunction();
        }, 0);
      }
      inprogress = false;
    }).on('error', onError);
  }
  this._listeners[id] = eventFunction;
  this.on(dbName, eventFunction);
};

Changes.prototype.removeListener = function (dbName, id) {
  /* istanbul ignore if */
  if (!(id in this._listeners)) {
    return;
  }
  events.EventEmitter.prototype.removeListener.call(this, dbName,
    this._listeners[id]);
};
/* istanbul ignore next */
Changes.prototype.notifyLocalWindows = function (dbName) {
  // do a useless change on a storage thing
  // in order to get other windows' listeners to activate
  if (isChromeApp()) {
    chrome.storage.local.set({dbName: dbName});
  } else if (hasLocalStorage()) {
    localStorage[dbName] = (localStorage[dbName] === "a") ? "b" : "a";
  }
};

Changes.prototype.notify = function (dbName) {
  this.emit(dbName);
  this.notifyLocalWindows(dbName);
};
/* istanbul ignore next */
var PouchPromise = typeof Promise === 'function' ? Promise : lie;

function once(fun) {
  var called = false;
  return getArguments(function (args) {
    /* istanbul ignore if */
    if (called) {
      // this is a smoke test and should never actually happen
      throw new Error('once called more than once');
    } else {
      called = true;
      fun.apply(this, args);
    }
  });
}
function toPromise(func) {
  // create the function we will be returning
  return getArguments(function (args) {
    // Clone arguments
    args = clone(args);
    var self = this;
    var tempCB =
      (typeof args[args.length - 1] === 'function') ? args.pop() : false;
    // if the last argument is a function, assume it's a callback
    var usedCB;
    if (tempCB) {
      // if it was a callback, create a new callback which calls it,
      // but do so async so we don't trap any errors
      usedCB = function (err, resp) {
        process.nextTick(function () {
          tempCB(err, resp);
        });
      };
    }
    var promise = new PouchPromise(function (fulfill, reject) {
      var resp;
      try {
        var callback = once(function (err, mesg) {
          if (err) {
            reject(err);
          } else {
            fulfill(mesg);
          }
        });
        // create a callback for this invocation
        // apply the function in the orig context
        args.push(callback);
        resp = func.apply(self, args);
        if (resp && typeof resp.then === 'function') {
          fulfill(resp);
        }
      } catch (e) {
        reject(e);
      }
    });
    // if there is a callback, call it back
    if (usedCB) {
      promise.then(function (result) {
        usedCB(null, result);
      }, usedCB);
    }
    return promise;
  });
}
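// Illustrative usage (not part of the original source): toPromise turns a
// callback-style function into one that supports both styles, e.g.
//   var add = toPromise(function (a, b, cb) { cb(null, a + b); });
//   add(1, 2).then(function (sum) { /* sum === 3 */ });
//   add(1, 2, function (err, sum) { /* sum === 3 */ });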
function atob(str) {
  var base64 = new buffer(str, 'base64');
  // Node.js will just skip the characters it can't decode instead of
  // throwing an exception
  if (base64.toString('base64') !== str) {
    throw new Error("attachment is not a valid base64 string");
  }
  return base64.toString('binary');
}

function btoa(str) {
  return new buffer(str, 'binary').toString('base64');
}

// In Node, this is just a Buffer rather than an ArrayBuffer
function arrayBufferToBinaryString(buffer) {
  return buffer.toString('binary');
}

// In Node.js, just convert the Buffer to a Buffer rather than
// convert a Blob to an ArrayBuffer. This function is just a convenience
// function so we can easily switch Node vs browser environments.
function readAsArrayBuffer(buffer, callback) {
  process.nextTick(function () {
    callback(buffer);
  });
}

// In Node, this is just a Buffer rather than an ArrayBuffer
function arrayBufferToBase64(buffer) {
  return buffer.toString('base64');
}
// compute the md5 digest of some binary data, as base64
var res = toPromise(function (data, callback) {
  var base64 = crypto.createHash('md5').update(data).digest('base64');
  callback(null, base64);
});
function preprocessAttachments(docInfos, blobType, callback) {

  if (!docInfos.length) {
    return callback();
  }

  var docv = 0;

  function parseBase64(data) {
    try {
      return atob(data);
    } catch (e) {
      var err = createError(BAD_ARG,
        'Attachment is not a valid base64 string');
      return {error: err};
    }
  }

  function preprocessAttachment(att, callback) {
    if (att.stub) {
      return callback();
    }
    if (typeof att.data === 'string') {
      // input is assumed to be a base64 string

      var asBinary = parseBase64(att.data);
      if (asBinary.error) {
        return callback(asBinary.error);
      }

      att.length = asBinary.length;
      if (blobType === 'blob') {
        att.data = binStringToBluffer(asBinary, att.content_type);
      } else if (blobType === 'base64') {
        att.data = btoa(asBinary);
      } else { // binary
        att.data = asBinary;
      }
      res(asBinary).then(function (result) {
        att.digest = 'md5-' + result;
        callback();
      });
    } else { // input is a blob
      readAsArrayBuffer(att.data, function (buff) {
        if (blobType === 'binary') {
          att.data = arrayBufferToBinaryString(buff);
        } else if (blobType === 'base64') {
          att.data = arrayBufferToBase64(buff);
        }
        res(buff).then(function (result) {
          att.digest = 'md5-' + result;
          att.length = buff.byteLength;
          callback();
        });
      });
    }
  }

  var overallErr;

  docInfos.forEach(function (docInfo) {
    var attachments = docInfo.data && docInfo.data._attachments ?
      Object.keys(docInfo.data._attachments) : [];
    var recv = 0;

    if (!attachments.length) {
      return done();
    }

    function processedAttachment(err) {
      overallErr = err;
      recv++;
      if (recv === attachments.length) {
        done();
      }
    }

    for (var key in docInfo.data._attachments) {
      if (docInfo.data._attachments.hasOwnProperty(key)) {
        preprocessAttachment(docInfo.data._attachments[key],
          processedAttachment);
      }
    }
  });

  function done() {
    docv++;
    if (docInfos.length === docv) {
      if (overallErr) {
        callback(overallErr);
      } else {
        callback();
      }
    }
  }
}
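// Illustrative input shape (an assumption based on the code above, not part
// of the original source):
//   docInfo.data._attachments = {
//     'note.txt': {content_type: 'text/plain', data: 'aGVsbG8='}
//   };
// After preprocessing with blobType 'binary', att.data is the raw binary
// string ('hello'), att.length === 5, and att.digest is 'md5-' plus the
// base64 md5 of the data.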
function toObject(array) {
  return array.reduce(function (obj, item) {
    obj[item] = true;
    return obj;
  }, {});
}

// List of top level reserved words for doc
var reservedWords = toObject([
  '_id',
  '_rev',
  '_attachments',
  '_deleted',
  '_revisions',
  '_revs_info',
  '_conflicts',
  '_deleted_conflicts',
  '_local_seq',
  '_rev_tree',
  // replication documents
  '_replication_id',
  '_replication_state',
  '_replication_state_time',
  '_replication_state_reason',
  '_replication_stats',
  // Specific to Couchbase Sync Gateway
  '_removed'
]);

// List of reserved words that should end up in the document
var dataWords = toObject([
  '_attachments',
  // replication documents
  '_replication_id',
  '_replication_state',
  '_replication_state_time',
  '_replication_state_reason',
  '_replication_stats'
]);
// Determine if an ID is valid
// - invalid IDs begin with an underscore that does not begin '_design' or
//   '_local'
// - any other string value is a valid id
// Throws the specific error object for each case
function invalidIdError(id) {
  var err;
  if (!id) {
    err = createError(MISSING_ID);
  } else if (typeof id !== 'string') {
    err = createError(INVALID_ID);
  } else if (/^_/.test(id) && !(/^_(design|local)/).test(id)) {
    err = createError(RESERVED_ID);
  }
  if (err) {
    throw err;
  }
}
function parseRevisionInfo(rev) {
  if (!/^\d+\-./.test(rev)) {
    return createError(INVALID_REV);
  }
  var idx = rev.indexOf('-');
  var left = rev.substring(0, idx);
  var right = rev.substring(idx + 1);
  return {
    prefix: parseInt(left, 10),
    id: right
  };
}
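// Illustrative (not part of the original source):
//   parseRevisionInfo('3-abc123') -> {prefix: 3, id: 'abc123'}
//   parseRevisionInfo('bogus')    -> an INVALID_REV error object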
function makeRevTreeFromRevisions(revisions, opts) {
  var pos = revisions.start - revisions.ids.length + 1;

  var revisionIds = revisions.ids;
  var ids = [revisionIds[0], opts, []];

  for (var i = 1, len = revisionIds.length; i < len; i++) {
    ids = [revisionIds[i], {status: 'missing'}, [ids]];
  }

  return [{
    pos: pos,
    ids: ids
  }];
}
// Preprocess documents, parse their revisions, assign an id and a
// revision for new writes that are missing them, etc
function parseDoc(doc, newEdits) {

  var nRevNum;
  var newRevId;
  var revInfo;
  var opts = {status: 'available'};
  if (doc._deleted) {
    opts.deleted = true;
  }

  if (newEdits) {
    if (!doc._id) {
      doc._id = uuid();
    }
    newRevId = uuid(32, 16).toLowerCase();
    if (doc._rev) {
      revInfo = parseRevisionInfo(doc._rev);
      if (revInfo.error) {
        return revInfo;
      }
      doc._rev_tree = [{
        pos: revInfo.prefix,
        ids: [revInfo.id, {status: 'missing'}, [[newRevId, opts, []]]]
      }];
      nRevNum = revInfo.prefix + 1;
    } else {
      doc._rev_tree = [{
        pos: 1,
        ids: [newRevId, opts, []]
      }];
      nRevNum = 1;
    }
  } else {
    if (doc._revisions) {
      doc._rev_tree = makeRevTreeFromRevisions(doc._revisions, opts);
      nRevNum = doc._revisions.start;
      newRevId = doc._revisions.ids[0];
    }
    if (!doc._rev_tree) {
      revInfo = parseRevisionInfo(doc._rev);
      if (revInfo.error) {
        return revInfo;
      }
      nRevNum = revInfo.prefix;
      newRevId = revInfo.id;
      doc._rev_tree = [{
        pos: nRevNum,
        ids: [newRevId, opts, []]
      }];
    }
  }

  invalidIdError(doc._id);

  doc._rev = nRevNum + '-' + newRevId;

  var result = {metadata: {}, data: {}};
  for (var key in doc) {
    /* istanbul ignore else */
    if (Object.prototype.hasOwnProperty.call(doc, key)) {
      var specialKey = key[0] === '_';
      if (specialKey && !reservedWords[key]) {
        var error = createError(DOC_VALIDATION, key);
        error.message = DOC_VALIDATION.message + ': ' + key;
        throw error;
      } else if (specialKey && !dataWords[key]) {
        result.metadata[key.slice(1)] = doc[key];
      } else {
        result.data[key] = doc[key];
      }
    }
  }
  return result;
}
// build up a list of all the paths to the leafs in this revision tree
function rootToLeaf(revs) {
  var paths = [];
  var toVisit = revs.slice();
  var node;
  while ((node = toVisit.pop())) {
    var pos = node.pos;
    var tree = node.ids;
    var id = tree[0];
    var opts = tree[1];
    var branches = tree[2];
    var isLeaf = branches.length === 0;

    var history = node.history ? node.history.slice() : [];
    history.push({id: id, opts: opts});
    if (isLeaf) {
      paths.push({pos: (pos + 1 - history.length), ids: history});
    }
    for (var i = 0, len = branches.length; i < len; i++) {
      toVisit.push({pos: pos + 1, ids: branches[i], history: history});
    }
  }
  return paths.reverse();
}
function sortByPos$1(a, b) {
  return a.pos - b.pos;
}

// classic binary search
function binarySearch(arr, item, comparator) {
  var low = 0;
  var high = arr.length;
  var mid;
  while (low < high) {
    mid = (low + high) >>> 1;
    if (comparator(arr[mid], item) < 0) {
      low = mid + 1;
    } else {
      high = mid;
    }
  }
  return low;
}

// assuming the arr is sorted, insert the item in the proper place
function insertSorted(arr, item, comparator) {
  var idx = binarySearch(arr, item, comparator);
  arr.splice(idx, 0, item);
}
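// Illustrative (not part of the original source): with a positional
// comparator such as sortByPos$1 above,
//   var arr = [{pos: 1}, {pos: 3}];
//   insertSorted(arr, {pos: 2}, sortByPos$1);
//   // arr is now [{pos: 1}, {pos: 2}, {pos: 3}]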
// Turn a path as a flat array into a tree with a single branch.
// If any should be stemmed from the beginning of the array, that's passed
// in as the second argument
function pathToTree(path, numStemmed) {
  var root;
  var leaf;
  for (var i = numStemmed, len = path.length; i < len; i++) {
    var node = path[i];
    var currentLeaf = [node.id, node.opts, []];
    if (leaf) {
      leaf[2].push(currentLeaf);
      leaf = currentLeaf;
    } else {
      root = leaf = currentLeaf;
    }
  }
  return root;
}
// compare the IDs of two trees
function compareTree(a, b) {
  return a[0] < b[0] ? -1 : 1;
}

// Merge two trees together
// The roots of tree1 and tree2 must be the same revision
function mergeTree(in_tree1, in_tree2) {
  var queue = [{tree1: in_tree1, tree2: in_tree2}];
  var conflicts = false;
  while (queue.length > 0) {
    var item = queue.pop();
    var tree1 = item.tree1;
    var tree2 = item.tree2;

    if (tree1[1].status || tree2[1].status) {
      tree1[1].status =
        (tree1[1].status === 'available' ||
         tree2[1].status === 'available') ? 'available' : 'missing';
    }

    for (var i = 0; i < tree2[2].length; i++) {
      if (!tree1[2][0]) {
        conflicts = 'new_leaf';
        tree1[2][0] = tree2[2][i];
        continue;
      }

      var merged = false;
      for (var j = 0; j < tree1[2].length; j++) {
        if (tree1[2][j][0] === tree2[2][i][0]) {
          queue.push({tree1: tree1[2][j], tree2: tree2[2][i]});
          merged = true;
        }
      }
      if (!merged) {
        conflicts = 'new_branch';
        insertSorted(tree1[2], tree2[2][i], compareTree);
      }
    }
  }
  return {conflicts: conflicts, tree: in_tree1};
}
function doMerge(tree, path, dontExpand) {
  var restree = [];
  var conflicts = false;
  var merged = false;
  var res;

  if (!tree.length) {
    return {tree: [path], conflicts: 'new_leaf'};
  }

  for (var i = 0, len = tree.length; i < len; i++) {
    var branch = tree[i];
    if (branch.pos === path.pos && branch.ids[0] === path.ids[0]) {
      // Paths start at the same position and have the same root, so they need
      // merged
      res = mergeTree(branch.ids, path.ids);
      restree.push({pos: branch.pos, ids: res.tree});
      conflicts = conflicts || res.conflicts;
      merged = true;
    } else if (dontExpand !== true) {
      // The paths start at a different position: take the earliest path and
      // traverse it until it is at the same distance from the root as the
      // path we want to merge. If the keys match, we return the longer path
      // with the other merged in. After stemming we don't want to expand
      // the trees.

      var t1 = branch.pos < path.pos ? branch : path;
      var t2 = branch.pos < path.pos ? path : branch;
      var diff = t2.pos - t1.pos;

      var candidateParents = [];

      var trees = [];
      trees.push({ids: t1.ids, diff: diff, parent: null, parentIdx: null});
      while (trees.length > 0) {
        var item = trees.pop();
        if (item.diff === 0) {
          if (item.ids[0] === t2.ids[0]) {
            candidateParents.push(item);
          }
          continue;
        }
        var elements = item.ids[2];
        for (var j = 0, elementsLen = elements.length; j < elementsLen; j++) {
          trees.push({
            ids: elements[j],
            diff: item.diff - 1,
            parent: item.ids,
            parentIdx: j
          });
        }
      }

      var el = candidateParents[0];

      if (!el) {
        restree.push(branch);
      } else {
        res = mergeTree(el.ids, t2.ids);
        el.parent[2][el.parentIdx] = res.tree;
        restree.push({pos: t1.pos, ids: t1.ids});
        conflicts = conflicts || res.conflicts;
        merged = true;
      }
    } else {
      restree.push(branch);
    }
  }

  // We didn't find a branch to merge into, so add the path as a new branch
  if (!merged) {
    restree.push(path);
  }

  restree.sort(sortByPos$1);

  return {
    tree: restree,
    conflicts: conflicts || 'internal_node'
  };
}
// To ensure we don't grow the revision tree infinitely, we stem old revisions
function stem(tree, depth) {
  // First we break out the tree into a complete list of root to leaf paths
  var paths = rootToLeaf(tree);
  var maybeStem = {};

  var result;
  for (var i = 0, len = paths.length; i < len; i++) {
    // Then for each path, we cut off the start of the path based on the
    // `depth` to stem to, and generate a new set of flat trees
    var path = paths[i];
    var stemmed = path.ids;
    var numStemmed = Math.max(0, stemmed.length - depth);
    var stemmedNode = {
      pos: path.pos + numStemmed,
      ids: pathToTree(stemmed, numStemmed)
    };

    for (var s = 0; s < numStemmed; s++) {
      var rev = (path.pos + s) + '-' + stemmed[s].id;
      maybeStem[rev] = true;
    }

    // Then we remerge all those flat trees together, ensuring that we don't
    // connect trees that would go beyond the depth limit
    if (result) {
      result = doMerge(result, stemmedNode, true).tree;
    } else {
      result = [stemmedNode];
    }
  }

  traverseRevTree(result, function (isLeaf, pos, revHash) {
    // some revisions may have been removed in a branch but not in another
    delete maybeStem[pos + '-' + revHash];
  });

  return {
    tree: result,
    revs: Object.keys(maybeStem)
  };
}
function merge(tree, path, depth) {
  var newTree = doMerge(tree, path);
  var stemmed = stem(newTree.tree, depth);
  return {
    tree: stemmed.tree,
    stemmedRevs: stemmed.revs,
    conflicts: newTree.conflicts
  };
}
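// Illustrative (not part of the original source): merging a brand new edit
// into an empty tree, with the default depth of 1000,
//   merge([], {pos: 1, ids: ['abc', {status: 'available'}, []]}, 1000)
//   // -> {tree: [{pos: 1, ids: [...]}], stemmedRevs: [],
//   //     conflicts: 'new_leaf'}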
// return true if a rev exists in the rev tree, false otherwise
function revExists(revs, rev) {
  var toVisit = revs.slice();
  var splitRev = rev.split('-');
  var targetPos = parseInt(splitRev[0], 10);
  var targetId = splitRev[1];

  var node;
  while ((node = toVisit.pop())) {
    if (node.pos === targetPos && node.ids[0] === targetId) {
      return true;
    }
    var branches = node.ids[2];
    for (var i = 0, len = branches.length; i < len; i++) {
      toVisit.push({pos: node.pos + 1, ids: branches[i]});
    }
  }
  return false;
}
function updateDoc(revLimit, prev, docInfo, results,
                   i, cb, writeDoc, newEdits) {

  if (revExists(prev.rev_tree, docInfo.metadata.rev)) {
    results[i] = docInfo;
    return cb();
  }

  // sometimes this is pre-calculated. historically not always
  var previousWinningRev = prev.winningRev || winningRev(prev);
  var previouslyDeleted = 'deleted' in prev ? prev.deleted :
    isDeleted(prev, previousWinningRev);
  var deleted = 'deleted' in docInfo.metadata ? docInfo.metadata.deleted :
    isDeleted(docInfo.metadata);
  var isRoot = /^1-/.test(docInfo.metadata.rev);

  if (previouslyDeleted && !deleted && newEdits && isRoot) {
    var newDoc = docInfo.data;
    newDoc._rev = previousWinningRev;
    newDoc._id = docInfo.metadata.id;
    docInfo = parseDoc(newDoc, newEdits);
  }

  var merged = merge(prev.rev_tree, docInfo.metadata.rev_tree[0], revLimit);

  var inConflict = newEdits && (((previouslyDeleted && deleted) ||
    (!previouslyDeleted && merged.conflicts !== 'new_leaf') ||
    (previouslyDeleted && !deleted && merged.conflicts === 'new_branch')));

  if (inConflict) {
    var err = createError(REV_CONFLICT);
    results[i] = err;
    return cb();
  }

  var newRev = docInfo.metadata.rev;
  docInfo.metadata.rev_tree = merged.tree;
  docInfo.stemmedRevs = merged.stemmedRevs || [];
  /* istanbul ignore else */
  if (prev.rev_map) {
    docInfo.metadata.rev_map = prev.rev_map; // used only by leveldb
  }

  // recalculate
  var winningRev$$ = winningRev(docInfo.metadata);
  var winningRevIsDeleted = isDeleted(docInfo.metadata, winningRev$$);

  // calculate the total number of documents that were added/removed,
  // from the perspective of total_rows/doc_count
  var delta = (previouslyDeleted === winningRevIsDeleted) ? 0 :
    previouslyDeleted < winningRevIsDeleted ? -1 : 1;

  var newRevIsDeleted;
  if (newRev === winningRev$$) {
    // if the new rev is the same as the winning rev, we can reuse that value
    newRevIsDeleted = winningRevIsDeleted;
  } else {
    // if they're not the same, then we need to recalculate
    newRevIsDeleted = isDeleted(docInfo.metadata, newRev);
  }

  writeDoc(docInfo, winningRev$$, winningRevIsDeleted, newRevIsDeleted,
    true, delta, i, cb);
}
function rootIsMissing(docInfo) {
  return docInfo.metadata.rev_tree[0].ids[1].status === 'missing';
}
function processDocs(revLimit, docInfos, api, fetchedDocs, tx, results,
                     writeDoc, opts, overallCallback) {

  // Default to 1000 locally
  revLimit = revLimit || 1000;

  function insertDoc(docInfo, resultsIdx, callback) {
    // Can't insert new deleted documents
    var winningRev$$ = winningRev(docInfo.metadata);
    var deleted = isDeleted(docInfo.metadata, winningRev$$);
    if ('was_delete' in opts && deleted) {
      results[resultsIdx] = createError(MISSING_DOC, 'deleted');
      return callback();
    }

    // 4712 - detect whether a new document was inserted with a _rev
    var inConflict = newEdits && rootIsMissing(docInfo);

    if (inConflict) {
      var err = createError(REV_CONFLICT);
      results[resultsIdx] = err;
      return callback();
    }

    var delta = deleted ? 0 : 1;

    writeDoc(docInfo, winningRev$$, deleted, deleted, false,
      delta, resultsIdx, callback);
  }

  var newEdits = opts.new_edits;
  var idsToDocs = new pouchdbCollections.Map();

  var docsDone = 0;
  var docsToDo = docInfos.length;

  function checkAllDocsDone() {
    if (++docsDone === docsToDo && overallCallback) {
      overallCallback();
    }
  }

  docInfos.forEach(function (currentDoc, resultsIdx) {

    if (currentDoc._id && isLocalId(currentDoc._id)) {
      var fun = currentDoc._deleted ? '_removeLocal' : '_putLocal';
      api[fun](currentDoc, {ctx: tx}, function (err, res) {
        results[resultsIdx] = err || res;
        checkAllDocsDone();
      });
      return;
    }

    var id = currentDoc.metadata.id;
    if (idsToDocs.has(id)) {
      docsToDo--; // duplicate
      idsToDocs.get(id).push([currentDoc, resultsIdx]);
    } else {
      idsToDocs.set(id, [[currentDoc, resultsIdx]]);
    }
  });

  // in the case of new_edits, the user can provide multiple docs
  // with the same id. these need to be processed sequentially
  idsToDocs.forEach(function (docs, id) {
    var numDone = 0;

    function docWritten() {
      if (++numDone < docs.length) {
        nextDoc();
      } else {
        checkAllDocsDone();
      }
    }
    function nextDoc() {
      var value = docs[numDone];
      var currentDoc = value[0];
      var resultsIdx = value[1];

      if (fetchedDocs.has(id)) {
        updateDoc(revLimit, fetchedDocs.get(id), currentDoc, results,
          resultsIdx, docWritten, writeDoc, newEdits);
      } else {
        // Ensure stemming applies to new writes as well
        var merged = merge([], currentDoc.metadata.rev_tree[0], revLimit);
        currentDoc.metadata.rev_tree = merged.tree;
        currentDoc.stemmedRevs = merged.stemmedRevs || [];
        insertDoc(currentDoc, resultsIdx, docWritten);
      }
    }
    nextDoc();
  });
}
// compact a tree by marking its non-leafs as missing,
// and return a list of revs to delete
function compactTree(metadata) {
  var revs = [];
  traverseRevTree(metadata.rev_tree, function (isLeaf, pos,
                                               revHash, ctx, opts) {
    if (opts.status === 'available' && !isLeaf) {
      revs.push(pos + '-' + revHash);
      opts.status = 'missing';
    }
  });
  return revs;
}
function quote(str) {
  return "'" + str + "'";
}

var ADAPTER_VERSION = 7; // used to manage migrations

// The object stores created for each database
// DOC_STORE stores the document meta data, its revision history and state
var DOC_STORE = quote('document-store');
// BY_SEQ_STORE stores a particular version of a document, keyed by its
// sequence id
var BY_SEQ_STORE = quote('by-sequence');
// Where we store attachments
var ATTACH_STORE = quote('attach-store');
var LOCAL_STORE = quote('local-store');
var META_STORE = quote('metadata-store');
// where we store many-to-many relations between attachment
// digests and seqs
var ATTACH_AND_SEQ_STORE = quote('attach-seq-store');
// nodejs version of websql

function createOpenDBFunction() {
  return function openDB(opts) {
    return openDatabase('testdbs/' +
      opts.name, opts.version, opts.description, opts.size);
  };
}

function valid() {
  return true; // in Node, this is always true
}
// escapeBlob and unescapeBlob are workarounds for a websql bug:
// https://code.google.com/p/chromium/issues/detail?id=422690
// https://bugs.webkit.org/show_bug.cgi?id=137637
// The goal is to never actually insert the \u0000 character
// in the database.
function escapeBlob(str) {
  return str
    .replace(/\u0002/g, '\u0002\u0002')
    .replace(/\u0001/g, '\u0001\u0002')
    .replace(/\u0000/g, '\u0001\u0001');
}

function unescapeBlob(str) {
  return str
    .replace(/\u0001\u0001/g, '\u0000')
    .replace(/\u0001\u0002/g, '\u0001')
    .replace(/\u0002\u0002/g, '\u0002');
}
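// Illustrative round trip (not part of the original source): '\u0000' is
// stored as '\u0001\u0001', and a literal '\u0001' as '\u0001\u0002', so
//   unescapeBlob(escapeBlob('a\u0000b')) === 'a\u0000b'
// and no NUL byte ever reaches the database.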
function stringifyDoc(doc) {
  // don't bother storing the id/rev. it uses lots of space,
  // in persistent map/reduce especially
  delete doc._id;
  delete doc._rev;
  return JSON.stringify(doc);
}

function unstringifyDoc(doc, id, rev) {
  doc = JSON.parse(doc);
  doc._id = id;
  doc._rev = rev;
  return doc;
}
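// Illustrative round trip (not part of the original source); note that
// stringifyDoc mutates its argument by deleting _id and _rev:
//   stringifyDoc({_id: 'foo', _rev: '1-abc', val: 42})  // '{"val":42}'
//   unstringifyDoc('{"val":42}', 'foo', '1-abc')
//   // -> {val: 42, _id: 'foo', _rev: '1-abc'}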
// question mark groups IN queries, e.g. 3 -> '(?,?,?)'
function qMarks(num) {
  var s = '(';
  while (num--) {
    s += '?';
    if (num) {
      s += ',';
    }
  }
  return s + ')';
}

function select(selector, table, joiner, where, orderBy) {
  return 'SELECT ' + selector + ' FROM ' +
    (typeof table === 'string' ? table : table.join(' JOIN ')) +
    (joiner ? (' ON ' + joiner) : '') +
    (where ? (' WHERE ' +
      (typeof where === 'string' ? where : where.join(' AND '))) : '') +
    (orderBy ? (' ORDER BY ' + orderBy) : '');
}
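// Illustrative (not part of the original source):
//   select('seq', BY_SEQ_STORE, null, 'doc_id=? AND rev=?')
// builds
//   "SELECT seq FROM 'by-sequence' WHERE doc_id=? AND rev=?"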
function compactRevs(revs, docId, tx) {

  if (!revs.length) {
    return;
  }

  var numDone = 0;
  var seqs = [];

  function checkDone() {
    if (++numDone === revs.length) { // done
      deleteOrphans();
    }
  }

  function deleteOrphans() {
    // find orphaned attachment digests

    if (!seqs.length) {
      return;
    }

    var sql = 'SELECT DISTINCT digest AS digest FROM ' +
      ATTACH_AND_SEQ_STORE + ' WHERE seq IN ' + qMarks(seqs.length);

    tx.executeSql(sql, seqs, function (tx, res) {

      var digestsToCheck = [];
      for (var i = 0; i < res.rows.length; i++) {
        digestsToCheck.push(res.rows.item(i).digest);
      }
      if (!digestsToCheck.length) {
        return;
      }

      var sql = 'DELETE FROM ' + ATTACH_AND_SEQ_STORE +
        ' WHERE seq IN (' +
        seqs.map(function () { return '?'; }).join(',') +
        ')';
      tx.executeSql(sql, seqs, function (tx) {

        var sql = 'SELECT digest FROM ' + ATTACH_AND_SEQ_STORE +
          ' WHERE digest IN (' +
          digestsToCheck.map(function () { return '?'; }).join(',') +
          ')';
        tx.executeSql(sql, digestsToCheck, function (tx, res) {
          var nonOrphanedDigests = new pouchdbCollections.Set();
          for (var i = 0; i < res.rows.length; i++) {
            nonOrphanedDigests.add(res.rows.item(i).digest);
          }
          digestsToCheck.forEach(function (digest) {
            if (nonOrphanedDigests.has(digest)) {
              return;
            }
            tx.executeSql(
              'DELETE FROM ' + ATTACH_AND_SEQ_STORE + ' WHERE digest=?',
              [digest]);
            tx.executeSql(
              'DELETE FROM ' + ATTACH_STORE + ' WHERE digest=?', [digest]);
          });
        });
      });
    });
  }

  // update by-seq and attach stores in parallel
  revs.forEach(function (rev) {
    var sql = 'SELECT seq FROM ' + BY_SEQ_STORE +
      ' WHERE doc_id=? AND rev=?';

    tx.executeSql(sql, [docId, rev], function (tx, res) {
      if (!res.rows.length) { // already deleted
        return checkDone();
      }
      var seq = res.rows.item(0).seq;
      seqs.push(seq);

      tx.executeSql(
        'DELETE FROM ' + BY_SEQ_STORE + ' WHERE seq=?', [seq], checkDone);
    });
  });
}
function websqlError(callback) {
  return function (event) {
    console.error('WebSQL threw an error', event);
    // event may actually be a SQLError object, so report it as such
    var errorNameMatch = event && event.constructor.toString()
      .match(/function ([^\(]+)/);
    var errorName = (errorNameMatch && errorNameMatch[1]) || event.type;
    var errorReason = event.target || event.message;
    callback(createError(WSQ_ERROR, errorReason, errorName));
  };
}
function getSize(opts) {
  if ('size' in opts) {
    // triggers immediate popup in iOS, fixes #2347
    // e.g. 5000001 asks for 5 MB, 10000001 asks for 10 MB, etc.
    return opts.size * 1000000;
  }
  // In iOS, doesn't matter as long as it's <= 5000000.
  // Except that if you request too much, our tests fail
  // because of the native "do you accept?" popup.
  // In Android <= 4.3, this value is actually used as an
  // honest-to-god ceiling for data, so we need to
  // set it to a decently high number.
  var isAndroid = typeof navigator !== 'undefined' &&
    /Android/.test(navigator.userAgent);
  return isAndroid ? 5000000 : 1; // in PhantomJS, if you use 0 it will crash
}
function openDBSafely(openDBFunction, opts) {
  try {
    return {
      db: openDBFunction(opts)
    };
  } catch (err) {
    return {
      error: err
    };
  }
}

var cachedDatabases = new pouchdbCollections.Map();

function openDB(opts) {
  var cachedResult = cachedDatabases.get(opts.name);
  if (!cachedResult) {
    var openDBFun = createOpenDBFunction();
    cachedResult = openDBSafely(openDBFun, opts);
    cachedDatabases.set(opts.name, cachedResult);
    if (cachedResult.db) {
      cachedResult.db._sqlitePlugin = typeof sqlitePlugin !== 'undefined';
    }
  }
  return cachedResult;
}
function websqlBulkDocs(dbOpts, req, opts, api, db, websqlChanges, callback) {
  var newEdits = opts.new_edits;
  var userDocs = req.docs;

  // Parse the docs, give them a sequence number for the result
  var docInfos = userDocs.map(function (doc) {
    if (doc._id && isLocalId(doc._id)) {
      return doc;
    }
    var newDoc = parseDoc(doc, newEdits);
    return newDoc;
  });

  var docInfoErrors = docInfos.filter(function (docInfo) {
    return docInfo.error;
  });
  if (docInfoErrors.length) {
    return callback(docInfoErrors[0]);
  }

  var tx;
  var results = new Array(docInfos.length);
  var fetchedDocs = new pouchdbCollections.Map();

  var preconditionErrored;
  function complete() {
    if (preconditionErrored) {
      return callback(preconditionErrored);
    }
    websqlChanges.notify(api._name);
    api._docCount = -1; // invalidate
    callback(null, results);
  }

  function verifyAttachment(digest, callback) {
    var sql = 'SELECT count(*) as cnt FROM ' + ATTACH_STORE +
      ' WHERE digest=?';
    tx.executeSql(sql, [digest], function (tx, result) {
      if (result.rows.item(0).cnt === 0) {
        var err = createError(MISSING_STUB,
          'unknown stub attachment with digest ' +
          digest);
        callback(err);
      } else {
        callback();
      }
    });
  }

  function verifyAttachments(finish) {
    var digests = [];
    docInfos.forEach(function (docInfo) {
      if (docInfo.data && docInfo.data._attachments) {
        Object.keys(docInfo.data._attachments).forEach(function (filename) {
          var att = docInfo.data._attachments[filename];
          if (att.stub) {
            digests.push(att.digest);
          }
        });
      }
    });
    if (!digests.length) {
      return finish();
    }
    var numDone = 0;
    var err;

    function checkDone() {
      if (++numDone === digests.length) {
        finish(err);
      }
    }
    digests.forEach(function (digest) {
      verifyAttachment(digest, function (attErr) {
        if (attErr && !err) {
          err = attErr;
        }
        checkDone();
      });
    });
  }

  function writeDoc(docInfo, winningRev, winningRevIsDeleted, newRevIsDeleted,
                    isUpdate, delta, resultsIdx, callback) {

    function finish() {
      var data = docInfo.data;
      var deletedInt = newRevIsDeleted ? 1 : 0;

      var id = data._id;
      var rev = data._rev;
      var json = stringifyDoc(data);
      var sql = 'INSERT INTO ' + BY_SEQ_STORE +
        ' (doc_id, rev, json, deleted) VALUES (?, ?, ?, ?);';
      var sqlArgs = [id, rev, json, deletedInt];

      // map seqs to attachment digests, which
      // we will need later during compaction
      function insertAttachmentMappings(seq, callback) {
        var attsAdded = 0;
        var attsToAdd = Object.keys(data._attachments || {});

        if (!attsToAdd.length) {
          return callback();
        }
        function checkDone() {
          if (++attsAdded === attsToAdd.length) {
            callback();
          }
          return false; // ack handling a constraint error
        }
        function add(att) {
          var sql = 'INSERT INTO ' + ATTACH_AND_SEQ_STORE +
            ' (digest, seq) VALUES (?,?)';
          var sqlArgs = [data._attachments[att].digest, seq];
          tx.executeSql(sql, sqlArgs, checkDone, checkDone);
          // second callback is for a constraint error, which we ignore
          // because this docid/rev has already been associated with
          // the digest (e.g. when new_edits == false)
        }
        for (var i = 0; i < attsToAdd.length; i++) {
          add(attsToAdd[i]); // do in parallel
        }
      }

      tx.executeSql(sql, sqlArgs, function (tx, result) {
        var seq = result.insertId;
        insertAttachmentMappings(seq, function () {
          dataWritten(tx, seq);
        });
      }, function () {
        // constraint error, recover by updating instead (see #1638)
        var fetchSql = select('seq', BY_SEQ_STORE, null,
          'doc_id=? AND rev=?');
        tx.executeSql(fetchSql, [id, rev], function (tx, res) {
          var seq = res.rows.item(0).seq;
          var sql = 'UPDATE ' + BY_SEQ_STORE +
            ' SET json=?, deleted=? WHERE doc_id=? AND rev=?;';
          var sqlArgs = [json, deletedInt, id, rev];
          tx.executeSql(sql, sqlArgs, function (tx) {
            insertAttachmentMappings(seq, function () {
              dataWritten(tx, seq);
            });
          });
        });
        return false; // ack that we've handled the error
      });
    }

    function collectResults(attachmentErr) {
      if (!err) {
        if (attachmentErr) {
          err = attachmentErr;
          callback(err);
        } else if (recv === attachments.length) {
          finish();
        }
      }
    }

    var err = null;
    var recv = 0;

    docInfo.data._id = docInfo.metadata.id;
    docInfo.data._rev = docInfo.metadata.rev;
    var attachments = Object.keys(docInfo.data._attachments || {});

    if (newRevIsDeleted) {
      docInfo.data._deleted = true;
    }

    function attachmentSaved(err) {
      recv++;
      collectResults(err);
    }

    attachments.forEach(function (key) {
      var att = docInfo.data._attachments[key];
      if (!att.stub) {
        var data = att.data;
        delete att.data;
        att.revpos = parseInt(winningRev, 10);
        var digest = att.digest;
        saveAttachment(digest, data, attachmentSaved);
      } else {
        recv++;
        collectResults();
      }
    });

    if (!attachments.length) {
      finish();
    }

    function dataWritten(tx, seq) {
      var id = docInfo.metadata.id;
      if (isUpdate && api.auto_compaction) {
        compactRevs(compactTree(docInfo.metadata), id, tx);
      } else if (docInfo.stemmedRevs.length) {
        compactRevs(docInfo.stemmedRevs, id, tx);
      }

      docInfo.metadata.seq = seq;
      delete docInfo.metadata.rev;

      var sql = isUpdate ?
        'UPDATE ' + DOC_STORE +
        ' SET json=?, max_seq=?, winningseq=' +
        '(SELECT seq FROM ' + BY_SEQ_STORE +
        ' WHERE doc_id=' + DOC_STORE + '.id AND rev=?) WHERE id=?'
        : 'INSERT INTO ' + DOC_STORE +
        ' (id, winningseq, max_seq, json) VALUES (?,?,?,?);';
      var metadataStr = safeJsonStringify(docInfo.metadata);
      var params = isUpdate ?
        [metadataStr, seq, winningRev, id] :
        [id, seq, seq, metadataStr];
      tx.executeSql(sql, params, function () {
        results[resultsIdx] = {
          ok: true,
          id: docInfo.metadata.id,
          rev: winningRev
        };
        fetchedDocs.set(id, docInfo.metadata);
        callback();
      });
    }
  }

  function websqlProcessDocs() {
    processDocs(dbOpts.revs_limit, docInfos, api, fetchedDocs, tx,
      results, writeDoc, opts);
  }

  function fetchExistingDocs(callback) {
    if (!docInfos.length) {
      return callback();
    }

    var numFetched = 0;

    function checkDone() {
      if (++numFetched === docInfos.length) {
        callback();
      }
    }

    docInfos.forEach(function (docInfo) {
      if (docInfo._id && isLocalId(docInfo._id)) {
        return checkDone(); // skip local docs
      }
      var id = docInfo.metadata.id;
      tx.executeSql('SELECT json FROM ' + DOC_STORE +
        ' WHERE id = ?', [id], function (tx, result) {
        if (result.rows.length) {
          var metadata = safeJsonParse(result.rows.item(0).json);
          fetchedDocs.set(id, metadata);
        }
        checkDone();
      });
    });
  }

  function saveAttachment(digest, data, callback) {
    var sql = 'SELECT digest FROM ' + ATTACH_STORE + ' WHERE digest=?';
    tx.executeSql(sql, [digest], function (tx, result) {
      if (result.rows.length) { // attachment already exists
        return callback();
      }
      // we could just insert before selecting and catch the error,
      // but my hunch is that it's cheaper not to serialize the blob
      // from JS to C if we don't have to (TODO: confirm this)
      sql = 'INSERT INTO ' + ATTACH_STORE +
        ' (digest, body, escaped) VALUES (?,?,1)';
      tx.executeSql(sql, [digest, escapeBlob(data)], function () {
        callback();
      }, function () {
        // ignore constraint errors, means it already exists
        callback();
        return false; // ack we handled the error
      });
    });
  }

  preprocessAttachments(docInfos, 'binary', function (err) {
    if (err) {
      return callback(err);
    }
    db.transaction(function (txn) {
      tx = txn;
      verifyAttachments(function (err) {
        if (err) {
          preconditionErrored = err;
        } else {
          fetchExistingDocs(websqlProcessDocs);
        }
      });
    }, websqlError(callback), complete);
  });
}
var websqlChanges = new Changes();
function fetchAttachmentsIfNecessary(doc, opts, api, txn, cb) {
|
|
var attachments = Object.keys(doc._attachments || {});
|
|
if (!attachments.length) {
|
|
return cb && cb();
|
|
}
|
|
var numDone = 0;
|
|
|
|
function checkDone() {
|
|
if (++numDone === attachments.length && cb) {
|
|
cb();
|
|
}
|
|
}
|
|
|
|
function fetchAttachment(doc, att) {
|
|
var attObj = doc._attachments[att];
|
|
var attOpts = {binary: opts.binary, ctx: txn};
|
|
api._getAttachment(attObj, attOpts, function (_, data) {
|
|
doc._attachments[att] = jsExtend.extend(
|
|
pick(attObj, ['digest', 'content_type']),
|
|
{ data: data }
|
|
);
|
|
checkDone();
|
|
});
|
|
}
|
|
|
|
attachments.forEach(function (att) {
|
|
if (opts.attachments && opts.include_docs) {
|
|
fetchAttachment(doc, att);
|
|
} else {
|
|
doc._attachments[att].stub = true;
|
|
checkDone();
|
|
}
|
|
});
|
|
}
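
// For illustration: with {attachments: true, include_docs: true} each
// attachment entry is replaced by {digest, content_type, data}; otherwise
// the stored metadata object is kept and merely flagged with stub: true.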

var POUCH_VERSION = 1;

// these indexes cover the ground for most allDocs queries
var BY_SEQ_STORE_DELETED_INDEX_SQL =
  'CREATE INDEX IF NOT EXISTS \'by-seq-deleted-idx\' ON ' +
  BY_SEQ_STORE + ' (seq, deleted)';
var BY_SEQ_STORE_DOC_ID_REV_INDEX_SQL =
  'CREATE UNIQUE INDEX IF NOT EXISTS \'by-seq-doc-id-rev\' ON ' +
  BY_SEQ_STORE + ' (doc_id, rev)';
var DOC_STORE_WINNINGSEQ_INDEX_SQL =
  'CREATE INDEX IF NOT EXISTS \'doc-winningseq-idx\' ON ' +
  DOC_STORE + ' (winningseq)';
var ATTACH_AND_SEQ_STORE_SEQ_INDEX_SQL =
  'CREATE INDEX IF NOT EXISTS \'attach-seq-seq-idx\' ON ' +
  ATTACH_AND_SEQ_STORE + ' (seq)';
var ATTACH_AND_SEQ_STORE_ATTACH_INDEX_SQL =
  'CREATE UNIQUE INDEX IF NOT EXISTS \'attach-seq-digest-idx\' ON ' +
  ATTACH_AND_SEQ_STORE + ' (digest, seq)';

var DOC_STORE_AND_BY_SEQ_JOINER = BY_SEQ_STORE +
  '.seq = ' + DOC_STORE + '.winningseq';

var SELECT_DOCS = BY_SEQ_STORE + '.seq AS seq, ' +
  BY_SEQ_STORE + '.deleted AS deleted, ' +
  BY_SEQ_STORE + '.json AS data, ' +
  BY_SEQ_STORE + '.rev AS rev, ' +
  DOC_STORE + '.json AS metadata';
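
// Assuming the default, pre-quoted store names defined earlier in this
// file, SELECT_DOCS expands to roughly:
//   'by-sequence'.seq AS seq, 'by-sequence'.deleted AS deleted,
//   'by-sequence'.json AS data, 'by-sequence'.rev AS rev,
//   'document-store'.json AS metadata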

function WebSqlPouch(opts, callback) {
  var api = this;
  var instanceId = null;
  var size = getSize(opts);
  var idRequests = [];
  var encoding;

  api._docCount = -1; // cache sqlite count(*) for performance
  api._name = opts.name;

  // extend the options here, because the sqlite plugin has a ton of options
  // and they are constantly changing, so it's more prudent to allow anything
  var websqlOpts = jsExtend.extend({}, opts, {size: size, version: POUCH_VERSION});
  var openDBResult = openDB(websqlOpts);
  if (openDBResult.error) {
    return websqlError(callback)(openDBResult.error);
  }
  var db = openDBResult.db;
  if (typeof db.readTransaction !== 'function') {
    // doesn't exist in the sqlite plugin
    db.readTransaction = db.transaction;
  }

  function dbCreated() {
    // note the db name in case the browser upgrades to idb
    if (hasLocalStorage()) {
      window.localStorage['_pouch__websqldb_' + api._name] = true;
    }
    callback(null, api);
  }

  // In this migration, we added the 'deleted' and 'local' columns to the
  // by-seq and doc store tables.
  // To preserve existing user data, we re-process all the existing JSON
  // and add these values.
  // Called migration2 because it corresponds to adapter version (db_version) #2
  function runMigration2(tx, callback) {
    // index used for the join in the allDocs query
    tx.executeSql(DOC_STORE_WINNINGSEQ_INDEX_SQL);

    tx.executeSql('ALTER TABLE ' + BY_SEQ_STORE +
        ' ADD COLUMN deleted TINYINT(1) DEFAULT 0', [], function () {
      tx.executeSql(BY_SEQ_STORE_DELETED_INDEX_SQL);
      tx.executeSql('ALTER TABLE ' + DOC_STORE +
          ' ADD COLUMN local TINYINT(1) DEFAULT 0', [], function () {
        tx.executeSql('CREATE INDEX IF NOT EXISTS \'doc-store-local-idx\' ON ' +
          DOC_STORE + ' (local, id)');

        var sql = 'SELECT ' + DOC_STORE + '.winningseq AS seq, ' + DOC_STORE +
          '.json AS metadata FROM ' + BY_SEQ_STORE + ' JOIN ' + DOC_STORE +
          ' ON ' + BY_SEQ_STORE + '.seq = ' + DOC_STORE + '.winningseq';

        tx.executeSql(sql, [], function (tx, result) {

          var deleted = [];
          var local = [];

          for (var i = 0; i < result.rows.length; i++) {
            var item = result.rows.item(i);
            var seq = item.seq;
            var metadata = JSON.parse(item.metadata);
            if (isDeleted(metadata)) {
              deleted.push(seq);
            }
            if (isLocalId(metadata.id)) {
              local.push(metadata.id);
            }
          }
          tx.executeSql('UPDATE ' + DOC_STORE + ' SET local = 1 WHERE id IN ' +
              qMarks(local.length), local, function () {
            tx.executeSql('UPDATE ' + BY_SEQ_STORE +
                ' SET deleted = 1 WHERE seq IN ' +
                qMarks(deleted.length), deleted, callback);
          });
        });
      });
    });
  }
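
  // For reference: qMarks(n), a helper defined earlier in this file, builds
  // the parenthesized placeholder list used above; e.g. qMarks(3) should
  // yield '(?,?,?)'.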

  // in this migration, we make all the local docs unversioned
  function runMigration3(tx, callback) {
    var local = 'CREATE TABLE IF NOT EXISTS ' + LOCAL_STORE +
      ' (id UNIQUE, rev, json)';
    tx.executeSql(local, [], function () {
      var sql = 'SELECT ' + DOC_STORE + '.id AS id, ' +
        BY_SEQ_STORE + '.json AS data ' +
        'FROM ' + BY_SEQ_STORE + ' JOIN ' +
        DOC_STORE + ' ON ' + BY_SEQ_STORE + '.seq = ' +
        DOC_STORE + '.winningseq WHERE local = 1';
      tx.executeSql(sql, [], function (tx, res) {
        var rows = [];
        for (var i = 0; i < res.rows.length; i++) {
          rows.push(res.rows.item(i));
        }
        function doNext() {
          if (!rows.length) {
            return callback(tx);
          }
          var row = rows.shift();
          var rev = JSON.parse(row.data)._rev;
          tx.executeSql('INSERT INTO ' + LOCAL_STORE +
              ' (id, rev, json) VALUES (?,?,?)',
              [row.id, rev, row.data], function (tx) {
            tx.executeSql('DELETE FROM ' + DOC_STORE + ' WHERE id=?',
                [row.id], function (tx) {
              tx.executeSql('DELETE FROM ' + BY_SEQ_STORE + ' WHERE seq=?',
                  [row.seq], function () {
                doNext();
              });
            });
          });
        }
        doNext();
      });
    });
  }

  // in this migration, we remove doc_id_rev and just use rev
  function runMigration4(tx, callback) {

    function updateRows(rows) {
      function doNext() {
        if (!rows.length) {
          return callback(tx);
        }
        var row = rows.shift();
        var doc_id_rev = parseHexString(row.hex, encoding);
        var idx = doc_id_rev.lastIndexOf('::');
        var doc_id = doc_id_rev.substring(0, idx);
        var rev = doc_id_rev.substring(idx + 2);
        var sql = 'UPDATE ' + BY_SEQ_STORE +
          ' SET doc_id=?, rev=? WHERE doc_id_rev=?';
        tx.executeSql(sql, [doc_id, rev, doc_id_rev], function () {
          doNext();
        });
      }
      doNext();
    }

    var sql = 'ALTER TABLE ' + BY_SEQ_STORE + ' ADD COLUMN doc_id';
    tx.executeSql(sql, [], function (tx) {
      var sql = 'ALTER TABLE ' + BY_SEQ_STORE + ' ADD COLUMN rev';
      tx.executeSql(sql, [], function (tx) {
        tx.executeSql(BY_SEQ_STORE_DOC_ID_REV_INDEX_SQL, [], function (tx) {
          var sql = 'SELECT hex(doc_id_rev) as hex FROM ' + BY_SEQ_STORE;
          tx.executeSql(sql, [], function (tx, res) {
            var rows = [];
            for (var i = 0; i < res.rows.length; i++) {
              rows.push(res.rows.item(i));
            }
            updateRows(rows);
          });
        });
      });
    });
  }
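
  // Example of the legacy key format split above: a doc_id_rev of
  // 'mydoc::2-9f3a' yields doc_id 'mydoc' and rev '2-9f3a'; using
  // lastIndexOf keeps doc ids that themselves contain '::' intact.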

  // in this migration, we add the attach_and_seq table
  // for issue #2818
  function runMigration5(tx, callback) {

    function migrateAttsAndSeqs(tx) {
      // need to actually populate the table. this is the expensive part,
      // so as an optimization, check first that this database even
      // contains attachments
      var sql = 'SELECT COUNT(*) AS cnt FROM ' + ATTACH_STORE;
      tx.executeSql(sql, [], function (tx, res) {
        var count = res.rows.item(0).cnt;
        if (!count) {
          return callback(tx);
        }

        var offset = 0;
        var pageSize = 10;
        function nextPage() {
          var sql = select(
            SELECT_DOCS + ', ' + DOC_STORE + '.id AS id',
            [DOC_STORE, BY_SEQ_STORE],
            DOC_STORE_AND_BY_SEQ_JOINER,
            null,
            DOC_STORE + '.id ');
          sql += ' LIMIT ' + pageSize + ' OFFSET ' + offset;
          offset += pageSize;
          tx.executeSql(sql, [], function (tx, res) {
            if (!res.rows.length) {
              return callback(tx);
            }
            var digestSeqs = {};
            function addDigestSeq(digest, seq) {
              // uniq digest/seq pairs, just in case there are dups
              var seqs = digestSeqs[digest] = (digestSeqs[digest] || []);
              if (seqs.indexOf(seq) === -1) {
                seqs.push(seq);
              }
            }
            for (var i = 0; i < res.rows.length; i++) {
              var row = res.rows.item(i);
              var doc = unstringifyDoc(row.data, row.id, row.rev);
              var atts = Object.keys(doc._attachments || {});
              for (var j = 0; j < atts.length; j++) {
                var att = doc._attachments[atts[j]];
                addDigestSeq(att.digest, row.seq);
              }
            }
            var digestSeqPairs = [];
            Object.keys(digestSeqs).forEach(function (digest) {
              var seqs = digestSeqs[digest];
              seqs.forEach(function (seq) {
                digestSeqPairs.push([digest, seq]);
              });
            });
            if (!digestSeqPairs.length) {
              return nextPage();
            }
            var numDone = 0;
            digestSeqPairs.forEach(function (pair) {
              var sql = 'INSERT INTO ' + ATTACH_AND_SEQ_STORE +
                ' (digest, seq) VALUES (?,?)';
              tx.executeSql(sql, pair, function () {
                if (++numDone === digestSeqPairs.length) {
                  nextPage();
                }
              });
            });
          });
        }
        nextPage();
      });
    }

    var attachAndRev = 'CREATE TABLE IF NOT EXISTS ' +
      ATTACH_AND_SEQ_STORE + ' (digest, seq INTEGER)';
    tx.executeSql(attachAndRev, [], function (tx) {
      tx.executeSql(
        ATTACH_AND_SEQ_STORE_ATTACH_INDEX_SQL, [], function (tx) {
          tx.executeSql(
            ATTACH_AND_SEQ_STORE_SEQ_INDEX_SQL, [],
            migrateAttsAndSeqs);
        });
    });
  }

  // in this migration, we use escapeBlob() and unescapeBlob()
  // instead of reading out the binary as HEX, which is slow
  function runMigration6(tx, callback) {
    var sql = 'ALTER TABLE ' + ATTACH_STORE +
      ' ADD COLUMN escaped TINYINT(1) DEFAULT 0';
    tx.executeSql(sql, [], callback);
  }

  // issue #3136, in this migration we need a "latest seq" as well
  // as the "winning seq" in the doc store
  function runMigration7(tx, callback) {
    var sql = 'ALTER TABLE ' + DOC_STORE +
      ' ADD COLUMN max_seq INTEGER';
    tx.executeSql(sql, [], function (tx) {
      var sql = 'UPDATE ' + DOC_STORE + ' SET max_seq=(SELECT MAX(seq) FROM ' +
        BY_SEQ_STORE + ' WHERE doc_id=id)';
      tx.executeSql(sql, [], function (tx) {
        // add the unique index only after backfilling max_seq; creating it
        // earlier would risk a constraint error
        var sql =
          'CREATE UNIQUE INDEX IF NOT EXISTS \'doc-max-seq-idx\' ON ' +
          DOC_STORE + ' (max_seq)';
        tx.executeSql(sql, [], callback);
      });
    });
  }
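
  // Note: in the backfill UPDATE above, the unqualified `id` inside the
  // subselect resolves to the outer doc-store row's id (a correlated
  // subquery), so each document gets the highest seq recorded for it.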

  function checkEncoding(tx, cb) {
    // UTF-8 on chrome/android, UTF-16 on safari < 7.1
    tx.executeSql('SELECT HEX("a") AS hex', [], function (tx, res) {
      var hex = res.rows.item(0).hex;
      encoding = hex.length === 2 ? 'UTF-8' : 'UTF-16';
      cb();
    });
  }
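
  // e.g. HEX("a") returns two hex digits ('61') when the database stores
  // text as UTF-8, and four when it stores UTF-16, hence the length check.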

  function onGetInstanceId() {
    while (idRequests.length > 0) {
      var idCallback = idRequests.pop();
      idCallback(null, instanceId);
    }
  }

  function onGetVersion(tx, dbVersion) {
    if (dbVersion === 0) {
      // initial schema

      var meta = 'CREATE TABLE IF NOT EXISTS ' + META_STORE +
        ' (dbid, db_version INTEGER)';
      var attach = 'CREATE TABLE IF NOT EXISTS ' + ATTACH_STORE +
        ' (digest UNIQUE, escaped TINYINT(1), body BLOB)';
      var attachAndRev = 'CREATE TABLE IF NOT EXISTS ' +
        ATTACH_AND_SEQ_STORE + ' (digest, seq INTEGER)';
      // TODO: migrate winningseq to INTEGER
      var doc = 'CREATE TABLE IF NOT EXISTS ' + DOC_STORE +
        ' (id unique, json, winningseq, max_seq INTEGER UNIQUE)';
      var seq = 'CREATE TABLE IF NOT EXISTS ' + BY_SEQ_STORE +
        ' (seq INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, ' +
        'json, deleted TINYINT(1), doc_id, rev)';
      var local = 'CREATE TABLE IF NOT EXISTS ' + LOCAL_STORE +
        ' (id UNIQUE, rev, json)';

      // creates
      tx.executeSql(attach);
      tx.executeSql(local);
      tx.executeSql(attachAndRev, [], function () {
        tx.executeSql(ATTACH_AND_SEQ_STORE_SEQ_INDEX_SQL);
        tx.executeSql(ATTACH_AND_SEQ_STORE_ATTACH_INDEX_SQL);
      });
      tx.executeSql(doc, [], function () {
        tx.executeSql(DOC_STORE_WINNINGSEQ_INDEX_SQL);
        tx.executeSql(seq, [], function () {
          tx.executeSql(BY_SEQ_STORE_DELETED_INDEX_SQL);
          tx.executeSql(BY_SEQ_STORE_DOC_ID_REV_INDEX_SQL);
          tx.executeSql(meta, [], function () {
            // mark the db version, and new dbid
            var initSeq = 'INSERT INTO ' + META_STORE +
              ' (db_version, dbid) VALUES (?,?)';
            instanceId = uuid();
            var initSeqArgs = [ADAPTER_VERSION, instanceId];
            tx.executeSql(initSeq, initSeqArgs, function () {
              onGetInstanceId();
            });
          });
        });
      });
    } else { // version > 0

      var setupDone = function () {
        var migrated = dbVersion < ADAPTER_VERSION;
        if (migrated) {
          // update the db version within this transaction
          tx.executeSql('UPDATE ' + META_STORE + ' SET db_version = ' +
            ADAPTER_VERSION);
        }
        // notify db.id() callers
        var sql = 'SELECT dbid FROM ' + META_STORE;
        tx.executeSql(sql, [], function (tx, result) {
          instanceId = result.rows.item(0).dbid;
          onGetInstanceId();
        });
      };

      // would love to use promises here, but then websql
      // ends the transaction early
      var tasks = [
        runMigration2,
        runMigration3,
        runMigration4,
        runMigration5,
        runMigration6,
        runMigration7,
        setupDone
      ];

      // run each migration sequentially
      var i = dbVersion;
      var nextMigration = function (tx) {
        tasks[i - 1](tx, nextMigration);
        i++;
      };
      nextMigration(tx);
    }
  }
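
  // e.g. a database found at db_version 3 starts at tasks[2]
  // (runMigration4) and walks forward until setupDone stamps
  // ADAPTER_VERSION into the meta store.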

  function setup() {
    db.transaction(function (tx) {
      // first check the encoding
      checkEncoding(tx, function () {
        // then get the version
        fetchVersion(tx);
      });
    }, websqlError(callback), dbCreated);
  }

  function fetchVersion(tx) {
    var sql = 'SELECT sql FROM sqlite_master WHERE tbl_name = ' + META_STORE;
    tx.executeSql(sql, [], function (tx, result) {
      if (!result.rows.length) {
        // database hasn't even been created yet (version 0)
        onGetVersion(tx, 0);
      } else if (!/db_version/.test(result.rows.item(0).sql)) {
        // table was created, but without the new db_version column,
        // so add it.
        tx.executeSql('ALTER TABLE ' + META_STORE +
            ' ADD COLUMN db_version INTEGER', [], function () {
          // before version 2, this column didn't even exist
          onGetVersion(tx, 1);
        });
      } else { // column exists, we can safely get it
        tx.executeSql('SELECT db_version FROM ' + META_STORE,
            [], function (tx, result) {
          var dbVersion = result.rows.item(0).db_version;
          onGetVersion(tx, dbVersion);
        });
      }
    });
  }

  setup();

  api.type = function () {
    return 'websql';
  };

  api._id = toPromise(function (callback) {
    callback(null, instanceId);
  });

  api._info = function (callback) {
    db.readTransaction(function (tx) {
      countDocs(tx, function (docCount) {
        var sql = 'SELECT MAX(seq) AS seq FROM ' + BY_SEQ_STORE;
        tx.executeSql(sql, [], function (tx, res) {
          var updateSeq = res.rows.item(0).seq || 0;
          callback(null, {
            doc_count: docCount,
            update_seq: updateSeq,
            // for debugging
            sqlite_plugin: db._sqlitePlugin,
            websql_encoding: encoding
          });
        });
      });
    }, websqlError(callback));
  };

  api._bulkDocs = function (req, reqOpts, callback) {
    websqlBulkDocs(opts, req, reqOpts, api, db, websqlChanges, callback);
  };

  api._get = function (id, opts, callback) {
    var doc;
    var metadata;
    var err;
    var tx = opts.ctx;
    if (!tx) {
      return db.readTransaction(function (txn) {
        api._get(id, jsExtend.extend({ctx: txn}, opts), callback);
      });
    }

    function finish() {
      callback(err, {doc: doc, metadata: metadata, ctx: tx});
    }

    var sql;
    var sqlArgs;
    if (opts.rev) {
      sql = select(
        SELECT_DOCS,
        [DOC_STORE, BY_SEQ_STORE],
        DOC_STORE + '.id=' + BY_SEQ_STORE + '.doc_id',
        [BY_SEQ_STORE + '.doc_id=?', BY_SEQ_STORE + '.rev=?']);
      sqlArgs = [id, opts.rev];
    } else {
      sql = select(
        SELECT_DOCS,
        [DOC_STORE, BY_SEQ_STORE],
        DOC_STORE_AND_BY_SEQ_JOINER,
        DOC_STORE + '.id=?');
      sqlArgs = [id];
    }
    tx.executeSql(sql, sqlArgs, function (a, results) {
      if (!results.rows.length) {
        err = createError(MISSING_DOC, 'missing');
        return finish();
      }
      var item = results.rows.item(0);
      metadata = safeJsonParse(item.metadata);
      if (item.deleted && !opts.rev) {
        err = createError(MISSING_DOC, 'deleted');
        return finish();
      }
      doc = unstringifyDoc(item.data, metadata.id, item.rev);
      finish();
    });
  };

  function countDocs(tx, callback) {

    if (api._docCount !== -1) {
      return callback(api._docCount);
    }

    // count the total rows
    var sql = select(
      'COUNT(' + DOC_STORE + '.id) AS \'num\'',
      [DOC_STORE, BY_SEQ_STORE],
      DOC_STORE_AND_BY_SEQ_JOINER,
      BY_SEQ_STORE + '.deleted=0');

    tx.executeSql(sql, [], function (tx, result) {
      api._docCount = result.rows.item(0).num;
      callback(api._docCount);
    });
  }

  api._allDocs = function (opts, callback) {
    var results = [];
    var totalRows;

    var start = 'startkey' in opts ? opts.startkey : false;
    var end = 'endkey' in opts ? opts.endkey : false;
    var key = 'key' in opts ? opts.key : false;
    var descending = 'descending' in opts ? opts.descending : false;
    var limit = 'limit' in opts ? opts.limit : -1;
    var offset = 'skip' in opts ? opts.skip : 0;
    var inclusiveEnd = opts.inclusive_end !== false;

    var sqlArgs = [];
    var criteria = [];

    if (key !== false) {
      criteria.push(DOC_STORE + '.id = ?');
      sqlArgs.push(key);
    } else if (start !== false || end !== false) {
      if (start !== false) {
        criteria.push(DOC_STORE + '.id ' + (descending ? '<=' : '>=') + ' ?');
        sqlArgs.push(start);
      }
      if (end !== false) {
        var comparator = descending ? '>' : '<';
        if (inclusiveEnd) {
          comparator += '=';
        }
        criteria.push(DOC_STORE + '.id ' + comparator + ' ?');
        sqlArgs.push(end);
      }
      if (key !== false) {
        criteria.push(DOC_STORE + '.id = ?');
        sqlArgs.push(key);
      }
    }

    if (opts.deleted !== 'ok') {
      // only report deleted docs when the caller opts in with
      // {deleted: 'ok'} (as allDocs does when keys are specified)
      criteria.push(BY_SEQ_STORE + '.deleted = 0');
    }

    db.readTransaction(function (tx) {

      // first count up the total rows
      countDocs(tx, function (count) {
        totalRows = count;

        if (limit === 0) {
          return;
        }

        // then actually fetch the documents
        var sql = select(
          SELECT_DOCS,
          [DOC_STORE, BY_SEQ_STORE],
          DOC_STORE_AND_BY_SEQ_JOINER,
          criteria,
          DOC_STORE + '.id ' + (descending ? 'DESC' : 'ASC')
        );
        sql += ' LIMIT ' + limit + ' OFFSET ' + offset;

        tx.executeSql(sql, sqlArgs, function (tx, result) {
          for (var i = 0, l = result.rows.length; i < l; i++) {
            var item = result.rows.item(i);
            var metadata = safeJsonParse(item.metadata);
            var id = metadata.id;
            var data = unstringifyDoc(item.data, id, item.rev);
            var winningRev = data._rev;
            var doc = {
              id: id,
              key: id,
              value: {rev: winningRev}
            };
            if (opts.include_docs) {
              doc.doc = data;
              doc.doc._rev = winningRev;
              if (opts.conflicts) {
                doc.doc._conflicts = collectConflicts(metadata);
              }
              fetchAttachmentsIfNecessary(doc.doc, opts, api, tx);
            }
            if (item.deleted) {
              if (opts.deleted === 'ok') {
                doc.value.deleted = true;
                doc.doc = null;
              } else {
                continue;
              }
            }
            results.push(doc);
          }
        });
      });
    }, websqlError(callback), function () {
      callback(null, {
        total_rows: totalRows,
        offset: opts.skip,
        rows: results
      });
    });
  };
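
  // For illustration: {startkey: 'a', endkey: 'b'} builds criteria like
  //   id >= ? AND id <= ? AND deleted = 0
  // and, with no limit/skip given, the query ends in LIMIT -1 OFFSET 0
  // (a negative LIMIT means "no limit" in SQLite).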

  api._changes = function (opts) {
    opts = clone(opts);

    if (opts.continuous) {
      var id = api._name + ':' + uuid();
      websqlChanges.addListener(api._name, id, api, opts);
      websqlChanges.notify(api._name);
      return {
        cancel: function () {
          websqlChanges.removeListener(api._name, id);
        }
      };
    }

    var descending = opts.descending;

    // Ignore the `since` parameter when `descending` is true
    opts.since = opts.since && !descending ? opts.since : 0;

    var limit = 'limit' in opts ? opts.limit : -1;
    if (limit === 0) {
      limit = 1; // per CouchDB _changes spec
    }

    var returnDocs;
    if ('return_docs' in opts) {
      returnDocs = opts.return_docs;
    } else if ('returnDocs' in opts) {
      // TODO: Remove 'returnDocs' in favor of 'return_docs' in a future release
      returnDocs = opts.returnDocs;
    } else {
      returnDocs = true;
    }
    var results = [];
    var numResults = 0;

    function fetchChanges() {

      var selectStmt =
        DOC_STORE + '.json AS metadata, ' +
        DOC_STORE + '.max_seq AS maxSeq, ' +
        BY_SEQ_STORE + '.json AS winningDoc, ' +
        BY_SEQ_STORE + '.rev AS winningRev ';

      var from = DOC_STORE + ' JOIN ' + BY_SEQ_STORE;

      var joiner = DOC_STORE + '.id=' + BY_SEQ_STORE + '.doc_id' +
        ' AND ' + DOC_STORE + '.winningseq=' + BY_SEQ_STORE + '.seq';

      var criteria = ['maxSeq > ?'];
      var sqlArgs = [opts.since];

      if (opts.doc_ids) {
        criteria.push(DOC_STORE + '.id IN ' + qMarks(opts.doc_ids.length));
        sqlArgs = sqlArgs.concat(opts.doc_ids);
      }

      var orderBy = 'maxSeq ' + (descending ? 'DESC' : 'ASC');

      var sql = select(selectStmt, from, joiner, criteria, orderBy);

      var filter = filterChange(opts);
      if (!opts.view && !opts.filter) {
        // we can just limit in the query
        sql += ' LIMIT ' + limit;
      }

      var lastSeq = opts.since || 0;
      db.readTransaction(function (tx) {
        tx.executeSql(sql, sqlArgs, function (tx, result) {
          function reportChange(change) {
            return function () {
              opts.onChange(change);
            };
          }
          for (var i = 0, l = result.rows.length; i < l; i++) {
            var item = result.rows.item(i);
            var metadata = safeJsonParse(item.metadata);
            lastSeq = item.maxSeq;

            var doc = unstringifyDoc(item.winningDoc, metadata.id,
              item.winningRev);
            var change = opts.processChange(doc, metadata, opts);
            change.seq = item.maxSeq;

            var filtered = filter(change);
            if (typeof filtered === 'object') {
              return opts.complete(filtered);
            }

            if (filtered) {
              numResults++;
              if (returnDocs) {
                results.push(change);
              }
              // process the attachment immediately
              // for the benefit of live listeners
              if (opts.attachments && opts.include_docs) {
                fetchAttachmentsIfNecessary(doc, opts, api, tx,
                  reportChange(change));
              } else {
                reportChange(change)();
              }
            }
            if (numResults === limit) {
              break;
            }
          }
        });
      }, websqlError(opts.complete), function () {
        if (!opts.continuous) {
          opts.complete(null, {
            results: results,
            last_seq: lastSeq
          });
        }
      });
    }

    fetchChanges();
  };

  api._close = function (callback) {
    // WebSQL databases do not need to be closed
    callback();
  };

  api._getAttachment = function (attachment, opts, callback) {
    var res;
    var tx = opts.ctx;
    var digest = attachment.digest;
    var type = attachment.content_type;
    var sql = 'SELECT escaped, ' +
      'CASE WHEN escaped = 1 THEN body ELSE HEX(body) END AS body FROM ' +
      ATTACH_STORE + ' WHERE digest=?';
    tx.executeSql(sql, [digest], function (tx, result) {
      // websql has a bug where \u0000 causes early truncation in strings
      // and blobs. to work around this, we used to use the hex() function,
      // but that's not performant. after migration 6, we remove \u0000
      // and add it back in afterwards
      var item = result.rows.item(0);
      var data = item.escaped ? unescapeBlob(item.body) :
        parseHexString(item.body, encoding);
      if (opts.binary) {
        res = binStringToBluffer(data, type);
      } else {
        res = btoa(data);
      }
      callback(null, res);
    });
  };
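
  // Callers get a base64 string by default; passing {binary: true} returns
  // a Blob in the browser or a Buffer in Node (binStringToBluffer handles
  // both, hence the name).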

  api._getRevisionTree = function (docId, callback) {
    db.readTransaction(function (tx) {
      var sql = 'SELECT json AS metadata FROM ' + DOC_STORE + ' WHERE id = ?';
      tx.executeSql(sql, [docId], function (tx, result) {
        if (!result.rows.length) {
          callback(createError(MISSING_DOC));
        } else {
          var data = safeJsonParse(result.rows.item(0).metadata);
          callback(null, data.rev_tree);
        }
      });
    });
  };

  api._doCompaction = function (docId, revs, callback) {
    if (!revs.length) {
      return callback();
    }
    db.transaction(function (tx) {

      // update doc store
      var sql = 'SELECT json AS metadata FROM ' + DOC_STORE + ' WHERE id = ?';
      tx.executeSql(sql, [docId], function (tx, result) {
        var metadata = safeJsonParse(result.rows.item(0).metadata);
        traverseRevTree(metadata.rev_tree, function (isLeaf, pos,
                                                     revHash, ctx, opts) {
          var rev = pos + '-' + revHash;
          if (revs.indexOf(rev) !== -1) {
            opts.status = 'missing';
          }
        });

        var sql = 'UPDATE ' + DOC_STORE + ' SET json = ? WHERE id = ?';
        tx.executeSql(sql, [safeJsonStringify(metadata), docId]);
      });

      compactRevs(revs, docId, tx);
    }, websqlError(callback), function () {
      callback();
    });
  };

  api._getLocal = function (id, callback) {
    db.readTransaction(function (tx) {
      var sql = 'SELECT json, rev FROM ' + LOCAL_STORE + ' WHERE id=?';
      tx.executeSql(sql, [id], function (tx, res) {
        if (res.rows.length) {
          var item = res.rows.item(0);
          var doc = unstringifyDoc(item.json, id, item.rev);
          callback(null, doc);
        } else {
          callback(createError(MISSING_DOC));
        }
      });
    });
  };

  api._putLocal = function (doc, opts, callback) {
    if (typeof opts === 'function') {
      callback = opts;
      opts = {};
    }
    delete doc._revisions; // ignore this, trust the rev
    var oldRev = doc._rev;
    var id = doc._id;
    var newRev;
    if (!oldRev) {
      newRev = doc._rev = '0-1';
    } else {
      newRev = doc._rev = '0-' + (parseInt(oldRev.split('-')[1], 10) + 1);
    }
    var json = stringifyDoc(doc);

    var ret;
    function putLocal(tx) {
      var sql;
      var values;
      if (oldRev) {
        sql = 'UPDATE ' + LOCAL_STORE + ' SET rev=?, json=? ' +
          'WHERE id=? AND rev=?';
        values = [newRev, json, id, oldRev];
      } else {
        sql = 'INSERT INTO ' + LOCAL_STORE + ' (id, rev, json) VALUES (?,?,?)';
        values = [id, newRev, json];
      }
      tx.executeSql(sql, values, function (tx, res) {
        if (res.rowsAffected) {
          ret = {ok: true, id: id, rev: newRev};
          if (opts.ctx) { // return immediately
            callback(null, ret);
          }
        } else {
          callback(createError(REV_CONFLICT));
        }
      }, function () {
        callback(createError(REV_CONFLICT));
        return false; // ack that we handled the error
      });
    }

    if (opts.ctx) {
      putLocal(opts.ctx);
    } else {
      db.transaction(putLocal, websqlError(callback), function () {
        if (ret) {
          callback(null, ret);
        }
      });
    }
  };
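
  // Local docs use a simple integer "rev" that only this adapter
  // interprets: a fresh _local doc gets '0-1', and each successful update
  // bumps it, e.g. '0-1' -> '0-2' -> '0-3'.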

  api._removeLocal = function (doc, opts, callback) {
    if (typeof opts === 'function') {
      callback = opts;
      opts = {};
    }
    var ret;

    function removeLocal(tx) {
      var sql = 'DELETE FROM ' + LOCAL_STORE + ' WHERE id=? AND rev=?';
      var params = [doc._id, doc._rev];
      tx.executeSql(sql, params, function (tx, res) {
        if (!res.rowsAffected) {
          return callback(createError(MISSING_DOC));
        }
        ret = {ok: true, id: doc._id, rev: '0-0'};
        if (opts.ctx) { // return immediately
          callback(null, ret);
        }
      });
    }

    if (opts.ctx) {
      removeLocal(opts.ctx);
    } else {
      db.transaction(removeLocal, websqlError(callback), function () {
        if (ret) {
          callback(null, ret);
        }
      });
    }
  };

  api._destroy = function (opts, callback) {
    websqlChanges.removeAllListeners(api._name);
    db.transaction(function (tx) {
      var stores = [DOC_STORE, BY_SEQ_STORE, ATTACH_STORE, META_STORE,
        LOCAL_STORE, ATTACH_AND_SEQ_STORE];
      stores.forEach(function (store) {
        tx.executeSql('DROP TABLE IF EXISTS ' + store, []);
      });
    }, websqlError(callback), function () {
      if (hasLocalStorage()) {
        delete window.localStorage['_pouch__websqldb_' + api._name];
        delete window.localStorage[api._name];
      }
      callback(null, {'ok': true});
    });
  };
}

// in the browser, use a prefix. in Node, don't bother having one
WebSqlPouch.use_prefix = !!(typeof process === 'undefined' || process.browser);

WebSqlPouch.valid = valid;

module.exports = WebSqlPouch;

/* jshint ignore:end */