// Infrastructure shared by interleaved-cursors-{small,large}.html
// Number of objects that each cursor iterates over.
const itemCount = 10;
// One out of every `largeObjectRatio` objects (by overall object index) is a
// large object; the rest are small marker arrays. See objectValue().
const largeObjectRatio = 5;
// Size of large objects, in bytes. This should exceed the size of a block in
// the storage method underlying the browser's IndexedDB implementation. For
// example, this needs to exceed the LevelDB block size on Chrome, and the
// SQLite block size on Firefox.
const largeObjectSize = 48 * 1024;
// Builds the IndexedDB key for the itemIndex-th object in cursorIndex's
// sequence, e.g. objectKey(2, 7) === '2-key-7'.
function objectKey(cursorIndex, itemIndex) {
  return [cursorIndex, 'key', itemIndex].join('-');
}
// Builds the value stored for the itemIndex-th object in cursorIndex's
// sequence. Every largeObjectRatio-th object (by overall index) is a large
// incompressible Uint8Array; all other objects are small marker arrays.
function objectValue(cursorIndex, itemIndex) {
  const globalIndex = cursorIndex * itemCount + itemIndex;
  if (globalIndex % largeObjectRatio !== 0)
    return [cursorIndex, 'small', itemIndex];

  // We use a typed array (as opposed to a string) because IndexedDB
  // implementations may serialize strings using UTF-8 or UTF-16, yielding
  // larger IndexedDB entries than we'd expect. It's very unlikely that an
  // IndexedDB implementation would use anything other than the raw buffer to
  // serialize a typed array.
  const bytes = new Uint8Array(largeObjectSize);

  // Some IndexedDB implementations, like LevelDB, compress their data blocks
  // before storing them to disk. We fill the buffer from a 32-bit xorshift
  // PRNG, which should be sufficient to foil any fast general-purpose
  // compression scheme. The seed must not be zero.
  // NOTE(review): the middle step uses `>>` (sign-propagating shift) rather
  // than the canonical xorshift32 `>>>`; the output is still deterministic
  // and incompressible, which is all this test needs.
  let seed = 1000 + globalIndex;
  for (let offset = 0; offset < largeObjectSize; ++offset) {
    seed ^= seed << 13;
    seed ^= seed >> 17;
    seed ^= seed << 5;
    bytes[offset] = seed & 0xff;
  }
  return bytes;
}
// Writes the objects to be read by one cursor. Returns a promise that resolves
// when the write completes.
//
// We want to avoid creating a large transaction, because that is outside the
// test's scope, and it's a bad practice. So we break up the writes across
// multiple transactions. For simplicity, each transaction writes all the
// objects that will be read by a cursor.
function writeCursorObjects(database, cursorIndex) {
  return new Promise((resolve, reject) => {
    const transaction = database.transaction('cache', 'readwrite');
    transaction.onabort = () => { reject(transaction.error); };
    transaction.oncomplete = resolve;

    const store = transaction.objectStore('cache');
    let itemIndex = 0;
    while (itemIndex < itemCount) {
      const record = {
        key: objectKey(cursorIndex, itemIndex),
        value: objectValue(cursorIndex, itemIndex),
      };
      store.put(record);
      ++itemIndex;
    }
  });
}
// Returns a promise that resolves when the store has been populated with the
// objects for all cursorCount cursors. Writes run sequentially, one
// transaction per cursor (see writeCursorObjects).
function populateTestStore(testCase, database, cursorCount) {
  let chain = Promise.resolve();
  for (let cursorIndex = 0; cursorIndex < cursorCount; ++cursorIndex) {
    chain = chain.then(() => writeCursorObjects(database, cursorIndex));
  }
  return chain;
}
// Reads cursors in an interleaved fashion, as shown below.
//
// Given N cursors, each of which points to the beginning of a K-item sequence,
// the following accesses will be made.
//
// OC(i) = open cursor i
// RD(i, j) = read result of cursor i, which should be at item j
// CC(i) = continue cursor i
// | = wait for onsuccess on the previous OC or CC
//
// OC(1) | RD(1, 1) OC(2) | RD(2, 1) OC(3) | ... | RD(n-1, 1) CC(n) |
// RD(n, 1) CC(1) | RD(1, 2) CC(2) | RD(2, 2) CC(3) | ... | RD(n-1, 2) CC(n) |
// RD(n, 2) CC(1) | RD(1, 3) CC(2) | RD(2, 3) CC(3) | ... | RD(n-1, 3) CC(n) |
// ...
// RD(n, k-1) CC(1) | RD(1, k) CC(2) | RD(2, k) CC(3) | ... | RD(n-1, k) CC(n) |
// RD(n, k) done
function interleaveCursors(testCase, store, cursorCount) {
  return new Promise((resolve, reject) => {
    // Live IDBCursor for each cursor index, refreshed on every onsuccess, so
    // that each step can call continue() on the next cursor.
    const liveCursors = [];
    // The IDBRequest behind each cursor, kept so each step can install a new
    // onsuccess handler before the following IDBCursor.continue() call.
    const cursorRequests = [];

    // Asserts that cursorIndex's cursor is positioned on item itemIndex.
    const verifyCursorPosition = (cursorIndex, itemIndex) => {
      const cursor = liveCursors[cursorIndex];
      const expectedKey = objectKey(cursorIndex, itemIndex);
      assert_equals(cursor.key, expectedKey);
      assert_equals(cursor.value.key, expectedKey);
      assert_equals(
          cursor.value.value.join('-'),
          objectValue(cursorIndex, itemIndex).join('-'));
    };

    // Opens cursorIndex's cursor at the start of its key range, then invokes
    // callback once the cursor has delivered its first item.
    const openCursor = (cursorIndex, callback) => {
      const range = IDBKeyRange.lowerBound(objectKey(cursorIndex, 0));
      const request = store.openCursor(range);
      cursorRequests[cursorIndex] = request;
      request.onsuccess = testCase.step_func(() => {
        liveCursors[cursorIndex] = request.result;
        verifyCursorPosition(cursorIndex, 0);
        callback();
      });
      request.onerror = () => reject(request.error);
    };

    // Advances cursorIndex's cursor, then invokes callback once the cursor
    // has delivered item itemIndex. The new onsuccess handler must be in
    // place before continue() is called.
    const readItemFromCursor = (cursorIndex, itemIndex, callback) => {
      const request = cursorRequests[cursorIndex];
      request.onsuccess = testCase.step_func(() => {
        liveCursors[cursorIndex] = request.result;
        verifyCursorPosition(cursorIndex, itemIndex);
        callback();
      });
      liveCursors[cursorIndex].continue();
    };

    // We open all the cursors one at a time, then cycle through the cursors
    // and call continue() on each of them. This access pattern causes maximal
    // trashing to an LRU cursor cache. Eviction scheme aside, any cache will
    // have to evict some cursors, and this access pattern verifies that the
    // cache correctly restores the state of evicted cursors.
    const steps = [];
    for (let cursorIndex = 0; cursorIndex < cursorCount; ++cursorIndex) {
      steps.push((done) => openCursor(cursorIndex, done));
    }
    for (let itemIndex = 1; itemIndex < itemCount; ++itemIndex) {
      for (let cursorIndex = 0; cursorIndex < cursorCount; ++cursorIndex) {
        steps.push((done) => readItemFromCursor(cursorIndex, itemIndex, done));
      }
    }

    // Runs steps sequentially; each step signals completion via its callback.
    const runStep = (stepIndex) => {
      if (stepIndex === steps.length) {
        resolve();
        return;
      }
      steps[stepIndex](() => runStep(stepIndex + 1));
    };
    runStep(0);
  });
}
// Registers a promise_test that populates the store with cursorCount cursors'
// worth of objects, reopens the database, and reads all cursors interleaved.
function cursorTest(cursorCount) {
  promise_test(testCase => {
    return createDatabase(testCase, (database, transaction) => {
      database.createObjectStore(
          'cache', { keyPath: 'key', autoIncrement: true });
    }).then(database => {
      return populateTestStore(testCase, database, cursorCount).then(
          () => database);
    }).then(database => {
      // Close and reopen so the reads below hit a fresh connection rather
      // than any state cached by the writing connection.
      database.close();
    }).then(() => {
      return openDatabase(testCase);
    }).then(database => {
      // Bug fix: the abort handler previously called a `reject` identifier
      // that was not in scope, so an aborted transaction would throw a
      // ReferenceError instead of failing the test with transaction.error.
      // Wrapping the read phase in a Promise gives the handler a real reject.
      return new Promise((resolve, reject) => {
        const transaction = database.transaction('cache', 'readonly');
        transaction.onabort = () => { reject(transaction.error); };
        const store = transaction.objectStore('cache');
        interleaveCursors(testCase, store, cursorCount).then(
            () => resolve(database), reject);
      });
    }).then(database => {
      database.close();
    });
  }, `${cursorCount} cursors`);
}