/**
 * This test file tests our automatic recovery and any related mitigating
 * heuristics that occur during an intercepted navigation fetch request.
 * Specifically, we should be resetting interception so that we go to the
 * network in these cases and then potentially taking actions like
 * unregistering the ServiceWorker and/or clearing QuotaManager-managed
 * storage for the origin.
 *
 * See the specific test permutations for details inline in the test.
 *
 * NOTE THAT CURRENTLY THIS TEST IS DISCUSSING MITIGATIONS THAT ARE NOT YET
 * IMPLEMENTED, JUST PLANNED. These will be iterated on and added to the rest
 * of the stack of patches on Bug 1503072.
 *
 * ## Test Mechanics
 *
 * ### Fetch Fault Injection
 *
 * We expose:
 * - On nsIServiceWorkerInfo, the per-ServiceWorker XPCOM interface:
 *   - A mechanism for creating synthetic faults by setting the
 *     `nsIServiceWorkerInfo::testingInjectCancellation` attribute to a
 *     failing nsresult. The fault is applied at the beginning of the steps
 *     to dispatch the fetch event on the global.
 *   - A count of the number of times we experienced these navigation faults
 *     that had to be reset, as `nsIServiceWorkerInfo::navigationFaultCount`.
 *     (This would also include real faults, but we only expect to see
 *     synthetic faults in this test.)
 * - On nsIServiceWorkerRegistrationInfo, the per-registration XPCOM
 *   interface:
 *   - A readonly attribute that indicates how many times an origin storage
 *     usage check has been initiated.
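 *
 *   A minimal sketch of how these hooks are driven (assuming, as in the code
 *   below, that `reg` is an nsIServiceWorkerRegistrationInfo and `sw` is its
 *   active nsIServiceWorkerInfo):
 *
 *     sw.testingInjectCancellation = Cr.NS_ERROR_FAILURE; // any failing nsresult
 *     // ... trigger an intercepted navigation; it falls back to the network ...
 *     is(sw.navigationFaultCount, 1, "one navigation fault was recorded");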
 *
 * We also use:
 * - `nsIServiceWorkerManager::addListener(nsIServiceWorkerManagerListener)`,
 *   which allows our test to listen for the unregistration of registrations.
 *   This lets us be notified when unregistering or origin-clearing actions
 *   have been taken as a mitigation.
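 *
 *   As a sketch (assuming `swm` is the nsIServiceWorkerManager service; the
 *   callback names follow nsIServiceWorkerManagerListener):
 *
 *     swm.addListener({
 *       onRegister(registration) {},
 *       onUnregister(registration) {
 *         // resolve a promise the test is awaiting
 *       },
 *     });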
 *
 * ### General Test Approach
 *
 * For each test we:
 * - Ensure/confirm the testing origin has no QuotaManager storage in use.
 * - Install the ServiceWorker.
 * - If we are testing the situation where we want to simulate the origin
 *   being near its quota limit, we also generate Cache API and IDB storage
 *   usage sufficient to put our origin over the threshold.
 *   - We run a quota check on the origin after doing this in order to make
 *     sure that we did this correctly and that we properly constrained the
 *     limit for the origin. We fail the test for test implementation
 *     reasons if we didn't accomplish this.
 * - Verify that a fetch navigation to the SW works without any fault
 *   injection, producing a result generated by the ServiceWorker.
 * - Begin fault permutations in a loop, where for each pass of the loop:
 *   - We trigger a navigation which will result in an intercepted fetch
 *     which will fault. We wait until the navigation completes.
 *   - We verify that we got the request from the network.
 *   - We verify that the ServiceWorker's navigationFaultCount increased.
 *   - If this is the count at which we expect a mitigation to take place,
 *     we wait for the registration to become unregistered AND:
 *     - We check whether the storage for the origin was cleared or not,
 *       which indicates which of the following mitigations happened:
 *       - Unregistering the registration directly.
 *       - Clearing the origin's data, which also unregisters the
 *         registration as a side effect.
 *     - We check whether the registration indicates that an origin quota
 *       check happened or not.
 *
 * ### Disk Usage Limits
 *
 * In order to avoid gratuitous disk I/O and related overheads, we limit QM
 * ("temporary") storage to 10 MiB, which ends up limiting group usage to
 * 10 MiB as well. This lets us set up a threshold situation where we claim
 * that a SW needs at least 4 MiB of storage for installation/operation,
 * meaning that any usage beyond 6 MiB in the group will constitute a need
 * to clear the group or origin. We fill the storage with 8 MiB of
 * artificial usage to this end, storing 4 MiB in Cache API and 4 MiB in
 * IDB.
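 *
 *   For reference, the arithmetic implied by the numbers above:
 *     threshold = 10 MiB group limit - 4 MiB SW headroom = 6 MiB
 *     artificial usage = 4 MiB (Cache API) + 4 MiB (IDB) = 8 MiB > 6 MiB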
 **/
// Because of the amount of I/O involved in this test, pernosco reproductions
// may experience timeouts without a timeout multiplier.
requestLongerTimeout(2);
/* import-globals-from browser_head.js */
Services.scriptloader.loadSubScript(
  "chrome://mochitests/content/browser/dom/serviceworkers/test/browser_head.js",
  this
);
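// browser_head.js provides the helpers used throughout this file
// (install_sw, navigate_and_get_body, consume_storage, the waitFor*
// helpers, etc.).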
// The origin we run the tests on.
const TEST_ORIGIN = "https://test1.example.org";
// An origin in the same group that impacts the usage of the TEST_ORIGIN. Used
// to verify heuristics related to group-clearing (where clearing the
// TEST_ORIGIN itself would not be sufficient for us to mitigate quota limits
// being reached).
const SAME_GROUP_ORIGIN = "https://test2.example.org";
const TEST_SW_SETUP = {
  origin: TEST_ORIGIN,
  // Page with a body textContent of "NETWORK" that has utils.js loaded.
  scope: "network_with_utils.html",
  // SW that serves a body with a textContent of "SERVICEWORKER" and that
  // also has utils.js loaded.
  script: "sw_respondwith_serviceworker.js",
};
const TEST_STORAGE_SETUP = {
  cacheBytes: 4 * 1024 * 1024, // 4 MiB
  idbBytes: 4 * 1024 * 1024, // 4 MiB
};
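
// The number of injected navigation faults after which we expect the
// mitigation (unregistration and possible storage clearing) to be triggered.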
const FAULTS_BEFORE_MITIGATION = 3;
/**
 * Core test iteration logic.
 *
 * Parameters:
 * - name: Human readable name of the fault we're injecting.
 * - useError: The nsresult failure code to inject into fetch.
 * - errorPage: The "about" page that we expect errors to leave us on.
 * - consumeQuotaOrigin: If truthy, the origin to place the storage usage in.
 *   If falsey, we won't fill storage.
 */
async function do_fault_injection_test({
  name,
  useError,
  errorPage,
  consumeQuotaOrigin,
}) {
  info(
    `### testing: error: ${name} (${useError}) consumeQuotaOrigin: ${consumeQuotaOrigin}`
  );

  // ## Ensure/confirm the testing origins have no QuotaManager storage in use.
  await clear_qm_origin_group_via_clearData(TEST_ORIGIN);

  // ## Install the ServiceWorker.
  const reg = await install_sw(TEST_SW_SETUP);
  const sw = reg.activeWorker;

  // ## Generate quota usage if appropriate.
  if (consumeQuotaOrigin) {
    await consume_storage(consumeQuotaOrigin, TEST_STORAGE_SETUP);
  }

  // ## Verify normal navigation is served by the SW.
  info(`## Checking normal operation.`);
  {
    const debugTag = `err=${name}&fault=0`;
    const docInfo = await navigate_and_get_body(TEST_SW_SETUP, debugTag);
    is(
      docInfo.body,
      "SERVICEWORKER",
      "navigation without injected fault originates from ServiceWorker"
    );
    is(
      docInfo.controlled,
      true,
      "successfully intercepted navigation should be controlled"
    );
  }

  // Make sure the test is listening for the ServiceWorker's unregistration,
  // since we expect that to happen once the navigation fault threshold is
  // reached.
  const unregisteredPromise = waitForUnregister(reg.scope);

  // Make sure the test is listening for the quota usage check to finish,
  // since we expect that to happen once the navigation fault threshold is
  // reached.
  const quotaUsageCheckFinishPromise = waitForQuotaUsageCheckFinish(reg.scope);
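  // (Both waits are established before any fault is injected so the
  // notifications cannot be missed.)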

  // ## Inject faults in a loop until the expected mitigation occurs.
  sw.testingInjectCancellation = useError;
  for (let iFault = 0; iFault < FAULTS_BEFORE_MITIGATION; iFault++) {
    info(`## Testing with injected fault number ${iFault + 1}`);

    // We should never have triggered an origin quota usage check before the
    // final fault injection.
    is(reg.quotaUsageCheckCount, 0, "No quota usage check yet");

    // Make sure our loads encode the specific error and fault number so that
    // log output is unambiguous.
    const debugTag = `err=${name}&fault=${iFault + 1}`;
    const docInfo = await navigate_and_get_body(TEST_SW_SETUP, debugTag);

    // We should always be receiving network fallback.
    is(
      docInfo.body,
      "NETWORK",
      "navigation with injected fault originates from network"
    );
    is(docInfo.controlled, false, "bypassed pages shouldn't be controlled");

    // The fault count should have increased.
    is(
      sw.navigationFaultCount,
      iFault + 1,
      "navigation fault count increased (to expected value)"
    );
  }

  await unregisteredPromise;
  is(reg.unregistered, true, "registration should be unregistered");
  //is(reg.quotaUsageCheckCount, 1, "Quota usage check must be started");
  await quotaUsageCheckFinishPromise;

  if (consumeQuotaOrigin) {
    // Check that there is no longer any storage used by the origin in this
    // case.
    const originUsage = await get_qm_origin_usage(TEST_ORIGIN);
    ok(
      is_minimum_origin_usage(originUsage),
      "origin usage should be mitigated"
    );

    if (consumeQuotaOrigin === SAME_GROUP_ORIGIN) {
      const sameGroupUsage = await get_qm_origin_usage(SAME_GROUP_ORIGIN);
      Assert.strictEqual(
        sameGroupUsage,
        0,
        "same group usage should be mitigated"
      );
    }
  }
}

add_task(async function test_navigation_fetch_fault_handling() {
  await SpecialPowers.pushPrefEnv({
    set: [
      ["dom.serviceWorkers.enabled", true],
      ["dom.serviceWorkers.exemptFromPerDomainMax", true],
      ["dom.serviceWorkers.testing.enabled", true],
      ["dom.serviceWorkers.mitigations.bypass_on_fault", true],
["dom.serviceWorkers.mitigations.group_usage_headroom_kb", 5 * 1024],
["dom.quotaManager.testing", true],
// We want the temporary global limit to be 10 MiB (the pref is in KiB).
// This will result in the group limit also being 10 MiB because on small
// disks we provide a group limit value of min(10 MiB, global limit).
["dom.quotaManager.temporaryStorage.fixedLimit", 10 * 1024],
],
});

  // We need to reset storage so that the
  // dom.quotaManager.temporaryStorage.fixedLimit pref takes effect.
  await qm_reset_storage();

  const quotaOriginVariations = [
    // Don't put us near the storage limit.
    undefined,
    // Put us near the storage limit in the SW origin itself.
    TEST_ORIGIN,
    // Put us near the storage limit in the SW origin's group, but not in the
    // origin itself.
    SAME_GROUP_ORIGIN,
  ];

  for (const consumeQuotaOrigin of quotaOriginVariations) {
    await do_fault_injection_test({
      name: "NS_ERROR_DOM_ABORT_ERR",
      useError: 0x80530014, // Not in `Cr`.
      // Abort errors manifest as about:blank pages.
      errorPage: "about:blank",
      consumeQuotaOrigin,
    });

    await do_fault_injection_test({
      name: "NS_ERROR_INTERCEPTION_FAILED",
      useError: 0x804b0064, // Not in `Cr`.
      // Interception failures manifest as corrupt content pages.
      errorPage: "about:neterror",
      consumeQuotaOrigin,
    });
  }

  // Cleanup: wipe the origin and group so all the ServiceWorkers go away.
  await clear_qm_origin_group_via_clearData(TEST_ORIGIN);
});