File: test_captureStream_audioLoopBack.html

package info (click to toggle)
firefox 149.0-1
  • links: PTS, VCS
  • area: main
  • in suites: sid
  • size: 4,767,760 kB
  • sloc: cpp: 7,416,064; javascript: 6,752,859; ansic: 3,774,850; python: 1,250,473; xml: 641,578; asm: 439,191; java: 186,617; sh: 56,634; makefile: 18,856; objc: 13,092; perl: 12,763; pascal: 5,960; yacc: 4,583; cs: 3,846; lex: 1,720; ruby: 1,002; php: 436; lisp: 258; awk: 105; sql: 66; sed: 53; csh: 10; exp: 6
file content (209 lines) | stat: -rw-r--r-- 7,323 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
<!DOCTYPE HTML>
<html>
<head>
  <title>audio loopback output check for captureStream() and MediaElementAudioSourceNode</title>
  <script src="/tests/SimpleTest/SimpleTest.js"></script>
  <script type="text/javascript" src="../webaudio/test/webaudio.js"></script>
  <link rel="stylesheet" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<script>
"use strict";

// Set the prefs this test depends on before any other task runs:
// captureStream() support, plus permission-free getUserMedia so the
// loopback input device can be opened without a prompt.
add_task(async function setupTestPrefs() {
  const prefs = [
    ["media.captureStream.enabled", true],
    // For mediaDevices and getUserMedia
    ["media.navigator.permission.disabled", true],
    ["media.devices.unfocused.enabled", true],
  ];
  await SpecialPowers.pushPrefEnv({ set: prefs });
});

// When calling captureStream() on a media element, audio playback should still
// be output through the media pipeline's audio device. We verify this by
// checking the loopback stream.
add_task(async function testCaptureStreamAudioPlayout() {
  if (!navigator.userAgent.includes("Linux")) {
    ok(true, "audio loopback is only supported on Linux");
    return;
  }

  if (!navigator.mediaDevices) {
    ok(true, "No mediaDevices, then gUM cannot have been called either");
    return;
  }

  info("creating and playing audio element");
  const { ac, source, loopbackStream } = await setupLoopbackAudioGraph();
  const audio = await createAndPlayAudio();

  info("capturing the audio element's media stream");
  const stream = audio.captureStream();
  is(stream.getAudioTracks().length, 1, "captured stream has an audio track");

  info("waiting for loopback audio to become audible");
  const SILENCE_RMS_THRESHOLD = 0.0;
  const AUDIBLE_RMS_THRESHOLD = 0.01;
  // Use the named constant (not a duplicated magic number) so the assertion
  // can never drift from the condition waitForRmsOverThreshold polled for.
  let curRms = await waitForRmsOverThreshold(ac, source, AUDIBLE_RMS_THRESHOLD);
  ok(curRms > AUDIBLE_RMS_THRESHOLD, `Expected audible baseline RMS ${curRms}`);

  // Element volume scales the output linearly, so scale the audibility
  // threshold by the same factor before asserting.
  audio.volume = 0.3;
  info(`Lowering volume to ${audio.volume}`);
  let tempAudibleThreshold = AUDIBLE_RMS_THRESHOLD * audio.volume;
  curRms = await waitForRmsOverThreshold(ac, source, tempAudibleThreshold);
  ok(curRms > tempAudibleThreshold,
     `Expected non-silent RMS ${curRms} after volume=${audio.volume}`);

  // volume=0 must produce exact digital silence on the loopback device.
  audio.volume = 0.0;
  info(`Lowering volume to ${audio.volume}`);
  curRms = await waitForRmsEqualToThreshold(ac, source, SILENCE_RMS_THRESHOLD);
  is(curRms, SILENCE_RMS_THRESHOLD,
     `Expected silent RMS ${curRms} after volume=${audio.volume}`);

  audio.volume = 1.0;
  info(`Raising volume to ${audio.volume}`);
  curRms = await waitForRmsOverThreshold(ac, source, AUDIBLE_RMS_THRESHOLD);
  ok(curRms > AUDIBLE_RMS_THRESHOLD,
     `Expected non-silent RMS ${curRms} after volume=${audio.volume}`);

  // Muting must silence the device output even though the captured
  // MediaStream keeps flowing.
  audio.muted = true;
  info("Muting audio");
  curRms = await waitForRmsEqualToThreshold(ac, source, SILENCE_RMS_THRESHOLD);
  is(curRms, SILENCE_RMS_THRESHOLD, `Expected silent RMS ${curRms} after muting`);

  audio.muted = false;
  info("Unmuting audio");
  curRms = await waitForRmsOverThreshold(ac, source, AUDIBLE_RMS_THRESHOLD);
  ok(curRms > AUDIBLE_RMS_THRESHOLD,
     `Expected non-silent RMS ${curRms} after unmuting`);

  info("cleaning up");
  audio.pause();
  audio.remove();
  loopbackStream.getTracks().forEach(t => t.stop());
  await ac.close();
});

// When a MediaElementAudioSourceNode is created for a media element, audio
// output should be routed through the Web Audio graph, and the loopback stream
// should therefore be silent.
add_task(async function testMediaElementAudioSourceNodeAudioPlayout() {
  // Loopback input is a Linux-only test facility; bail early elsewhere.
  if (!navigator.userAgent.includes("Linux")) {
    ok(true, "audio loopback is only supported on Linux");
    return;
  }

  if (!navigator.mediaDevices) {
    ok(true, "No mediaDevices, then gUM cannot have been called either");
    return;
  }

  const SILENCE_RMS_THRESHOLD = 0.0;
  const AUDIBLE_RMS_THRESHOLD = 0.01;

  const { ac, source, loopbackStream } = await setupLoopbackAudioGraph();
  const audio = await createAndPlayAudio();

  info("connecting to a media element source node");
  const elementSource = ac.createMediaElementSource(audio);

  // Once the element feeds the graph, the device (loopback) side must go
  // silent while the graph side carries the signal.
  info("audio playback should now be routed through the Web Audio graph");
  let measuredRms =
    await waitForRmsEqualToThreshold(ac, source, SILENCE_RMS_THRESHOLD);
  is(measuredRms, SILENCE_RMS_THRESHOLD,
     `Expected silent RMS ${measuredRms} for loopback stream`);

  info("waiting for the Web Audio graph to produce audible sound");
  measuredRms =
    await waitForRmsOverThreshold(ac, elementSource, AUDIBLE_RMS_THRESHOLD);
  ok(measuredRms > AUDIBLE_RMS_THRESHOLD,
     `Expected audible RMS ${measuredRms} from the graph`);

  // captureStream() on top of an existing element source must not re-route
  // audio back to the device.
  info("calling captureStream() on the media element");
  const captured = audio.captureStream();
  ok(!!captured.getTracks().length, "captureStream() returned tracks");

  info("verifying the loopback stream remains silent after captureStream()");
  measuredRms =
    await waitForRmsEqualToThreshold(ac, source, SILENCE_RMS_THRESHOLD);
  is(measuredRms, SILENCE_RMS_THRESHOLD,
     `Expected silent RMS ${measuredRms} for loopback stream`);

  info("verifying the Web Audio graph still produces audible sound");
  measuredRms =
    await waitForRmsOverThreshold(ac, elementSource, AUDIBLE_RMS_THRESHOLD);
  ok(measuredRms > AUDIBLE_RMS_THRESHOLD,
     `Expected audible RMS ${measuredRms} from the graph`);

  info("cleaning up");
  audio.pause();
  audio.remove();
  for (const track of loopbackStream.getTracks()) {
    track.stop();
  }
  await ac.close();
});

/**
 * Helper functions
 */
/**
 * Opens the loopback input device via getUserMedia and wires it into a new
 * AudioContext as a MediaStreamAudioSourceNode.
 *
 * @returns {Promise<{ac: AudioContext,
 *                    source: MediaStreamAudioSourceNode,
 *                    loopbackStream: MediaStream}>}
 */
async function setupLoopbackAudioGraph() {
  info("opening loopback input device");
  const constraints = { audio: true };
  const loopbackStream = await navigator.mediaDevices.getUserMedia(constraints);

  const audioTrackCount = loopbackStream.getAudioTracks().length;
  ok(audioTrackCount === 1,
     "Got loopback audio track");

  info("setting up graph");
  const ac = new AudioContext();
  // Make sure the context is actually running before anyone measures RMS.
  await ac.resume();
  const source = ac.createMediaStreamSource(loopbackStream);

  return { ac, source, loopbackStream };
}

/**
 * Creates a looping <audio> element playing a continuous tone, attaches it to
 * the document, and starts playback.
 *
 * @returns {Promise<HTMLAudioElement>} the playing element.
 */
async function createAndPlayAudio() {
  info("creating and playing audio element");
  const audio = document.createElement("audio");
  audio.src = "tone_1s_nosilence.mp3";
  audio.loop = true;
  document.body.appendChild(audio);
  // Map the play() promise to a boolean so a rejection fails the ok()
  // assertion instead of aborting the task.
  const started = await audio.play().then(
    () => true,
    () => false
  );
  ok(started, "audio started playing");
  return audio;
}

// Polls until the measured RMS is exactly `threshold` (used with 0.0 to
// detect digital silence).
async function waitForRmsEqualToThreshold(ac, src, threshold) {
  const isAtThreshold = rmsValue => rmsValue === threshold;
  return waitForRmsPredicate(ac, src, threshold, isAtThreshold);
}

// Polls until the measured RMS rises strictly above `threshold` (audibility).
async function waitForRmsOverThreshold(ac, src, threshold) {
  const isOverThreshold = rmsValue => rmsValue > threshold;
  return waitForRmsPredicate(ac, src, threshold, isOverThreshold);
}

/**
 * Repeatedly measures the RMS of `src` until `predicate(rms)` holds.
 *
 * @param {AudioContext} ac - context the measurement nodes are created in.
 * @param {AudioNode} src - node whose output is measured.
 * @param {number} threshold - the threshold the predicate tests against;
 *        included in the timeout error for diagnostics.
 * @param {function(number): boolean} predicate - condition to wait for.
 * @param {number} [timeoutMs=10000] - give up after this long.
 * @returns {Promise<number>} the first RMS value satisfying the predicate.
 * @throws {Error} on timeout, reporting elapsed time, last RMS and threshold.
 */
async function waitForRmsPredicate(ac, src, threshold, predicate, timeoutMs = 10000) {
  const start = performance.now();
  while (true) {
    const rms = await measureRMSOnceWithFreshScriptProcessor(ac, src);
    if (predicate(rms)) {
      return rms;
    }
    const elapsedTime = performance.now() - start;
    if (elapsedTime > timeoutMs) {
      // Previously `threshold` was accepted but unused and the error carried
      // no measurement context; include both so timeout failures are
      // actionable from the log alone.
      throw new Error(
        `Timed out waitForRmsPredicate ${elapsedTime}; ` +
          `last rms=${rms}, threshold=${threshold}`);
    }
  }
}

/**
 * Takes a single RMS measurement of `src` and tears the tap down again.
 *
 * A brand-new ScriptProcessorNode is created for every measurement so the
 * delivered buffer can only contain current samples, never stale ones left
 * over from an earlier measurement.
 */
async function measureRMSOnceWithFreshScriptProcessor(ac, src) {
  const processor = ac.createScriptProcessor(2048, 1, 1);
  src.connect(processor);
  processor.connect(ac.destination);

  // Wait for exactly one audioprocess event, then detach everything.
  const event = await new Promise(resolve => {
    processor.onaudioprocess = resolve;
  });

  src.disconnect(processor);
  processor.disconnect();
  processor.onaudioprocess = null;
  return rms(event.inputBuffer); // from webaudio.js
}
</script>
</body>
</html>