File: RTCEncodedAudioFrame-audiolevel.html

<!doctype html>
<meta charset=utf-8>
<title>Audio Level in RTCEncodedAudioFrameMetadata</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script src="/webrtc/RTCPeerConnection-helper.js"></script>
<script src="RTCEncodedFrame-timestamps-helper.js"></script>
<script>
'use strict';

function doWorkExpectingAudioLevel() {
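  // Runs inside a worker as the RTCRtpScriptTransform handler: it reads
  // encoded audio frames, verifies that getMetadata() reports an audioLevel
  // in the range [0, 1], forwards each frame, and posts the result back to
  // the test page.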
  onrtctransform = async e => {
    const reader = e.transformer.readable.getReader();
    const writer = e.transformer.writable.getWriter();
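    // Examine the first 10 frames; each one must carry a valid audioLevel.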
    for (let i = 0; i < 10; i++) {
      const frameOrDone = await reader.read();
      if (frameOrDone.done) {
        self.postMessage("Unexpected end of stream");
        return;
      }
      const metadata = frameOrDone.value.getMetadata();
      if (metadata.audioLevel === undefined) {
        self.postMessage("No audioLevel");
        return;
      }
      if (metadata.audioLevel < 0 || metadata.audioLevel > 1) {
        self.postMessage("Invalid audioLevel value");
        return;
      }
      await writer.write(frameOrDone.value);
    }
    self.postMessage("OK");
  };
}

promise_test(async t => {
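  // Build the transform worker from a data: URL wrapping the function above,
  // and resolve or reject based on the message it posts back.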
  const worker = new Worker(`data:text/javascript,(${doWorkExpectingAudioLevel.toString()})()`);
  const workerPromise = new Promise((resolve, reject) => {
    worker.onmessage = t.step_func(message => {
      if (message.data == "OK") {
        resolve();
      } else {
        reject(message.data);
      }
    });
  });

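  // Audio-only call with the worker attached as a receiver-side encoded
  // transform; abs-capture-time stays disabled.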
  await initiateCall(
      t, /*streamOptions=*/{audio: true, video: false},
      /*enableAbsCaptureTime=*/false, worker, /*enableSenderTransform=*/false,
      /*enableReceiverTransform=*/true);

  return workerPromise;
}, 'audioLevel present in audio receiver');

promise_test(async t => {
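  // Same as the receiver test above, but the transform worker is attached on
  // the sender side.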
  const worker = new Worker(`data:text/javascript,(${doWorkExpectingAudioLevel.toString()})()`);
  const workerPromise = new Promise((resolve, reject) => {
    worker.onmessage = t.step_func(message => {
      if (message.data == "OK") {
        resolve();
      } else {
        reject(message.data);
      }
    });
  });

  await initiateCall(
      t, /*streamOptions=*/{audio: true, video: false},
      /*enableAbsCaptureTime=*/false, worker, /*enableSenderTransform=*/true,
      /*enableReceiverTransform=*/false);

  return workerPromise;
}, 'audioLevel present in audio sender');


</script>