File: SpeechRecognition-mediaStreamTrack-manual.https.html

<!DOCTYPE html>
<html lang="en">
<title>SpeechRecognition MediaStreamTrack</title>

<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>

<script>
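// Decode an audio file with the Web Audio API and play it into a
// MediaStreamAudioDestinationNode, exposing the file's audio as a live
// MediaStreamTrack that SpeechRecognition can consume.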
async function getAudioTrackFromFile(filePath) {
    const audioContext = new AudioContext();
    const response = await fetch(filePath);
    const arrayBuffer = await response.arrayBuffer();
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;

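    // A MediaStreamAudioDestinationNode exposes whatever is played into it as
    // a MediaStream; connecting the buffer source and starting playback makes
    // the decoded audio flow into that stream.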
    const destination = audioContext.createMediaStreamDestination();
    source.connect(destination);
    source.start();

    return destination.stream.getAudioTracks()[0];
}

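// End-to-end check: recognize speech from a file-backed MediaStreamTrack,
// then verify that a second start() on the same instance is rejected.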
promise_test(async (t) => {
    const lang = "en-US";
    window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    const speechRecognition = new SpeechRecognition();
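    // Do not require on-device recognition; server-based processing is
    // allowed for this test.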
    speechRecognition.processLocally = false;
    speechRecognition.lang = lang;
    const audioTrack = await getAudioTrackFromFile("/media/speech.wav");

    assert_true(audioTrack instanceof MediaStreamTrack, "Audio track should be a valid MediaStreamTrack");

    const recognitionPromise = new Promise((resolve, reject) => {
        speechRecognition.onresult = (event) => {
            const transcript = event.results[0][0].transcript;
            resolve(transcript);
        };
        // Fail fast instead of hanging until the harness timeout if
        // recognition reports an error.
        speechRecognition.onerror = (event) => reject(new Error(event.error));
    });

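    // Recent Web Speech API drafts allow start() to take a MediaStreamTrack
    // as the audio source instead of the default microphone input.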
    speechRecognition.start(audioTrack);

    const transcript = await recognitionPromise;
    assert_equals(transcript.toLowerCase(), "this is a sentence in a single segment",
        "Speech recognition should correctly recognize the sentence in the audio file");

    // Starting speech recognition again on the same instance, here without a
    // media stream track, should fail because recognition has already started.
    assert_throws_dom("InvalidStateError", () => speechRecognition.start(),
        "Second call to start() should throw an InvalidStateError");
}, "SpeechRecognition should recognize speech from an audio file.");
</script>
</html>