File: SpeechRecognition-concurrentMediaStreamTrack-manual.https.html

<!DOCTYPE html>
<html lang="en">
<title>SpeechRecognition Concurrent MediaStreamTracks</title>

<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>

<script>
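// Decode an audio file with the Web Audio API and return a MediaStreamTrack
// that carries its playback, suitable for passing to SpeechRecognition.start().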
async function getAudioTrackFromFile(filePath) {
    const audioContext = new AudioContext();
    const response = await fetch(filePath);
    const arrayBuffer = await response.arrayBuffer();
    const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;

    const destination = audioContext.createMediaStreamDestination();
    source.connect(destination);
    source.start();

    return destination.stream.getAudioTracks()[0];
}

promise_test(async (t) => {
    const lang = "en-US";
    window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

    // Create two SpeechRecognition instances
    const speechRecognition1 = new SpeechRecognition();
    speechRecognition1.mode = 'cloud-only';
    speechRecognition1.lang = lang;
    const speechRecognition2 = new SpeechRecognition();
    speechRecognition2.mode = 'cloud-only';
    speechRecognition2.lang = lang;

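    // Build an independent audio track for each recognizer from the same test clip.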
    const audioTrack1 = await getAudioTrackFromFile("/media/speech.wav");
    const audioTrack2 = await getAudioTrackFromFile("/media/speech.wav");

    assert_true(audioTrack1 instanceof MediaStreamTrack, "Audio track 1 should be a valid MediaStreamTrack");
    assert_true(audioTrack2 instanceof MediaStreamTrack, "Audio track 2 should be a valid MediaStreamTrack");

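    // Each promise resolves with the first transcript its recognizer reports.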
    const recognitionPromise1 = new Promise((resolve) => {
        speechRecognition1.onresult = (event) => {
            const transcript = event.results[0][0].transcript;
            resolve(transcript);
        };
    });

    const recognitionPromise2 = new Promise((resolve) => {
        speechRecognition2.onresult = (event) => {
            const transcript = event.results[0][0].transcript;
            resolve(transcript);
        };
    });

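    // Start both recognitions so they process their tracks concurrently.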
    speechRecognition1.start(audioTrack1);
    speechRecognition2.start(audioTrack2);

    const transcript1 = await recognitionPromise1;
    const transcript2 = await recognitionPromise2;

    assert_equals(transcript1.toLowerCase(), "this is a sentence in a single segment", "Speech recognition 1 should correctly recognize speech");
    assert_equals(transcript2.toLowerCase(), "this is a sentence in a single segment", "Speech recognition 2 should correctly recognize speech");
}, "Two SpeechRecognition instances should simultaneously recognize speech from audio files.");
</script>
</html>