File: speech_recognition_private.idl

Package: chromium 139.0.7258.127-1

// Copyright 2021 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The <code>chrome.speechRecognitionPrivate</code> API allows internal
// extensions to use either network-based or on-device speech recognition.
[platforms=("chromeos"),
 implemented_in="chrome/browser/ash/extensions/speech/speech_recognition_private_api.h"]
namespace speechRecognitionPrivate {
  // Possible types of speech recognition.
  enum SpeechRecognitionType {
    onDevice,
    network
  };

  // Interface for an onStop event.
  dictionary SpeechRecognitionStopEvent {
    // Optional client ID.
    long? clientId;
  };

  // Interface for a speech recognition result event.
  dictionary SpeechRecognitionResultEvent {
    // Optional client ID.
    long? clientId;
    // The recognized phrase or sentence.
    DOMString transcript;
    // Whether the result is a final or an interim result.
    boolean isFinal;
  };

  // Interface for a speech recognition error event.
  dictionary SpeechRecognitionErrorEvent {
    // Optional client ID.
    long? clientId;
    // A message describing the error.
    DOMString message;
  };

  // Interface for options used when starting speech recognition.
  dictionary StartOptions {
    // An optional ID to specify the client.
    long? clientId;
    // The locale to use for speech recognition, specified in BCP-47 format,
    // e.g. "en-US".
    DOMString? locale;
    // Whether interim speech results should be returned.
    boolean? interimResults;
  };

  // Interface for options used when stopping speech recognition.
  dictionary StopOptions {
    // An optional ID to specify the client. To work as intended, this must
    // match the clientId that was used when starting speech recognition.
    long? clientId;
  };

  // Called when speech recognition has begun listening to the user's audio.
  // The callback's parameter specifies which type of speech recognition
  // is being used.
  callback OnStartCallback = void(SpeechRecognitionType type);
  // Called when speech recognition has stopped listening to the user's audio.
  callback OnStopCallback = void();

  interface Functions {
    // Starts listening to audio from the user. The callback is invoked when
    // speech recognition has started. If speech recognition is already active
    // when calling start(), the callback is run with an error.
    static void start(
        StartOptions options,
        OnStartCallback callback);

    // Stops listening to audio from the user. The callback is invoked when
    // speech recognition has stopped. If speech recognition has already stopped
    // when calling stop(), the callback is run with an error.
    static void stop(
        StopOptions options,
        OnStopCallback callback);
  };

  interface Events {
    // Fired when speech recognition stops.
    static void onStop(SpeechRecognitionStopEvent event);
    // Fired when a speech recognition result is returned.
    static void onResult(SpeechRecognitionResultEvent event);
    // Fired when a speech recognition error occurs.
    static void onError(SpeechRecognitionErrorEvent event);
  };
};
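
The IDL above only declares the API surface; the browser generates the actual bindings. The TypeScript sketch below is a rough illustration of how an allow-listed ChromeOS extension might drive the API. The ambient declarations, the ChromeEvent helper interface, and the CLIENT_ID constant are hand-written assumptions made for this example, not shipped typings.

// A minimal usage sketch. The ambient declarations below approximate the
// bindings generated from this IDL; they are assumptions for illustration.

interface ChromeEvent<Listener extends (...args: any[]) => void> {
  addListener(callback: Listener): void;
  removeListener(callback: Listener): void;
}

declare namespace chrome.speechRecognitionPrivate {
  type SpeechRecognitionType = 'onDevice' | 'network';

  interface StartOptions {
    clientId?: number;
    locale?: string;
    interimResults?: boolean;
  }

  interface StopOptions {
    clientId?: number;
  }

  interface SpeechRecognitionStopEvent {
    clientId?: number;
  }

  interface SpeechRecognitionResultEvent {
    clientId?: number;
    transcript: string;
    isFinal: boolean;
  }

  interface SpeechRecognitionErrorEvent {
    clientId?: number;
    message: string;
  }

  function start(
      options: StartOptions,
      callback: (type: SpeechRecognitionType) => void): void;
  function stop(options: StopOptions, callback: () => void): void;

  const onStop: ChromeEvent<(event: SpeechRecognitionStopEvent) => void>;
  const onResult: ChromeEvent<(event: SpeechRecognitionResultEvent) => void>;
  const onError: ChromeEvent<(event: SpeechRecognitionErrorEvent) => void>;
}

// Arbitrary caller-chosen ID; it is echoed back on every event so one
// extension can tell its own sessions apart.
const CLIENT_ID = 1;

chrome.speechRecognitionPrivate.onResult.addListener((event) => {
  if (event.clientId !== CLIENT_ID) {
    return;
  }
  const kind = event.isFinal ? 'final' : 'interim';
  console.log(`Received ${kind} transcript: ${event.transcript}`);
});

chrome.speechRecognitionPrivate.onError.addListener((event) => {
  console.error('Speech recognition error:', event.message);
});

chrome.speechRecognitionPrivate.onStop.addListener(() => {
  console.log('Speech recognition stopped.');
});

// Start listening; the callback reports whether on-device or network
// recognition is being used.
chrome.speechRecognitionPrivate.start(
    {clientId: CLIENT_ID, locale: 'en-US', interimResults: true},
    (type) => {
      console.log('Speech recognition started using:', type);
    });

// Later: stop with the same clientId so the request matches the session
// that start() created.
chrome.speechRecognitionPrivate.stop({clientId: CLIENT_ID}, () => {
  console.log('Stop acknowledged.');
});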