File: tokenized_sentence.h

package info (click to toggle)
chromium 139.0.7258.127-1
  • links: PTS, VCS
  • area: main
  • in suites:
  • size: 6,122,068 kB
  • sloc: cpp: 35,100,771; ansic: 7,163,530; javascript: 4,103,002; python: 1,436,920; asm: 946,517; xml: 746,709; pascal: 187,653; perl: 88,691; sh: 88,436; objc: 79,953; sql: 51,488; cs: 44,583; fortran: 24,137; makefile: 22,147; tcl: 15,277; php: 13,980; yacc: 8,984; ruby: 7,485; awk: 3,720; lisp: 3,096; lex: 1,327; ada: 727; jsp: 228; sed: 36
file content (53 lines) | stat: -rw-r--r-- 2,039 bytes parent folder | download | duplicates (6)
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_RENDERER_ACCESSIBILITY_PHRASE_SEGMENTATION_TOKENIZED_SENTENCE_H_
#define CHROME_RENDERER_ACCESSIBILITY_PHRASE_SEGMENTATION_TOKENIZED_SENTENCE_H_

#include <string>
#include <string_view>
#include <utility>
#include <vector>

#include "chrome/renderer/accessibility/phrase_segmentation/tokenizer.h"

// Collection representing the output of the tokenization process, for use in
// downstream processing for phrase detection.
class TokenizedSentence {
 public:
  // Constructs a tokenized sentence from a string by running a tokenizer.
  explicit TokenizedSentence(const std::u16string& text);

  // Constructs a tokenized sentence from tokens.
  explicit TokenizedSentence(const std::u16string& text,
                             const std::vector<std::u16string>& tokens);

  ~TokenizedSentence();

  const std::vector<std::u16string_view>& tokens() const { return tokens_; }
  const std::vector<std::pair<int, int>>& token_boundaries() const {
    return token_boundaries_;
  }

  // Calculates the number of words between two token indices (both included).
  // This is different from simply (end_token-start_token+1), because special
  // characters such as punctuations are tokenized, but may not contribute to
  // the word count. For example, the string `(below 2 °C or 35 °F).` has 6
  // words but 11 tokens.
  int WordsBetween(unsigned int start_token, unsigned int end_token) const;

  // Calculate the number of characters between two tokens, both included. This
  // is trivially implemented, unlike WordsBetween.
  int CharactersBetween(int start, int end) const {
    return token_boundaries_[end].second - token_boundaries_[start].first;
  }

 private:
  std::vector<std::u16string_view> tokens_;
  std::vector<std::pair<int, int>> token_boundaries_;

  Tokenizer tokenizer_;
};

#endif  // CHROME_RENDERER_ACCESSIBILITY_PHRASE_SEGMENTATION_TOKENIZED_SENTENCE_H_