File: tokenized_sentence.cc

package info (click to toggle)
chromium 138.0.7204.183-1
  • links: PTS, VCS
  • area: main
  • in suites: trixie
  • size: 6,071,908 kB
  • sloc: cpp: 34,937,088; ansic: 7,176,967; javascript: 4,110,704; python: 1,419,953; asm: 946,768; xml: 739,971; pascal: 187,324; sh: 89,623; perl: 88,663; objc: 79,944; sql: 50,304; cs: 41,786; fortran: 24,137; makefile: 21,806; php: 13,980; tcl: 13,166; yacc: 8,925; ruby: 7,485; awk: 3,720; lisp: 3,096; lex: 1,327; ada: 727; jsp: 228; sed: 36
file content (55 lines) | stat: -rw-r--r-- 1,884 bytes parent folder | download | duplicates (6)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
// Copyright 2024 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/renderer/accessibility/phrase_segmentation/tokenized_sentence.h"

#include "tokenized_sentence.h"

// Constructs a tokenized sentence by running the tokenizer over `text`.
// The stored token views alias `text`, so the caller must keep the backing
// string alive for the lifetime of this object.
TokenizedSentence::TokenizedSentence(const std::u16string& text) {
  token_boundaries_ = tokenizer_.Tokenize(text);
  tokens_.reserve(token_boundaries_.size());
  const std::u16string_view text_view(text);
  // Each boundary is a [begin, end) offset pair into `text`.
  for (const auto& [begin, end] : token_boundaries_) {
    tokens_.emplace_back(text_view.substr(begin, end - begin));
  }
}

// Constructs a tokenized sentence from pre-split tokens, locating each token
// in `text` by searching forward from the end of the previous match. The
// stored token views alias `text`, so the caller must keep the backing string
// alive for the lifetime of this object.
TokenizedSentence::TokenizedSentence(
    const std::u16string& text,
    const std::vector<std::u16string>& tokens) {
  token_boundaries_.reserve(tokens.size());
  tokens_.reserve(tokens.size());
  const std::u16string_view text_view(text);
  size_t start = 0;
  for (const std::u16string& token : tokens) {
    // Use size_t for find(): storing the result in int would turn npos into
    // -1 and make the offsets below garbage.
    const size_t begin = text.find(token, start);
    if (begin == std::u16string::npos) {
      // Token does not occur in `text` after the previous match; skip it
      // rather than recording a bogus boundary / taking an out-of-range view.
      continue;
    }
    const size_t end = begin + token.size();
    token_boundaries_.emplace_back(begin, end);
    tokens_.emplace_back(text_view.substr(begin, token.size()));
    start = end;
  }
}

// Defaulted out of line so the destructor is emitted in this translation unit.
TokenizedSentence::~TokenizedSentence() = default;

// Returns the number of words spanned by the token range
// [start_token, end_token] (both inclusive). A word boundary is inferred from
// a gap between consecutive token boundaries (e.g. a space). Returns 0 for an
// empty sentence or an empty/inverted range.
int TokenizedSentence::WordsBetween(unsigned int start_token,
                                    unsigned int end_token) const {
  if (token_boundaries_.empty()) {
    // Guard: `size() - 1` below would wrap around on an empty sentence.
    return 0;
  }
  // Clamp to the last valid index. The comparison must be >= (not >): the
  // loop below reads token_boundaries_[i + 1], so end_token == size() would
  // otherwise index one past the end of the vector.
  if (end_token >= token_boundaries_.size()) {
    end_token = token_boundaries_.size() - 1;
  }
  if (start_token > end_token) {
    return 0;
  }

  int words_between = 1;

  for (unsigned int i = start_token; i < end_token; ++i) {
    if (token_boundaries_[i].second < token_boundaries_[i + 1].first) {
      // If there are spaces after a token, that shows up as end_offset being
      // less than next_start_offset (to accommodate the space), and thus
      // signifies a word.
      ++words_between;
    }
  }
  return words_between;
}