File: wordtokenizer.h

//
// Author: Lorenzo Bettini <http://www.lorenzobettini.it>, (C) 2004-2008
//
// Copyright: See COPYING file that comes with this distribution
//

#ifndef WORDTOKENIZER_H_
#define WORDTOKENIZER_H_

#include <string>
#include <algorithm>
#include <list>

namespace srchilite {

/**
 * Tokenizes a paragraph, separating words from spaces.
 */
class WordTokenizer {
public:
    /**
     * Results of the tokenizer; each element is a pair where the first
     * string represents a possible space and the second string a possible word.
 * The two elements are mutually exclusive: in each pair, only one of
 * the two strings is non-empty.
     */
    typedef std::list<std::pair<std::string, std::string> > WordTokenizerResults;

    /**
     * Tokenizes the passed string and stores the results.
     * @param s the string to tokenize
     * @param results where to store the results
     */
    static void tokenize(const std::string &s, WordTokenizerResults &results);
};

} // namespace srchilite

#endif /*WORDTOKENIZER_H_*/
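
A minimal usage sketch (not part of the original header): it relies only on the declarations above and assumes, per the typedef's documentation, that each result pair carries a space run in its first string or a word in its second. Compile and link against the source-highlight library.

#include <iostream>
#include "wordtokenizer.h"

int main() {
    srchilite::WordTokenizer::WordTokenizerResults results;
    srchilite::WordTokenizer::tokenize("hello   world", results);

    // Each pair holds either a space run (first) or a word (second),
    // never both, per the documentation above.
    for (srchilite::WordTokenizer::WordTokenizerResults::const_iterator it =
             results.begin(); it != results.end(); ++it) {
        if (!it->second.empty())
            std::cout << "word:  \"" << it->second << "\"" << std::endl;
        else
            std::cout << "space: \"" << it->first << "\"" << std::endl;
    }
    return 0;
}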