File: string_token_id.cpp

Package: boost1.88 1.88.0-1 (Debian trixie, area: main)
//  Copyright (c) 2001-2010 Hartmut Kaiser
//
//  Distributed under the Boost Software License, Version 1.0. (See accompanying
//  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)

#include <boost/spirit/include/lex_lexertl.hpp>

#include <boost/spirit/include/qi_parse.hpp>
#include <boost/spirit/include/qi_operator.hpp>
#include <boost/spirit/include/qi_action.hpp>
#include <boost/spirit/include/qi_grammar.hpp>

#include <boost/core/lightweight_test.hpp>
#include <boost/phoenix/operator.hpp>

#include <iostream>
#include <string>

namespace qi = boost::spirit::qi;
namespace lex = boost::spirit::lex;

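// Token identifiers for the three token definitions below; ids for
// user-defined tokens start at lex::min_token_id.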
enum tokenids
{
    IDWORD = lex::min_token_id, 
    IDCHAR,
    IDANY
};

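// Lexer definition: the named pattern {TEST} (which expands to "A") is bound
// to IDWORD, the literal 'a' to IDCHAR, and any other single character to IDANY.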
template <typename Lexer>
struct word_count_tokens : lex::lexer<Lexer>
{
    word_count_tokens()
    {
        this->self.add_pattern
            ("TEST", "A")
        ;

        this->self = 
                lex::string("{TEST}", IDWORD) 
            |   lex::char_('a', IDCHAR)
            |   lex::string(".", IDANY)
            ;
    }
};

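// Grammar matching the token stream by token id and counting how many tokens
// of each kind were seen.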
template <typename Iterator>
struct word_count_grammar : qi::grammar<Iterator>
{
    template <typename TokenDef>
    word_count_grammar(TokenDef const&)
      : word_count_grammar::base_type(start)
      , w(0), c(0), a(0)
    {
        using boost::phoenix::ref;
        using qi::token;

        start =  *(   token(IDWORD) [++ref(w)]
                  |   token(IDCHAR) [++ref(c)]
                  |   token(IDANY)  [++ref(a)]
                  )
              ;
    }
    std::size_t w, c, a;
    qi::rule<Iterator> start;
};


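// Tokenize the input with the lexer defined above, parse the resulting token
// stream with the counting grammar, and verify the per-token counts.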
int main()
{
    typedef lex::lexertl::token<
        const char*, boost::mpl::vector<std::string>
    > token_type;

    typedef lex::lexertl::lexer<token_type> lexer_type;
    typedef word_count_tokens<lexer_type>::iterator_type iterator_type;
    word_count_tokens<lexer_type> word_count;          // Our lexer
    word_count_grammar<iterator_type> g (word_count);  // Our parser

    std::string str ("AaBCD");
    char const* first = str.c_str();
    char const* last = &first[str.size()];

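    // For the input "AaBCD": 'A' matches the {TEST} pattern (IDWORD), 'a'
    // matches IDCHAR, and 'B', 'C', 'D' each match "." (IDANY).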
    BOOST_TEST(lex::tokenize_and_parse(first, last, word_count, g));
    BOOST_TEST(g.w == 1 && g.c == 1 && g.a == 3);

    return boost::report_errors();
}