File: tablexer.py (syslog-ng 4.8.1-6)

#############################################################################
# Copyright (c) 2015-2016 Balabit
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
#
# As an additional exemption you are allowed to compile & link against the
# OpenSSL libraries as published by the OpenSSL project. See the file
# COPYING for details.
#
#############################################################################

from .lexertoken import LexerToken
from .lexer import Lexer


class TabLexer(Lexer):
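    """Decorator around another lexer that ends the token stream with a TAB token.

    Tokens are forwarded from the wrapped lexer; once it is exhausted (or it
    produces a partially lexed token), a synthetic TAB token is emitted and
    the stream is terminated.  With set_drop_last_token(True) the last real
    token is withheld and the TAB token takes its place; the token that was
    replaced can be retrieved with get_replaced_token().
    """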
    def __init__(self, lexer):
        self._lexer = lexer
        self._current_token = None
        self._end_of_tokens = False
        self._replaced_token = None
        self._buffered_tokens = None
        self._buffer_count = 0
        self._is_last_token = False

    def input(self, text):
        self._lexer.input(text)
        self._end_of_tokens = False
        self._buffered_tokens = None
        self._replaced_token = None

    def get_replaced_token(self):
        return self._replaced_token

    def set_drop_last_token(self, value):
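        # When enabled, keep a one-token lookahead buffer so that the final
        # real token can later be withheld and replaced by the TAB token.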
        if value:
            self._buffer_count = 1
        else:
            self._buffer_count = 0

    def token(self):
        if self._end_of_tokens:
            return None
        if not self._is_buffer_initialized():
            self._fill_buffer()

        self._shift_and_inject_tab()
        return self._current_token

    def get_position(self):
        # This method is intentionally not implemented: it would require
        # looking ahead to a next token that may or may not exist, which
        # would complicate the code, and it is not used anyway.
        raise NotImplementedError

    def _shift_and_inject_tab(self):
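        # Take the next token from the lookahead buffer; if it is the last
        # one (or only partially lexed), remember it as the replaced token
        # and hand out a synthetic TAB token instead, ending the stream.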
        self._shift_from_buffer()

        if self._is_last_token or self._is_current_token_partial():
            self._end_of_tokens = True
            self._replaced_token = self._current_token
            self._current_token = self._construct_tab_token()

    def _is_current_token_partial(self):
        return (self._current_token is not None and
                getattr(self._current_token, 'partial', False))

    def _is_buffer_initialized(self):
        return self._buffered_tokens is not None

    def _construct_tab_token(self):
        if self._replaced_token is not None:
            lexpos = self._replaced_token.lexpos
        else:
            lexpos = self._lexer.get_position()
        return LexerToken(type="TAB", lexpos=lexpos)

    def _shift_from_buffer(self):
        self._fetch_token_to_buffer()
        self._is_last_token = len(self._buffered_tokens) <= self._buffer_count
        self._current_token = self._get_token_from_buffer()

    def _fill_buffer(self):
        self._buffered_tokens = []
        for _ in range(self._buffer_count):
            self._fetch_token_to_buffer()

    def _fetch_token_to_buffer(self):
        token = self._lexer.token()
        if token is not None:
            self._buffered_tokens.append(token)

    def _get_token_from_buffer(self):
        try:
            return self._buffered_tokens.pop(0)
        except IndexError:
            return None
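

# ---------------------------------------------------------------------------
# Usage sketch (not part of the upstream file; names below are illustrative):
# TabLexer wraps any lexer that provides input(), token() and get_position()
# and yields LexerToken-style tokens.
#
#   lexer = TabLexer(some_lexer)        # some_lexer is a hypothetical lexer
#   lexer.set_drop_last_token(True)     # replace the last token with TAB
#   lexer.input("log source s_local")
#
#   token = lexer.token()
#   while token is not None:
#       last = token
#       token = lexer.token()
#   # last.type == "TAB"; the token it replaced is available through
#   # lexer.get_replaced_token().
# ---------------------------------------------------------------------------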