File: test_langcompleter.py

package info (click to toggle)
syslog-ng 4.8.1-6
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid
  • size: 20,456 kB
  • sloc: ansic: 177,631; python: 13,035; cpp: 11,611; makefile: 7,012; sh: 5,147; java: 3,651; xml: 3,344; yacc: 1,377; lex: 599; perl: 193; awk: 190; objc: 162
file content (153 lines) | stat: -rw-r--r-- 8,998 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
#############################################################################
# Copyright (c) 2015-2016 Balabit
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
#
# As an additional exemption you are allowed to compile & link against the
# OpenSSL libraries as published by the OpenSSL project. See the file
# COPYING for details.
#
#############################################################################

from __future__ import print_function, absolute_import
from .test_completer import CompleterTestCase
from ..langcompleter import LangBasedCompleter
from ..completerlang import CompleterLang
from ..choicecompleter import ChoiceCompleter
from ..lexertoken import LexerToken


class DummyLang(CompleterLang):
    """Stub language used to drive LangBasedCompleter in the tests below.

    Instead of lexing/parsing the input text, it returns whatever
    expected-token list (and optional replaced token) the test case
    constructed it with.
    """

    # pylint: disable=super-init-not-called
    def __init__(self, expected_tokens, replaced_token=None, replaced_token_pos=-1):
        self._expected_tokens = expected_tokens
        self._replaced_token = replaced_token
        self._replaced_token_pos = replaced_token_pos

    def _construct_lexer(self):
        # No lexer is needed: get_expected_tokens() is fully canned.
        pass

    def get_expected_tokens(self, text, drop_last_token):
        """Ignore *text*/*drop_last_token*; return the canned expectations."""
        replaced = None
        if self._replaced_token is not None:
            replaced = LexerToken(type=self._expected_tokens,
                                  value=self._replaced_token,
                                  lexpos=self._replaced_token_pos)
        return (self._expected_tokens, replaced, self._replaced_token_pos)


class TestLangCompleter(CompleterTestCase):
    """Exercise LangBasedCompleter with a canned DummyLang parser."""

    default_expectation = "1ST_TOKEN"
    default_completers = {
        '1ST_TOKEN': ChoiceCompleter(("token1-a", "token1-b"), suffix=''),
        '2ND_TOKEN': ChoiceCompleter(("token2-a", "token2-b"), suffix=''),
        '3RD_TOKEN': ChoiceCompleter(("token3-a", "token3-b"), suffix=''),
        'PARTIAL_TOKEN': ChoiceCompleter(("tokenP-a", "tokenP-b"), prefix='@', suffix='')
    }

    # pylint: disable=arguments-differ,too-many-arguments,too-many-positional-arguments
    def _construct_completer(self, expected_token=None, expected_tokens=None,
                             replaced_token=None, replaced_token_pos=-1,
                             completers=None, prefix="<!--"):
        """Build a LangBasedCompleter whose parser is a DummyLang stub."""
        if expected_tokens is None:
            expected_tokens = [expected_token or self.default_expectation]
        parser = DummyLang(expected_tokens=expected_tokens,
                           replaced_token=replaced_token,
                           replaced_token_pos=replaced_token_pos)
        return LangBasedCompleter(parser=parser,
                                  completers=completers or self.default_completers,
                                  prefix=prefix)

    def test_completing_on_the_first_characters_of_prefix_offers_the_prefix(self):
        for partial_prefix in ("", "<", "<!", "<!-"):
            self._assert_completions_offered(partial_prefix, expected_completions=["<!--"])

    def test_completing_on_an_input_with_a_mismatching_prefix(self):
        self._assert_no_completions_are_offered("mismatch")

    def test_completing_on_prefix_only_offers_the_completions_of_the_first_token_with_prefix_prepended(self):
        expected = ["<!--token1-a", "<!--token1-b"]
        self._assert_completions_offered("<!--", expected_completions=expected)

    def test_completing_the_1st_empty_token_offers_completions_on_the_token(self):
        self._completer = self._construct_completer(expected_token="1ST_TOKEN")
        self._assert_completions_offered(entire_input="<!-- ", word="",
                                         expected_completions=["token1-a", "token1-b"])

    def test_completing_the_2nd_empty_token_offers_completions_on_the_token(self):
        self._completer = self._construct_completer(expected_token="2ND_TOKEN")
        self._assert_completions_offered(entire_input="<!-- foo ", word="",
                                         expected_completions=['token2-a', 'token2-b'])

    def test_completing_the_3rd_empty_token_offers_completions_on_the_token(self):
        self._completer = self._construct_completer(expected_token="3RD_TOKEN")
        self._assert_completions_offered(entire_input="<!-- foo bar ", word="",
                                         expected_completions=['token3-a', 'token3-b'])

    def test_completing_a_partial_token_with_an_empty_word_produces_choices_with_the_partial_prefix(self):
        self._completer = self._construct_completer(expected_token="PARTIAL_TOKEN")
        self._assert_completions_offered(entire_input="<!-- foo ", word="",
                                         expected_completions=['@'])

    def test_completing_a_partial_token_with_only_the_prefix_produces_choices_with_the_partial_prefix(self):
        # NOTE: replaced_token_pos is calculated after the language prefix (<!--) is chopped!
        self._completer = self._construct_completer(expected_token="PARTIAL_TOKEN",
                                                    replaced_token="@",
                                                    replaced_token_pos=5)
        self._assert_completions_offered(entire_input="<!-- foo @", word="@",
                                         expected_completions=['@tokenP-a', '@tokenP-b'])

    def test_completing_a_partial_token_with_a_few_matching_characters_produces_choices_with_the_partial_prefix(self):
        # NOTE: replaced_token_pos is calculated after the language prefix (<!--) is chopped!
        self._completer = self._construct_completer(expected_token="PARTIAL_TOKEN",
                                                    replaced_token="@tok",
                                                    replaced_token_pos=5)
        self._assert_completions_offered(entire_input="<!-- foo @tok", word="@tok",
                                         expected_completions=['@tokenP-a', '@tokenP-b'])

    def test_completing_a_partial_token_with_unmatching_characters_produces_no_completions(self):
        # NOTE: replaced_token_pos is calculated after the language prefix (<!--) is chopped!
        self._completer = self._construct_completer(expected_token="PARTIAL_TOKEN",
                                                    replaced_token="@unmatching",
                                                    replaced_token_pos=5)
        self._assert_no_completions_are_offered(entire_input="<!-- foo @unmatching", word="@unmatching")

    def test_completing_a_partial_token_that_is_longer_than_a_word_produces_no_matches(self):
        # Corner case: readline's word splitting disagrees with the
        # language's tokenization, so the word is shorter than the token
        # being replaced. Completing would only replace part of the token,
        # so the safest behavior is to offer nothing; the syntax this was
        # written for fortunately has no such case in practice.

        # NOTE: replaced_token_pos is calculated after the language prefix (<!--) is chopped!
        self._completer = self._construct_completer(expected_token="PARTIAL_TOKEN",
                                                    replaced_token="@token tail",
                                                    replaced_token_pos=5)
        self._assert_no_completions_are_offered(entire_input="<!-- foo @token tail", word="tail")

    def test_completing_a_token_where_multiple_tokens_could_match_collects_all_matches(self):
        self._completer = self._construct_completer(expected_tokens=["1ST_TOKEN", "2ND_TOKEN", "3RD_TOKEN"])
        self._assert_completions_offered(entire_input="<!-- ", word="",
                                         expected_completions=["token1-a", "token2-b", "token3-a"])
        self._assert_completions_not_offered(entire_input="<!-- ", word="",
                                             unexpected_completions=["@tokenP-a"])

    def test_completing_a_token_that_has_no_registered_completer_results_in_no_matches(self):
        self._completer = self._construct_completer(expected_token="NOSUCHCOMPLETER")
        self._assert_no_completions_are_offered("<!-- ")