File: tokenizer.py

Package: python-lunr 0.8.0-1
from copy import deepcopy

from lunr.token import Token
from lunr.utils import as_string

# Characters treated as token separators by default: ASCII whitespace,
# the non-breaking space (\xa0) and the hyphen.
SEPARATOR_CHARS = " \t\n\r\f\v\xa0-"


def default_separator(char):
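    # `char and ...` guards against the empty string that the tokenizer uses
    # as an end-of-input sentinel; an empty char is never a separator.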
    return char and char in SEPARATOR_CHARS


def Tokenizer(obj, metadata=None, separator=None):
    """Splits a string into tokens ready to be inserted into the search index.

    Args:
        metadata (dict): Optional metadata can be passed to the tokenizer, this
            metadata will be cloned and added as metadata to every token that is
            created from the object to be tokenized.
        separator (callable or compiled regex): This tokenizer will convert its
            parameter to a string by calling `str` and then will split this
            string on characters for which `separator` is True. Lists will have
            their elements converted to strings and wrapped in a lunr `Token`.

    Returns:
        List of Token instances.
    """
    if obj is None:
        return []

    metadata = metadata or {}

    if isinstance(obj, (list, tuple)):
        return [
            Token(as_string(element).lower(), deepcopy(metadata)) for element in obj
        ]

    if separator is None:
        is_separator = default_separator
    elif callable(separator):
        is_separator = separator
    else:  # must be a regex, remove when dropping support for 2.7
        is_separator = lambda c: separator.match(c)  # noqa

    string = str(obj).lower()
    length = len(string)
    tokens = []
    slice_start = 0
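    # Scan one position past the end of the string so the final token is
    # flushed even when the string does not end with a separator.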
    for slice_end in range(length + 1):
        char = string[slice_end] if slice_end != length else ""
        slice_length = slice_end - slice_start
        if is_separator(char) or slice_end == length:
            if slice_length > 0:
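                # "position" is [start offset, length] within the lower-cased
                # string; "index" is the token's ordinal position in the output.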
                token_metadata = {}
                token_metadata["position"] = [slice_start, slice_length]
                token_metadata["index"] = len(tokens)
                token_metadata.update(metadata)

                sl = slice(slice_start, slice_end)
                tokens.append(Token(string[sl], token_metadata))

            slice_start = slice_end + 1

    return tokens
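

# Illustrative usage sketch, not part of the upstream module: assuming the
# `lunr` package is installed so the imports above resolve, and that `str()`
# on a Token returns its text. With the default separator, whitespace and
# hyphens delimit tokens, while other punctuation is kept.
if __name__ == "__main__":
    tokens = Tokenizer("full-text search, simplified")
    print([str(token) for token in tokens])
    # Should print something like: ['full', 'text', 'search,', 'simplified']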