File: test_bert_wordpiece.py

from tokenizers import BertWordPieceTokenizer

from ..utils import bert_files, data_dir, multiprocessing_with_parallelism


class TestBertWordPieceTokenizer:
    def test_basic_encode(self, bert_files):
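        # Load a pretrained, lowercased BERT vocab.txt provided by the
        # bert_files pytest fixture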
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])

        # Encode with special tokens by default
        output = tokenizer.encode("My name is John", "pair")
        assert output.ids == [101, 2026, 2171, 2003, 2198, 102, 3940, 102]
        assert output.tokens == [
            "[CLS]",
            "my",
            "name",
            "is",
            "john",
            "[SEP]",
            "pair",
            "[SEP]",
        ]
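        # Special tokens carry the empty (0, 0) offset; the pair's offsets
        # are computed against its own string, so "pair" spans (0, 4)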
        assert output.offsets == [
            (0, 0),
            (0, 2),
            (3, 7),
            (8, 10),
            (11, 15),
            (0, 0),
            (0, 4),
            (0, 0),
        ]
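        # type_ids mark segment membership: 0 for the first sequence and its
        # special tokens, 1 for the pair and its trailing [SEP]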
        assert output.type_ids == [0, 0, 0, 0, 0, 0, 1, 1]

        # Can encode without the special tokens
        output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
        assert output.ids == [2026, 2171, 2003, 2198, 3940]
        assert output.tokens == ["my", "name", "is", "john", "pair"]
        assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
        assert output.type_ids == [0, 0, 0, 0, 1]

    def test_multiprocessing_with_parallelism(self, bert_files):
        tokenizer = BertWordPieceTokenizer.from_file(bert_files["vocab"])
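        # The shared utils helper encodes from a forked child process with the
        # parallelism flag off, then on, to check that Rust-side parallelism
        # does not deadlock after fork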
        multiprocessing_with_parallelism(tokenizer, False)
        multiprocessing_with_parallelism(tokenizer, True)

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = BertWordPieceTokenizer()
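        # Train a fresh WordPiece vocab directly from an in-memory iterator;
        # no files on disk are required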
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["a", "sentence"]
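
    # --- Hedged sketch, not part of the upstream suite: a decode round-trip.
    # It assumes only decode(), which the shared BaseTokenizer API exposes,
    # and reuses the tiny corpus from test_train_from_iterator above.
    def test_decode_roundtrip_sketch(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = BertWordPieceTokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        ids = tokenizer.encode("A sentence").ids
        # The tokenizer lowercases by default, so the round-trip is lowercased
        assert tokenizer.decode(ids) == "a sentence"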