File: vectors.py

#  Licensed to Elasticsearch B.V. under one or more contributor
#  license agreements. See the NOTICE file distributed with
#  this work for additional information regarding copyright
#  ownership. Elasticsearch B.V. licenses this file to you under
#  the Apache License, Version 2.0 (the "License"); you may
#  not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
# 	http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing,
#  software distributed under the License is distributed on an
#  "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#  KIND, either express or implied.  See the License for the
#  specific language governing permissions and limitations
#  under the License.

"""
# Vector database example

Requirements:

$ pip install nltk sentence_transformers tqdm "elasticsearch"
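
The Elasticsearch connection URL is read from the ELASTICSEARCH_URL
environment variable, for example:

$ export ELASTICSEARCH_URL="http://localhost:9200"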

To run the example:

$ python vectors.py "text to search"

The index will be created automatically if it does not exist. Add
`--recreate-index` to regenerate it.

The example dataset includes a selection of workplace documents. The
following are good example queries to try out with this dataset:

$ python vectors.py "work from home"
$ python vectors.py "vacation time"
$ python vectors.py "can I bring a bird to work?"

When the index is created, each document is split into short passages, and an
embedding is generated for each passage using the open source
"all-MiniLM-L6-v2" model. The documents returned as search results are those
containing the highest-scoring passages. Add `--show-inner-hits` to the
command to see the individual passage results as well.
"""

import argparse
import json
import os
from datetime import datetime
from typing import Any, List, Optional, cast
from urllib.request import urlopen

import nltk
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

from elasticsearch.dsl import (
    DenseVector,
    Document,
    InnerDoc,
    Keyword,
    M,
    Search,
    connections,
    mapped_field,
)

DATASET_URL = "https://raw.githubusercontent.com/elastic/elasticsearch-labs/main/datasets/workplace-documents.json"
MODEL_NAME = "all-MiniLM-L6-v2"

# initialize sentence tokenizer
nltk.download("punkt_tab", quiet=True)

# the embedding model, loaded lazily on first use
embedding_model: Any = None


class Passage(InnerDoc):
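    """A short passage of a document, stored with its vector embedding."""
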
    content: str
    embedding: List[float] = mapped_field(DenseVector())


class WorkplaceDoc(Document):
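    """A workplace document, indexed along with embeddings of its passages."""
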
    class Index:
        name = "workplace_documents"

    name: str
    summary: str
    content: str
    created: datetime
    updated: Optional[datetime]
    url: str = mapped_field(Keyword(required=True))
    category: str = mapped_field(Keyword(required=True))
    passages: M[List[Passage]] = mapped_field(default=[])

    @classmethod
    def get_embedding(cls, input: str) -> List[float]:
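        """Return the embedding vector for the given text, loading the model on first use."""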
        global embedding_model
        if embedding_model is None:
            embedding_model = SentenceTransformer(MODEL_NAME)
        return cast(List[float], list(embedding_model.encode(input)))

    def clean(self) -> None:
        # split the content into sentences
        passages = cast(List[str], nltk.sent_tokenize(self.content))

        # generate an embedding for each passage and save it as a nested document
        for passage in passages:
            self.passages.append(
                Passage(content=passage, embedding=self.get_embedding(passage))
            )


def create() -> None:
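    """(Re)create the index and populate it with the example dataset."""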
    # delete the index if it already exists, then create it with fresh mappings
    WorkplaceDoc._index.delete(ignore_unavailable=True)
    WorkplaceDoc.init()

    # download the data
    dataset = json.loads(urlopen(DATASET_URL).read())

    # import the dataset
    for data in tqdm(dataset, desc="Indexing documents..."):
        doc = WorkplaceDoc(
            name=data["name"],
            summary=data["summary"],
            content=data["content"],
            created=data.get("created_on"),
            updated=data.get("updated_at"),
            url=data["url"],
            category=data["category"],
        )
        doc.save()


def search(query: str) -> Search[WorkplaceDoc]:
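    """Run a k-nearest-neighbor search for the query over the passage embeddings."""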
    return WorkplaceDoc.search().knn(
        field=WorkplaceDoc.passages.embedding,  # the nested vector field to search
        k=5,  # number of nearest neighbors to return
        num_candidates=50,  # candidates to consider per shard
        query_vector=list(WorkplaceDoc.get_embedding(query)),
        inner_hits={"size": 2},  # include the top 2 matching passages per document
    )


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Vector database with Elasticsearch")
    parser.add_argument(
        "--recreate-index", action="store_true", help="Recreate and populate the index"
    )
    parser.add_argument(
        "--show-inner-hits",
        action="store_true",
        help="Show results for individual passages",
    )
    parser.add_argument("query", action="store", help="The search query")
    return parser.parse_args()


def main() -> None:
    args = parse_args()

    # initialize the default connection to Elasticsearch
    connections.create_connection(hosts=[os.environ["ELASTICSEARCH_URL"]])

    if args.recreate_index or not WorkplaceDoc._index.exists():
        create()

    results = search(args.query)

    # iterating over the search object executes the query and yields the hits
    for hit in results:
        print(
            f"Document: {hit.name} [Category: {hit.category}] [Score: {hit.meta.score}]"
        )
        print(f"Summary: {hit.summary}")
        if args.show_inner_hits:
            for passage in hit.meta.inner_hits["passages"]:
                print(f"  - [Score: {passage.meta.score}] {passage.content!r}")
        print("")

    # close the connection
    connections.get_connection().close()


if __name__ == "__main__":
    main()