1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
|
// This file is autogenerated, DO NOT EDIT
// analysis/tokenizers/edgengram-tokenizer.asciidoc:264
[source, python]
----
# Create the index: an edge_ngram "autocomplete" analyzer is used at index
# time (2-10 char letter-only prefixes, lowercased), while the lighter
# "autocomplete_search" analyzer only lowercases the user's query terms.
autocomplete_settings = {
    "analysis": {
        "analyzer": {
            "autocomplete": {
                "tokenizer": "autocomplete",
                "filter": ["lowercase"],
            },
            "autocomplete_search": {"tokenizer": "lowercase"},
        },
        "tokenizer": {
            "autocomplete": {
                "type": "edge_ngram",
                "min_gram": 2,
                "max_gram": 10,
                "token_chars": ["letter"],
            }
        },
    }
}
# The "title" field indexes with the edge-ngram analyzer but searches with
# the plain lowercase one, so query terms are matched against stored prefixes.
title_mappings = {
    "properties": {
        "title": {
            "type": "text",
            "analyzer": "autocomplete",
            "search_analyzer": "autocomplete_search",
        }
    }
}
resp = client.indices.create(
    index="my-index-000001",
    settings=autocomplete_settings,
    mappings=title_mappings,
)
print(resp)
# Index one sample document so the autocomplete analyzer has data to match.
resp1 = client.index(
    index="my-index-000001",
    id="1",
    document={"title": "Quick Foxes"},
)
print(resp1)
# Refresh so the just-indexed document becomes visible to search immediately.
resp2 = client.indices.refresh(index="my-index-000001")
print(resp2)
# Run an autocomplete-style search: "operator": "and" requires every analyzed
# query term ("quick", "fo") to match the stored edge-ngram prefixes.
resp3 = client.search(
    index="my-index-000001",
    query={
        "match": {
            "title": {"query": "Quick Fo", "operator": "and"}
        }
    },
)
print(resp3)
----
|