Elasticsearch - searching wildcard using n-gram
I have a requirement where users can type in a few characters and expect results similar to a SQL LIKE query. I'm using n-grams because I've seen many people advise against wildcard searches. However, the returned data is sometimes very irrelevant, because the text contains the query's characters but scattered and out of order. I added the score, but it doesn't help. Does anyone have any suggestions? Thanks.
Update
Here are the index settings:
"settings": {
"index": {
"lifecycle": {
"name": "audit_log_policy",
"rollover_alias": "audit-log-alias-test"
},
"analysis": {
"analyzer": {
"abi_analyzer": {
"tokenizer": "n_gram_tokenizer"
}
},
"tokenizer": {
"n_gram_tokenizer": {
"token_chars": [
"letter",
"digit"
],
"min_gram": "3",
"type": "ngram",
"max_gram": "10"
}
}
},
"number_of_shards": "1",
"number_of_replicas": "1",
"max_ngram_diff": "10",
"max_result_window": "100000"
}
}
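To sanity-check what this tokenizer emits, you can feed it a sample value through the _analyze API. The request below defines the tokenizer inline, so it can be run against any cluster; Htest11211 is simply the resourceCode value from the sample hit further down:

POST _analyze
{
  "tokenizer": {
    "type": "ngram",
    "min_gram": 3,
    "max_gram": 10,
    "token_chars": ["letter", "digit"]
  },
  "text": "Htest11211"
}

It returns every 3- to 10-character substring of the value (Hte, Htes, Htest, ..., test11211, ...), and those are the tokens that end up in the ngram sub-fields.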
The fields are mapped like this:
"resourceCode": {
"type": "text",
"fields": {
"ngram": {
"analyzer": "abi_analyzer",
"type": "text"
},
"keyword": {
"ignore_above": 256,
"type": "keyword"
}
}
},
"logDetail": {
"type": "text",
"fields": {
"ngram": {
"analyzer": "abi_analyzer",
"type": "text"
},
"keyword": {
"ignore_above": 8191,
"type": "keyword"
}
}
}
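With this mapping in place, the _analyze API also accepts a field name, so you can see how a concrete value is indexed per sub-field (the index name below is taken from the sample hit further down and is only illustrative):

GET user-interaction-audit-log-test-000001/_analyze
{
  "field": "resourceCode.ngram",
  "text": "Htest11211"
}

Running the same call with "field": "resourceCode" shows the difference: the top-level text field produces the single token htest11211, while the ngram sub-field produces all the 3- to 10-character grams.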
And here is how I query (in the client code):
query_string: {
  fields: ["logDetail.ngram", "resourceCode.ngram"],
  query: data.searchInput.toLowerCase(),
}
Example
A sample query looks like this:
{
  "query": {
    "bool": {
      "must": [
        {
          "terms": {
            "organizationIds": [
              ...
            ]
          }
        },
        {
          "range": {
            "createdAt": {
              "gte": "2020-08-11T17:00:00.000Z",
              "lte": "2020-08-31T16:59:59.999Z"
            }
          }
        },
        {
          "multi_match": {
            "fields": [
              "logDetail.ngram",
              "resourceCode.ngram"
            ],
            "query": "0004"
          }
        }
      ]
    }
  },
  "sort": [
    {
      "createdAt": "desc"
    }
  ],
  "track_scores": true,
  "size": 20,
  "from": 0
}
And this is an irrelevant hit, with its score:
{
  "_index": "user-interaction-audit-log-test-000001",
  "_type": "_doc",
  "_id": "ae325b4a6b45442cbf8a44d595e9a747",
  "_score": 3.4112902,
  "_source": {
    "logOperation": "UPDATE",
    "resource": "CUSTOMER",
    "resourceCode": "Htest11211",
    "logDetail": "<div>Updated Mobile Number from <var isolate><b>+84966123451000<\/b><\/var> to <var isolate><b>+849<\/b><\/var><\/div>",
    "organizationIds": [
      "5e72ea0e4019f01fad0d91c9",
    ],
    "createdAt": "2020-08-20T08:13:36.026Z",
    "username": "test_user",
    "module": "PARTNER",
    "component": "WEB_APP"
  },
  "sort": [
    1597911216026
  ]
}
The problem is that you haven't specified any search analyzer. So your search input also gets analyzed by the abi_analyzer, and 0004 gets tokenized into 000 and 004. The former token, i.e. 000, matches one of the tokens of the logDetail.ngram field.
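You can verify this with the _analyze API against your index (index name taken from the sample hit):

GET user-interaction-audit-log-test-000001/_analyze
{
  "analyzer": "abi_analyzer",
  "text": "0004"
}

This returns the tokens 000, 004 and 0004. The 000 token alone matches one of the n-grams produced from +84966123451000 in the sample document's logDetail, which is why that irrelevant document scores and gets returned.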
What you need to do is specify the standard search_analyzer for both fields in your mapping, so that the search input is not split into n-grams but simply matched against the tokens that were indexed:
"resourceCode": {
"type": "text",
"fields": {
"ngram": {
"analyzer": "abi_analyzer",
"search_analyzer": "standard", <--- here
"type": "text"
},
"keyword": {
"ignore_above": 256,
"type": "keyword"
}
}
},
"logDetail": {
"type": "text",
"fields": {
"ngram": {
"analyzer": "abi_analyzer",
"search_analyzer": "standard", <--- here
"type": "text"
},
"keyword": {
"ignore_above": 8191,
"type": "keyword"
}
}
}
If you don't want to change your mapping because you don't want to reindex your data, you can also specify the search analyzer at query time:
{
  "multi_match": {
    "fields": [
      "logDetail.ngram",
      "resourceCode.ngram"
    ],
    "analyzer": "standard", <--- here
    "query": "0004"
  }
}
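Since your client code builds a query_string query rather than a multi_match, note that query_string accepts the same analyzer parameter, so the equivalent change there would look like this (a sketch, reusing the field list from your snippet):

query_string: {
  fields: ["logDetail.ngram", "resourceCode.ngram"],
  analyzer: "standard",
  query: data.searchInput.toLowerCase(),
}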
Update: add a lowercase token filter to the custom analyzer, so the indexed n-grams are lowercased to match the lowercased search input:
"analyzer": {
"abi_analyzer": {
"tokenizer": "n_gram_tokenizer"
"filter": ["lowercase"]
}
},
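With the lowercase filter, the n-grams are lowercased at index time, which lines up with the toLowerCase() the client already applies to the search input. Note that analysis settings on an existing index can only be changed while the index is closed, and documents indexed before the change keep their old tokens until they are reindexed. A quick check of the updated analyzer (same illustrative index name as above):

GET user-interaction-audit-log-test-000001/_analyze
{
  "analyzer": "abi_analyzer",
  "text": "Htest11211"
}

should now return lowercase grams (hte, htes, htest, and so on).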