如何按匹配文档字段的字段类型分组?
How to group by field type for matched document fields?
我正在使用 terms(术语)聚合来统计字段值,但在聚合之前,我会先对搜索进行过滤,聚合结果基于过滤后的文档。最后我需要 id 以及对应的聚合计数,例如
如果异常 ID 为 1 并且匹配,则我需要输出为
1 -> "key": "transfer"
"doc_count": 2
2 -> "key": "stock"
"doc_count": 4
我已在下方突出显示了异常 ID,希望用它作为指向每个存储桶的键。
我怎样才能在 Elasticsearch 中做到这一点?我附上了示例响应。
{
"took": 250,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 0.0,
"hits": [
{
"_index": "america",
"_type": "_doc",
"_id": "1",
"_score": 0.0,
"_source": {
"clusterId": "1",
"rank": 1,
"events": [
{
"eventId": "1",
"eventType": "Delayed",
"metaInfo": {
"batch_id": "batch_1"
},
"recommendationData": [
{
***"exceptionId": "1",***
"item": "Item1",
"location": "DC1",
"dueDate": "2019-01-10T05:30:00.000+0530",
"quantity": 100,
"metaInfo": {
"batch_id": "batch_1",
"dummy_id": "dummy_1"
},
"rank": 1,
"recommendations": [
{
"rank": 1,
"recommendationType": "transfer",
"customerName": "Walmart",
"stockTransfer": {
"primaryRecommendation": true,
"priority": 1,
"sourceLocation": "DC3",
"metaInfo": 40,
"shipDate": "2019-01-09T05:30:00.000+0530",
"arrivalDate": "2019-01-10T05:30:00.000+0530",
"transportMode": "Road",
"transferCost": 200.0,
"maxQtyAvailableForTransfer": 40,
"totalQtyAtSource": 40
},
"expedite": null
},
{
"rank": 1,
"recommendationType": "transfer",
"customerName": "Walmart",
"stockTransfer": {
"primaryRecommendation": true,
"priority": 2,
"sourceLocation": "DC2",
"transferQuantity": 60,
"shipDate": "2019-01-09T05:30:00.000+0530",
"arrivalDate": "2019-01-10T05:30:00.000+0530",
"transportMode": "Road",
"transferCost": 600.0,
"maxQtyAvailableForTransfer": 100,
"totalQtyAtSource": 100
},
"expedite": null
}
]
}
]
}
]
}
}
]
},
"aggregations": {
"recommendationTypes": {
"doc_count": 2,
"recommendationTypes": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "transfer",
"doc_count": 2
}
]
}
}
}
}
如果要对任何 exceptionId 或 recommendationType 进行聚合,这两者都在嵌套对象中,则需要使用嵌套聚合。
例如,假设您有一个包含两个嵌套文档的文档,其 exceptionId 分别为 1 和 2,而您只想在 exceptionId 为 2 的嵌套文档上聚合。即使您已经在 "query" 部分使用嵌套查询进行了过滤,仍然需要使用嵌套聚合:因为只要有任意一个嵌套对象匹配,整个文档就会被返回,所以您必须在聚合中明确指定要在哪些特定的嵌套对象上进行聚合。
查询
{
"aggs": {
"recommendations": {
"nested": {
"path": "events.recommendationData"
},
"aggs": {
"exception": {
"filter": {
"terms": {
"events.recommendationData.exceptionId": [
"2"
]
}
},
"aggs": {
"exceptionIds": {
"terms": {
"field": "events.recommendationData.exceptionId.keyword",
"size": 10
},
"aggs": {
"recommendations": {
"nested": {
"path": "events.recommendationData.recommendations"
},
"aggs": {
"recommendationType": {
"terms": {
"field": "events.recommendationData.recommendations.recommendationType",
"size": 10
}
}
}
}
}
}
}
}
}
}
}
}
结果:
"aggregations" : {
"recommendations" : {
"doc_count" : 1,
"exception" : {
"doc_count" : 1,
"exceptionIds" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [
{
"key" : "2",
"doc_count" : 1,
"recommendations" : {
"doc_count" : 2,
"recommendationType" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [
{
"key" : "transfer",
"doc_count" : 2
}
]
}
}
}
]
}
}
}
}
我正在使用 terms(术语)聚合来统计字段值,但在聚合之前,我会先对搜索进行过滤,聚合结果基于过滤后的文档。最后我需要 id 以及对应的聚合计数,例如:如果异常 ID 为 1 并且匹配,则我需要输出为
1 -> "key": "transfer" "doc_count": 2
2 -> "key": "stock" "doc_count": 4
我已在下方突出显示了异常 ID,希望用它作为指向每个存储桶的键。
我怎样才能在 Elasticsearch 中做到这一点?我附上了示例响应。
{
"took": 250,
"timed_out": false,
"_shards": {
"total": 1,
"successful": 1,
"skipped": 0,
"failed": 0
},
"hits": {
"total": {
"value": 1,
"relation": "eq"
},
"max_score": 0.0,
"hits": [
{
"_index": "america",
"_type": "_doc",
"_id": "1",
"_score": 0.0,
"_source": {
"clusterId": "1",
"rank": 1,
"events": [
{
"eventId": "1",
"eventType": "Delayed",
"metaInfo": {
"batch_id": "batch_1"
},
"recommendationData": [
{
***"exceptionId": "1",***
"item": "Item1",
"location": "DC1",
"dueDate": "2019-01-10T05:30:00.000+0530",
"quantity": 100,
"metaInfo": {
"batch_id": "batch_1",
"dummy_id": "dummy_1"
},
"rank": 1,
"recommendations": [
{
"rank": 1,
"recommendationType": "transfer",
"customerName": "Walmart",
"stockTransfer": {
"primaryRecommendation": true,
"priority": 1,
"sourceLocation": "DC3",
"metaInfo": 40,
"shipDate": "2019-01-09T05:30:00.000+0530",
"arrivalDate": "2019-01-10T05:30:00.000+0530",
"transportMode": "Road",
"transferCost": 200.0,
"maxQtyAvailableForTransfer": 40,
"totalQtyAtSource": 40
},
"expedite": null
},
{
"rank": 1,
"recommendationType": "transfer",
"customerName": "Walmart",
"stockTransfer": {
"primaryRecommendation": true,
"priority": 2,
"sourceLocation": "DC2",
"transferQuantity": 60,
"shipDate": "2019-01-09T05:30:00.000+0530",
"arrivalDate": "2019-01-10T05:30:00.000+0530",
"transportMode": "Road",
"transferCost": 600.0,
"maxQtyAvailableForTransfer": 100,
"totalQtyAtSource": 100
},
"expedite": null
}
]
}
]
}
]
}
}
]
},
"aggregations": {
"recommendationTypes": {
"doc_count": 2,
"recommendationTypes": {
"doc_count_error_upper_bound": 0,
"sum_other_doc_count": 0,
"buckets": [
{
"key": "transfer",
"doc_count": 2
}
]
}
}
}
}
如果要对任何 exceptionId 或 recommendationType 进行聚合,这两者都在嵌套对象中,则需要使用嵌套聚合。
例如,假设您有一个包含两个嵌套文档的文档,其 exceptionId 分别为 1 和 2,而您只想在 exceptionId 为 2 的嵌套文档上聚合。即使您已经在 "query" 部分使用嵌套查询进行了过滤,仍然需要使用嵌套聚合:因为只要有任意一个嵌套对象匹配,整个文档就会被返回,所以您必须在聚合中明确指定要在哪些特定的嵌套对象上进行聚合。 查询
{
"aggs": {
"recommendations": {
"nested": {
"path": "events.recommendationData"
},
"aggs": {
"exception": {
"filter": {
"terms": {
"events.recommendationData.exceptionId": [
"2"
]
}
},
"aggs": {
"exceptionIds": {
"terms": {
"field": "events.recommendationData.exceptionId.keyword",
"size": 10
},
"aggs": {
"recommendations": {
"nested": {
"path": "events.recommendationData.recommendations"
},
"aggs": {
"recommendationType": {
"terms": {
"field": "events.recommendationData.recommendations.recommendationType",
"size": 10
}
}
}
}
}
}
}
}
}
}
}
}
结果:
"aggregations" : {
"recommendations" : {
"doc_count" : 1,
"exception" : {
"doc_count" : 1,
"exceptionIds" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [
{
"key" : "2",
"doc_count" : 1,
"recommendations" : {
"doc_count" : 2,
"recommendationType" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [
{
"key" : "transfer",
"doc_count" : 2
}
]
}
}
}
]
}
}
}
}