在多个 shingle 过滤器上调整聚合查询
Tuning an aggregation query on multiple shingle filters
我有 13,000 个网页的正文索引。目标是获取一个词、两个词、三个词……直到八个词的短语的前 200 个短语频率。
这些网页中总共有超过 1.5 亿个单词需要标记化。
问题是查询需要大约 15 分钟,之后它会耗尽堆空间,无法完成。
我正在一台 4 CPU 核心、8GB RAM、SSD 的 Ubuntu 服务器上进行测试。其中 6GB 的 RAM 被分配为堆。交换已禁用。
现在,我可以通过拆分成 8 个不同的索引来做到这一点,每个查询/设置/映射组合只处理单一长度的词组。也就是说,我可以对单词短语、双词短语等分别单独运行此操作,从而获得我期望的结果(尽管每次仍然需要大约 5 分钟)。我想知道是否有一种方法可以调整这个完整的聚合,使其能在我的硬件上通过单个索引和单次查询完成。
设置和映射:
{
"settings":{
"index":{
"number_of_shards" : 1,
"number_of_replicas" : 0,
"analysis":{
"analyzer":{
"analyzer_shingle_2":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_2"]
},
"analyzer_shingle_3":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_3"]
},
"analyzer_shingle_4":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_4"]
},
"analyzer_shingle_5":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_5"]
},
"analyzer_shingle_6":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_6"]
},
"analyzer_shingle_7":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_7"]
},
"analyzer_shingle_8":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_8"]
}
},
"filter":{
"filter_shingle_2":{
"type":"shingle",
"max_shingle_size":2,
"min_shingle_size":2,
"output_unigrams":"false"
},
"filter_shingle_3":{
"type":"shingle",
"max_shingle_size":3,
"min_shingle_size":3,
"output_unigrams":"false"
},
"filter_shingle_4":{
"type":"shingle",
"max_shingle_size":4,
"min_shingle_size":4,
"output_unigrams":"false"
},
"filter_shingle_5":{
"type":"shingle",
"max_shingle_size":5,
"min_shingle_size":5,
"output_unigrams":"false"
},
"filter_shingle_6":{
"type":"shingle",
"max_shingle_size":6,
"min_shingle_size":6,
"output_unigrams":"false"
},
"filter_shingle_7":{
"type":"shingle",
"max_shingle_size":7,
"min_shingle_size":7,
"output_unigrams":"false"
},
"filter_shingle_8":{
"type":"shingle",
"max_shingle_size":8,
"min_shingle_size":8,
"output_unigrams":"false"
}
}
}
}
},
"mappings":{
"items":{
"properties":{
"body":{
"type": "multi_field",
"fields": {
"two-word-phrases": {
"analyzer":"analyzer_shingle_2",
"type":"string"
},
"three-word-phrases": {
"analyzer":"analyzer_shingle_3",
"type":"string"
},
"four-word-phrases": {
"analyzer":"analyzer_shingle_4",
"type":"string"
},
"five-word-phrases": {
"analyzer":"analyzer_shingle_5",
"type":"string"
},
"six-word-phrases": {
"analyzer":"analyzer_shingle_6",
"type":"string"
},
"seven-word-phrases": {
"analyzer":"analyzer_shingle_7",
"type":"string"
},
"eight-word-phrases": {
"analyzer":"analyzer_shingle_8",
"type":"string"
}
}
}
}
}
}
}
查询:
{
"size" : 0,
"aggs" : {
"one-word-phrases" : {
"terms" : {
"field" : "body",
"size" : 200
}
},
"two-word-phrases" : {
"terms" : {
"field" : "body.two-word-phrases",
"size" : 200
}
},
"three-word-phrases" : {
"terms" : {
"field" : "body.three-word-phrases",
"size" : 200
}
},
"four-word-phrases" : {
"terms" : {
"field" : "body.four-word-phrases",
"size" : 200
}
},
"five-word-phrases" : {
"terms" : {
"field" : "body.five-word-phrases",
"size" : 200
}
},
"six-word-phrases" : {
"terms" : {
"field" : "body.six-word-phrases",
"size" : 200
}
},
"seven-word-phrases" : {
"terms" : {
"field" : "body.seven-word-phrases",
"size" : 200
}
},
"eight-word-phrases" : {
"terms" : {
"field" : "body.eight-word-phrases",
"size" : 200
}
}
}
}
您真的需要将整个 collection 放在内存中吗?您的分析可以重写为一个资源需求只有原来一小部分的批处理管道:
- 解析每个已抓取的站点,并将 shingle 输出到一系列平面文件(例如用 Python 生成 n-grams:四元、五元、六元词组等)
- 对 shingle 输出文件进行排序
- 解析 shingle 输出文件并输出 shingle 计数文件
- 解析所有的 shingle count 文件并输出一个 master aggregate shingle count 文件
- 按计数降序排列
(这种事情通常在 UNIX 管道中完成并并行化。)
或者,你也可以用更多的内存来运行它。
我有 13,000 个网页的正文索引。目标是获取一个词、两个词、三个词……直到八个词的短语的前 200 个短语频率。
这些网页中总共有超过 1.5 亿个单词需要标记化。
问题是查询需要大约 15 分钟,之后它会耗尽堆空间,无法完成。
我正在一台 4 CPU 核心、8GB RAM、SSD 的 Ubuntu 服务器上进行测试。其中 6GB 的 RAM 被分配为堆。交换已禁用。
现在,我可以通过拆分成 8 个不同的索引来做到这一点,每个查询/设置/映射组合只处理单一长度的词组。也就是说,我可以对单词短语、双词短语等分别单独运行此操作,从而获得我期望的结果(尽管每次仍然需要大约 5 分钟)。我想知道是否有一种方法可以调整这个完整的聚合,使其能在我的硬件上通过单个索引和单次查询完成。
设置和映射:
{
"settings":{
"index":{
"number_of_shards" : 1,
"number_of_replicas" : 0,
"analysis":{
"analyzer":{
"analyzer_shingle_2":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_2"]
},
"analyzer_shingle_3":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_3"]
},
"analyzer_shingle_4":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_4"]
},
"analyzer_shingle_5":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_5"]
},
"analyzer_shingle_6":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_6"]
},
"analyzer_shingle_7":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_7"]
},
"analyzer_shingle_8":{
"tokenizer":"standard",
"filter":["standard", "lowercase", "filter_shingle_8"]
}
},
"filter":{
"filter_shingle_2":{
"type":"shingle",
"max_shingle_size":2,
"min_shingle_size":2,
"output_unigrams":"false"
},
"filter_shingle_3":{
"type":"shingle",
"max_shingle_size":3,
"min_shingle_size":3,
"output_unigrams":"false"
},
"filter_shingle_4":{
"type":"shingle",
"max_shingle_size":4,
"min_shingle_size":4,
"output_unigrams":"false"
},
"filter_shingle_5":{
"type":"shingle",
"max_shingle_size":5,
"min_shingle_size":5,
"output_unigrams":"false"
},
"filter_shingle_6":{
"type":"shingle",
"max_shingle_size":6,
"min_shingle_size":6,
"output_unigrams":"false"
},
"filter_shingle_7":{
"type":"shingle",
"max_shingle_size":7,
"min_shingle_size":7,
"output_unigrams":"false"
},
"filter_shingle_8":{
"type":"shingle",
"max_shingle_size":8,
"min_shingle_size":8,
"output_unigrams":"false"
}
}
}
}
},
"mappings":{
"items":{
"properties":{
"body":{
"type": "multi_field",
"fields": {
"two-word-phrases": {
"analyzer":"analyzer_shingle_2",
"type":"string"
},
"three-word-phrases": {
"analyzer":"analyzer_shingle_3",
"type":"string"
},
"four-word-phrases": {
"analyzer":"analyzer_shingle_4",
"type":"string"
},
"five-word-phrases": {
"analyzer":"analyzer_shingle_5",
"type":"string"
},
"six-word-phrases": {
"analyzer":"analyzer_shingle_6",
"type":"string"
},
"seven-word-phrases": {
"analyzer":"analyzer_shingle_7",
"type":"string"
},
"eight-word-phrases": {
"analyzer":"analyzer_shingle_8",
"type":"string"
}
}
}
}
}
}
}
查询:
{
"size" : 0,
"aggs" : {
"one-word-phrases" : {
"terms" : {
"field" : "body",
"size" : 200
}
},
"two-word-phrases" : {
"terms" : {
"field" : "body.two-word-phrases",
"size" : 200
}
},
"three-word-phrases" : {
"terms" : {
"field" : "body.three-word-phrases",
"size" : 200
}
},
"four-word-phrases" : {
"terms" : {
"field" : "body.four-word-phrases",
"size" : 200
}
},
"five-word-phrases" : {
"terms" : {
"field" : "body.five-word-phrases",
"size" : 200
}
},
"six-word-phrases" : {
"terms" : {
"field" : "body.six-word-phrases",
"size" : 200
}
},
"seven-word-phrases" : {
"terms" : {
"field" : "body.seven-word-phrases",
"size" : 200
}
},
"eight-word-phrases" : {
"terms" : {
"field" : "body.eight-word-phrases",
"size" : 200
}
}
}
}
您真的需要将整个 collection 放在内存中吗?您的分析可以重写为一个资源需求只有原来一小部分的批处理管道:
- 解析每个已抓取的站点,并将 shingle 输出到一系列平面文件(例如用 Python 生成 n-grams:四元、五元、六元词组等)
- 对 shingle 输出文件进行排序
- 解析 shingle 输出文件并输出 shingle 计数文件
- 解析所有的 shingle count 文件并输出一个 master aggregate shingle count 文件
- 按计数降序排列
(这种事情通常在 UNIX 管道中完成并并行化。)
或者,你也可以用更多的内存来运行它。