Logstash elapsed filter
I'm trying to use the elapsed.rb filter in the ELK stack and can't seem to figure it out. I'm not very familiar with grok, and I believe that's where my problem lies. Can anyone help?
Sample log file:
{
  "application_name": "Application.exe",
  "machine_name": "Machine1",
  "user_name": "testuser",
  "entry_date": "2015-03-12T18:12:23.5187552Z",
  "chef_environment_name": "chefenvironment1",
  "chef_logging_cookbook_version": "0.1.9",
  "logging_level": "INFO",
  "performance": {
    "process_name": "account_search",
    "process_id": "Machine1|1|635617555435187552",
    "event_type": "enter"
  },
  "thread_name": "1",
  "logger_name": "TestLogger",
  "@version": "1",
  "@timestamp": "2015-03-12T18:18:48.918Z",
  "type": "rabbit",
  "log_from": "rabbit"
}
{
  "application_name": "Application.exe",
  "machine_name": "Machine1",
  "user_name": "testuser",
  "entry_date": "2015-03-12T18:12:23.7527462Z",
  "chef_environment_name": "chefenvironment1",
  "chef_logging_cookbook_version": "0.1.9",
  "logging_level": "INFO",
  "performance": {
    "process_name": "account_search",
    "process_id": "Machine1|1|635617555435187552",
    "event_type": "exit"
  },
  "thread_name": "1",
  "logger_name": "TestLogger",
  "@version": "1",
  "@timestamp": "2015-03-12T18:18:48.920Z",
  "type": "rabbit",
  "log_from": "rabbit"
}
Sample .conf file:
input {
  rabbitmq {
    host => "SERVERNAME"
    add_field => ["log_from", "rabbit"]
    type => "rabbit"
    user => "testuser"
    password => "testuser"
    durable => "true"
    exchange => "Logging"
    queue => "testqueue"
    codec => "json"
    exclusive => "false"
    passive => "true"
  }
}

filter {
  grok {
    match => ["message", "%{TIMESTAMP_ISO8601} START id: (?<process_id>.*)"]
    add_tag => [ "taskStarted" ]
  }
  grok {
    match => ["message", "%{TIMESTAMP_ISO8601} END id: (?<process_id>.*)"]
    add_tag => [ "taskTerminated" ]
  }
  elapsed {
    start_tag => "taskStarted"
    end_tag => "taskTerminated"
    unique_id_field => "process_id"
    timeout => 10000
    new_event_on_match => false
  }
}

output {
  file {
    codec => json { charset => "UTF-8" }
    path => "test.log"
  }
}
You don't need a grok filter at all, because your input is already JSON. You need to do something like this instead:
if [performance][event_type] == "enter" {
  mutate { add_tag => ["taskStarted"] }
} else if [performance][event_type] == "exit" {
  mutate { add_tag => ["taskTerminated"] }
}

elapsed {
  start_tag => "taskStarted"
  end_tag => "taskTerminated"
  unique_id_field => "performance.process_id"
  timeout => 10000
  new_event_on_match => false
}
I'm not completely sure about that unique_id_field -- I think it should work, but if it doesn't, you can change it to just process_id and add add_field => { "process_id" => "%{[performance][process_id]}" } to the mutate filters.
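For reference, here is a minimal sketch of that fallback, assuming the events arrive already decoded by the json codec on the rabbitmq input and that copying the nested ID into a new top-level process_id field is acceptable:

filter {
  # Copy the nested ID into a top-level field so elapsed can correlate on it
  mutate {
    add_field => { "process_id" => "%{[performance][process_id]}" }
  }

  # Tag start/end events based on the nested event_type value
  if [performance][event_type] == "enter" {
    mutate { add_tag => ["taskStarted"] }
  } else if [performance][event_type] == "exit" {
    mutate { add_tag => ["taskTerminated"] }
  }

  # Correlate the tagged pairs on the top-level process_id field
  elapsed {
    start_tag => "taskStarted"
    end_tag => "taskTerminated"
    unique_id_field => "process_id"
    timeout => 10000
    new_event_on_match => false
  }
}

While testing, you could temporarily replace the file output with stdout { codec => rubydebug } to see whether the elapsed filter is adding its timing fields to the exit events.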