Kibana not showing the correct data while choosing @timestamp & received_at

I have a logstash.conf file, and until now I saw the data being processed correctly, but today I noticed a very strange issue: the index noi-syslog is not showing the correct syslog_timestamp.

input {
  file {
    path => [ "/scratch/rsyslog/*/messages.log" ]
    start_position => beginning
    sincedb_path => "/dev/null"
    max_open_files => 64000
    type => "noi-syslog"
  }
  file {
    path => [ "/scratch/rsyslog_CISCO/*/network.log" ]
    start_position => beginning
    sincedb_path => "/dev/null"
    max_open_files => 64000
    type => "apic_logs"
  }
}

filter {
  if [type] == "noi-syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      remove_field => [ "host", "path" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
  if [type] == "apic_logs" {
    grok {
      match => { "message" => "%{CISCOTIMESTAMP:syslog_timestamp} %{CISCOTIMESTAMP} %{SYSLOGHOST:syslog_hostname} (?<prog>[\w._/%-]+) %{SYSLOG5424SD:fault_code}%{SYSLOG5424SD:fault_state}%{SYSLOG5424SD:crit_info}%{SYSLOG5424SD:log_severity}%{SYSLOG5424SD:log_info} %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      remove_field => [ "host", "path" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
  }
}
output {
  if [type] == "noi-syslog" {
    elasticsearch {
      hosts => "noida-elk:9200"
      manage_template => false
      index => "noi-syslog-%{+YYYY.MM.dd}"
      document_type => "messages"
    }
  }
}

output {
  if [type] == "apic_logs" {
    elasticsearch {
      hosts => "noida-elk:9200"
      manage_template => false
      index => "apic_logs-%{+YYYY.MM.dd}"
      document_type => "messages"
    }
  }
}
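Two side notes on this config, separate from the timestamp question.

First, sincedb_path => "/dev/null" throws away the per-file read position, so Logstash re-reads every matched file from the beginning on each restart; if that re-ingestion is not intended, point sincedb_path at a persistent file instead. A minimal sketch, where the path is an assumption (any location writable by the logstash user works):

file {
  path => [ "/scratch/rsyslog/*/messages.log" ]
  start_position => beginning
  # keep the read offsets across restarts instead of discarding them
  sincedb_path => "/var/lib/logstash/sincedb_noi_syslog"
  max_open_files => 64000
  type => "noi-syslog"
}

Second, Logstash concatenates multiple output sections, so the two output blocks above do work; but both write to the same cluster, so they can be collapsed into one. A minimal equivalent sketch, assuming every event type should map 1:1 to an index prefix (which holds for the two types here):

output {
  elasticsearch {
    hosts => "noida-elk:9200"
    manage_template => false
    # the index name is built from the event's type field at output time
    index => "%{type}-%{+YYYY.MM.dd}"
    document_type => "messages"
  }
}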

Indices for apic_logs and noi-syslog:

$ curl -s -XGET http://127.0.0.1:9200/_cat/indices?v | grep -E 'noi|apic_logs'
green  open   noi-syslog-2019.03.13           Fz1Rht65QDCYCshmSjWO4Q   5   1    6845696            0      2.2gb            1gb
green  open   noi-rmlog-2019.03.13            W_VW8Y1eTWq-TKHAma3DLg   5   1     148613            0     92.6mb           45mb
green  open   apic_logs-2019.03.13            pKz61TS5Q-W2yCsCtrVvcQ   5   1    1606765            0    788.6mb        389.7mb

The Kibana page displays all the fields correctly when @timestamp is selected for the apic_logs index, but it does not work properly for the Linux syslog index noi-syslog.

When @timestamp is selected, noi-syslog does not show all the fields; it shows all the fields with received_at, but then the data is not current.

[screenshot with received_at selected]

[screenshot with @timestamp selected]

In the Elasticsearch deprecation log:

# tail -5 log-cohort_deprecation.log
[2019-03-13T20:16:29,112][WARN ][o.e.d.a.a.i.t.p.PutIndexTemplateRequest] [noida-elk.cadence.com] Deprecated field [template] used, replaced by [index_patterns]
[2019-03-13T20:16:30,548][WARN ][o.e.d.a.a.i.t.p.PutIndexTemplateRequest] [noida-elk.cadence.com] Deprecated field [template] used, replaced by [index_patterns]
[2019-03-13T20:19:45,935][WARN ][o.e.d.a.a.i.t.p.PutIndexTemplateRequest] [noida-elk.cadence.com] Deprecated field [template] used, replaced by [index_patterns]
[2019-03-13T20:19:48,644][WARN ][o.e.d.a.a.i.t.p.PutIndexTemplateRequest] [noida-elk.cadence.com] Deprecated field [template] used, replaced by [index_patterns]
[2019-03-13T20:20:13,069][WARN ][o.e.d.a.a.i.t.p.PutIndexTemplateRequest] [noida-elk.cadence.com] Deprecated field [template] used, replaced by [index_patterns]
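These warnings are unrelated to the timestamp issue: some index template on the cluster still uses the legacy template field, which newer Elasticsearch versions replace with index_patterns. A hedged sketch of the fix; the template name and settings here are hypothetical, so list the real templates with curl http://127.0.0.1:9200/_template first:

$ curl -s -XPUT 'http://127.0.0.1:9200/_template/noi-syslog' \
    -H 'Content-Type: application/json' -d '{
  "index_patterns": ["noi-syslog-*"],
  "settings": { "number_of_shards": 5 }
}'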

System memory usage:

             total       used       free     shared    buffers     cached
Mem:         32057      31794        263          0        210      18206
-/+ buffers/cache:      13378      18679
Swap:       102399        115     102284

Out of 32 GB of total memory I have allocated 8 GB each to Elasticsearch and Logstash, and I doubt whether that could be causing the problem.
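For what it is worth, 8 GB of heap each is generous for this volume, so memory is an unlikely culprit. To verify the allocation, both services read their heap size from their jvm.options file; the paths below assume package installs:

# /etc/elasticsearch/jvm.options
-Xms8g
-Xmx8g

# /etc/logstash/jvm.options
-Xms8g
-Xmx8g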

Workaround to drop events tagged with _grokparsefailure:

filter {
  if [type] == "noi-syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      remove_field => [ "host", "path" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
    if "_grokparsefailure" in [tags] {
      drop { }
    }
  }
}
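A gentler variant, if you would rather keep the failed lines for inspection than delete them: skip the drop and route the tagged events to a separate index in the output. A sketch, where the index name is an assumption; note that the main noi-syslog output would then also need a condition excluding the tag, or these events would be indexed twice:

output {
  if "_grokparsefailure" in [tags] {
    elasticsearch {
      hosts => "noida-elk:9200"
      manage_template => false
      # park unparsed lines where they can be reviewed later
      index => "noi-syslog-failed-%{+YYYY.MM.dd}"
      document_type => "messages"
    }
  }
}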

1 - Or, as an alternative, just an idea:

filter {
  if [type] == "noi-syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      remove_field => [ "host", "path" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
    if "_grokparsefailure" in [tags] {
      grok {
        match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:syslog_message}" }
        add_field => [ "received_at", "%{@timestamp}" ]
        remove_field => [ "host", "path" ]
      }
    }
  }
}
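One caveat with this alternative: a retry grok that succeeds does not remove the _grokparsefailure tag added by the first grok, so the re-parsed events still carry it. grok's remove_tag option, which runs only on a successful match, can clear it; a sketch of the retry block with that change:

if "_grokparsefailure" in [tags] {
  grok {
    match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:syslog_message}" }
    add_field => [ "received_at", "%{@timestamp}" ]
    remove_field => [ "host", "path" ]
    # runs only when this grok matches, so genuine failures keep the tag
    remove_tag => [ "_grokparsefailure" ]
  }
}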

2 - Or another option, just an idea. (Note: Logstash conditionals use else if, not elif, and else takes no condition, so the retry-then-drop logic is written as two consecutive checks.)

filter {
  if [type] == "noi-syslog" {
    grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{DATA:syslog_program}(?:\[%{POSINT:syslog_pid}\])?: %{GREEDYDATA:syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      remove_field => [ "host", "path" ]
    }
    syslog_pri { }
    date {
      match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
    }
    if "_grokparsefailure" in [tags] {
      grok {
        match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:syslog_message}" }
        add_field => [ "received_at", "%{@timestamp}" ]
        remove_field => [ "host", "path" ]
        # clear the tag on a successful retry (see the note above)
        remove_tag => [ "_grokparsefailure" ]
      }
    }
    # anything still tagged after the retry could not be parsed at all
    if "_grokparsefailure" in [tags] {
      drop { }
    }
  }
}

The problem here is that the messages of type noi-syslog in your example differ from each other, and your grok filter only works for the first one. When grok fails to parse a message, it adds a tag named _grokparsefailure to the event.

The first example, which works with your grok pattern, is:

Mar 13 15:55:02 hostname /usr/bin/crontab[32708]: (root) LIST (root)

The second example, where the grok fails, is:

Mar 12 11:01:02 hostname run-parts(/etc/cron.hourly)[3970 starting mcelog.cron

The second message is malformed: it is missing the closing bracket (]) and the colon (:) after the PID 3970, so your grok pattern does not match.

Since your grok failed, the field syslog_timestamp does not exist, so your date filter has nothing to do and @timestamp is set to the time the event entered the Logstash pipeline.

You need a grok pattern for each message format you have. A quick way to correct syslog_timestamp is to catch the messages where the first grok failed and apply another grok filter to them, extracting the syslog_timestamp field and putting the rest of the message into another field.

Try adding the following conditional to your pipeline.

if "_grokparsefailure" in [tags] {
  grok {
      match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp } %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:rest_of_syslog_message}" }
      add_field => [ "received_at", "%{@timestamp}" ]
      remove_field => [ "host", "path" ]
   } 
}

The result of this grok will be something like this:

{
  "syslog_hostname": "hostname",
  "syslog_timestamp": "Mar 12 11:01:02",
  "rest_of_syslog_message": "run-parts(/etc/cron.hourly)[3970 starting mcelog.cron"
}
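One follow-up on this: the retry grok fixes syslog_timestamp, but @timestamp is still the pipeline entry time, because the original date filter ran before syslog_timestamp existed. The date filter therefore has to run again after the retry grok; a minimal sketch reusing the same match patterns:

if "_grokparsefailure" in [tags] {
  grok {
    match => { "message" => "%{SYSLOGTIMESTAMP:syslog_timestamp} %{SYSLOGHOST:syslog_hostname} %{GREEDYDATA:rest_of_syslog_message}" }
    add_field => [ "received_at", "%{@timestamp}" ]
    remove_field => [ "host", "path" ]
  }
  # parse the recovered timestamp into @timestamp
  date {
    match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
  }
}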