An Nginx ingest pipeline for Docker JSON-format logs
Docker log sample. The json-file logging driver wraps each stdout/stderr line of the container in a JSON object with log, stream, and time keys; the Nginx access line that needs parsing sits inside the log field:
{"log":"172.17.0.1 - - [02/Mar/2021:02:06:11 +0000] \"GET / HTTP/1.1\" 200 612 \"-\" \"curl/7.29.0\" \"-\"\r\n","stream":"stdout","time":"2021-03-02T02:06:11.391925182Z"}
PUT _ingest/pipeline/docker-nginx
{
  "description": "Pipeline for parsing docker Nginx access logs.",
  "processors": [
    {
      "grok": {
        "field": "log",
        "patterns": [
          "(%{NGINX_HOST} )?\"?(?:%{NGINX_ADDRESS_LIST:nginx.access.remote_ip_list}|%{NOTSPACE:source.address}) - (-|%{DATA:user.name}) \\[%{HTTPDATE:nginx.access.time}\\] \"%{DATA:nginx.access.info}\" %{NUMBER:http.response.status_code:long} %{NUMBER:http.response.body.bytes:long} \"(-|%{DATA:http.request.referrer})\" \"(-|%{DATA:user_agent.original})\""
        ],
        "pattern_definitions": {
          "NGINX_HOST": "(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?",
          "NGINX_NOTSEPARATOR": "[^\t ,:]+",
          "NGINX_ADDRESS_LIST": "(?:%{IP}|%{WORD})(\"?,?\\s*(?:%{IP}|%{WORD}))*"
        },
        "ignore_missing": true
      }
    },
    {
      "grok": {
        "field": "nginx.access.info",
        "patterns": [
          "%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}",
          ""
        ],
        "ignore_missing": true
      }
    },
    {
      "split": {
        "field": "nginx.access.remote_ip_list",
        "separator": "\"?,?\\s+",
        "ignore_missing": true
      }
    },
    {
      "split": {
        "field": "nginx.access.origin",
        "separator": "\"?,?\\s+",
        "ignore_missing": true
      }
    },
    {
      "set": {
        "field": "source.address",
        "if": "ctx.source?.address == null",
        "value": ""
      }
    },
    {
      "script": {
        "if": "ctx.nginx?.access?.remote_ip_list != null && ctx.nginx.access.remote_ip_list.length > 0",
        "lang": "painless",
        "source": "boolean isPrivate(def dot, def ip) {\n try {\n StringTokenizer tok = new StringTokenizer(ip, dot);\n int firstByte = Integer.parseInt(tok.nextToken());\n int secondByte = Integer.parseInt(tok.nextToken());\n if (firstByte == 10) {\n return true;\n }\n if (firstByte == 192 && secondByte == 168) {\n return true;\n }\n if (firstByte == 172 && secondByte >= 16 && secondByte <= 31) {\n return true;\n }\n if (firstByte == 127) {\n return true;\n }\n return false;\n }\n catch (Exception e) {\n return false;\n }\n} try {\n ctx.source.address = null;\n if (ctx.nginx.access.remote_ip_list == null) {\n return;\n }\n def found = false;\n for (def item : ctx.nginx.access.remote_ip_list) {\n if (!isPrivate(params.dot, item)) {\n ctx.source.address = item;\n found = true;\n break;\n }\n }\n if (!found) {\n ctx.source.address = ctx.nginx.access.remote_ip_list[0];\n }\n} catch (Exception e) {\n ctx.source.address = null;\n}",
        "params": {
          "dot": "."
        }
      }
    },
    {
      "remove": {
        "field": "source.address",
        "if": "ctx.source.address == null"
      }
    },
    {
      "grok": {
        "field": "source.address",
        "patterns": [
          "^%{IP:source.ip}$"
        ],
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "@timestamp",
        "target_field": "event.created"
      }
    },
    {
      "date": {
        "field": "nginx.access.time",
        "target_field": "@timestamp",
        "formats": [
          "dd/MMM/yyyy:H:m:s Z"
        ],
        "on_failure": [
          {
            "append": {
              "field": "error.message",
              "value": "{{ _ingest.on_failure_message }}"
            }
          }
        ]
      }
    },
    {
      "remove": {
        "field": [
          "stream",
          "log",
          "input.type",
          "ecs.version",
          "agent.type",
          "agent.version",
          "agent.id",
          "agent.hostname",
          "agent.ephemeral_id",
          "event.created",
          "nginx.access.time"
        ]
      }
    }
  ],
  "on_failure": [
    {
      "set": {
        "field": "error.message",
        "value": "{{ _ingest.on_failure_message }}"
      }
    }
  ]
}
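Before wiring Filebeat to it, the pipeline can be checked with the _simulate API. A minimal sketch: the document mirrors the sample line above, plus stand-in Filebeat metadata (@timestamp, input.type, ecs.version, agent.*), because the final remove processor has no ignore_missing and would fail on a document lacking those fields; the agent values here are placeholder assumptions, not real IDs.

POST _ingest/pipeline/docker-nginx/_simulate
{
  "docs": [
    {
      "_source": {
        "@timestamp": "2021-03-02T02:06:11.391Z",
        "log": "172.17.0.1 - - [02/Mar/2021:02:06:11 +0000] \"GET / HTTP/1.1\" 200 612 \"-\" \"curl/7.29.0\" \"-\"\r\n",
        "stream": "stdout",
        "time": "2021-03-02T02:06:11.391925182Z",
        "input": { "type": "log" },
        "ecs": { "version": "1.7.0" },
        "agent": {
          "type": "filebeat",
          "version": "7.11.0",
          "id": "00000000-0000-0000-0000-000000000000",
          "hostname": "test-host",
          "ephemeral_id": "00000000-0000-0000-0000-000000000000"
        }
      }
    }
  ]
}

The response should show http.response.status_code: 200, @timestamp taken from the access-log time, and source.ip: 172.17.0.1 (the only address in remote_ip_list is private, so the script falls back to the first entry).

Filebeat configuration (filebeat.yml): decode the Docker JSON file and route the events through the pipeline: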
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /tmp/test.json
  # ignore_older: 2h
  encoding: plain
  json.keys_under_root: true
  json.overwrite_keys: true
  fields:
    type: docker-nginx
  pipeline: "docker-nginx"

#============================= Filebeat modules ===============================
filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.ilm.enabled: false
setup.template.settings:
  index.number_of_shards: 3
  index.codec: best_compression

output.elasticsearch:
  username: elastic
  password: elasticsearch2021
  hosts: ["http://192.168.11.111:9200", "http://192.168.11.112:9200", "http://192.168.11.113:9200"]
  worker: 2
  bulk_max_size: 256
  indices:
    - index: "docker-nginx"
      when.equals:
        fields:
          type: "docker-nginx"