Tags: elasticsearch, logstash, logstash-grok, elasticsearch-ruby

In Logstash, how to remove any JSON/XML field larger than a specific size


In a few words, this is the stack we use in our company for our corporate logs:

All request/response log files -> Filebeat -> Kafka -> Logstash -> Elasticsearch

Pretty common approach.

Nevertheless, an unexpected request/response format may contain a very large XML/JSON field. I want to remove only that specific field/node, no matter at which level it sits in the JSON or XML structure, since the request/response can be either SOAP (XML) or REST (JSON).

In other words, I don't know the request/response message tree/structure in advance, and I don't want to discard the whole message based on its total size, only a specific field/node larger than a certain size.

For instance:

2019-12-03 21:41:59.409  INFO 4055 --- [ntainer#0-0-C-1] Transaction Consumer                     : Message received successfully: {"serviceId":"insertEft_TransferPropias","sourceTransaction":"CMMO","xml":"PD94bWw some very large base 64 data ...}
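
The desired outcome is to keep the event and drop only the oversized node, so the sample above would end up roughly like this (the large "xml" field is just the one from that example line, not a fixed field name):

{"serviceId":"insertEft_TransferPropias","sourceTransaction":"CMMO"}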

My whole Docker Compose file is:

version: '3.2'
services:

  zoo1:
    image: elevy/zookeeper:latest
    environment:
      MYID: 1
      SERVERS: zoo1
    ports:
      - "2181:2181"

  kafka1:
    image: wurstmeister/kafka
    command: [start-kafka.sh]
    depends_on:
      - zoo1
    links:
      - zoo1
    ports:
      - "9092:9092"
    environment:
      KAFKA_LISTENERS: PLAINTEXT://:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_LOG_RETENTION_HOURS: "168"
      KAFKA_LOG_RETENTION_BYTES: "100000000"
      KAFKA_ZOOKEEPER_CONNECT:  zoo1:2181
      KAFKA_CREATE_TOPICS: "log:1:1"
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'

  filebeat:
    image: docker.elastic.co/beats/filebeat:7.5.2
    command: filebeat -e -strict.perms=false
    volumes:
      - "//c/Users/Cast/docker_folders/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro"
      - "//c/Users/Cast/docker_folders/sample-logs:/sample-logs"
    links:
      - kafka1
    depends_on:
      - kafka1

  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.5.2
    environment:
      - cluster.name=docker-cluster
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.enabled=false
      - xpack.watcher.enabled=false
      - discovery.type=single-node
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - "//c/Users/Cast/docker_folders/esdata:/usr/share/elasticsearch/data"
    ports:
      - "9200:9200"

  kibana:
    image: docker.elastic.co/kibana/kibana:7.5.2
    volumes:
      - "//c/Users/Cast/docker_folders/kibana.yml:/usr/share/kibana/config/kibana.yml"
    restart: always
    environment:
    - SERVER_NAME=kibana.localhost
    - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
    links:
      - elasticsearch
    depends_on:
      - elasticsearch

  logstash:
    image: docker.elastic.co/logstash/logstash:7.5.2
    volumes:
      - "//c/Users/Cast/docker_folders/logstash.conf:/config-dir/logstash.conf"
    restart: always
    command: logstash -f /config-dir/logstash.conf
    ports:
      - "9600:9600"
      - "7777:7777"
    links:
      - elasticsearch
      - kafka1

logstash.conf

input{
  kafka{
    codec => "json"
    bootstrap_servers => "kafka1:9092"
    topics => ["app_logs","request_logs"]
    tags => ["my-app"]
  }
}

filter {    
    if [fields][topic_name] == "app_logs" {     
        grok {
            match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} *%{LOGLEVEL:level} %{DATA:pid} --- *\[%{DATA:application}] *%{DATA:class} : %{GREEDYDATA:msglog}" }
            tag_on_failure => ["not_date_line"]
        }           
        date {
            match => ["timestamp", "ISO8601"]
            target => "timestamp"
        }   
        if "_grokparsefailure" in [tags] {
            mutate {
                add_field => { "level" => "UNKNOWN" }
            }
        }       
    } 
}

output {
  elasticsearch {
    hosts => ["elasticsearch:9200"]
    index => "%{[fields][topic_name]}-%{+YYYY.MM.dd}"
  }
}
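
For context, the [fields][topic_name] value used in the filter condition and in the index name above is not added by the kafka input itself; it is presumably set per input in filebeat.yml. Since the actual filebeat.yml is not shown here, the snippet below is only an illustration of how that field could be produced:

filebeat.inputs:
  - type: log
    paths:
      - /sample-logs/*.log
    fields:
      topic_name: "app_logs"   # ends up as [fields][topic_name] on the event

output.kafka:
  hosts: ["kafka1:9092"]
  topic: "%{[fields.topic_name]}"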

Imagined solution

...
        grok {
            match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} *%{LOGLEVEL:level} %{DATA:pid} --- *\[%{DATA:application}] *%{DATA:class} : %{GREEDYDATA:msglog}" }
            tag_on_failure => ["not_date_line"]
        }
...
        if "_grokparsefailure" in [tags] {
            mutate {
              # "field1" ... "fieldN" to be discovered dynamically based on their size
              remove_field => [ "field1", "field2", "field3", ... "fieldN" ]
            }
        }
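
To make the "discovered dynamically based on size" part concrete, here is a minimal sketch of a ruby filter that recursively walks a JSON payload and drops any string value above a threshold. The source field name ([data][entity]), the 1000-character limit and the _jsonprunefailure tag are illustrative assumptions, not part of the pipeline above:

ruby {
  init => 'require "json"'
  code => '
    # Sketch only: field name and the 1000-character limit are assumptions
    max_len = 1000

    # Recursively remove any string value longer than max_len from hashes/arrays
    prune = lambda do |node|
      case node
      when Hash
        node.delete_if { |_k, v| v.is_a?(String) && v.length > max_len }
        node.each_value { |v| prune.call(v) }
      when Array
        node.each { |v| prune.call(v) }
      end
    end

    raw = event.get("[data][entity]")
    if raw.is_a?(String)
      begin
        parsed = JSON.parse(raw)
        prune.call(parsed)
        event.set("[data][entity]", parsed.to_json)
      rescue JSON::ParserError
        event.tag("_jsonprunefailure")
      end
    end
  '
}

Because the payload is parsed once into a plain Ruby hash and written back with to_json, this removes the oversized node no matter how deeply it is nested, at the cost of parsing the whole payload in memory, which is exactly the concern raised in the edit below.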

*** edited

I am not sure how good this approach is, mainly because it seems to me that it would force Logstash to behave as a blocking stage, pulling every JSON that comes across into memory and parsing it before saving to Elasticsearch. BTW, it has not been tested under stress yet; a colleague of mine proposed this alternative:

input...
filter {
  if "JAVALOG" in [tags] {
    grok {
      match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} %{WORD:severity} (?<thread>\[.*]) (?<obj>.*)" }
    }
    json {
      source => "obj"
      target => "data"
      skip_on_invalid_json => true
    }
    json {
      source => "[data][entity]"
      target => "request"
      skip_on_invalid_json => true
    }
    mutate { remove_field => [ "message", "obj" ] }
    mutate { lowercase => [ "[tags][0]" ] }
    mutate { lowercase => [ "meta_path" ] }
    ruby {
      code => '
        request_msg = JSON.parse(event.get("[data][entity]"))
        # Only the top-level keys of [data][entity] are checked here (no recursion into nested objects).
        # Iterate over a copy of the keys so entries can be deleted safely while looping.
        request_msg.keys.each do |key|
          logger.info("field is: #{key}")
          if request_msg[key].to_s.length > 10
            logger.info("field length is greater than 10!")
            request_msg.delete(key)
          end
        end
        # Write the pruned payload back as JSON (to_json, not to_s) once the loop is done
        event.set("[data][entity]", request_msg.to_json)
      '
    }
    mutate { remove_field => ["request"] }
    json {
      source => "data"
      target => "data_1"
      skip_on_invalid_json => true
    }
  }
}
output ...

Solution

  • Have you looked at using the ignore_above mapping setting, available through the index template used by your Logstash indices?

    Below is an example:

    PUT my_index
    {
      "mappings": {
        "properties": {
          "message": {
            "type": "keyword",
            "ignore_above": 20 
          }
        }
      }
    }
    

    Source: https://www.elastic.co/guide/en/elasticsearch/reference/current/ignore-above.html
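
    Since the indices here are created dynamically by the elasticsearch output (app_logs-*, request_logs-*), the same idea can be applied through a 7.x-style legacy index template. The template name, the index patterns and the 1024 limit below are illustrative assumptions, not settings taken from the question:

    PUT _template/corporate-logs
    {
      "index_patterns": ["app_logs-*", "request_logs-*"],
      "mappings": {
        "dynamic_templates": [
          {
            "strings_as_keyword": {
              "match_mapping_type": "string",
              "mapping": {
                "type": "keyword",
                "ignore_above": 1024
              }
            }
          }
        ]
      }
    }

    Keep in mind that ignore_above only stops the oversized value from being indexed; the original string is still stored in _source, so this caps index and mapping bloat rather than physically removing the field from the document.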