Issue with publishing data to Elasticsearch #151

Open · MilindDivre opened this issue May 15, 2020 · 1 comment

@MilindDivre

I am getting the error below:
2020/05/15 17:46:10.154900 beat.go:173: INFO dockbeat sucessfully setup. Start running.
2020/05/15 17:46:10.154957 dockbeat.go:196: INFO dockbeat%!(EXTRA string=dockbeat is running! Hit CTRL-C to stop it.)
2020/05/15 17:46:16.970181 dockbeat.go:320: INFO dockbeat%!(EXTRA string=Publishing %v events, int=5)
2020/05/15 17:46:16.971792 dockbeat.go:320: INFO dockbeat%!(EXTRA string=Publishing %v events, int=5)
2020/05/15 17:46:17.119946 client.go:112: ERR Failed to perform any bulk index operations: 406 Not Acceptable
2020/05/15 17:46:17.120015 single.go:76: INFO Error publishing events (retrying): 406 Not Acceptable
2020/05/15 17:46:17.120303 single.go:152: INFO send fail
2020/05/15 17:46:17.120530 single.go:159: INFO backoff retry: 1s
2020/05/15 17:46:18.582133 client.go:112: ERR Failed to perform any bulk index operations: 406 Not Acceptable
2020/05/15 17:46:18.582187 single.go:76: INFO Error publishing events (retrying): 406 Not Acceptable
2020/05/15 17:46:18.582200 single.go:152: INFO send fail
2020/05/15 17:46:18.582541 single.go:159: INFO backoff retry: 2s
2020/05/15 17:46:20.992956 client.go:112: ERR Failed to perform any bulk index operations: 406 Not Acceptable
2020/05/15 17:46:20.993441 single.go:76: INFO Error publishing events (retrying): 406 Not Acceptable
2020/05/15 17:46:20.993600 single.go:152: INFO send fail
2020/05/15 17:46:20.993785 single.go:159: INFO backoff retry: 4s
2020/05/15 17:46:21.984438 dockbeat.go:320: INFO dockbeat%!(EXTRA string=Publishing %v events, int=5)
2020/05/15 17:46:21.985485 dockbeat.go:320: INFO dockbeat%!(EXTRA string=Publishing %v events, int=5)
^C2020/05/15 17:46:23.687210 dockbeat.go:235: INFO dockbeat%!(EXTRA string=Stopping dockbeat)
2020/05/15 17:46:23.687262 beat.go:183: INFO Cleaning up dockbeat before shutting down.

@devopsenggineer commented Sep 5, 2020

This config worked for me. I think it was a version compatibility issue (I got a hint from this link: https://discuss.elastic.co/t/filebeat-having-error-in-bulk-index-operations/141339).
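
As far as I understand (my assumption, based on that thread): Elasticsearch 6.x and later reject bulk requests that don't carry a Content-Type: application/json header with 406 Not Acceptable, and the old libbeat inside dockbeat doesn't send that header, which is why pinning the stack to 5.x helps. You can check which Elasticsearch version the beat is talking to with:

# Check the Elasticsearch server version
# (the response contains a "version": { "number": ... } field)
curl 'localhost:9200'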

Note: if the index is not getting created in the Kibana GUI, use the curl 'localhost:9200/_cat/indices?v' command to get the index name pattern dockbeat-yyyy.mm.dd (e.g. dockbeat-2020.09.05), then paste it into the Kibana GUI as dockbeat* to get the Docker container metrics for every day.
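
For reference, the output of that command looks roughly like this (illustrative only; your uuid, doc count and sizes will differ):

health status index               uuid                   pri rep docs.count docs.deleted store.size pri.store.size
yellow open   dockbeat-2020.09.05 aBcDeFgHiJkLmNoPqRsTuV   5   1        120            0      1.2mb          1.2mb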

You can use this docker-compose file to monitor Docker containers.

Run sudo docker-compose up -d to start it.
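
Once it is up, a quick way to confirm everything started cleanly (standard docker-compose and curl commands, nothing specific to this stack):

sudo docker-compose ps                     # all services should show State "Up"
sudo docker-compose logs -f docker-beat    # watch for publish errors like the 406 above
curl 'localhost:9200/_cat/indices?v'       # dockbeat-* indices should appear after a few periods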

################################Start of docker-compose.yml file#########################################
#docker-compose.yml file
version: "3"

services: 
  elasticsearch:
#    image: docker.elastic.co/elasticsearch/elasticsearch:6.0.0
    image: elasticsearch:5.0.0
    environment: 
      - discovery.type=single-node
#      - xpack.security.enabled=false
    ports:
      - 9200:9200
    networks: 
      - logging-network

  docker-beat:
    image: ingensi/dockbeat:latest
    depends_on:
      - elasticsearch
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - ./dockbeat.yml:/etc/dockbeat/dockbeat.yml
      - ./logs:/var/logs/dockbeat
    networks:
      - logging-network
   

  logstash:
#    image: docker.elastic.co/logstash/logstash:6.0.0
    image: logstash:5.0.0
    depends_on: 
      - elasticsearch
#    ports:
#      - 12201:12201/udp
#      - 5140:5140/udp
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro 
    networks:
      - logging-network

  kibana:
#    image: docker.elastic.co/kibana/kibana:6.0.0
    image: kibana:5.0.0
    depends_on:
      - logstash
    ports: 
      - 5601:5601
    networks: 
      - logging-network

  httpd:
    image: httpd:latest
    depends_on:
      - logstash
    ports:
      - 8065:80
    logging:
      driver: gelf
#      driver: syslog
      options:
        # Use udp://host.docker.internal:12201 when you are using Docker Desktop for Mac
        # docs: https://docs.docker.com/docker-for-mac/networking/#i-want-to-connect-from-a-container-to-a-service-on-the-host
        # issue: https://github.com/lvthillo/docker-elk/issues/1
        gelf-address: "udp://localhost:12201"
#        gelf-address: "udp://localhost:5140"
#        syslog-address: "udp://localhost:5140"
       

networks: 
  logging-network:
    driver: bridge
################################End of docker-compose.yml file#########################################

################################start of dockbeat.yml file#########################################
#dockbeat.yml file
################### Dockbeat Configuration Example #########################

############################# Dockbeat ######################################

dockbeat:
  # Defines how often a docker stat is sent to the output
  period: ${PERIOD:5}

  # Defines the docker socket path
  # By default, this will get the unix:///var/run/docker.sock
  socket: ${DOCKER_SOCKET:unix:///var/run/docker.sock}

  # If dockbeat has to deal with a TLS-enabled docker daemon, you need to enable TLS and configure path for key and certificates.
  tls:
    # By default, TLS is disabled
    enable: ${DOCKER_ENABLE_TLS:false}

    # Path to the ca file
    ca_path: ${DOCKER_CA_PATH}

    # Path to the cert file
    cert_path: ${DOCKER_CERT_PATH}

    # Path to the key file
    key_path: ${DOCKER_KEY_PATH}
###############################################################################
############################# Libbeat Config ##################################
# Base config file used by all other beats for using libbeat features

############################# Output ##########################################

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:

  ### Elasticsearch as output
  elasticsearch:
    # Array of hosts to connect to.
    # Scheme and port can be left out and will be set to the default (http and 9200)
    # In case you specify an additional path, the scheme is required: http://localhost:9200/path
    # IPv6 addresses should always be defined as: https://[2001:db8::1]:9200
    hosts: ["elasticsearch:9200"]

    # Optional protocol and basic auth credentials.
    #protocol: "https"
    #username: "admin"
    #password: "s3cr3t"

    # Dictionary of HTTP parameters to pass within the url with index operations.
    #parameters:
      #param1: value1
      #param2: value2

    # Number of workers per Elasticsearch host.
    #worker: 1

    # Optional index name. The default is "dockbeat" and generates
    # [dockbeat-]YYYY.MM.DD keys.
    index: "dockbeat"

    # A template is used to set the mapping in Elasticsearch
    # By default template loading is disabled and no template is loaded.
    # These settings can be adjusted to load your own template or overwrite existing ones
    #template:

      # Template name. By default the template name is dockbeat.
      #name: "dockbeat"

      # Path to template file
      #path: "dockbeat.template.json"

      # Overwrite existing template
      #overwrite: false

    # Optional HTTP Path
    #path: "/elasticsearch"

    # Proxy server url
    #proxy_url: http://proxy:3128

    # The number of times a particular Elasticsearch index operation is attempted. If
    # the indexing operation doesn't succeed after this many retries, the events are
    # dropped. The default is 3.
    #max_retries: 3

    # The maximum number of events to bulk in a single Elasticsearch bulk API index request.
    # The default is 50.
    #bulk_max_size: 50

    # Configure http request timeout before failing a request to Elasticsearch.
    #timeout: 90

    # The number of seconds to wait for new events between two bulk API index requests.
    # If `bulk_max_size` is reached before this interval expires, additional bulk index
    # requests are made.
    #flush_interval: 1

    # Boolean that sets if the topology is kept in Elasticsearch. The default is
    # false. This option makes sense only for Packetbeat.
    #save_topology: false

    # The time to live in seconds for the topology information that is stored in
    # Elasticsearch. The default is 15 seconds.
    #topology_expire: 15

    # tls configuration. By default is off.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/ca.pem"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/cert.pem"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/cert.key"

      # Controls whether the client verifies server certificates and host name.
      # If insecure is set to true, all server host names and certificates will be
      # accepted. In this mode TLS based connections are susceptible to
      # man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []

      # Configure minimum TLS version allowed for connection to logstash
      #min_version: 1.0

      # Configure maximum TLS version allowed for connection to logstash
      #max_version: 1.2


  ### Logstash as output
  #logstash:
    # The Logstash hosts
    #hosts: ["localhost:5044"]

    # Number of workers per Logstash host.
    #worker: 1

    # Set gzip compression level.
    #compression_level: 3

    # Optional load balance the events between the Logstash hosts
    #loadbalance: true

    # Optional index name. The default index name is set to name of the beat
    # in all lowercase.
    #index: dockbeat

    # SOCKS5 proxy server URL
    #proxy_url: socks5://user:password@socks5-server:2233

    # Resolve names locally when using a proxy server. Defaults to false.
    #proxy_use_local_resolver: false

    # Optional TLS. By default is off.
    #tls:
      # List of root certificates for HTTPS server verifications
      #certificate_authorities: ["/etc/pki/root/ca.pem"]

      # Certificate for TLS client authentication
      #certificate: "/etc/pki/client/cert.pem"

      # Client Certificate Key
      #certificate_key: "/etc/pki/client/cert.key"

      # Controls whether the client verifies server certificates and host name.
      # If insecure is set to true, all server host names and certificates will be
      # accepted. In this mode TLS based connections are susceptible to
      # man-in-the-middle attacks. Use only for testing.
      #insecure: true

      # Configure cipher suites to be used for TLS connections
      #cipher_suites: []

      # Configure curve types for ECDHE based cipher suites
      #curve_types: []


  ### File as output
  #file:
    # Path to the directory where to save the generated files. The option is mandatory.
    #path: "/tmp/dockbeat"

    # Name of the generated files. The default is `dockbeat` and it generates files: `dockbeat`, `dockbeat.1`, `dockbeat.2`, etc.
    #filename: dockbeat

    # Maximum size in kilobytes of each file. When this size is reached, the files are
    # rotated. The default value is 10240 kB.
    #rotate_every_kb: 10000

    # Maximum number of files under path. When this number of files is reached, the
    # oldest file is deleted and the rest are shifted from last to first. The default
    # is 7 files.
    #number_of_files: 7


  ### Console output
  # console:
    # Pretty print json event
    #pretty: false


############################# Shipper #########################################

shipper:
  # The name of the shipper that publishes the network data. It can be used to group
  # all the transactions sent by a single shipper in the web interface.
  # If this option is not defined, the hostname is used.
  #name:

  # The tags of the shipper are included in their own field with each
  # transaction published. Tags make it easy to group servers by different
  # logical properties.
  #tags: ["service-X", "web-tier"]

  # Optional fields that you can specify to add additional information to the
  # output. Fields can be scalar values, arrays, dictionaries, or any nested
  # combination of these.
  #fields:
  #  env: staging

  # If this option is set to true, the custom fields are stored as top-level
  # fields in the output document instead of being grouped under a fields
  # sub-dictionary. Default is false.
  #fields_under_root: false

  # Uncomment the following if you want to ignore transactions created
  # by the server on which the shipper is installed. This option is useful
  # to remove duplicates if shippers are installed on multiple servers.
  #ignore_outgoing: true

  # How often (in seconds) shippers are publishing their IPs to the topology map.
  # The default is 10 seconds.
  #refresh_topology_freq: 10

  # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
  # All the IPs will be deleted afterwards. Note that the value must be higher than
  # refresh_topology_freq. The default is 15 seconds.
  #topology_expire: 15

  # Internal queue size for single events in processing pipeline
  #queue_size: 1000

  # Sets the maximum number of CPUs that can be executing simultaneously. The
  # default is the number of logical CPUs available in the system.
  #max_procs:

  # Configure local GeoIP database support.
  # If no paths are configured, geoip is disabled.
  #geoip:
    #paths:
    #  - "/usr/share/GeoIP/GeoLiteCity.dat"
    #  - "/usr/local/var/GeoIP/GeoLiteCity.dat"


############################# Logging #########################################

# There are three options for the log output: syslog, file, stderr.
# On Windows systems, the log files are sent to the file output by default;
# on all other systems, to syslog by default.
logging:

  # Send all logging output to syslog. On Windows default is false, otherwise
  # default is true.
  #to_syslog: true
  to_syslog: false

  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
  # limit is reached.
  #to_files: false

  # To enable logging to files, the to_files option has to be set to true
  files:
    # The directory where the log files will be written to.
    #path: /var/log/mybeat

    # The name of the files where the logs are written to.
    #name: mybeat

    # Configure log file size limit. If the limit is reached, the log file will be
    # automatically rotated
    rotateeverybytes: 10485760 # = 10MB

    # Number of rotated log files to keep. Oldest files will be deleted first.
    #keepfiles: 7

  # Enable debug output for selected components. To enable all selectors use ["*"]
  # Other available selectors are beat, publish, service
  # Multiple selectors can be chained.
  #selectors: [ ]

  # Sets log level. The default log level is error.
  # Available log levels are: critical, error, warning, info, debug
  #level: error
################################End of dockbeat.yml file#########################################
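
With the stack running, you can also sanity-check that dockbeat events actually land in Elasticsearch (a plain search query, nothing dockbeat-specific; the hits should be container stats documents):

curl 'localhost:9200/dockbeat-*/_search?size=1&pretty'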




################################Start of logstash.conf file#########################################
# If you want to collect logs using Docker's gelf driver with the help of Logstash, use the logstash.conf file below.
# logstash.conf

input {
  gelf {}
#  syslog {}
  
#  udp {
#    type => "gelf"
#    port => "5140"

#  }

#  tcp {
#    type => "syslog"
#    port => "5140"
#  }
}

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
  }  
}
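
To smoke-test the gelf pipeline end to end, you can run a throwaway container with the gelf log driver pointed at the same address (standard docker run flags; adjust the gelf-address for your platform as in the compose file comments):

sudo docker run --rm \
  --log-driver gelf \
  --log-opt gelf-address=udp://localhost:12201 \
  alpine echo "hello gelf"
# "hello gelf" should then show up in the logstash index in Kibana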

