Tags: docker, elasticsearch, kibana, elk, elasticsearch-opendistro

Kibana OpenDistro can't connect to the Elasticsearch OpenDistro container on Docker


I am trying to run Kibana OpenDistro against Elasticsearch OpenDistro through a docker-compose file on a virtual machine in Azure. When I run docker-compose, I can access Kibana in the browser at http://myipadress:5601/app/kibana, but Kibana cannot connect to Elasticsearch.

My docker-compose file:

version: '3'
services:
  odfe-node1:
    image: amazon/opendistro-for-elasticsearch:1.7.0
    container_name: odfe-node1
    environment:
      - cluster.name=odfe-cluster
      - node.name=odfe-node1
      - discovery.seed_hosts=odfe-node1
      - cluster.initial_master_nodes=odfe-node1
      - bootstrap.memory_lock=true # along with the memlock settings below, disables swapping
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
    ulimits:
      memlock:
        soft: -1
        hard: -1
      nofile:
        soft: 65536 # maximum number of open files for the Elasticsearch user, set to at least 65536 on modern systems
        hard: 65536
    volumes:
      - odfe-elasticdata:/usr/share/elasticsearch/data
      - odfe-elasticconfig:/usr/share/elasticsearch/config
    ports:
      - 9200:9200
      - 9600:9600 # required for Performance Analyzer
    networks:
      - odfe-net
  kibana:
    image: amazon/opendistro-for-elasticsearch-kibana:1.7.0
    container_name: odfe-kibana
    ports:
      - 5601:5601
    expose:
      - "5601"
    volumes:
      - odfe-kibanaconfig:/usr/share/kibana/config
    environment:
      ELASTICSEARCH_URL: https://odfe-node1:9200
      ELASTICSEARCH_HOSTS: https://odfe-node1:9200
    networks:
      - odfe-net

volumes:
  odfe-elasticdata:
  odfe-elasticconfig:
  odfe-kibanaconfig:
networks:
  odfe-net:

Error messages:

odfe-kibana   | {"type":"log","@timestamp":"2020-05-28T18:23:11Z","tags":["error","elasticsearch","admin"],"pid":1,"message":"Request error, retrying\nGET https://odfe-node1:9200/_nodes?filter_path=nodes.*.version%2Cnodes.*.http.publish_address%2Cnodes.*.ip => connect ECONNREFUSED 172.22.0.3:9200"}
odfe-kibana   | {"type":"log","@timestamp":"2020-05-28T18:32:24Z","tags":["warning","elasticsearch","admin"],"pid":1,"message":"Unable to revive connection: https://odfe-node1:9200/"}
odfe-kibana   | {"type":"log","@timestamp":"2020-05-28T18:32:24Z","tags":["warning","elasticsearch","admin"],"pid":1,"message":"No living connections"}
odfe-kibana   | {"type":"log","@timestamp":"2020-05-28T18:32:24Z","tags":["error","elasticsearch-service"],"pid":1,"message":"Unable to retrieve version information from Elasticsearch nodes."}
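
The connect ECONNREFUSED line suggests the Kibana container resolves odfe-node1 on the Docker network, but nothing is accepting connections on port 9200 at that moment. One way to narrow this down (a sketch, assuming curl is available inside the Kibana image and the default admin:admin demo credentials) is to repeat the request from inside the Kibana container:

docker exec -it odfe-kibana curl -k -u admin:admin https://odfe-node1:9200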

If I run docker ps and a few curl tests, I get the following:

CONTAINER ID        IMAGE                                              COMMAND                  CREATED             STATUS              PORTS                                                      NAMES
41ded49c03e5        amazon/opendistro-for-elasticsearch:1.7.0          "/usr/local/bin/dock…"   48 minutes ago      Up 2 seconds        0.0.0.0:9200->9200/tcp, 0.0.0.0:9600->9600/tcp, 9300/tcp   odfe-node1
84bed086ab5c        amazon/opendistro-for-elasticsearch-kibana:1.7.0   "/usr/local/bin/kiba…"   48 minutes ago      Up 2 seconds        0.0.0.0:5601->5601/tcp                                     odfe-kibana

-------------------------------
[root@ServerEFK _data]# curl -XGET https://localhost:9200 -u admin:admin --insecure
{
  "name" : "odfe-node1",
  "cluster_name" : "odfe-cluster",
  "cluster_uuid" : "Ax2q2FrEQgCQHKZoDT7C0Q",
  "version" : {
    "number" : "7.6.1",
    "build_flavor" : "oss",
    "build_type" : "tar",
    "build_hash" : "aa751e09be0a5072e8570670309b1f12348f023b",
    "build_date" : "2020-02-29T00:15:25.529771Z",
    "build_snapshot" : false,
    "lucene_version" : "8.4.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

--------------------------------------
[root@ServerEFK _data]# curl -XGET https://localhost:9200/_cat/nodes?v -u admin:admin --insecure
ip         heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
172.22.0.3           22          72   4    0.16    0.81     0.86 dim       *      odfe-node1

--------------------------------------
[root@ServerEFK _data]# curl -XGET https://localhost:9200/_cat/plugins?v -u admin:admin --insecure
name       component                       version
odfe-node1 opendistro-anomaly-detection    1.7.0.0
odfe-node1 opendistro-job-scheduler        1.7.0.0
odfe-node1 opendistro-knn                  1.7.0.0
odfe-node1 opendistro_alerting             1.7.0.0
odfe-node1 opendistro_index_management     1.7.0.0
odfe-node1 opendistro_performance_analyzer 1.7.0.0
odfe-node1 opendistro_security             1.7.0.0
odfe-node1 opendistro_sql                  1.7.0.0

---------------------------------------
[root@ServerEFK _data]# curl -XGET https://localhost:9200/_cat/indices?pretty -u admin:admin --insecure
yellow open security-auditlog-2020.05.28 6xPW0yPyRGKG1owKbBl-Gw 1 1 18 0 144.6kb 144.6kb
green  open .kibana_92668751_admin_1     mgAiKHNKQJ-sgFDXw7Iwyw 1 0  1 0   3.7kb   3.7kb
green  open .kibana_92668751_admin_2     VvRiV16jRlualCWJvyYFTA 1 0  1 0   3.7kb   3.7kb
green  open .opendistro_security         NHxbWWv0RJu8kScOtsejTw 1 0  7 0  36.3kb  36.3kb
green  open .kibana_1                    s2DBw7Y_SUS9Go-u5qOrjg 1 0  1 0   4.1kb   4.1kb
green  open .tasks                       0kVxFOcqQzOxyAYTGUIWDw 1 0  1 0   6.3kb   6.3kb
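
One detail worth noting in the docker ps output above: both containers were created 48 minutes ago but show a status of "Up 2 seconds", which may indicate they are repeatedly crashing and restarting. A quick check (a sketch using standard Docker commands) is to inspect the most recent Elasticsearch logs for a fatal error:

docker logs --tail 50 odfe-node1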

Can anyone help, please?


Solution

  • OK, I was able to get a single-node Elasticsearch & Kibana setup working with this docker-compose.yml:

    version: '3'
    services:
      odfe-node1:
        image: amazon/opendistro-for-elasticsearch:1.8.0
        container_name: odfe-node1
        environment:
          - cluster.name=odfe-cluster
          - discovery.type=single-node
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m" # minimum and maximum Java heap size, recommend setting both to 50% of system RAM
        ulimits:
          memlock:
            soft: -1
            hard: -1
          nofile:
            soft: 65536 # maximum number of open files for the Elasticsearch user, set to at least 65536 on modern systems
            hard: 65536
        volumes:
          - odfe-data1:/usr/share/elasticsearch/data
        ports:
          - 9200:9200
          - 9600:9600 # required for Performance Analyzer
        networks:
          - odfe-net
      kibana:
        image: amazon/opendistro-for-elasticsearch-kibana:1.8.0
        container_name: odfe-kibana
        ports:
          - 5601:5601
        expose:
          - "5601"
        environment:
          ELASTICSEARCH_URL: https://odfe-node1:9200
          ELASTICSEARCH_HOSTS: https://odfe-node1:9200
        volumes:
          - ./kibana.yml:/usr/share/kibana/config/kibana.yml
        networks:
          - odfe-net
    
    volumes:
      odfe-data1:
    
    networks:
      odfe-net:
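
    To bring the stack up and verify Elasticsearch (a minimal sketch, assuming the file above is saved as docker-compose.yml and the default admin:admin demo credentials are still in place):

    docker-compose up -d
    curl -k -u admin:admin https://localhost:9200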
    

    I started with this YAML file and changed the Elasticsearch environment variables to:

          - cluster.name=odfe-cluster
          - discovery.type=single-node
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    

    I also overrode the kibana.yml file:

        volumes:
          - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    

    with this:

    server.name: kibana
    server.host: "0"
    elasticsearch.hosts: https://odfe-node1:9200
    elasticsearch.ssl.verificationMode: none
    elasticsearch.username: admin
    elasticsearch.password: admin
    elasticsearch.requestHeadersWhitelist: ["securitytenant","Authorization"]
    
    opendistro_security.multitenancy.enabled: true
    opendistro_security.multitenancy.tenants.preferred: ["Private", "Global"]
    opendistro_security.readonly_mode.roles: ["kibana_read_only"]
    
    newsfeed.enabled: false
    telemetry.optIn: false
    telemetry.enabled: false
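
    Setting elasticsearch.ssl.verificationMode: none makes Kibana accept the self-signed demo certificates that ship with Open Distro. Once Kibana is up, the connection can be checked against its status endpoint (a sketch; with the security plugin enabled this may require the same admin credentials):

    curl -u admin:admin http://localhost:5601/api/status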
    

    I extracted the default kibana.yml (one way to do that is sketched after the snippet below) and changed:

    elasticsearch.hosts: https://odfe-node1:9200
    elasticsearch.username: admin
    elasticsearch.password: admin
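
    One way to extract the default kibana.yml from the image (a sketch using docker create and docker cp; the container name tmp-kibana is arbitrary):

    docker create --name tmp-kibana amazon/opendistro-for-elasticsearch-kibana:1.8.0
    docker cp tmp-kibana:/usr/share/kibana/config/kibana.yml ./kibana.yml
    docker rm tmp-kibana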
    

    But the two-node example in the documentation still doesn't work for me.

    Hope that helps