Tags: python, elasticsearch, elasticsearch-dsl, elasticsearch-py

Empty response while querying Elasticsearch multiple times


I wrote a script to get some data using nested queries and elasticsearch-dsl-py.

Everything worked well until I added the while ids_left > 0 loop to get data from Elasticsearch in chunks.

Now I get response data only for the first chunk, and an empty response <Response: []> for all subsequent chunks.

Why is that? How can I get the response data for every chunk?

import re
import sys

from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q

ES_HOST = 'es0.dev.lombardia'
ES_PORT = 9200
data = {'organizations': [[{'db_name': u'lombardia0', 'id': 10}]], 'ids': ['726GZWQ65682D,506GBBO25953J,977ENPZ91770F']}
# Keep only well-formed (alphanumeric/underscore) IDs from the comma-separated string
ids = filter(
        lambda x: x is not None,
        map(lambda x: x.strip() if re.match("^[a-zA-Z0-9_]*$", x.strip()) else None, data['ids'][0].split(','))
)
if len(ids) == 0:
    sys.exit("No valid IDs.")

organizations = data['organizations'][0]
total_num_of_ids = len(ids)
offset, chunk, ids_left = 0, 10, total_num_of_ids

root_path = 'Demographic_Details'
es = Elasticsearch(hosts = [{'host': ES_HOST, 'port': ES_PORT}])

for organization in organizations:
    index = 'logic_{0}'.format(organization['db_name'])

    while ids_left > 0:
        print('OFFSET %s' % str(offset))
        if (offset + chunk) <= total_num_of_ids:
            limit = offset + chunk
        else:
            limit = total_num_of_ids

        # Build the per-chunk search: nested bool query, one match clause per ID in 'should'
        search = Search(using=es).index(index).source(include=[root_path])
        q = Q('bool', must=[Q('nested', path=root_path, query=Q('bool', should=[], minimum_should_match=1))])
        search = search.query(q)

        for i in xrange(offset, limit):
            q = Q('match', **{'{0}.ID'.format(root_path): ids[i]})
            search.query.must[0].query.should.append(q)

        print(search.to_dict())
        search = search[offset:limit]
        response = search.execute()

        for hit in response:
            print(hit[root_path][0]['id'], hit[root_path][0]['match'])

        offset += chunk
        ids_left -= chunk

Printed results:

OFFSET 0, query:

{'query': {'bool': {'must': [{'nested': {'path': 'Demographic_Details', 'query': {'bool': {'minimum_should_match': 1, 'should': [{'match': {'Demographic_Details.ID': u'726GZWQ65682D'}}, {'match': {'Demographic_Details.ID': u'506GBBO25953J'}}, {'match': {'Demographic_Details.ID': u'977ENPZ91770F'}}, {'match': {'Demographic_Details.ID': u'250GDPU44147B'}}, {'match': {'Demographic_Details.ID': u'528FAOH03019V'}}, {'match': {'Demographic_Details.ID': u'827GNXH29227B'}}, {'match': {'Demographic_Details.ID': u'836GWCX91596A'}}, {'match': {'Demographic_Details.ID': u'482VURG98816U'}}, {'match': {'Demographic_Details.ID': u'989VKQX13983W'}}, {'match': {'Demographic_Details.ID': u'900GJVU10735D'}}]}}}}]}}, '_source': {'include': ['Demographic_Details']}}

Response data:

-> for hit in response:
(Pdb) cont
(u'827GNXH29227B', u'Y')
(u'250GDPU44147B', u'Y')
(u'836GWCX91596A', u'Y')
(u'482VURG98816U', u'Y')
(u'977ENPZ91770F', u'Y')
(u'989VKQX13983W', u'Y')
(u'528FAOH03019V', u'Y')
(u'900GJVU10735D', u'Y')
(u'726GZWQ65682D', u'Y')
(u'506GBBO25953J', u'Y')

OFFSET 10, query:

{'query': {'bool': {'must': [{'nested': {'path': 'Demographic_Details', 'query': {'bool': {'minimum_should_match': 1, 'should': [{'match': {'Demographic_Details.ID': u'731NBER88448A'}}, {'match': {'Demographic_Details.ID': u'963WLQD56637O'}}, {'match': {'Demographic_Details.ID': u'880RFWM18773C'}}, {'match': {'Demographic_Details.ID': u'037BASP48376D'}}, {'match': {'Demographic_Details.ID': u'554XZQP10563T'}}, {'match': {'Demographic_Details.ID': u'305KTYG96669R'}}, {'match': {'Demographic_Details.ID': u'056XZQI88874A'}}, {'match': {'Demographic_Details.ID': u'294OKUR30033G'}}, {'match': {'Demographic_Details.ID': u'404DDCN87823H'}}, {'match': {'Demographic_Details.ID': u'333UQAN69783V'}}]}}}}]}}, '_source': {'include': ['Demographic_Details']}}

Solution

  • The .scan() method gives access to all of the matched documents. There is no need for the search[offset:limit] slicing anymore, since a separate query is already created for each chunk and all I need is every result of that query. The slicing is what broke the later chunks: it translates into from/size, and each chunk's query only matches about ten documents, so from=10 skips past every hit the second query has (see the sketch after the updated code below).

    Now the code looks like this:

    ...
       while ids_left > 0:
            print('OFFSET %s' % str(offset))
            if (offset + chunk) <= total_num_of_ids:
                limit = offset + chunk
            else:
                limit = total_num_of_ids
    
            search = Search(using=es).index(index).source(include=[root_path])
            q = Q('bool', must=[Q('nested', path=root_path, query=Q('bool', should=[], minimum_should_match=1))]) 
            search = search.query(q)
    
            for i in xrange(offset, limit):
                q = Q('match', **{'{0}.ID'.format(root_path): ids[i]})
                search.query.must[0].query.should.append(q)
    
            print(search.to_dict())
    
            # scan() iterates over every document matched by the query, so no slicing is needed
            for hit in search.scan():
                print(hit[root_path][0]['id'], hit[root_path][0]['match'])
    
            offset += chunk
            ids_left -= chunk
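
  • For reference, a minimal sketch of the slicing behaviour mentioned above (independent of the script, assuming standard elasticsearch-dsl semantics): slicing a Search object is translated into from/size in the request body, which is why applying [10:20] to a per-chunk query that only matches ten documents returns nothing.

        from elasticsearch_dsl import Search

        # Slicing a Search object sets from/size on the generated request body
        s = Search().query('match_all')[10:20]
        print(s.to_dict())
        # prints roughly: {'query': {'match_all': {}}, 'from': 10, 'size': 10}

    Depending on the index mapping (the nested ID field would have to be a not-analyzed / keyword field for exact matching), the per-ID should loop could probably be collapsed into a single terms query, e.g. Q('nested', path=root_path, query=Q('terms', **{'{0}.ID'.format(root_path): ids[offset:limit]})), but that is an assumption about the mapping rather than something shown in the original code.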