Search code examples
python · xml · lxml

lxml element.clear() and access childelements


I am using lxml.iterparse to parse a rather large XML file. At a certain point an out-of-memory exception is thrown. I am aware of similar questions, and that a tree is built up during parsing which you should normally clear with element.clear() once you are no longer using it.

My code looks like this (shortened):

# NOTE(review): fragment of a larger script -- `context` (an iterparse
# iterator), `xmlns`, `characteristics` and `bytes_to_MB` are defined
# elsewhere and not visible here.
for  event,element in context :
    if element.tag == xmlns + 'initialized':        
        attributes = element.findall(xmlns+'attribute')         
        # Python 2 idiom: filter() returns a list that can be subscripted.
        # Under Python 3 filter() returns an iterator and [0] would raise
        # TypeError -- presumably this runs on Python 2; confirm.
        heapsize = filter(lambda x:x.attrib['name']=='maxHeapSize', attributes)[0].attrib['value']
        # heapsize is parsed as a hexadecimal string (base 16).
        characteristics['max_heap_size_MB'] = bytes_to_MB(int(heapsize, 16))

    #clear up the built tree to avoid mem alloc fails
    # BUG under discussion: with default end-events every element is cleared
    # at its own end event, so the <attribute> children are already emptied
    # by the time their <initialized> parent fires -- findall() then sees
    # elements with empty attrib dicts, hence the KeyError: 'name' below.
    element.clear()
del context

This works if I comment out element.clear(). If I use element.clear() I get KeyErrors like this:

Traceback (most recent call last):
  File "C:\Users\NN\Documents\scripts\analyse\analyse_all.py", line 289, in <module>
    main()
  File "C:\Users\NN\Documents\scripts\analyse\analyse_all.py", line 277, in main
    join_characteristics_and_score(logpath, benchmarkscores)
  File "C:\Users\NN\Documents\scripts\analyse\analyse_all.py", line 140, in join_characteristics_and_score
    parsed_verbose_xml  = parse_xml(verbose)
  File "C:\Users\NN\Documents\scripts\analyse\analyze_g.py", line 62, in parse_xml
    heapsize = filter(lambda x:x.attrib['name']=='maxHeapSize', attributes)[0].attrib['value']
  File "C:\Users\NN\Documents\scripts\analyse\analyze_g.py", line 62, in <lambda>
    heapsize = filter(lambda x:x.attrib['name']=='maxHeapSize', attributes)[0].attrib['value']
  File "lxml.etree.pyx", line 2272, in lxml.etree._Attrib.__getitem__ (src\lxml\lxml.etree.c:54751)
KeyError: 'name'

When I print the elements' attrib dicts without using element.clear(), they are regular dicts with the values in them. When clearing, those dicts are empty.

EDIT

A minimal running Python program illustrating the problem:

#!/usr/bin/python

from lxml import etree
from pprint import pprint

def fast_iter(context, func, *args, **kwargs):
    """Consume *context* (an iterparse iterator), apply *func* to each
    element, then free the element.

    Recipe by Liza Daly:
    http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
    """
    for _event, node in context:
        func(node, *args, **kwargs)
        # Drop the element's own content, then unlink every
        # already-processed preceding sibling from the parent.
        node.clear()
        while node.getprevious() is not None:
            del node.getparent()[0]
    del context

def process_element(elem):
    """Pretty-print the namespaced <mem> descendants of a <gc-start> element."""
    xmlns = "{http://www.ibm.com/j9/verbosegc}"
    if elem.tag != xmlns + "gc-start":
        return
    # xmlns[1:-1] strips the surrounding braces to get the raw namespace URI
    found = elem.findall('.//root:mem', namespaces={'root': xmlns[1:-1]})
    pprint(found)
if __name__ == '__main__':
    # NOTE(review): "r+" (read/write) is an unusual mode for parse-only
    # input; lxml generally recommends binary mode ("rb") -- confirm intent.
    with open('small.xml', "r+") as xmlf:
                # Default iterparse: 'end' events for every element in the
                # document, which is what triggers the premature clear().
                context = etree.iterparse(xmlf)
                fast_iter(context, process_element)

The content of the XML file is as follows:

<verbosegc xmlns="http://www.ibm.com/j9/verbosegc">
<gc-start id="5" type="scavenge" contextid="4" timestamp="2013-06-14T15:48:46.815">
  <mem-info id="6" free="3048240" total="4194304" percent="72">
    <mem type="nursery" free="0" total="1048576" percent="0">
      <mem type="allocate" free="0" total="524288" percent="0" />
      <mem type="survivor" free="0" total="524288" percent="0" />
    </mem>
    <mem type="tenure" free="3048240" total="3145728" percent="96">
      <mem type="soa" free="2891568" total="2989056" percent="96" />
      <mem type="loa" free="156672" total="156672" percent="100" />
    </mem>
    <remembered-set count="1593" />
  </mem-info>
</gc-start>
</verbosegc>

Solution

  • Liza Daly has written a great article about processing large XML using lxml. Try the fast_iter code presented there:

    import lxml.etree as ET
    import pprint
    
    
    def fast_iter(context, func, *args, **kwargs):
        """Consume an iterparse *context*, applying *func* to each element
        and releasing memory as parsing proceeds.

        Recipe by Liza Daly (ancestor cleanup added by unutbu):
        http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
        See also http://effbot.org/zone/element-iterparse.htm
        """
        for _event, node in context:
            func(node, *args, **kwargs)
            # func() has finished with this element, so none of its
            # descendants will be touched again -- safe to drop its content.
            node.clear()
            # Unlink every fully-processed preceding sibling at every
            # ancestor level, so the root keeps no references to them.
            for ancestor in node.xpath('ancestor-or-self::*'):
                while ancestor.getprevious() is not None:
                    del ancestor.getparent()[0]
        del context
    
    
    def process_element(elem, namespaces):
        """Pretty-print all namespaced <mem> descendants of *elem*."""
        matches = elem.findall('.//root:mem', namespaces=namespaces)
        pprint.pprint(matches)
    
    if __name__ == '__main__':
        xmlns = "http://www.ibm.com/j9/verbosegc"
        namespaces = {'root': xmlns}
        with open('small.xml', "r+") as xmlf:
            # Only fire 'end' events, and only for <gc-start> elements in the
            # verbosegc namespace: '{{{}}}' formats to Clark notation
            # '{uri}gc-start'. Children still build up under each gc-start,
            # but the tag filter means clear() runs only after the whole
            # gc-start subtree is complete -- avoiding the KeyError above.
            context = ET.iterparse(xmlf, events=('end', ),
                                   tag='{{{}}}gc-start'.format(xmlns))
            fast_iter(context, process_element, namespaces)