Tags: python, numpy, concurrent.futures

Issues with using numpy.fromiter & numpy.array in concurrent.futures.ProcessPoolExecutor map() and submit() methods


Background: This blog post reported speed benefits of numpy.fromiter() over numpy.array(). Using the provided script as a base, I wanted to measure the benefits of numpy.fromiter() when it is executed through the map() and submit() methods of Python's concurrent.futures.ProcessPoolExecutor class.
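
For reference, a minimal illustration of the two constructions being benchmarked is shown below. This snippet is only for orientation and is not part of the benchmark scripts; the list size of 256 is an arbitrary choice.

import numpy as np

pyarray = [float(x) for x in range(256)]

# np.array() copies an existing Python list into a new float32 array
a = np.array(pyarray, dtype=np.float32)

# np.fromiter() consumes an iterable; count lets it pre-allocate the output
b = np.fromiter(pyarray, dtype=np.float32, count=len(pyarray))

assert (a == b).all()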

Below are my findings for a 2-second run comparing array() and fromiter():

  1. In general, numpy.fromiter() is clearly faster than numpy.array() when the array size is below 256.
  2. However, the performance of both numpy.fromiter() and numpy.array() can be significantly poorer than a serial run, and is not consistent, when they are executed through the map() and submit() methods of Python's concurrent.futures.ProcessPoolExecutor class.

Questions: Can the inconsistent and poorer performance of numpy.fromiter() and numpy.array() when used with the map() and submit() methods of Python's concurrent.futures.ProcessPoolExecutor class be avoided? How can I improve my scripts?

The Python scripts I used for this benchmark are given below.

map():

#!/usr/bin/env python3.5
import concurrent.futures
from itertools import chain 
import time
import numpy as np
import pygal
from os import path

list_sizes = [2**x for x in range(1, 11)]
seconds = 2


def test(size_array):
    pyarray = [float(x) for x in range(size_array)]

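    # fromiter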
    start = time.time()
    iterations = 0
    while time.time() - start <= seconds:
        np.fromiter(pyarray, dtype=np.float32, count=size_array)
        iterations += 1
    fromiter_count = iterations

    # array
    start = time.time()
    iterations = 0
    while time.time() - start <= seconds:
        np.array(pyarray, dtype=np.float32)
        iterations += 1
    array_count = iterations

    #return array_count, fromiter_count
    return size_array, array_count, fromiter_count


begin = time.time()
results = {}

with concurrent.futures.ProcessPoolExecutor(max_workers=6) as executor:
    data = list(chain.from_iterable(executor.map(test, list_sizes)))
    print('data = ', data)

for i in range(0, len(data), 3):
    res = tuple(data[i+1:i+3])
    size_array = data[i]
    results[size_array] = res
    print("Result for size {} in {} seconds: {}".format(size_array, seconds, res))

out_folder = path.dirname(path.realpath(__file__))
print("Create diagrams in {}".format(out_folder))

chart = pygal.Line()
chart.title = "Performance in {} seconds".format(seconds)
chart.x_title = "Array size"
chart.y_title = "Iterations"

x_axis = sorted(results.keys())
print(x_axis)
chart.x_labels = x_axis
chart.add('np.array', [results[x][0] for x in x_axis])
chart.add('np.fromiter', [results[x][1] for x in x_axis])
chart.render_to_png(path.join(out_folder, 'result_{}_concurrent_futures_map.png'.format(seconds)))

end = time.time()
compute_time = end - begin
print("Program Time = ", compute_time)

submit():

#!/usr/bin/env python3.5
import concurrent.futures
from itertools import chain 
import time
import numpy as np
import pygal
from os import path

list_sizes = [2**x for x in range(1, 11)]
seconds = 2


def test(size_array):
    pyarray = [float(x) for x in range(size_array)]

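    # fromiter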
    start = time.time()
    iterations = 0
    while time.time() - start <= seconds:
        np.fromiter(pyarray, dtype=np.float32, count=size_array)
        iterations += 1
    fromiter_count = iterations

    # array
    start = time.time()
    iterations = 0
    while time.time() - start <= seconds:
        np.array(pyarray, dtype=np.float32)
        iterations += 1
    array_count = iterations

    return size_array, array_count, fromiter_count


begin = time.time()
results = {}

with concurrent.futures.ProcessPoolExecutor(max_workers=6) as executor:
    future_to_size_array = {executor.submit(test, size_array):size_array
                            for size_array in list_sizes}
    data = list(chain.from_iterable(
        f.result() for f in concurrent.futures.as_completed(future_to_size_array)))
    print('data = ', data)

for i in range(0, len(data), 3):
    res = tuple(data[i+1:i+3])
    size_array = data[i]
    results[size_array] = res
    print("Result for size {} in {} seconds: {}".format(size_array, seconds, res))

out_folder = path.dirname(path.realpath(__file__))
print("Create diagrams in {}".format(out_folder))

chart = pygal.Line()
chart.title = "Performance in {} seconds".format(seconds)
chart.x_title = "Array size"
chart.y_title = "Iterations"

x_axis = sorted(results.keys())
print(x_axis)
chart.x_labels = x_axis
chart.add('np.array', [results[x][0] for x in x_axis])
chart.add('np.fromiter', [results[x][1] for x in x_axis])
chart.render_to_png(path.join(out_folder, 'result_{}_concurrent_futures_submitv2.png'.format(seconds)))

end = time.time()
compute_time = end - begin
print("Program Time = ", compute_time)

Serial: (with minor changes to the original code)

#!/usr/bin/env python3.5
import time
import numpy as np
import pygal
from os import path

list_sizes = [2**x for x in range(1, 11)]
seconds = 2


def test(size_array):
    pyarray = [float(x) for x in range(size_array)]

    # fromiter
    start = time.time()
    iterations = 0
    while time.time() - start <= seconds:
        np.fromiter(pyarray, dtype=np.float32, count=size_array)
        iterations += 1
    fromiter_count = iterations

    # array
    start = time.time()
    iterations = 0
    while time.time() - start <= seconds:
        np.array(pyarray, dtype=np.float32)
        iterations += 1
    array_count = iterations

    return array_count, fromiter_count


begin = time.time()
results = {}

for size_array in list_sizes:
    res = test(size_array)
    results[size_array] = res
    print("Result for size {} in {} seconds: {}".format(size_array,seconds,res))

out_folder = path.dirname(path.realpath(__file__))
print("Create diagrams in {}".format(out_folder))

chart = pygal.Line()
chart.title = "Performance in {} seconds".format(seconds)
chart.x_title = "Array size"
chart.y_title = "Iterations"

x_axis = sorted(results.keys())
print(x_axis)
chart.x_labels = x_axis
chart.add('np.array', [results[x][0] for x in x_axis])
chart.add('np.fromiter', [results[x][1] for x in x_axis])
#chart.add('np.array', [x[0] for x in results.values()])
#chart.add('np.fromiter', [x[1] for x in results.values()])
chart.render_to_png(path.join(out_folder, 'result_{}_serial.png'.format(seconds)))

end = time.time()
compute_time = end - begin
print("Program Time = ", compute_time)

Solution

  • The inconsistent and poor performance of numpy.fromiter() and numpy.array() that I encountered earlier appears to be related to the number of CPUs (worker processes) used by concurrent.futures.ProcessPoolExecutor. I had originally used 6 CPUs. The diagrams below show the performance of numpy.fromiter() and numpy.array() when 2, 4, 6, and 8 CPUs are used. They indicate that there is an optimum number of CPUs: using too many (i.e. more than 4) is detrimental for small array sizes (fewer than 512 elements), slowing performance by roughly a factor of 2 and even making it inconsistent compared to a serial run. A sketch of how the worker count can be varied is given after the figures.

    [Figures: benchmark results with 2, 4, 6, and 8 CPUs]
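
For completeness, below is a minimal sketch of how the worker count could be varied to reproduce this comparison. It reuses test() and list_sizes as defined in the map() script above; the set of worker counts and the plain-text output are my own choices for illustration, not part of the original benchmark.

# Sketch: re-run the map() benchmark with different worker counts.
# Assumes test() and list_sizes are defined exactly as in the map() script above.
import concurrent.futures

for n_workers in (2, 4, 6, 8):  # worker counts to compare (assumed values)
    with concurrent.futures.ProcessPoolExecutor(max_workers=n_workers) as executor:
        rows = list(executor.map(test, list_sizes))
    for size_array, array_count, fromiter_count in rows:
        print("workers={} size={} array={} fromiter={}".format(
            n_workers, size_array, array_count, fromiter_count))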