Search code examples
python-3.xpython-imaging-librarynumpy-ndarray

Image to radar values


I am trying to do something similar to the question answered in: RADAR image for dBZ values

However, here I have a figure with different dimensions and a color palette that is not so simple.

Legend: enter image description here

image: enter image description here

Script:

import numpy as np
import PIL.Image

import matplotlib.pyplot as plt
import matplotlib.cm

# numba is an optional import, just here to make the function run faster
import numba


# Separated this function out since it's the majority of the run time and slow
@numba.njit()
def _get_distances(pixel: np.ndarray, calibration_inputs: np.ndarray):
    """Return the squared RGB distance from `pixel` to every calibration colour.

    Both arrays should hold signed integers so the per-channel subtraction
    cannot wrap around (see the "nuclear gandhi" note in the original post).
    """
    count = calibration_inputs.shape[0]
    # One squared distance per calibration colour, index-aligned with the input
    distances = np.empty(shape=(count,), dtype=np.int32)
    for row in range(count):
        dr = calibration_inputs[row, 0] - pixel[0]
        dg = calibration_inputs[row, 1] - pixel[1]
        db = calibration_inputs[row, 2] - pixel[2]
        distances[row] = dr * dr + dg * dg + db * db
    return distances


def _main():
    """Convert a radar image to dBZ values by matching pixel colours to a legend.

    In ``setup_mode`` the user clicks the legend to calibrate (one click for the
    colourbar row, then one per tick); otherwise previously captured calibration
    data embedded below is reused.  The decoded array is displayed alongside the
    original radar image and the legend for visual comparison.
    """
    # How many ticks are on the axes in the legend
    calibration_point_count = 17
    fname = 'C:/Users/lucas-fagundes/Downloads/getImagem (1).png'
    fname_chart = 'C:/Users/lucas-fagundes/Downloads/legenda_ciram.png'
    # Whether to collect the calibration data or not
    setup_mode = False
    # The image of the radar screen
    img = np.array(PIL.Image.open(fname))
    # The chart legend with the colour bars
    img_chart = np.array(PIL.Image.open(fname_chart))

    if setup_mode:
        fig = plt.figure()
        plt.title('Select center of colourbar then each tick on legend')
        plt.imshow(img_chart)
        selections = plt.ginput(calibration_point_count + 1)
        # Use the first click to find the horizontal line to read
        calibration_x = int(selections[0][1])
        calibration_ys = np.array([int(y) for y, x in selections[1:]], dtype=int)
        plt.close(fig)
        # Request the tick mark values
        calibration_values = np.empty(shape=(calibration_point_count,), dtype=float)
        for i in range(calibration_point_count):
            calibration_values[i] = float(input(f'Enter calibration point value {i:2}: '))
        # Create a plot to verify that the bars were effectively captured
        for index, colour in enumerate(['red', 'green', 'blue']):
            plt.plot(img_chart[calibration_x, calibration_ys[0]:calibration_ys[-1], index],
                     color=colour)
        plt.title('Colour components in legend')
        plt.show()

    else:
        # If you have already run the calibration once, you can put that data here
        # This saves you a lot of clicking in future runs
        calibration_x = 6
        # BUGFIX: these were bare `array(...)` calls, which raises NameError —
        # the function lives in the numpy namespace, imported here as `np`.
        calibration_ys = np.array([ 14,  43,  69,  93, 120, 152, 179, 206, 233, 259, 285, 312, 342, 371, 397, 421, 451])
        calibration_values = np.array([ 78. ,  73. ,  68. ,  63. ,  58. ,  53. ,  48. ,  43. ,  38. , 33. ,  28. ,  23. ,  18. ,  13. ,  10. , -10. , -31.5])
    # Record the pixel values to match the colours against
    #   (signed int32 so the subtraction in _get_distances cannot wrap)
    calibration_inputs = img_chart[calibration_x, calibration_ys[0]:calibration_ys[-1], :3].astype(np.int32)
    # Print this information to console so that you can copy it into the code above and not rerun setup_mode
    print(f'{calibration_x = }')
    print(f'{calibration_ys = }')
    print(f'{calibration_values = }')
    # print(f'{calibration_inputs = }')

    # Make the output array the same size, but without RGB vector, just a magnitude
    arrout = np.zeros(shape=img.shape[:-1], dtype=img.dtype)
    # Iterate through every pixel (can be optimized a lot if you need to run this frequently)
    for i in range(img.shape[0]):
        # This takes a while to run, so print some status throughout
        print(f'\r{i / img.shape[0] * 100:.2f}%', end='')
        for j in range(img.shape[1]):
            # Change the type so that the subtraction in the _get_distances function works appropriately
            pixel = img[i, j].astype(np.int32)
            # If this pixel is too dark, leave it as 0
            if np.sum(pixel) < 100:
                continue
            # idx contains the index of the closest match
            idx = np.argmin(_get_distances(pixel, calibration_inputs))
            # Interpolate the value against the chart and save it to the output array
            arrout[i, j] = np.interp(idx + calibration_ys[0], calibration_ys, calibration_values)
    # Create a custom cmap based on jet which looks the most like the input image
    #   This step isn't necessary, but helps us compare the input to the output
    cmap = matplotlib.colormaps['jet']
    cmap.set_under('k')  # If the value is below the bottom clip, set it to black
    fig, ax = plt.subplots(3, 1, gridspec_kw={'wspace': 0.01, 'hspace': 0.01}, height_ratios=(3, 3, 1))
    ax[0].imshow(arrout, cmap=cmap, vmin=0.5); ax[0].axis('off')
    ax[1].imshow(img); ax[1].axis('off'); ax[1].sharex(ax[0]);  ax[1].sharey(ax[0])
    ax[2].imshow(img_chart); ax[2].axis('off')
    plt.show()


if __name__ == '__main__':
    # Run the conversion only when executed as a script (not on import).
    _main()

Below is the error related to the image I have:

enter image description here

The error seems to occur because my image has only one dimension, and because of that I am not able to calibrate it. Could you please help me?


Solution

  • The biggest issue seems to be caused by the image formats. They are dissimilar to the ones in your linked question, and as a result they don't load the same way, causing the error you received. The next issue is with the data itself: the legend doesn't have RGB values similar to the main image, so not every pixel is a good fit, even after a minor correction to change from interpolation to nearest lookup. This means that unless you can get better images (preferably both as PNG rather than JPG, which I suspect is causing the colour corruption), you won't get great results.

    A summary of the changes necessary to get a decent answer:

    • Force PIL to interpret the image as RGBA by adding .convert('RGBA') to the image load line; this helps to address the differences in loading and allows us to index into the image the same way as we did in the linked question.
    • Change the calibration_inputs to just the selected pixels (instead of a full line of pixels, since we only need the closest match, no interpolation)
    • Change arrout to pre-fill with floats of some value less than -31.5 which is the lowest value in the legend, I chose -50 somewhat arbitrarily. Anything less than our bottom clip is fine (see below).
    • When grabbing the pixel from the input image img we want the index [i, j, 0:3] back to keep it consistent with the pixel distance calculation.
    • Change the interpolation to just retrieving the nearest value already found with the numpy.argmin call. That index retrieved already matches our adjusted calibration_values object.
    • Adjust the bottom clip vmin in the image demo to some value below the lowest expected in the legend, -31.5 . This will keep all of the value-less pixels un-colour-mapped. I chose -40 which is between the fill value above and the lowest expected value.

    And here is a version with those changes incorporated, it works with the downloads of your two images above.

    import numpy as np
    import PIL.Image
    
    import matplotlib.pyplot as plt
    import matplotlib.cm
    
    # numba is an optional import, just here to make the function run faster
    import numba
    
    
    # Separated this function out since it's the majority of the run time and slow
    @numba.njit()
    def _get_distances(pixel: np.ndarray, calibration_inputs: np.ndarray):
        """Return the squared RGB distance from `pixel` to every calibration colour.

        Both arrays should hold signed integers so the per-channel subtraction
        cannot wrap around (see the "nuclear gandhi" note in the original post).
        """
        count = calibration_inputs.shape[0]
        # One squared distance per calibration colour, index-aligned with the input
        distances = np.empty(shape=(count,), dtype=np.int32)
        for row in range(count):
            dr = calibration_inputs[row, 0] - pixel[0]
            dg = calibration_inputs[row, 1] - pixel[1]
            db = calibration_inputs[row, 2] - pixel[2]
            distances[row] = dr * dr + dg * dg + db * db
        return distances
    
    
    def _main():
        """Convert a radar image to dBZ values via nearest-colour legend lookup.

        In ``setup_mode`` the user clicks the legend to calibrate (one click for
        the colourbar row, then one per tick); otherwise previously captured
        calibration data embedded below is reused.  Each bright pixel of the
        radar image is assigned the legend value of its closest calibration
        colour, and the result is displayed alongside the input and the legend.
        """
        # How many ticks are on the axes in the legend
        calibration_point_count = 17
        fname = 'radar_image.png'
    
        fname_chart = 'legend.jpg'
        # Whether to collect the calibration data or not
        setup_mode = False
        # The image of the radar screen (force RGBA so indexing is consistent)
        img = np.array(PIL.Image.open(fname).convert('RGBA'))
        # The chart legend with the colour bars
        img_chart = np.array(PIL.Image.open(fname_chart))
    
        if setup_mode:
            fig = plt.figure()
            plt.title('Select center of colourbar then each tick on legend')
            plt.imshow(img_chart)
            selections = plt.ginput(calibration_point_count + 1)
            # Use the first click to find the horizontal line to read
            calibration_x = int(selections[0][1])
            calibration_ys = np.array([int(y) for y, x in selections[1:]], dtype=int)
            plt.close(fig)
            # Request the tick mark values
            calibration_values = np.empty(shape=(calibration_point_count,), dtype=float)
            for i in range(calibration_point_count):
                calibration_values[i] = float(input(f'Enter calibration point value {i:2}: '))
            # Create a plot to verify that the bars were effectively captured
            for index, colour in enumerate(['red', 'green', 'blue']):
                plt.plot(img_chart[calibration_x, calibration_ys[0]:calibration_ys[-1], index],
                         color=colour)
            plt.title('Colour components in legend')
            plt.show()
    
        else:
            # If you have already run the calibration once, you can put that data here
            # This saves you a lot of clicking in future runs
            calibration_x = 6
            calibration_ys = np.array([ 14,  43,  69,  93, 120, 152, 179, 206, 233, 259, 285, 312, 342, 371, 397, 421, 451])
            calibration_values = np.array([ 78. ,  73. ,  68. ,  63. ,  58. ,  53. ,  48. ,  43. ,  38. , 33. ,  28. ,  23. ,  18. ,  13. ,  10. , -10. , -31.5])
        # Record the pixel values to match the colours against.
        #   Cast to signed int32 so the subtraction in _get_distances cannot wrap
        #   (keeps this consistent with the NOTE in that function).
        calibration_inputs = np.array([img_chart[calibration_x, x] for x in calibration_ys]).astype(np.int32)
        # Print this information to console so that you can copy it into the code above and not rerun setup_mode
        print(f'{calibration_x = }')
        print(f'{calibration_ys = }')
        print(f'{calibration_values = }')
        # print(f'{calibration_inputs = }')
    
        # Make the output array the same size, but without RGB vector, just a magnitude.
        #   Pre-fill with -50.0, below the lowest legend value (-31.5), so untouched
        #   pixels stay under the vmin clip and render black.
        arrout = np.full(shape=img.shape[:-1], fill_value=-50.0, dtype=np.float32)
        # Iterate through every pixel (can be optimized a lot if you need to run this frequently)
        for i in range(img.shape[0]):
            # This takes a while to run, so print some status throughout
            print(f'\r{i / img.shape[0] * 100:.2f}%', end='')
            for j in range(img.shape[1]):
                # Change the type so that the subtraction in the _get_distances function works appropriately
                pixel = img[i, j, 0:3].astype(np.int32)
                # If this pixel is too dark, leave it at the -50.0 fill value
                if np.sum(pixel) < 100:
                    continue
                # idx contains the index of the closest match
                idx = np.argmin(_get_distances(pixel, calibration_inputs))
                # Grab the legend value for the pixel with the closest match
                arrout[i, j] = calibration_values[idx]
        # Create a custom cmap based on jet which looks the most like the input image
        #   This step isn't necessary, but helps us compare the input to the output
        cmap = matplotlib.colormaps['jet']
        cmap.set_under('k')  # If the value is below the bottom clip, set it to black
        fig, ax = plt.subplots(3, 1, gridspec_kw={'wspace': 0.01, 'hspace': 0.01}, height_ratios=(3, 3, 1))
        # vmin=-40 sits between the -50 fill and the lowest legend value (-31.5),
        # so only value-less pixels fall below the clip
        ax[0].imshow(arrout, cmap=cmap, vmin=-40); ax[0].axis('off')
        ax[1].imshow(img); ax[1].axis('off'); ax[1].sharex(ax[0]);  ax[1].sharey(ax[0])
        ax[2].imshow(img_chart); ax[2].axis('off')
        plt.show()
    
    
    if __name__ == '__main__':
        # Run the conversion only when executed as a script (not on import).
        _main()
    

    Here are the plots I get when I run that.

    sample output

    It has a number of artifacts from the image colour loss. You can probably get a better result if you can get both PNG's, but I won't promise it.

    Let me know if you have any questions.