Tags: arrays, python-3.x, pandas, python-imaging-library

RADAR image for dBZ values


I need to perform a home study with data from a weather radar located in Brazil, but at the moment I only have a .png image of the hail event.

I need to convert the image into data by reading values off the legend: the stronger the colour, the higher the reflectivity (in dBZ).

The data and legend are from the website: https://www.redemet.aer.mil.br/

Legend:

[legend image: dBZ colour bar]

Image:

[radar image of the hail event]

I'm using the following script to convert the image to greyscale and then keep only the values below a threshold that I chose, but this methodology is not good.

# Load packages
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.pyplot as plt

from PIL import Image
import numpy as np

# Open Image with PIL
img = Image.open('./BRSG_20230417_135600_0_source.png')

# other data https://estatico-redemet.decea.mil.br/radar/2023/05/27/sg/maxcappi/maps/2023-05-27--10:56:27.png

# Convert image to grayscale
img_cinza = img.convert('L')

# Convert the image to a NumPy array
arr = np.array(img_cinza)

# Santiago Radar specifications
# extent = [-59.189733162586094, -50.844752805434396, -32.814210356963194, -25.573430009385245]
latt = np.arange(-32.814210356963194, -25.573430009385245, 0.018101950868944873)
lonn = np.arange(-59.189733162586094, -50.844752805434396, 0.020862450892879244)
#lonn = lonn - 360
latt = latt[::-1]

Xi, Yi = np.meshgrid(lonn,latt)

# Create an array holding the data, latitude and longitude
dd = np.zeros([3,latt.shape[0],lonn.shape[0]])
# dd = np.zeros([3,400,400])

dd[0,:,:] = arr[:,:]
dd[1,:,:] = Yi[:,:]
dd[2,:,:] = Xi[:,:]

dd[0,:,:][dd[0,:,:]==0] = np.nan

# Mask values >= 80, keeping only the darker pixels (which I assume correspond to the highest dBZ reflectivity = thunderstorm)
dd[0,:,:][dd[0,:,:] >= 80] = np.nan


# plot
plt.imshow(dd[0,:,:])

# Additional imports for the map plot (the modules above are already imported)
import cartopy
import cartopy.feature as cfeature

fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())

# contourf ignores the extent kwarg when X and Y are given, so set the map extent on the axes instead
ax.set_extent([-59, -45, -35, -22.0], crs=ccrs.PlateCarree())
ax.contourf(dd[2,:,:], dd[1,:,:], dd[0,:,:], transform=ccrs.PlateCarree())

# Create a feature for States/Admin 1 regions at 1:50m from Natural Earth
states_provinces = cfeature.NaturalEarthFeature(
    category='cultural',
    name='admin_1_states_provinces_lines',
    scale='50m',
    facecolor='none')
    
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(states_provinces, edgecolor='gray')

ax.coastlines(resolution='10m', color='black', linewidth=0.8)
ax.add_feature(cartopy.feature.BORDERS, edgecolor='black', linewidth=0.5)
gl = ax.gridlines(crs=ccrs.PlateCarree(), color='gray', alpha=1.0, linestyle='--', linewidth=0.25, xlocs=np.arange(-180, 180, 5), ylocs=np.arange(-90, 90, 5), draw_labels=True)
gl.top_labels = False
gl.right_labels = False

Thanks


Solution

  • Converting to grey before processing doesn't work well because the legend has no monotonic gradient, or really much contrast at all, in greyscale. Instead, you can keep the colours, compare each pixel to its nearest neighbour in the legend, and linearly interpolate between the legend tick marks to get an approximate dBZ value. Your other operations with lat/lon and the shorelines can be performed afterwards, and seem to be outside the scope of the question being asked.
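
    As a quick illustration of the greyscale problem, here is a minimal sketch (the RGB triples are hypothetical legend-like colours, not sampled from your actual legend). PIL's 'L' conversion uses the ITU-R 601 luma weights, so very different dBZ colours end up at grey levels that are not in dBZ order, and no single grey threshold can separate them:

    from PIL import Image
    import numpy as np

    # Hypothetical legend-like colours, ordered low -> high dBZ: green, yellow, red, magenta
    colours = np.array([[0, 200, 0], [255, 255, 0], [255, 0, 0], [255, 0, 255]], dtype=np.uint8)

    # PIL's 'L' mode: L = 0.299*R + 0.587*G + 0.114*B
    grey = np.array(Image.fromarray(colours[None, :, :], 'RGB').convert('L'))[0]
    print(grey)  # roughly [117 226  76 105] -- not monotonic in dBZ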

    Here is sample code that shows how you could apply the nearest-colour approach to your images:

    import numpy as np
    import PIL.Image
    
    import matplotlib.pyplot as plt
    import matplotlib.cm
    
    # numba is an optional import, just here to make the function run faster
    import numba
    
    
    # Separated this function out since it's the majority of the run time and slow
    @numba.njit()
    def _get_distances(pixel: np.ndarray, calibration_inputs: np.ndarray):
        # Create the output array where each position holds the squared distance from the pixel to the calibration colour at the matching index
        outarray = np.empty(shape=(calibration_inputs.shape[0]), dtype=np.int32)
        for i in range(calibration_inputs.shape[0]):
            # Calculate the vector difference
            #   NOTE: These must be signed integers to avoid issues with unsigned integer wrapping (see "nuclear Gandhi")
            diff = calibration_inputs[i] - pixel
            outarray[i] = diff[0] ** 2 + diff[1] ** 2 + diff[2] ** 2
        return outarray
    
    
    def _main():
        # How many ticks are on the axes in the legend
        calibration_point_count = 6
        fname = 'radar.png'
        fname_chart = 'chart.png'
        # Whether to collect the calibration data or not
        setup_mode = False
        # The image of the radar screen
        img = np.array(PIL.Image.open(fname))
        # The chart legend with the colour bars
        img_chart = np.array(PIL.Image.open(fname_chart))
    
        if setup_mode:
            fig = plt.figure()
            plt.title('Select center of colourbar then each tick on legend')
            plt.imshow(img_chart)
            selections = plt.ginput(calibration_point_count + 1)
            # Use the first click to find the horizontal line to read
            calibration_x = int(selections[0][1])
            calibration_ys = np.array([int(y) for y, x in selections[1:]], dtype=int)
            plt.close(fig)
            # Request the tick mark values
            calibration_values = np.empty(shape=(calibration_point_count,), dtype=float)
            for i in range(calibration_point_count):
                calibration_values[i] = float(input(f'Enter calibration point value {i:2}: '))
            # Create a plot to verify that the bars were effectively captured
            for index, colour in enumerate(['red', 'green', 'blue']):
                plt.plot(img_chart[calibration_x, calibration_ys[0]:calibration_ys[-1], index],
                         color=colour)
            plt.title('Colour components in legend')
            plt.show()
    
        else:
            # If you have already run the calibration once, you can put that data here
            # This saves you a lot of clicking in future runs
            calibration_x = 100
            calibration_ys = np.array([15, 74, 119, 201, 243, 302], dtype=int)
            calibration_values = np.array([0.0, 20.0, 30.0, 45.0, 63.0, 75.0], dtype=float)
        # Record the pixel values to match the colours against
        calibration_inputs = img_chart[calibration_x, calibration_ys[0]:calibration_ys[-1], :3].astype(np.int32)
        # Print this information to console so that you can copy it into the code above and not rerun setup_mode
        print(f'{calibration_x = }')
        print(f'{calibration_ys = }')
        print(f'{calibration_values = }')
        # print(f'{calibration_inputs = }')
    
        # Make the output array the same size, but without RGB vector, just a magnitude
        arrout = np.zeros(shape=img.shape[:-1], dtype=img.dtype)
        # Iterate through every pixel (can be optimized a lot if you need to run this frequently)
        for i in range(img.shape[0]):
            # This takes a while to run, so print some status throughout
            print(f'\r{i / img.shape[0] * 100:.2f}%', end='')
            for j in range(img.shape[1]):
                # Change the type so that the subtraction in the _get_distances function works appropriately
                pixel = img[i, j, 0:3].astype(np.int32)
                # If this pixel is too dark, leave it as 0
                if np.sum(pixel) < 100:
                    continue
                # idx contains the index of the closest match
                idx = np.argmin(_get_distances(pixel, calibration_inputs))
                # Interpolate the value against the chart and save it to the output array
                arrout[i, j] = np.interp(idx + calibration_ys[0], calibration_ys, calibration_values)
        # Create a custom cmap based on jet which looks the most like the input image
        #   This step isn't necessary, but helps us compare the input to the output
        cmap = matplotlib.colormaps['jet']
        cmap.set_under('k')  # If the value is below the bottom clip, set it to black
        fig, ax = plt.subplots(3, 1, gridspec_kw={'wspace': 0.01, 'hspace': 0.01}, height_ratios=(3, 3, 1))
        ax[0].imshow(arrout, cmap=cmap, vmin=0.5); ax[0].axis('off')
        ax[1].imshow(img); ax[1].axis('off'); ax[1].sharex(ax[0]);  ax[1].sharey(ax[0])
        ax[2].imshow(img_chart); ax[2].axis('off')
        plt.show()
    
    
    if __name__ == '__main__':
        _main()
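
    As a follow-up to the "(can be optimized a lot ...)" comment in the pixel loop: if you need to process many frames and would rather avoid the numba dependency, the matching can be vectorised one image row at a time with plain NumPy. This is only a sketch reusing the variable names from the script above (classify_rows is a hypothetical helper, not part of that script), not a tested drop-in replacement:

    def classify_rows(img, calibration_inputs, calibration_ys, calibration_values):
        # img: (H, W, 3 or 4) uint8 image; calibration_inputs: (N, 3) int32 legend pixels
        pixels = img[..., :3].astype(np.int32)
        arrout = np.zeros(img.shape[:2], dtype=float)
        for i in range(pixels.shape[0]):
            row = pixels[i]                                            # (W, 3)
            # Squared RGB distance from every pixel in this row to every legend colour
            diff = row[:, None, :] - calibration_inputs[None, :, :]    # (W, N, 3)
            dist2 = np.sum(diff * diff, axis=-1)                       # (W, N)
            idx = np.argmin(dist2, axis=-1)                            # (W,)
            values = np.interp(idx + calibration_ys[0], calibration_ys, calibration_values)
            # Leave pixels that are too dark (background) at 0, as in the original loop
            values[row.sum(axis=-1) < 100] = 0.0
            arrout[i] = values
        return arrout

    # Usage with the arrays already defined in _main:
    #   arrout = classify_rows(img, calibration_inputs, calibration_ys, calibration_values)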
    
    

    To test, I downloaded the two images from your question as radar.png and chart.png. When I run the full script I get the following console output:

    calibration_x = 100
    calibration_ys = array([ 15,  74, 119, 201, 243, 302])
    calibration_values = array([ 0., 20., 30., 45., 63., 75.])
    99.75%
    

    And I get the following plot:

    [example output from the script showing the new plot, the original image and the calibration bar]

    I included some conveniences for analysis: the two plots zoom together, the colour bar image is shown at the bottom, and I tried to match the colours of the output to the input as closely as I conveniently could.
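
    If you then want to georeference the result the way your original script does, you can reuse your lat/lon grid and plot arrout with cartopy. A minimal sketch using the Santiago radar extent from your question (it assumes arrout has the same shape as that grid, roughly 400 x 400):

    import cartopy.crs as ccrs
    import cartopy.feature as cfeature
    import matplotlib.pyplot as plt
    import numpy as np

    # Same grid as in your script (assumes arrout.shape == (latt.size, lonn.size))
    latt = np.arange(-32.814210356963194, -25.573430009385245, 0.018101950868944873)[::-1]
    lonn = np.arange(-59.189733162586094, -50.844752805434396, 0.020862450892879244)
    Xi, Yi = np.meshgrid(lonn, latt)

    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
    # Mask zeros (background) so only echoes are drawn
    pm = ax.pcolormesh(Xi, Yi, np.where(arrout > 0, arrout, np.nan),
                       cmap='jet', transform=ccrs.PlateCarree())
    ax.coastlines(resolution='10m', color='black', linewidth=0.8)
    ax.add_feature(cfeature.BORDERS, edgecolor='black', linewidth=0.5)
    plt.colorbar(pm, ax=ax, label='approx. dBZ')
    plt.show()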

    Let me know if you have any questions!