Tags: c++, x86, openmp, avx, icc

Why does OpenMP's parallel for not help my vectorized color space conversion?


I have vectorized a color space conversion algorithm (RGB to YCbCr). When I don't use threads (no #pragma omp parallel for), everything seems fine. But when I enable threading, it does not improve the performance of the vectorized version of my code; it actually makes it worse.

Threading does speed up the scalar code, the auto-vectorized code, and the OpenMP SIMD code (#pragma omp parallel for simd).

I have no idea what is going on and need your help.

Thanks in advance

I use Fedora 31, an Intel Core i7-6700HQ, 12 GB of RAM, and ICC 19.0.3 (-Ofast -qopenmp -xHOST, plus -no-vec for the builds where auto-vectorization is disabled).

The code is as follows:

Scalar:

//Scalar baseline
#include <stdio.h>
#include <x86intrin.h>  //for _rdtsc()
#define MAX1 512
#define MAX2 MAX1


float  __attribute__(( aligned(32))) image_r[MAX1][MAX2], image_g[MAX1][MAX2], image_b[MAX1][MAX2], image_y[MAX1][MAX2], image_cb[MAX1][MAX2], image_cr[MAX1][MAX2];
float coeff_RTY[3][3] = {{0.299, 0.587, 0.114},{-0.169, -0.331, 0.500},{0.500, -0.419, -0.081}};

inline void fill_float(float a[MAX1][MAX2])
{
    int i,j;
    for(i=0; i<MAX1; i++){

        for(j=0; j<MAX2; j++){
            a[i][j] = (i+j+100)%256;

        }
    }
}
int main()
{
    fill_float(image_r);
    fill_float(image_g);
    fill_float(image_b);

    int i, j;
    long t1,t2,min=100000000000000;
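    //benchmark: repeat until interrupted, printing each new minimum cycle count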
    do{
        t1=_rdtsc();
        //#pragma omp parallel for
        for( i=0; i<MAX1; i++){
        for( j=0; j<MAX2; j++){

        image_y[i][j] = coeff_RTY[0][0]*image_r[i][j] + coeff_RTY[0][1]*image_g[i][j] + coeff_RTY[0][2]*image_b[i][j];
        image_cb[i][j] = coeff_RTY[1][0]*image_r[i][j] + coeff_RTY[1][1]*image_g[i][j] + coeff_RTY[1][2]*image_b[i][j] + 128;
        image_cr[i][j] = coeff_RTY[2][0]*image_r[i][j] + coeff_RTY[2][1]*image_g[i][j] + coeff_RTY[2][2]*image_b[i][j] + 128;

        }
        }

        t2=_rdtsc();

        if((t2-t1)<min){
            min=t2-t1;
            printf("\n%li", t2-t1);
        }
    }while(1);
    printf("%f", image_y[MAX1/2][MAX2/2]);
    printf("%f", image_cb[MAX1/2][MAX2/2]);
    printf("%f", image_cr[MAX1/2][MAX2/2]);
    return 0;
}
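
The auto-vectorized and OpenMP SIMD runs reported below use this same kernel. For reference, here is a minimal sketch of the OpenMP SIMD variant (my reconstruction, with the timing harness identical to the scalar version; the simd hint is placed on the inner loop, the dimension actually being vectorized, though the combined #pragma omp parallel for simd on the outer loop also works):

//Sketch: OpenMP SIMD variant of the same kernel
//(same globals, fill_float and timing loop as the scalar version above)
#pragma omp parallel for
for (int i = 0; i < MAX1; i++) {
    #pragma omp simd
    for (int j = 0; j < MAX2; j++) {
        image_y[i][j]  = coeff_RTY[0][0]*image_r[i][j] + coeff_RTY[0][1]*image_g[i][j] + coeff_RTY[0][2]*image_b[i][j];
        image_cb[i][j] = coeff_RTY[1][0]*image_r[i][j] + coeff_RTY[1][1]*image_g[i][j] + coeff_RTY[1][2]*image_b[i][j] + 128;
        image_cr[i][j] = coeff_RTY[2][0]*image_r[i][j] + coeff_RTY[2][1]*image_g[i][j] + coeff_RTY[2][2]*image_b[i][j] + 128;
    }
}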

And the vectorized version using AVX (floating point):

//AVX
#include <stdio.h>
#include <x86intrin.h>
#define MAX1 512
#define MAX2 MAX1

float  __attribute__(( aligned(32))) image_r[MAX1][MAX2], image_g[MAX1][MAX2], image_b[MAX1][MAX2], image_y[MAX1][MAX2], image_cb[MAX1][MAX2], image_cr[MAX1][MAX2];
float coeff_RTY[3][3] = {{0.299, 0.587, 0.114},{-0.169, -0.331, 0.500},{0.500, -0.419, -0.081}};

inline void fill_float(float a[MAX1][MAX2])
{
    int i,j;
    for(i=0; i<MAX1; i++){

        for(j=0; j<MAX2; j++){
            a[i][j] = (i+j+100)%256;

        }
    }
}
int main()
{


    //program variables:
    //broadcast the conversion coefficients and the 128 offset into vector registers
    __m256 vec_c[3][3], vec_128;
    __m256 vec_r, vec_g, vec_b, vec_y, vec_cb, vec_cr;
    __m256 vec_t[3][3], vec_sum;

    vec_c[0][0] = _mm256_set1_ps(coeff_RTY[0][0]);
    vec_c[0][1] = _mm256_set1_ps(coeff_RTY[0][1]);
    vec_c[0][2] = _mm256_set1_ps(coeff_RTY[0][2]);

    vec_c[1][0] = _mm256_set1_ps(coeff_RTY[1][0]);
    vec_c[1][1] = _mm256_set1_ps(coeff_RTY[1][1]);
    vec_c[1][2] = _mm256_set1_ps(coeff_RTY[1][2]);

    vec_c[2][0] = _mm256_set1_ps(coeff_RTY[2][0]);
    vec_c[2][1] = _mm256_set1_ps(coeff_RTY[2][1]);
    vec_c[2][2] = _mm256_set1_ps(coeff_RTY[2][2]);

    vec_128 = _mm256_set1_ps(128);
    //fill with non-zero data so the compiler cannot optimize for all-zero values
    fill_float(image_r);
    fill_float(image_g);
    fill_float(image_b);
    int i, j=0;
    long t1,t2,min=100000000000000;
    do{
        t1=_rdtsc();

        //#pragma omp parallel for
        for( i=0; i<MAX1; i++){
            for( j=0; j<MAX2; j+=8){
            //_mm_prefetch(&image_r[i][j+8],_MM_HINT_T0);
            //_mm_prefetch(&image_g[i][j+8],_MM_HINT_T0);
            //_mm_prefetch(&image_b[i][j+8],_MM_HINT_T0);
            vec_r = _mm256_load_ps(&image_r[i][j]);
            vec_g = _mm256_load_ps(&image_g[i][j]);
            vec_b = _mm256_load_ps(&image_b[i][j]);


            vec_t[0][0] = _mm256_mul_ps(vec_r, vec_c[0][0]);
            vec_t[0][1] = _mm256_mul_ps(vec_g, vec_c[0][1]);
            vec_t[0][2] = _mm256_mul_ps(vec_b, vec_c[0][2]);

            vec_t[1][0] = _mm256_mul_ps(vec_r, vec_c[1][0]);
            vec_t[1][1] = _mm256_mul_ps(vec_g, vec_c[1][1]);
            vec_t[1][2] = _mm256_mul_ps(vec_b, vec_c[1][2]);

            vec_t[2][0] = _mm256_mul_ps(vec_r, vec_c[2][0]);
            vec_t[2][1] = _mm256_mul_ps(vec_g, vec_c[2][1]);
            vec_t[2][2] = _mm256_mul_ps(vec_b, vec_c[2][2]);

            //vec_y = vec_t[0][0] + vec_t[0][1] + vec_t[0][2]
            vec_sum = _mm256_add_ps(vec_t[0][0], vec_t[0][1]);
            vec_y = _mm256_add_ps(vec_t[0][2], vec_sum);

            //vec_cb = vec_t[1][0] + vec_t[1][1] + vec_t[1][2] +128
            vec_sum = _mm256_add_ps(vec_t[1][0], vec_t[1][1]);
            vec_sum = _mm256_add_ps(vec_t[1][2], vec_sum);
            vec_cb = _mm256_add_ps(vec_128, vec_sum);

            //vec_cr = vec_t[2][0] + vec_t[2][1] + vec_t[2][2] +128
            vec_sum = _mm256_add_ps(vec_t[2][0], vec_t[2][1]);
            vec_sum = _mm256_add_ps(vec_t[2][2], vec_sum);
            vec_cr = _mm256_add_ps(vec_128, vec_sum);
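            //the stores below are non-temporal (streaming): they bypass the cache on the way to memory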

            _mm256_stream_ps(&image_y[i][j], vec_y);
            _mm256_stream_ps(&image_cb[i][j], vec_cb);
            _mm256_stream_ps(&image_cr[i][j], vec_cr);

            }
        }
        t2=_rdtsc();

        if((t2-t1)<min){
            min=t2-t1;
            printf("\n%li", t2-t1);
        }
    }while(1);

    //use the results so the compiler cannot optimize away the computation
    printf("%f", image_y[MAX1/2][MAX2/2]);
    printf("%f", image_cb[MAX1/2][MAX2/2]);
    printf("%f", image_cr[MAX1/2][MAX2/2]);

    return 0;
}
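
One caveat about the threaded runs: if #pragma omp parallel for is uncommented in the code exactly as posted, j and the vec_* temporaries keep their function-scope declarations and are therefore shared between threads, which is a data race under OpenMP's default data-sharing rules. Here is a sketch of the threaded intrinsics loop with every temporary thread-local (vec_c and vec_128 remain shared, which is safe because they are only read):

//Sketch: threaded intrinsics kernel with thread-local temporaries
#pragma omp parallel for
for (int i = 0; i < MAX1; i++) {
    for (int j = 0; j < MAX2; j += 8) {
        __m256 vec_r = _mm256_load_ps(&image_r[i][j]);
        __m256 vec_g = _mm256_load_ps(&image_g[i][j]);
        __m256 vec_b = _mm256_load_ps(&image_b[i][j]);

        //y = c00*r + c01*g + c02*b
        __m256 vec_y  = _mm256_add_ps(_mm256_mul_ps(vec_r, vec_c[0][0]),
                        _mm256_add_ps(_mm256_mul_ps(vec_g, vec_c[0][1]),
                                      _mm256_mul_ps(vec_b, vec_c[0][2])));
        //cb = c10*r + c11*g + c12*b + 128
        __m256 vec_cb = _mm256_add_ps(vec_128,
                        _mm256_add_ps(_mm256_mul_ps(vec_r, vec_c[1][0]),
                        _mm256_add_ps(_mm256_mul_ps(vec_g, vec_c[1][1]),
                                      _mm256_mul_ps(vec_b, vec_c[1][2]))));
        //cr = c20*r + c21*g + c22*b + 128
        __m256 vec_cr = _mm256_add_ps(vec_128,
                        _mm256_add_ps(_mm256_mul_ps(vec_r, vec_c[2][0]),
                        _mm256_add_ps(_mm256_mul_ps(vec_g, vec_c[2][1]),
                                      _mm256_mul_ps(vec_b, vec_c[2][2]))));

        _mm256_stream_ps(&image_y[i][j],  vec_y);
        _mm256_stream_ps(&image_cb[i][j], vec_cb);
        _mm256_stream_ps(&image_cr[i][j], vec_cr);
    }
}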

UPDATE:

The best recorded cycle counts for a 128x128 image are as follows (the OpenMP SIMD row uses #pragma omp simd on a single core and #pragma omp parallel for simd on multiple cores):

| Version | Single core | Multiple cores |
| --- | --- | --- |
| Scalar | 88k | 25k |
| Auto-vectorized | 59k | 13k |
| Vectorized using intrinsics | **21k** | **226k** |
| OpenMP SIMD | 59k | 22k |

For a 1024x1024 image:

| Version | Single core | Multiple cores |
| --- | --- | --- |
| Scalar | 7M | 6M |
| Auto-vectorized | 3M | 6M |
| Vectorized using intrinsics | **3M** | **15M** |
| OpenMP SIMD | 3M | 8M |

Solution

  • After experimenting with different ideas, the problem was solved by adding the following OpenMP call before the #pragma omp parallel for (see the placement sketch at the end of this answer):

    omp_set_dynamic(3);

    With this change, the results are:

    Vectorized using intrinsics, multi-core:

    MAX1=128  --> 28k
    MAX1=1024 --> 3M

    These results are no longer anomalous.

    Any new results will be added to this answer in future updates.
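
    For concreteness, a placement sketch (omp.h is needed for the prototype; note that omp_set_dynamic() only distinguishes zero from nonzero, so the 3 simply means "enabled"):

        #include <omp.h>

        //...same setup as the intrinsics version above...

        //enable dynamic adjustment of the number of threads before the timed loop;
        //any nonzero argument to omp_set_dynamic() means "true"
        omp_set_dynamic(3);

        #pragma omp parallel for
        for (int i = 0; i < MAX1; i++) {
            //...kernel body as above...
        }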