I'm experimenting with writing a couple of kernels using GCC's built-in SIMD vector extensions. I've got this code benchmarking an AVX dot-product kernel:
#include <time.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
// read the time-stamp counter via the rdtsc instruction
static __inline__ uint64_t tick(void) {
    uint32_t hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)lo) | (((uint64_t)hi) << 32);
}
// AVX dot product: computes nprod dot products of A against windows of B,
// each window shifted by `shift` floats, writing one result per window to ans
void avx_dot(float* __restrict__ ans, float* __restrict__ A, float* __restrict__ B,
             int N, ssize_t nprod, ssize_t shift) {
    assert(N % 32 == 0 && "N not divisible by 32");
    const int VECTOR_SIZE = 8;
    typedef float vec
        __attribute__ ((vector_size (sizeof(float) * VECTOR_SIZE)));
    N /= VECTOR_SIZE;
    for (ssize_t ii = 0; ii < nprod; ii++) {
        vec *Av = (vec*)A;
        vec *Bv = (vec*)(B + ii*shift);
        // four partial accumulators; the inner loop is unrolled by 4
        vec temp[4] = {0, 0, 0, 0};
        for (int jj = 0; jj < N; jj += 4) {
            temp[0] += Av[jj+0] * Bv[jj+0];
            temp[1] += Av[jj+1] * Bv[jj+1];
            temp[2] += Av[jj+2] * Bv[jj+2];
            temp[3] += Av[jj+3] * Bv[jj+3];
        }
        // horizontal sum of the combined accumulator via union type-punning
        union {
            vec tempv;
            float tempf[VECTOR_SIZE];
        };
        tempv = temp[0] + temp[1] + temp[2] + temp[3];
        ans[ii] = 0;
        for (int jj = 0; jj < VECTOR_SIZE; ++jj) {
            ans[ii] += tempf[jj];
        }
    }
}
int main(int argc, const char *argv[]) {
    const ssize_t NITER = 1000;
    const ssize_t DECIM = atoi(argv[2]);
    const ssize_t DOTPROD = atoi(argv[3]);
    ssize_t size = atoi(argv[1]);
    float* A; posix_memalign((void**)&A, 128, size*sizeof(float));
    float* B; posix_memalign((void**)&B, 128, (size+(DOTPROD-1)*DECIM)*sizeof(float));
    srand(time(NULL));
    for (ssize_t ii = 0; ii < size; ii++) A[ii] = rand();
    for (ssize_t ii = 0; ii < size+(DOTPROD-1)*DECIM; ii++) B[ii] = rand();
    printf("# size: %zd nproducts: %zd shift: %zd\n", size, DOTPROD, DECIM);
    printf("# iter answer cycles seconds samprate\n");
    float results[DOTPROD];
    for (ssize_t ii = 0; ii < NITER; ii++) {
        uint64_t beg = tick();
        avx_dot(results, A, B, size, DOTPROD, DECIM);
        uint64_t end = tick();
        float ans = 0;
        for (ssize_t jj = 0; jj < DOTPROD; jj++) {
            ans += results[jj];
        }
        double CLOCK = 3300e6;  // nominal core clock in Hz (3.3 GHz)
        uint64_t cycles = end - beg;
        double seconds = (double)cycles/CLOCK;
        double samprate = (size*DOTPROD)/seconds;
        printf("%-5zd %f %llu %.3e %e\n", ii, ans, (unsigned long long)cycles, seconds, samprate);
    }
    return 0;
}
Strangely, when compiled with:
g++ -O3 -march=corei7-avx dotprod.cc -ffast-math -o dotprod
I get a segfault the first time temp is accessed inside avx_dot. But when compiled with:
g++ -O3 -march=corei7-avx dotprod.cc -o dotprod
I.e., without -ffast-math, it runs fine. I'm puzzled, because I thought -ffast-math shouldn't affect memory accesses, so I don't know where the segfault is coming from.
I'm running on:
CentOS Linux release 7.2.1511
gcc version 4.8.5 20150623 (Red Hat 4.8.5-4) (GCC)
Can anyone confirm this behavior on their machine and shed some light on what's happening?
My guess is a data-alignment problem: it fails on a load (the faulting instruction is .... vmovaps (%rcx),%ymm4 ...., with %rcx = 0x603228, which is exactly where Bv points), and the documentation for vmovaps says a 256-bit (ymm) memory operand has to be 32-byte aligned.
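For reference, here is a minimal sketch of the pointer-arithmetic/alignment check behind the addresses printed below (not my exact debug build; the 32-byte figure is what I understand an aligned 256-bit load needs):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
// Sketch: reproduce the pointer arithmetic from avx_dot and print how far each
// window of B is from 32-byte alignment; a nonzero remainder means an aligned
// 256-bit load (vmovaps into a ymm register) from that address would fault.
int main(void) {
    float *B;
    posix_memalign((void**)&B, 128, 1024 * sizeof(float));  // B itself is well aligned
    const int shift = 10;                                    // same shift as the failing run
    for (int ii = 0; ii < 4; ii++) {
        float *Bv = B + ii * shift;                          // same arithmetic as in avx_dot
        printf("ii=%d Bv=%p mod32=%u\n", ii, (void*)Bv,
               (unsigned)((uintptr_t)Bv % 32));              // ii=1 gives remainder 8
    }
    free(B);
    return 0;
}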
Further investigation:
The problem happens when Bv ends up offset 8 bytes into B (shift = 10 floats = 40 bytes, and 40 mod 32 = 8), because of this line, while the aligned load that gets emitted requires 32-byte alignment:
vec *Bv = (vec*)(B + ii*shift);
Here is a run with extra prints of the block and window pointers added:
./dotprod-fast 64 10 10
A=0x1125080
B=0x1125200
# size: 64 nproducts: 10 shift: 10
# iter answer cycles seconds samprate
Av=0x1125080
Bv=0x1125200
Av=0x1125080
Bv=0x1125228
Segmentation fault (core dumped)
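If misalignment really is the problem, one possible workaround (just a sketch, not something I've benchmarked, using a hypothetical helper I'll call load_unaligned) would be to load the B windows through memcpy into a local vector, so the compiler isn't told the address is 32-byte aligned:

#include <string.h>
typedef float vec __attribute__ ((vector_size (8 * sizeof(float))));
// Sketch of an unaligned load helper: copying through memcpy makes no alignment
// promise about p, so at -O3 this typically lowers to an unaligned 256-bit load
// (vmovups) instead of vmovaps.
static inline vec load_unaligned(const float *p) {
    vec v;
    memcpy(&v, p, sizeof v);
    return v;
}
// In avx_dot the loads from Bv would then become something like
//   temp[0] += Av[jj+0] * load_unaligned(B + ii*shift + (jj+0)*VECTOR_SIZE);
// while Av can stay a plain vec*, since A comes straight from posix_memalign.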