
Non Square Matrix Multiplication in CUDA


The code I use for matrix multiplication in CUDA lets me multiply both square and non-square matrices; however, both the width and the height must be multiples of blocksize.

So, for example, I can multiply [3][6] * [6][3] (using blocksize=3), but I can't multiply [3][2] * [2][3].

Does anyone know a way to do that? This is my kernel:

#include <stdio.h>
#include <limits.h>
#include <stdlib.h>

#define blocksize 3
#define HM (1*blocksize) 
#define WM (2*blocksize) 
#define WN (1*blocksize)
#define HN WM 
#define WP WN   
#define HP HM  
#define PTH WM
#define PTW HM

__global__ void nonsquare(float *M, float *N, float *P, int uWM, int uWN)
{
    __shared__ float MS[blocksize][blocksize];
    __shared__ float NS[blocksize][blocksize];

    int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y;
    int rowM = ty + by * blocksize;
    int colN = tx + bx * blocksize;
    float Pvalue = 0;

    for (int m = 0; m < uWM / blocksize; ++m) {
        MS[ty][tx] = M[rowM * uWM + (m * blocksize + tx)];
        NS[ty][tx] = M[colN + uWN * (m * blocksize + ty)];
        __syncthreads();

        for (int k = 0; k < blocksize; k++)
            Pvalue += MS[ty][k] * NS[k][tx];
        __syncthreads();
        P[rowM * WP + colN] = Pvalue;
    }
}

Thanks in advance!


Solution

  • I think the easiest thing to do would be to just pad the blocks on the end with zeros:

    for(int m = 0; m < uWM/blocksize; ++m){
        int colM = m*blocksize + tx;
        int rowN = m*blocksize + ty;
        if (rowM >= uWN || rowN >= uWM || colM >= uWM || colN >= uWN) {
            MS[ty][tx] = 0.;
            NS[ty][tx] = 0.;
        } else {
            MS[ty][tx] = M[rowM*uWM + colM];
            NS[ty][tx] = N[colN + uWN*rowN];
        }
    

    plus or minus. (That NS line should reference N, not M, right?)
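
    Putting that together, a fuller (untested) sketch of a bounds-checked kernel could look like the code below. Two things have to change besides the guarded loads: the tile loop count uWM/blocksize truncates, so for the [3][2] * [2][3] case (uWM = 2, blocksize = 3) the loop body would never run, and the write to P has to be guarded as well (it also only needs to happen once, after the loop). The extra parameter uHM (the height of M and of P) and the pointer names d_M, d_N, d_P are my own additions, since the original kernel doesn't pass a height.

    // Sketch only: tiled multiply for arbitrary (non-multiple-of-blocksize) sizes.
    // uHM is an assumed extra parameter for the height of M (= height of P).
    __global__ void nonsquare_padded(const float *M, const float *N, float *P,
                                     int uHM, int uWM, int uWN)
    {
        __shared__ float MS[blocksize][blocksize];
        __shared__ float NS[blocksize][blocksize];

        int tx = threadIdx.x, ty = threadIdx.y;
        int rowM = ty + blockIdx.y * blocksize;   // row of M / row of P
        int colN = tx + blockIdx.x * blocksize;   // column of N / column of P
        float Pvalue = 0.0f;

        // Round the tile count up; plain division gives 0 tiles when uWM < blocksize.
        int numTiles = (uWM + blocksize - 1) / blocksize;

        for (int m = 0; m < numTiles; ++m) {
            int colM = m * blocksize + tx;        // column of M being loaded
            int rowN = m * blocksize + ty;        // row of N being loaded

            // Guard each load separately; out-of-range elements become zeros,
            // which contribute nothing to the dot product.
            MS[ty][tx] = (rowM < uHM && colM < uWM) ? M[rowM * uWM + colM] : 0.0f;
            NS[ty][tx] = (rowN < uWM && colN < uWN) ? N[rowN * uWN + colN] : 0.0f;
            __syncthreads();

            for (int k = 0; k < blocksize; ++k)
                Pvalue += MS[ty][k] * NS[k][tx];
            __syncthreads();
        }

        // Only threads that map to a real element of P write a result.
        if (rowM < uHM && colN < uWN)
            P[rowM * uWN + colN] = Pvalue;
    }

    On the host side the grid also has to be rounded up, e.g. for the [3][2] * [2][3] case:

    dim3 block(blocksize, blocksize);
    dim3 grid((uWN + blocksize - 1) / blocksize,
              (uHM + blocksize - 1) / blocksize);
    nonsquare_padded<<<grid, block>>>(d_M, d_N, d_P, uHM, uWM, uWN);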

    But, since I seem to be the only one here advocating using existing tuned libraries when possible -- why not use CUBLAS or MAGMA instead of rolling your own? They're fast, and tested by hundreds of users.
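
    For reference, here is roughly what the same product looks like through cuBLAS (sketch only; the function and pointer names below are illustrative and error checking is omitted). One wrinkle: cuBLAS assumes column-major storage, so a common way to get a row-major C = A*B is to call cublasSgemm with the operands swapped, which computes C^T = B^T * A^T without any explicit transposes:

    #include <cuda_runtime.h>
    #include <cublas_v2.h>

    // Sketch: multiply a rowsA x K matrix A by a K x colsB matrix B, both stored
    // row-major on the device (d_A, d_B), writing the rowsA x colsB result to d_C.
    void matmul_cublas(const float *d_A, const float *d_B, float *d_C,
                       int rowsA, int K, int colsB)
    {
        cublasHandle_t handle;
        cublasCreate(&handle);

        const float alpha = 1.0f, beta = 0.0f;
        // Column-major view: C^T (colsB x rowsA) = B^T (colsB x K) * A^T (K x rowsA).
        cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                    colsB, rowsA, K,
                    &alpha,
                    d_B, colsB,    // leading dimension = row-major width of B
                    d_A, K,        // leading dimension = row-major width of A
                    &beta,
                    d_C, colsB);   // leading dimension = row-major width of C

        cublasDestroy(handle);
    }

    No size restrictions apply here -- cuBLAS handles arbitrary m, n, k internally.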