Sample HIP Program

#include "hip/hip_runtime.h"

/*
 * A "hipified" version of the NVIDIA CUDA matrix multiply example.
 */
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>   /* for malloc and atoi */

// Matrices are stored in row-major order:
//   M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
  int width;   /* number of columns */
  int height;  /* number of rows */
  int stride;
  float* elements;
} Matrix;
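// Note: stride is the row pitch in elements.  A full matrix has
// stride == width; a sub-matrix view (see GetSubMatrix below) keeps
// the stride of the matrix it was carved from.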

// Get a matrix element.
/*__forceinline__*/ __device__ float GetElement(const Matrix A, int row, int col)
{
  return A.elements[row * A.stride + col];
}

// Set a matrix element.
__forceinline__ __device__ void SetElement(Matrix A, int row, int col, float value)
{
  A.elements[row * A.stride + col] = value;
}

// Thread block size.
#define BLOCK_SIZE 2
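// BLOCK_SIZE is kept tiny here so the default matrices stay small
// enough to print and step through; a production kernel would use a
// much larger tile (e.g. 16).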

// Get the BLOCK_SIZE x BLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A.  Asub aliases A's storage; no
// elements are copied.
/*__forceinline__*/ __device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
  Matrix Asub;
  Asub.width = BLOCK_SIZE;
  Asub.height = BLOCK_SIZE;
  Asub.stride = A.stride;
  Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row
                              + BLOCK_SIZE * col];
  return Asub;
}

// Forward declaration of the matrix multiplication kernel.
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - Host code.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
  // Load A and B to device memory.  (Return codes of the HIP runtime
  // calls are not checked, to keep the sample short.)
  Matrix d_A;
  d_A.width = d_A.stride = A.width; d_A.height = A.height;
  size_t size = A.width * A.height * sizeof(float);
  hipMalloc((void**)&d_A.elements, size);
  hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
  Matrix d_B;
  d_B.width = d_B.stride = B.width; d_B.height = B.height;
  size = B.width * B.height * sizeof(float);
  hipMalloc((void**)&d_B.elements, size);
  hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
  // Allocate C in device memory.
  Matrix d_C;
  d_C.width = d_C.stride = C.width; d_C.height = C.height;
  size = C.width * C.height * sizeof(float);
  hipMalloc((void**)&d_C.elements, size);
  // Invoke kernel: one thread block per BLOCK_SIZE x BLOCK_SIZE tile of C.
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
  hipLaunchKernelGGL(MatMulKernel, dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
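  // hipLaunchKernelGGL is HIP's kernel-launch macro: the kernel name,
  // grid dimensions, block dimensions, dynamic shared-memory size
  // (0 here), and stream (0 = the default stream) come first, followed
  // by the kernel's own arguments.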

  // Read C from device memory.  No explicit device synchronization is
  // needed: this blocking hipMemcpy on the default stream is ordered
  // after the kernel launch.
  hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
  // Free device memory.
  hipFree(d_A.elements);
  hipFree(d_B.elements);
  hipFree(d_C.elements);
}

// Matrix multiplication kernel called by MatMul().
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
  // Block row and column.
  int blockRow = blockIdx.y;
  int blockCol = blockIdx.x;
  // Each thread block computes one sub-matrix Csub of C.
  Matrix Csub = GetSubMatrix(C, blockRow, blockCol); /* STOP(called-subroutine): */
  // Each thread computes one element of Csub
  // by accumulating results into Cvalue.
  float Cvalue = 0; /* MARKER(plant-after-libload): IN_KERNEL_LINE */

  // Thread row and column within Csub.
  int row = threadIdx.y;
  int col = threadIdx.x;
  // Loop over all the sub-matrices of A and B that are
  // required to compute Csub.
  // Multiply each pair of sub-matrices together
  // and accumulate the results.
  for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
    // Get sub-matrix Asub of A.
    Matrix Asub = GetSubMatrix(A, blockRow, m);
    // Get sub-matrix Bsub of B.
    Matrix Bsub = GetSubMatrix(B, m, blockCol);
    // Shared memory used to store Asub and Bsub respectively.
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Load Asub and Bsub from device memory to shared memory.
    // Each thread loads one element of each sub-matrix.
    As[row][col] = GetElement(Asub, row, col);
    Bs[row][col] = GetElement(Bsub, row, col);
    // Synchronize to make sure the sub-matrices are loaded
    // before starting the computation.
    __syncthreads();
    // Multiply Asub and Bsub together.
    for (int e = 0; e < BLOCK_SIZE; ++e)
      Cvalue += As[row][e] * Bs[e][col];
    // Synchronize to make sure that the preceding
    // computation is done before loading two new
    // sub-matrices of A and B in the next iteration.
    __syncthreads();
  }
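  // Thanks to the shared-memory staging above, each element of A and B
  // is read from global memory once per tile rather than BLOCK_SIZE
  // times (once per thread that needs it).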

  // Write Csub to device memory.
  // Each thread writes one element.
  SetElement(Csub, row, col, Cvalue); /* STOP(inlined-subroutine): */
  // Just a place to set a breakpoint in the debugger.
  __syncthreads();
  __syncthreads(); /* STOP: Csub should be fully updated */
}

// A function to serve as a backstop for setting breakpoints in the
// preceding kernel, so that they don't slop over to the next function.
static int breakpoint_backstop() { return 0; }

static Matrix
cons_Matrix (int width_, int height_)
{
  Matrix A;
  A.width = width_;
  A.height = height_;
  A.stride = width_;
  A.elements = (float*) malloc(sizeof(*A.elements) * width_ * height_);
  // Element [row][col] starts out as row * 10 + col, which makes
  // indexing mistakes easy to spot in the printed output.
  for (int row = 0; row < height_; row++)
    for (int col = 0; col < width_; col++)
      A.elements[row * width_ + col] = row * 10.0 + col;
  return A;
}

static void
print_Matrix (Matrix A, const char *name)
{
  printf("%s:\n", name);
  for (int row = 0; row < A.height; row++)
    for (int col = 0; col < A.width; col++)
      printf("[%5d][%5d] %f\n", row, col, A.elements[row * A.stride + col]);
}

// Multiplying an m*n matrix by an n*p matrix yields an m*p matrix.
// Usage: tx_cuda_matmul [ m [ n [ p ] ] ]
// m, n, and p default to 1, and are multiplied by BLOCK_SIZE.
int main(int argc, char **argv)
{
  // hipSetDevice(0);
  const int m = BLOCK_SIZE * (argc > 1 ? atoi(argv[1]) : 1);
  const int n = BLOCK_SIZE * (argc > 2 ? atoi(argv[2]) : 1);
  const int p = BLOCK_SIZE * (argc > 3 ? atoi(argv[3]) : 1);
  Matrix A = cons_Matrix(m, n); /* MARKER(plant-after-libload): IN_MAIN_LINE */
  Matrix B = cons_Matrix(n, p);
  Matrix C = cons_Matrix(m, p);
  MatMul(A, B, C);
  print_Matrix(A, "A");
  print_Matrix(B, "B");
  print_Matrix(C, "C");
  return breakpoint_backstop();
}
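
One way to build and run the sample (a sketch, assuming the source is saved as tx_cuda_matmul.cpp to match the usage string above, and that ROCm's hipcc compiler is on the PATH):

$ hipcc -g -O0 tx_cuda_matmul.cpp -o tx_cuda_matmul
$ ./tx_cuda_matmul

-g -O0 keeps the code unoptimized with full debug information, so the STOP and MARKER comment lines above remain sensible breakpoint locations. With the default 2x2 matrices, the final C printout should contain the values 10, 11, 110, and 131.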