
statically linking c programs with cuda, a simple tutorial

I love statically linked programs: the final binaries are self-contained, easy to use and portable. However, I couldn't find a simple tutorial on how to do it for C and CUDA (maybe I didn't look hard enough?), so here is one, built around a simple C program that adds vectors and can optionally be statically linked with CUDA for GPU acceleration.
this guide is not meant to be in-depth, just something to get you started! please consult the official docs for in-depth information

you will need

  • gcc
  • cuda toolkit (nvcc)

our starting point

first off, our normal code that runs on the cpu. create a vec.h header file; we don't need a separate vec.c, just inline everything!

vec.h

#ifndef VEC_N_H
#define VEC_N_H
#include <stdio.h>
#include <stdlib.h>

/* n-length vector, for demonstration purposes 
 * we will be using a flat vector */
typedef struct {
  int dimension;
  float* components; 
} vec_N;

/* utility functions: create, set, free etc. */
static inline vec_N vec_N_new(int dimension) {
  vec_N vec;
  vec.dimension = dimension;
  vec.components = (float*)calloc(dimension, sizeof(float));
  if (vec.components == NULL) {
    fprintf(stderr, "memalloc failed for vec_N components!\n");
    vec.dimension = 0;
  }
  return vec;
}

static inline void vec_N_free(vec_N* vec_N) {
  if (vec_N) {
    free(vec_N->components);
    vec_N->components = NULL;
    vec_N->dimension = 0;
  }
}

static inline void vec_N_print(const vec_N* vec_N) {
    if (!vec_N) {
        printf("Invalid vector\n");
        return;
    }
    
    printf("vector (Dim %d): [", vec_N->dimension);
    for (int i = 0; i < vec_N->dimension; i++) {
        printf("%s%.2f", i > 0 ? ", " : "", vec_N->components[i]);
    }
    printf("]\n");
}

static inline int vec_N_set(vec_N* vec_N, int index, float value) {
  // we got bounds checking before gta6
  if(!vec_N || index < 0 || index >=  vec_N->dimension) {
    fprintf(stderr, "Invalid ahh index brah\n");
    return -1;
  }
  vec_N->components[index] = value;
  return 0;
}

static inline void vec_N_add(const vec_N* v1, const vec_N* v2, vec_N* result) {
  if (!v1 || !v2 || v1->dimension != v2->dimension) {
      fprintf(stderr, "vectors must have same dimension for addition\n");
      return;
  }

  if (!result) {
    fprintf(stderr, "result vector must not be NULL\n");
    return;
  }
  if (v1->dimension != result->dimension) {
    // (re)allocate the result vector with the proper dimension
    vec_N_free(result);
    *result = vec_N_new(v1->dimension);
  }

  for (int i = 0; i < v1->dimension; i++) {
        result->components[i] = v1->components[i] + v2->components[i];
  } 
}



#endif // VEC_N_H
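just to see the API at a glance before the benchmark below, here is a tiny usage sketch (illustrative only, not a file in the repo); the real test program follows right after

// quick smoke test of the vec.h API (illustrative only)
#include "vec.h"

int main(void) {
  vec_N a = vec_N_new(3);
  vec_N b = vec_N_new(3);
  vec_N sum = vec_N_new(3);
  for (int i = 0; i < 3; i++) {
    vec_N_set(&a, i, 1.0f);
    vec_N_set(&b, i, 2.0f);
  }
  vec_N_add(&a, &b, &sum);
  vec_N_print(&sum);   // prints: vector (Dim 3): [3.00, 3.00, 3.00]
  vec_N_free(&a);
  vec_N_free(&b);
  vec_N_free(&sum);
  return 0;
}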

then, to test out our vec.h library, a main file

main.c

#include "vec.h"
#include "time.h"
#include <stdlib.h>

double random_float() {
  return rand() / (float) (RAND_MAX) + 1.0;
}

int main() {
  int vec_N_length = 50000;
  clock_t begin = clock();
  for (int i = 0; i < 10; i++) {
    vec_N vec_1 = vec_N_new(vec_N_length);
    vec_N vec_2 = vec_N_new(vec_N_length);
    vec_N result = vec_N_new(vec_N_length);
    for (int j = 0; j < vec_N_length; j++) {
      vec_N_set(&vec_1, j, random_float());
      vec_N_set(&vec_2, j, random_float());
    }
    vec_N_add(&vec_1, &vec_2, &result);
    vec_N_free(&vec_1);
    vec_N_free(&vec_2);
    vec_N_free(&result);
  }
  clock_t end = clock();
  double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
  printf("time spent %f\n", time_spent);
  return 0;
}

..and then, a simple Makefile

run_cpu:
    gcc main.c -o main -lm -s 
    ./main

if we run make run_cpu on my machine, we get this output

gcc main.c -o main -lm -s 
./main
time spent 0.015592

adding gpu support

now, to add GPU support, create a new file called vec_gpu.h and declare our gpu-specific functions

vec_gpu.h

#ifndef VEC_N_CUDA_H
#define VEC_N_CUDA_H


/// extern "C" guard: when nvcc (a C++ compiler) compiles this header,
/// vec_add_cu keeps C linkage so our gcc-compiled C code can link against it
#ifdef __cplusplus
extern "C" {
#endif // extern cpp start
#include "vec.h"

void vec_add_cu(const vec_N* v1, const vec_N* v2, vec_N* result);


#ifdef __cplusplus
}
#endif // extern cpp end
#endif // vec_N_CUDA_H

writing our (simple) cuda kernels

cuda source files use the .cu extension, so in my case i'd create a new file and name it vec.cu. there are three cuda function qualifiers (you'll also see them called decorators) that matter for our use case in this article (you can look up the others later)

  • __host__: functions that can be called from the host (aka the CPU) only
  • __global__: kernels, i.e. functions that run in parallel on the GPU; they can only be launched from the host and must return void
  • __device__: functions that run on a single GPU thread and can only be called from device code (kernels or other __device__ functions)
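these qualifiers can also be combined: a helper marked both __host__ and __device__ compiles for the cpu and the gpu. here is a tiny illustrative .cu-style sketch (the names are made up, none of this goes into our project):

// a helper usable from both host code and device code
__host__ __device__ float square(float x) {
  return x * x;
}

// a kernel that calls the __device__ side of square()
__global__ void square_kernel(float* data, int n) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n) {
    data[idx] = square(data[idx]);
  }
}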

let's write our gpu function

vec.cu

#include "vec.h"
#include "vec_gpu.h"
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <stdlib.h>

__device__ void vec_add_device(float* v1, float* v2, float* result, int idx) {
    result[idx] = v1[idx] + v2[idx];
}

..our kernel for adding vectors

__global__ void vec_add_kernel(float* v1, float* v2, float* result, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        vec_add_device(v1, v2, result, idx);
    }
}

then, finally, the function we declared in vec_gpu.h that our program can call

void vec_add_cu(const vec_N* v1, const vec_N* v2, vec_N* result) {
  size_t components_memsize = v1->dimension * sizeof(float);
  float *d_v1 = NULL, *d_v2 = NULL, *d_res = NULL;

  // allocate memory on device
  if (cudaMalloc(&d_v1, components_memsize) != cudaSuccess ||
      cudaMalloc(&d_v2, components_memsize) != cudaSuccess ||
      cudaMalloc(&d_res, components_memsize) != cudaSuccess) {
    fprintf(stderr, "CUDA memory allocation failed\n");
    cudaFree(d_v1);
    cudaFree(d_v2);
    cudaFree(d_res);
    return;
  }

  // copy to device
  if (cudaMemcpy(d_v1, v1->components, components_memsize,
                 cudaMemcpyHostToDevice) != cudaSuccess ||
      cudaMemcpy(d_v2, v2->components, components_memsize,
                 cudaMemcpyHostToDevice) != cudaSuccess)
  {
    fprintf(stderr, "CUDA memory copy to device failed\n");
    cudaFree(d_v1);
    cudaFree(d_v2);
    cudaFree(d_res);
    return;
  }

  int threadsPerBlock = 256;
  // round up so every component is covered even when the dimension
  // isn't a multiple of threadsPerBlock
  int blocksPerGrid = (v1->dimension + threadsPerBlock - 1) / threadsPerBlock;

  // call our add kernel
  vec_add_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_v1, d_v2, d_res, v1->dimension);
  
  cudaError_t kernelError = cudaGetLastError();

  if (kernelError != cudaSuccess) {
    fprintf(stderr, "kernel launch error: %s\n",
            cudaGetErrorString(kernelError));
  }

  // copy it back to our result vec
  cudaError_t cpy_error = cudaMemcpy(result->components, d_res,
                                     components_memsize, cudaMemcpyDeviceToHost);
  if (cpy_error != cudaSuccess) {
    fprintf(stderr, "CUDA memory copy to host failed: %s\n",
            cudaGetErrorString(cpy_error));
  }
  
  cudaFree(d_v1);
  cudaFree(d_v2);
  cudaFree(d_res);
}
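side note: the repeated error checks above get verbose quickly. a common pattern (not used in this tutorial's code, just a sketch you could adopt) is a small checking macro:

// sketch of a convenience macro: wraps any cuda runtime call and prints a
// readable message (with file and line) when it fails
#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
              cudaGetErrorString(err_), __FILE__, __LINE__);          \
    }                                                                 \
  } while (0)

// usage would look like: CUDA_CHECK(cudaMalloc(&d_v1, components_memsize));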

we also need to update our vec.h and add an optional compile-time switch

vec.h

#ifndef VEC_USE_GPU
static inline void vec_N_add(const vec_N* v1, const vec_N* v2, vec_N* result) {
.
. /* cpu implementation of add */
.
#else 

#include "vec_gpu.h"

static inline void vec_N_add(const vec_N* v1, const vec_N* v2, vec_N* result) {
  vec_add_cu(v1, v2, result);
}

#endif // VEC_USE_GPU

to compile a static .a library for cuda, add this to your Makefile

static_gpu:
    nvcc -c vec.cu -o vec_gpu.o
    ar rcs libvec_gpu.a vec_gpu.o
    rm vec_gpu.o

then, to test our new gpu accelerated vector addition library, add this to your Makefile

run_gpu: static_gpu
    gcc -DVEC_USE_GPU=ON main.c -L. -lvec_gpu -lcudart -o main_gpu -lm -s
    ./main_gpu
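a quick note before we run it: -lcudart links the CUDA runtime dynamically, so the static part of this setup is libvec_gpu.a. if you want the runtime statically linked too, the toolkit ships libcudart_static.a, which on linux also wants pthread, dl and rt. a rough sketch of what that target could look like (the -L path and the exact set of extra libraries, including whether you need -lstdc++, depend on your toolkit version and install, so treat this as a starting point, not something copied from the repo):

run_gpu_static: static_gpu
    gcc -DVEC_USE_GPU=ON main.c -L. -L/usr/local/cuda/lib64 -lvec_gpu \
        -lcudart_static -lstdc++ -lpthread -ldl -lrt -o main_gpu -lm -s
    ./main_gpu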

if we run make run_gpu, we get

nvcc -c vec.cu -o vec_gpu.o
ar rcs libvec_gpu.a vec_gpu.o
rm vec_gpu.o
gcc -DVEC_USE_GPU=ON main.c -L. -lvec_gpu -lcudart -o main_gpu -lm -s
./main_gpu
time spent 0.046546

as you can see, our gpu implementation is slower than our cpu implementation. why, you might ask? well, moving your compute to the gpu doesn't necessarily make it faster: here the device allocations and the host-to-device/device-to-host copies cost more than the parallel addition saves. plus, the code in this example is not the most optimized vector addition out there, it's meant to provide a starting point into gpu programming.
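if you want to see where the time actually goes, one option is to time just the kernel with cuda events. this is a rough sketch (not in the repo) that would sit inside vec_add_cu, reusing the device pointers and launch configuration from above:

// sketch: measure only the kernel, excluding cudaMalloc/cudaMemcpy overhead
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
vec_add_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_v1, d_v2, d_res, v1->dimension);
cudaEventRecord(stop);
cudaEventSynchronize(stop);

float kernel_ms = 0.0f;
cudaEventElapsedTime(&kernel_ms, start, stop);
printf("kernel only: %f ms\n", kernel_ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);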

bonus project ideas to try (if you are interested):

  • implement other vector operations on the gpu like subtract, multiply, divide, dot_product, cosine_similarity
  • optimize the current kernel code (one possible starting point is sketched below)
  • implement operations over more than two vectors at once instead of just two
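for the kernel optimization idea, one common starting point is a grid-stride loop, which lets a fixed number of blocks cover vectors of any size. a minimal sketch of an alternative to vec_add_kernel (an illustration, not what the repo ships):

// each thread handles multiple elements, striding by the total thread count
__global__ void vec_add_kernel_strided(float* v1, float* v2, float* result, int n) {
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        result[idx] = v1[idx] + v2[idx];
    }
}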

you can find the working code here: https://github.com/heabeounMKTO/simple_c_cuda

thanks for reading, until next time!