|
1 |
| -#include "cuda_utils.h" |
2 |
| -#include "matrix_mult.h" |
| 1 | +#include "cuda_utils.h" // Custom CUDA utility functions and macros for error checking |
| 2 | +#include "matrix_mult.h" // Header for this matrix multiplication module |
3 | 3 |
|
// Multiply two row-major matrices on the GPU using cuBLAS:
//   hostResultMatrix (numRowsA x numColsB) = hostMatrixA (numRowsA x numColsA)
//                                          * hostMatrixB (numColsA x numColsB)
//
// The inputs are copied host -> device, the product is computed with
// cublas{S,D}gemm, and the result is copied back to the host. Only
// T = float and T = double are supported (enforced at compile time).
//
// Parameters:
//   hostMatrixA      - row-major matrix A on the host, numRowsA x numColsA
//   hostMatrixB      - row-major matrix B on the host, numColsA x numColsB
//   hostResultMatrix - row-major output buffer on the host, numRowsA x numColsB
//   numRowsA         - rows of A (and of the result)
//   numColsA         - columns of A == rows of B
//   numColsB         - columns of B (and of the result)
template <typename T>
void multiplyMatricesOnGPU(const T* hostMatrixA, const T* hostMatrixB, T* hostResultMatrix,
                           int numRowsA, int numColsA, int numColsB) {
    // Reject unsupported element types at instantiation time, up front, instead
    // of burying the check in the gemm dispatch below.
    static_assert(std::is_same_v<T, float> || std::is_same_v<T, double>,
                  "Only float and double types are supported for matrix multiplication");

    // Byte sizes of the three matrices. Cast to size_t BEFORE multiplying so the
    // products are computed in size_t: a plain int*int product overflows for
    // matrices larger than ~2^31 elements/bytes.
    size_t byteSizeA = static_cast<size_t>(numRowsA) * numColsA * sizeof(T);
    size_t byteSizeB = static_cast<size_t>(numColsA) * numColsB * sizeof(T);
    size_t byteSizeC = static_cast<size_t>(numRowsA) * numColsB * sizeof(T);

    // Device (GPU) buffers for A, B and the result C. Initialized to nullptr so
    // they are never read as indeterminate values.
    T* deviceMatrixA = nullptr;
    T* deviceMatrixB = nullptr;
    T* deviceResultMatrix = nullptr;

    // Allocate device memory for all three matrices.
    CUDA_CHECK(cudaMalloc(&deviceMatrixA, byteSizeA));
    CUDA_CHECK(cudaMalloc(&deviceMatrixB, byteSizeB));
    CUDA_CHECK(cudaMalloc(&deviceResultMatrix, byteSizeC));

    // Upload the two input matrices to the device.
    CUDA_CHECK(cudaMemcpy(deviceMatrixA, hostMatrixA, byteSizeA, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(deviceMatrixB, hostMatrixB, byteSizeB, cudaMemcpyHostToDevice));

    // cuBLAS context for the gemm call.
    cublasHandle_t cublasHandle;
    CUBLAS_CHECK(cublasCreate(&cublasHandle));

    // gemm computes C = alpha*A*B + beta*C; alpha=1, beta=0 gives a plain product.
    // Use static_cast<T> so the float instantiation gets float constants, not
    // double literals silently converted.
    const T alpha = static_cast<T>(1);
    const T beta = static_cast<T>(0);

    // cuBLAS is column-major while our data is row-major. A row-major M x N matrix
    // is bit-identical to a column-major N x M matrix, so we ask cuBLAS for
    // (column-major) B * A with the swapped dimensions below; the result it writes
    // is C^T in column-major order, i.e. exactly C in row-major order — no
    // transposes or post-processing needed.
    if constexpr (std::is_same_v<T, float>) {
        // Single precision path.
        CUBLAS_CHECK(cublasSgemm(cublasHandle,
                                 CUBLAS_OP_N, CUBLAS_OP_N,      // no operand transposes
                                 numColsB, numRowsA, numColsA,  // m, n, k of the column-major product
                                 &alpha,
                                 deviceMatrixB, numColsB,       // first operand: B, ld = numColsB
                                 deviceMatrixA, numColsA,       // second operand: A, ld = numColsA
                                 &beta,
                                 deviceResultMatrix, numColsB)); // output C, ld = numColsB
    } else {
        // Double precision path (the static_assert above guarantees T == double here).
        CUBLAS_CHECK(cublasDgemm(cublasHandle,
                                 CUBLAS_OP_N, CUBLAS_OP_N,
                                 numColsB, numRowsA, numColsA,
                                 &alpha,
                                 deviceMatrixB, numColsB,
                                 deviceMatrixA, numColsA,
                                 &beta,
                                 deviceResultMatrix, numColsB));
    }

    // Download the result. A blocking cudaMemcpy device->host synchronizes with
    // the preceding gemm, so hostResultMatrix is valid on return.
    CUDA_CHECK(cudaMemcpy(hostResultMatrix, deviceResultMatrix, byteSizeC, cudaMemcpyDeviceToHost));

    // Clean up: destroy the cuBLAS context and free all device buffers.
    CUBLAS_CHECK(cublasDestroy(cublasHandle));
    CUDA_CHECK(cudaFree(deviceMatrixA));
    CUDA_CHECK(cudaFree(deviceMatrixB));
    CUDA_CHECK(cudaFree(deviceResultMatrix));
}
|
59 | 80 |
|
// Explicit template instantiations: the template is defined in this translation
// unit, so the float and double versions must be instantiated here for callers
// that only see the declaration in matrix_mult.h to link against them.
template void multiplyMatricesOnGPU<float>(const float*, const float*, float*, int, int, int);
template void multiplyMatricesOnGPU<double>(const double*, const double*, double*, int, int, int);
|
0 commit comments