C++: Getting GPU Utilization with the nvidia-ml (NVML) API

Created: 2023-06-16 20:46 | Author: 风波 | Category: CUDA

1. C++ code

#include <iostream>
#include <unistd.h>
#include <cstdio>
#include <nvml.h>


// Source: https://gist.github.com/sakamoto-poteko/44d6cd19552fa7721b99
// https://github.com/CFSworks/nvml_fix/issues/16
//#pragma comment(lib, "nvml")

struct Arg {
    Arg(int c, int i) : count(c), itvl(i) {}
    int count;  // number of samples to print (-c)
    int itvl;   // interval between samples, in seconds (-x)
};

// Parse command-line options: -c <number of samples>, -x <interval in seconds>.
// Defaults: one sample, 1-second interval.
Arg parse_arg(int argc, char* argv[]) {
    Arg a(1, 1);
    for(int i = 1; i < argc; i++) {
        auto ar = std::string(argv[i]);
        if(ar == "-x" && i + 1 < argc && argv[i + 1][0] != '-') {
            a.itvl = std::atoi(argv[i + 1]);
            i++;
            continue;
        }
        if(ar == "-c" && i + 1 < argc && argv[i + 1][0] != '-') {
            a.count = std::atoi(argv[i + 1]);
            i++;
            continue;
        }
    }
    return a;
}

// Print GPU and memory utilization for every NVML device.
// Returns 0 on success, or a non-zero code identifying the failing NVML call.
int showGPUUtilization(){
    nvmlReturn_t result;

    unsigned int device_count;
    result = nvmlDeviceGetCount(&device_count);
    if (result != NVML_SUCCESS)
        return 2;

    for (unsigned int i = 0; i < device_count; ++i) {
        nvmlDevice_t device;
        result = nvmlDeviceGetHandleByIndex(i, &device);
        if (result != NVML_SUCCESS)
            return 3;

        char device_name[NVML_DEVICE_NAME_BUFFER_SIZE];
        result = nvmlDeviceGetName(device, device_name, NVML_DEVICE_NAME_BUFFER_SIZE);
        if (result != NVML_SUCCESS)
            return 4;

        //std::printf("Device %d: %s\n", i, device_name);

        nvmlUtilization_t device_utilization;
        result = nvmlDeviceGetUtilizationRates(device, &device_utilization);

        if (result != NVML_SUCCESS)
            return 5;

        std::printf("GPU%d Util: %u\n", i, device_utilization.gpu);
        std::printf("GPU%d Mem Util: %u\n", i, device_utilization.memory);
    }

    return 0;
}


int main(int argc, char* argv[])
{
    auto a = parse_arg(argc, argv);

    nvmlReturn_t result;
    result = nvmlInit();  // must be called before any other NVML function
    if (result != NVML_SUCCESS)
        return 1;
    // count < 0 means run forever; otherwise take count samples.
    for (int i = 0; (a.count < 0) || (i < a.count); i++) {
        showGPUUtilization();
        // Sleep between samples, including when running forever.
        if (a.count < 0 || i + 1 < a.count) {
            sleep(a.itvl);
        }
    }

    nvmlShutdown();
    return 0;
}
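
The program above reports only a bare numeric code when an NVML call fails. As a small illustrative extension (not part of the original post), NVML's nvmlErrorString() turns an nvmlReturn_t into a readable message; a minimal sketch:

#include <cstdio>
#include <nvml.h>

int main() {
    nvmlReturn_t result = nvmlInit();
    if (result != NVML_SUCCESS) {
        // nvmlErrorString() maps any nvmlReturn_t to a human-readable string.
        std::fprintf(stderr, "nvmlInit failed: %s\n", nvmlErrorString(result));
        return 1;
    }
    std::puts("NVML initialized successfully");
    nvmlShutdown();
    return 0;
}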

2. CMakeLists.txt

CMAKE_MINIMUM_REQUIRED (VERSION 3.17)  # FindCUDAToolkit requires CMake >= 3.17
PROJECT (device_info LANGUAGES CXX CUDA)

SET(CMAKE_VERBOSE_MAKEFILE ON)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

FIND_PACKAGE (CUDAToolkit)

INCLUDE_DIRECTORIES (${CUDAToolkit_INCLUDE_DIRS})
LINK_DIRECTORIES(${CUDAToolkit_LIBRARY_DIR})


ADD_EXECUTABLE (gpu-utilization gpu-utilization.cc)
TARGET_LINK_LIBRARIES (gpu-utilization nvidia-ml)
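
If your CMake is 3.17 or newer, FindCUDAToolkit also exports an imported target for NVML, which makes the explicit LINK_DIRECTORIES call unnecessary; a possible alternative for the last line (shown as a sketch, not part of the original post):

# Alternative: link the imported NVML target provided by FindCUDAToolkit
TARGET_LINK_LIBRARIES (gpu-utilization CUDA::nvml)

Configure and build as usual with cmake, then run the program, for example ./gpu-utilization -c 5 -x 2 to print five samples two seconds apart.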