Add GPU side panel

romner 2023-05-13 00:27:23 +02:00
parent d522a91ef4
commit adcdc583b0
5 changed files with 131 additions and 46 deletions

@@ -261,6 +261,8 @@ void clean_quit(int sig) {
#endif
}
Gpu::Nvml::shutdown();
Config::write();
if (Term::initialized) {

@@ -1585,12 +1585,18 @@ namespace Gpu {
bool shown = true, redraw = true, mid_line = false;
int graph_height;
Draw::Graph graph_upper;
Draw::Graph temp_graph;
Draw::Meter gpu_meter;
Draw::Meter mem_meter;
unordered_flat_map<string, Draw::Graph> mem_graphs;
string box;
string draw(const gpu_info& gpu, bool force_redraw, bool data_same) {
if (Runner::stopping) return "";
if (force_redraw) redraw = true;
bool show_temps = (Config::getB("check_temp"));
auto tty_mode = Config::getB("tty_mode");
auto& temp_scale = Config::getS("temp_scale");
auto& graph_symbol = (tty_mode ? "tty" : Config::getS("graph_symbol_cpu")); // TODO graph_symbol_gpu
auto& graph_bg = Symbols::graph_symbols.at((graph_symbol == "default" ? Config::getS("graph_symbol") + "_up" : graph_symbol + "_up")).at(6);
string out;
@@ -1599,29 +1605,53 @@ namespace Gpu {
//* Redraw elements not needed to be updated every cycle
if (redraw) {
out += box;
graph_height = height-2;
//out += Gpu::Nvml::initialized ? "NVML initialized" : "NVML not initialized";
graph_upper = Draw::Graph{x + width - b_width - 3, graph_height, "cpu", gpu.gpu_percent, graph_symbol, false, true}; // TODO cpu -> gpu
gpu_meter = Draw::Meter{b_width - (show_temps ? 27 : 11), "cpu"};
temp_graph = Draw::Graph{9, 1, "temp", gpu.temp, graph_symbol, false, false, gpu.temp_max, -23};
mem_meter = Draw::Meter{b_width - 27, "used"};
for (const auto& name : mem_names) {
mem_graphs[name] = Draw::Graph{b_width/2 - (name == "used" ? 1 : 2), 3, name, gpu.mem_percent.at(name), graph_symbol};
}
}
//out += " " + std::to_string(gpu.gpu_percent.back()) + "%";
//? Core text and graphs
try {
//? Gpu graph & meter
out += Fx::ub + Mv::to(y + 1, x + 1) + graph_upper(gpu.gpu_percent, (data_same or redraw));
out += Mv::to(b_y + 1, b_x + 1) + Theme::c("main_fg") + Fx::b + "GPU " + gpu_meter(gpu.gpu_percent.back())
+ Theme::g("cpu").at(gpu.gpu_percent.back()) + rjust(to_string(gpu.gpu_percent.back()), 4) + Theme::c("main_fg") + '%';
//? Temperature graph
if (show_temps) {
const auto [temp, unit] = celsius_to(gpu.temp.back(), temp_scale);
const auto& temp_color = Theme::g("temp").at(clamp(gpu.temp.back() * 100 / gpu.temp_max, 0ll, 100ll));
out += ' ' + Theme::c("inactive_fg") + graph_bg * 5 + Mv::l(5) + temp_color
+ temp_graph(gpu.temp, data_same or redraw);
out += rjust(to_string(temp), 4) + Theme::c("main_fg") + unit;
}
out += Theme::c("div_line") + Symbols::v_line;
//? Memory usage meter
out += Mv::to(b_y + 2, b_x + 1) + Theme::c("main_fg") + Fx::b + "MEM " + mem_meter(gpu.mem_percent.at("used").back())
+ Theme::g("used").at(gpu.mem_percent.at("used").back()) + rjust(to_string(gpu.mem_percent.at("used").back()), 4) + Theme::c("main_fg") + '%'
+ Fx::b + " Total:" + rjust(floating_humanizer(gpu.mem_total), 9);
//? Memory usage graphs
out += Mv::to(b_y + 4, b_x + 1);
for (const auto& name : mem_names) {
out += mem_graphs[name](gpu.mem_percent.at(name), (data_same or redraw)) + Mv::u(2) + Mv::r(1);
}
//? Memory usage borders // TODO, there's gotta be a more elegant way to do this...
out += Mv::to(b_y + 3, b_x) + Theme::c("div_line") + Symbols::div_left+Symbols::h_line + Theme::c("title") + "Used:" + Theme::c("div_line")
+ Symbols::h_line*(b_width/2-15) + Theme::c("title") + floating_humanizer(gpu.mem_stats.at("used")) + Theme::c("div_line") + Symbols::h_line;
out += Symbols::div_up + Symbols::h_line + Theme::c("title") + "Free:" + Theme::c("div_line")
+ Symbols::h_line*(b_width/2-17) + Theme::c("title") + floating_humanizer(gpu.mem_stats.at("free")) + Theme::c("div_line") + Symbols::h_line + Symbols::div_right;
out += Mv::to(b_y + 7, b_x) + Theme::c("div_line") + Symbols::div_left + Symbols::h_line*(b_width/2-1) + (Mv::u(1) + Symbols::v_line + Mv::l(1))*3
+ Mv::d(3) + Symbols::div_down + Symbols::h_line*(b_width/2-2) + Symbols::div_right;
} catch (const std::exception& e) {
throw std::runtime_error("graphs: " + string{e.what()});
@@ -1804,9 +1834,16 @@ namespace Draw {
using namespace Gpu;
width = Term::width;
height = Term::height;
x = 1; y = 1;
box = createBox(x, y, width, height, Theme::c("cpu_box"), true, "gpu", "", 5); // TODO gpu_box
b_width = width/2; // TODO
b_height = height-2;
b_x = x + width - b_width - 1;
b_y = y + ceil((double)(height - 2) / 2) - ceil((double)b_height / 2) + 1;
box += createBox(b_x, b_y, b_width, b_height, "", false, "test");
}
}
}
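A quick sanity check on the geometry above: the inner box is the right half of the panel, two rows shorter than the full height, and the b_y expression vertically centers it. As long as b_height == height - 2 the two ceil() terms cancel, so b_y is simply y + 1; the centering presumably only matters once b_height becomes configurable (per the TODO). A standalone sketch of the same arithmetic, using an arbitrary example terminal size that is not taken from the commit:

#include <cmath>
#include <cstdio>

int main() {
	// Assumed example values: a 120x40 terminal, panel anchored at (1,1).
	const int width = 120, height = 40, x = 1, y = 1;

	const int b_width  = width / 2;     // placeholder split, marked TODO in the commit
	const int b_height = height - 2;
	const int b_x = x + width - b_width - 1;
	const int b_y = y + (int)std::ceil((double)(height - 2) / 2) - (int)std::ceil((double)b_height / 2) + 1;

	// Prints: b_width=60 b_height=38 b_x=60 b_y=2 (i.e. y + 1)
	std::printf("b_width=%d b_height=%d b_x=%d b_y=%d\n", b_width, b_height, b_x, b_y);
	return 0;
}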

@@ -119,6 +119,7 @@ namespace Menu {
{"2", "Toggle MEM box."},
{"3", "Toggle NET box."},
{"4", "Toggle PROC box."},
{"5", "Toggle GPU box."},
{"d", "Toggle disks view in MEM box."},
{"F2, o", "Shows options."},
{"F1, ?, h", "Shows this window."},

@@ -319,15 +319,20 @@ namespace Gpu {
extern int x, y, width, height, min_width, min_height;
extern bool shown, redraw;
const array mem_names { "used"s, "free"s };
struct gpu_info {
deque<long long> gpu_percent = {};
//array<float, 3> load_avg;
deque<long long> temp;
long long temp_max = 0;
unordered_flat_map<string, long long> mem_stats = {{"used", 0}, {"free", 0}};
unordered_flat_map<string, deque<long long>> mem_percent = {{"used", {}}, {"free", {}}};
long long mem_total = 0;
};
namespace Nvml {
extern bool initialized;
extern bool shutdown();
}
//* Collect gpu stats and temperatures
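The deques in gpu_info act as rolling sample buffers for the graphs: the collector pushes one value per update and pops from the front so the history never grows past what a graph can display (width * 2 samples for the utilization graph). A minimal self-contained sketch of that pattern; the buffer length and fake sample values are arbitrary and not from the commit:

#include <cstdio>
#include <deque>
#include <utility> // std::cmp_greater (C++20)

int main() {
	std::deque<long long> gpu_percent;
	const std::size_t max_samples = 16; // stands in for width * 2

	for (int i = 0; i < 40; ++i) {
		gpu_percent.push_back((i * 7) % 101); // fake utilization sample, 0-100
		// Same trim as the collector: drop the oldest samples beyond the graph width.
		while (std::cmp_greater(gpu_percent.size(), max_samples)) gpu_percent.pop_front();
	}

	std::printf("kept %zu samples, oldest=%lld, newest=%lld\n",
		gpu_percent.size(), gpu_percent.front(), gpu_percent.back());
	return 0;
}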

@@ -97,13 +97,13 @@ namespace Mem {
namespace Gpu {
gpu_info current_gpu;
unsigned int device_count;
//? NVIDIA data collection
namespace Nvml {
bool initialized = false;
bool init();
bool shutdown();
nvmlDevice_t device;
}
}
@@ -2087,7 +2087,7 @@ namespace Tools {
catch (const std::invalid_argument&) {}
catch (const std::out_of_range&) {}
}
throw std::runtime_error("Failed get uptime from from " + string{Shared::procPath} + "/uptime");
throw std::runtime_error("Failed get uptime from " + string{Shared::procPath} + "/uptime");
}
}
@@ -2105,17 +2105,26 @@ namespace Gpu {
result = nvmlDeviceGetCount(&device_count);
if (result != NVML_SUCCESS) {
Logger::error(std::string("Failed to get NVML device count: ") + nvmlErrorString(result));
Logger::error(std::string("NVML: Failed to get device count: ") + nvmlErrorString(result));
return false;
}
result = nvmlDeviceGetHandleByIndex(0, &device); // TODO: multi-GPU support
if (result != NVML_SUCCESS) {
Logger::error(std::string("Failed to get NVML device handle: ") + nvmlErrorString(result));
Logger::error(std::string("NVML: Failed to get device handle: ") + nvmlErrorString(result));
return false;
}
initialized = true;
//? Get temp_max
unsigned int temp_max;
result = nvmlDeviceGetTemperatureThreshold(device, NVML_TEMPERATURE_THRESHOLD_SHUTDOWN, &temp_max);
if (result != NVML_SUCCESS) {
Logger::error(std::string("NVML: Failed to get maximum GPU temperature: ") + nvmlErrorString(result));
return false;
}
current_gpu.temp_max = (long long)temp_max;
return true;
}
bool shutdown() {
@@ -2127,10 +2136,59 @@ namespace Gpu {
} else Logger::warning(std::string("Failed to shutdown NVML: ") + nvmlErrorString(result));
return !initialized;
}
bool collect(gpu_info& gpu) {
if (!initialized) return false;
//? Get GPU utilization
nvmlUtilization_t utilization;
nvmlReturn_t result = nvmlDeviceGetUtilizationRates(device, &utilization);
if (result != NVML_SUCCESS) {
Logger::error(std::string("NVML: Failed to get GPU utilization: ") + nvmlErrorString(result));
return false;
}
gpu.gpu_percent.push_back((long long)utilization.gpu);
//? Reduce size if there are more values than needed for graph
while (cmp_greater(gpu.gpu_percent.size(), width * 2)) gpu.gpu_percent.pop_front();
//? GPU temp
if (Config::getB("check_temp")) {
unsigned int temp;
nvmlReturn_t result = nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temp);
if (result != NVML_SUCCESS) {
Logger::error(std::string("NVML: Failed to get GPU temperature: ") + nvmlErrorString(result));
return false;
}
gpu.temp.push_back((long long)temp);
//? Reduce size if there are more values than needed for graph
while (cmp_greater(gpu.temp.size(), 18)) gpu.temp.pop_front();
}
//? Memory info
nvmlMemory_t memory;
result = nvmlDeviceGetMemoryInfo(device, &memory);
if (result != NVML_SUCCESS) {
Logger::error(std::string("NVML: Failed to get VRAM info: ") + nvmlErrorString(result));
return false;
}
gpu.mem_total = memory.total;
gpu.mem_stats.at("used") = memory.used;
gpu.mem_stats.at("free") = memory.free;
auto used_percent = (long long)round((double)memory.used * 100.0 / (double)memory.total);
gpu.mem_percent.at("used").push_back(used_percent);
gpu.mem_percent.at("free").push_back(100-used_percent);
//? Reduce size if there are more values than needed for graphs
while (cmp_greater(gpu.mem_percent.at("used").size(), width/2)) gpu.mem_percent.at("used").pop_front();
while (cmp_greater(gpu.mem_percent.at("free").size(), width/2)) gpu.mem_percent.at("free").pop_front();
return true;
}
}
// TODO: AMD
// TODO: Intel
//? Collect data from GPU-specific libraries
auto collect(bool no_update) -> gpu_info& {
if (Runner::stopping or (no_update and not current_gpu.gpu_percent.empty())) return current_gpu;
@@ -2139,25 +2197,7 @@ namespace Gpu {
//if (Config::getB("show_gpu_freq"))
// TODO gpuHz = get_gpuHz();
Nvml::collect(gpu);
return gpu;
}
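For anyone who wants to poke at the same data outside of btop, here is a minimal standalone sketch of the NVML call sequence the collector uses (init, device handle, utilization, temperature, memory, shutdown). It assumes a single NVIDIA GPU at index 0 and linking against libnvidia-ml (e.g. g++ probe.cpp -lnvidia-ml, file name just an example); error handling is reduced to printing and bailing out. In btop itself the same sequence is split across Nvml::init(), Nvml::collect() and the Nvml::shutdown() call added to clean_quit() in the first hunk.

#include <cstdio>
#include <nvml.h>

int main() {
	nvmlReturn_t result = nvmlInit();
	if (result != NVML_SUCCESS) {
		std::fprintf(stderr, "NVML init failed: %s\n", nvmlErrorString(result));
		return 1;
	}

	nvmlDevice_t device;
	result = nvmlDeviceGetHandleByIndex(0, &device); // first GPU only, like the collector
	if (result != NVML_SUCCESS) {
		std::fprintf(stderr, "No device handle: %s\n", nvmlErrorString(result));
		nvmlShutdown();
		return 1;
	}

	nvmlUtilization_t utilization;
	if (nvmlDeviceGetUtilizationRates(device, &utilization) == NVML_SUCCESS)
		std::printf("GPU utilization: %u%%\n", utilization.gpu);

	unsigned int temp;
	if (nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &temp) == NVML_SUCCESS)
		std::printf("GPU temperature: %u C\n", temp);

	nvmlMemory_t memory;
	if (nvmlDeviceGetMemoryInfo(device, &memory) == NVML_SUCCESS)
		std::printf("VRAM used: %llu / %llu bytes\n",
			(unsigned long long)memory.used, (unsigned long long)memory.total);

	nvmlShutdown();
	return 0;
}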