-// clang-format off
-#include "utils/cortex_utils.h"
-// clang-format on
 #include "engine_list_cmd.h"
-#include <filesystem>
 #include <tabulate/table.hpp>
-#include <utility>
-#include "trantor/utils/Logger.h"
+#include "services/engine_service.h"
 
 namespace commands {
 
 bool EngineListCmd::Exec() {
+  auto engine_service = EngineService();
+  auto status_list = engine_service.GetEngineInfoList();
+
   tabulate::Table table;
+  table.format().font_color(tabulate::Color::green);
   table.add_row(
       {"(Index)", "name", "description", "version", "product name", "status"});
-  table.format().font_color(tabulate::Color::green);
-#ifdef _WIN32
-  if (std::filesystem::exists(std::filesystem::current_path().string() +
-                              cortex_utils::kOnnxLibPath)) {
-    table.add_row({"1", "cortex.onnx",
-                   "This extension enables chat completion API calls using the "
-                   "Onnx engine",
-                   "0.0.1", "Onnx Inference Engine", "ready"});
-  } else {
-    table.add_row({"1", "cortex.onnx",
-                   "This extension enables chat completion API calls using the "
-                   "Onnx engine",
-                   "0.0.1", "Onnx Inference Engine", "not_initialized"});
+  for (int i = 0; i < status_list.size(); i++) {
+    auto status = status_list[i];
+    std::string index = std::to_string(i + 1);
+    table.add_row({index, status.name, status.description, status.version,
+                   status.product_name, status.status});
   }
 
-#else
-  table.add_row(
-      {"1", "cortex.onnx",
-       "This extension enables chat completion API calls using the Onnx engine",
-       "0.0.1", "Onnx Inference Engine", "not_supported"});
-#endif
-  // llamacpp
-  if (std::filesystem::exists(std::filesystem::current_path().string() +
-                              cortex_utils::kLlamaLibPath)) {
-    table.add_row({"2", "cortex.llamacpp",
-                   "This extension enables chat completion API calls using the "
-                   "LlamaCPP engine",
-                   "0.0.1", "LlamaCPP Inference Engine", "ready"});
-  } else {
-    table.add_row({"2", "cortex.llamacpp",
-                   "This extension enables chat completion API calls using the "
-                   "LlamaCPP engine",
-                   "0.0.1", "LlamaCPP Inference Engine", "not_initialized"});
-  }
-  // tensorrt llm
-  if (std::filesystem::exists(std::filesystem::current_path().string() +
-                              cortex_utils::kTensorrtLlmPath)) {
-    table.add_row({"3", "cortex.tensorrt-llm",
-                   "This extension enables chat completion API calls using the "
-                   "TensorrtLLM engine",
-                   "0.0.1", "TensorrtLLM Inference Engine", "ready"});
-  } else {
-    table.add_row({"3", "cortex.tensorrt-llm",
-                   "This extension enables chat completion API calls using the "
-                   "TensorrtLLM engine",
-                   "0.0.1", "TensorrtLLM Inference Engine", "not_initialized"});
-  }
   for (int i = 0; i < 6; i++) {
     table[0][i]
         .format()
@@ -77,5 +36,4 @@ bool EngineListCmd::Exec() {
   std::cout << table << std::endl;
   return true;
 }
-
 };  // namespace commands
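
For readers tracing the new call path: the loop added above compiles only if GetEngineInfoList() returns a range of objects exposing name, description, version, product_name, and status members. The diff does not show services/engine_service.h, so the following is a minimal sketch of what it plausibly declares, inferred from this usage alone; the struct name EngineInfo, the std::vector return type, and the comments are assumptions, not the repository's actual definitions.

// Hypothetical sketch of services/engine_service.h, reconstructed from the
// members EngineListCmd::Exec() reads. Only GetEngineInfoList() is named in
// the diff; everything else here is assumed.
#pragma once
#include <string>
#include <vector>

struct EngineInfo {          // assumed element type
  std::string name;          // e.g. "cortex.llamacpp"
  std::string description;   // text for the "description" column
  std::string version;       // e.g. "0.0.1"
  std::string product_name;  // e.g. "LlamaCPP Inference Engine"
  std::string status;        // "ready", "not_initialized", or "not_supported"
};

class EngineService {
 public:
  // One entry per known engine; element order fixes the "(Index)" column.
  std::vector<EngineInfo> GetEngineInfoList();
};

Whatever the exact declarations, the design win is the same: the per-engine filesystem checks and hard-coded descriptions move behind the service, so the command simply prints whatever the service reports, and adding a fourth engine no longer requires touching the CLI presentation code.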