 #include "algorithm"
 #include "utils/file_manager_utils.h"
 
+namespace {
+constexpr static auto kIncompatible = "Incompatible";
+constexpr static auto kReady = "Ready";
+constexpr static auto kNotInstalled = "Not Installed";
+}  // namespace
+
 EngineInfo EngineService::GetEngineInfo(const std::string& engine) const {
   // if engine is not found in kSupportEngine, throw runtime error
   if (std::find(kSupportEngines.begin(), kSupportEngines.end(), engine) ==
@@ -21,42 +27,43 @@ EngineInfo EngineService::GetEngineInfo(const std::string& engine) const {
 std::vector<EngineInfo> EngineService::GetEngineInfoList() const {
   auto ecp = file_manager_utils::GetEnginesContainerPath();
 
-  std::string onnx_status{"not_supported"};
-  std::string llamacpp_status = std::filesystem::exists(ecp / "cortex.llamacpp")
-                                    ? "ready"
-                                    : "not_initialized";
-  std::string tensorrt_status{"not_supported"};
+  std::string onnx_status{kIncompatible};
+  std::string llamacpp_status =
+      std::filesystem::exists(ecp / "cortex.llamacpp") ? kReady : kNotInstalled;
+  std::string tensorrt_status{kIncompatible};
 
 #ifdef _WIN32
-  onnx_status = std::filesystem::exists(ecp / "cortex.onnx")
-                    ? "ready"
-                    : "not_initialized";
+  onnx_status =
+      std::filesystem::exists(ecp / "cortex.onnx") ? kReady : kNotInstalled;
   tensorrt_status = std::filesystem::exists(ecp / "cortex.tensorrt-llm")
-                        ? "ready"
-                        : "not_initialized";
+                        ? kReady
+                        : kNotInstalled;
 #elif defined(__linux__)
   tensorrt_status = std::filesystem::exists(ecp / "cortex.tensorrt-llm")
-                        ? "ready"
-                        : "not_initialized";
+                        ? kReady
+                        : kNotInstalled;
 #endif
   std::vector<EngineInfo> engines = {
       {.name = "cortex.onnx",
        .description = "This extension enables chat completion API calls using "
                       "the Onnx engine",
+       .format = "ONNX",
        .version = "0.0.1",
-       .product_name = "Onnx Inference Engine",
+       .product_name = "ONNXRuntime",
        .status = onnx_status},
       {.name = "cortex.llamacpp",
        .description = "This extension enables chat completion API calls using "
                       "the LlamaCPP engine",
+       .format = "GGUF",
        .version = "0.0.1",
-       .product_name = "LlamaCPP Inference Engine",
+       .product_name = "llama.cpp",
        .status = llamacpp_status},
       {.name = "cortex.tensorrt-llm",
        .description = "This extension enables chat completion API calls using "
                       "the TensorrtLLM engine",
+       .format = "TensorRT Engines",
        .version = "0.0.1",
-       .product_name = "TensorrtLLM Inference Engine",
+       .product_name = "TensorRT-LLM",
        .status = tensorrt_status},
   };
 
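For context: the new `.format` initializers above imply that the `EngineInfo` struct gained a `format` field in this change. Below is a minimal sketch of the struct as inferred purely from the designated initializers in this diff; the actual definition lives in the repo's headers and may differ.

    // Inferred sketch only: field names and order are taken from the
    // designated initializers in this diff, not from the real header.
    #include <string>

    struct EngineInfo {
      std::string name;          // e.g. "cortex.llamacpp"
      std::string description;
      std::string format;        // new field, e.g. "GGUF"
      std::string version;       // e.g. "0.0.1"
      std::string product_name;  // e.g. "llama.cpp"
      std::string status;        // kIncompatible, kReady, or kNotInstalled
    };

Note that C++20 designated initializers must appear in declaration order, so the real struct's members must be declared in the order they are initialized above, with `format` sitting between `description` and `version`.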