5 | 5 | #include "model_start_cmd.h" |
6 | 6 | #include "model_status_cmd.h" |
7 | 7 | #include "server_start_cmd.h" |
| 8 | +#include "utils/cortex_utils.h" |
8 | 9 | #include "utils/file_manager_utils.h" |
9 | | - |
| 10 | +#include "utils/modellist_utils.h" |
10 | 11 | namespace commands { |
11 | 12 |
12 | 13 | void RunCmd::Exec() { |
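| | + // Start from the raw handle; DownloadModel below replaces it with the resolved model ID when the model is not yet local.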
| 14 | + std::optional<std::string> model_id = model_handle_; |
| 15 | + |
| 16 | + modellist_utils::ModelListUtils modellist_handler; |
| 17 | + config::YamlHandler yaml_handler; |
13 | 18 | auto address = host_ + ":" + std::to_string(port_); |
14 | | - CmdInfo ci(model_id_); |
15 | | - std::string model_file = |
16 | | - ci.branch == "main" ? ci.model_name : ci.model_name + "-" + ci.branch; |
17 | | - // TODO should we clean all resource if something fails? |
18 | | - // Check if model existed. If not, download it |
19 | | - { |
20 | | - auto model_conf = model_service_.GetDownloadedModel(model_file + ".yaml"); |
21 | | - if (!model_conf.has_value()) { |
22 | | - model_service_.DownloadModel(model_id_); |
23 | | - } |
24 | | - } |
25 | 19 |
26 | | - // Check if engine existed. If not, download it |
| 20 | + // Download model if it does not exist |
27 | 21 | { |
28 | | - auto required_engine = engine_service_.GetEngineInfo(ci.engine_name); |
29 | | - if (!required_engine.has_value()) { |
30 | | - throw std::runtime_error("Engine not found: " + ci.engine_name); |
31 | | - } |
32 | | - if (required_engine.value().status == EngineService::kIncompatible) { |
33 | | - throw std::runtime_error("Engine " + ci.engine_name + " is incompatible"); |
34 | | - } |
35 | | - if (required_engine.value().status == EngineService::kNotInstalled) { |
36 | | - engine_service_.InstallEngine(ci.engine_name); |
| 22 | + if (!modellist_handler.HasModel(model_handle_)) { |
| 23 | + model_id = model_service_.DownloadModel(model_handle_); |
| 24 | + if (!model_id.has_value()) { |
| 25 | + CTL_ERR("Error: Could not get model_id from handle: " << model_handle_); |
| 26 | + return; |
| 27 | + } else { |
| 28 | + CTL_INF("model_id: " << model_id.value()); |
| 29 | + } |
37 | 30 | } |
38 | 31 | } |
39 | 32 |
40 | | - // Start server if it is not running |
41 | | - { |
42 | | - if (!commands::IsServerAlive(host_, port_)) { |
43 | | - CLI_LOG("Starting server ..."); |
44 | | - commands::ServerStartCmd ssc; |
45 | | - if (!ssc.Exec(host_, port_)) { |
46 | | - return; |
| 33 | + try { |
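| | + // Look up the stored model entry and parse its YAML into a model config; mc.engine determines which engine is required.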
| 34 | + auto model_entry = modellist_handler.GetModelInfo(*model_id); |
| 35 | + yaml_handler.ModelConfigFromFile(model_entry.path_to_model_yaml); |
| 36 | + auto mc = yaml_handler.GetModelConfig(); |
| 37 | + |
| 38 | + // Check if the engine exists. If not, install it
| 39 | + { |
| 40 | + auto required_engine = engine_service_.GetEngineInfo(mc.engine); |
| 41 | + if (!required_engine.has_value()) { |
| 42 | + throw std::runtime_error("Engine not found: " + mc.engine); |
| 43 | + } |
| 44 | + if (required_engine.value().status == EngineService::kIncompatible) { |
| 45 | + throw std::runtime_error("Engine " + mc.engine + " is incompatible"); |
| 46 | + } |
| 47 | + if (required_engine.value().status == EngineService::kNotInstalled) { |
| 48 | + engine_service_.InstallEngine(mc.engine); |
47 | 49 | } |
48 | 50 | } |
49 | | - } |
50 | 51 |
51 | | - config::YamlHandler yaml_handler; |
52 | | - yaml_handler.ModelConfigFromFile( |
53 | | - file_manager_utils::GetModelsContainerPath().string() + "/" + model_file + |
54 | | - ".yaml"); |
55 | | - auto mc = yaml_handler.GetModelConfig(); |
| 52 | + // Start server if it is not running |
| 53 | + { |
| 54 | + if (!commands::IsServerAlive(host_, port_)) { |
| 55 | + CLI_LOG("Starting server ..."); |
| 56 | + commands::ServerStartCmd ssc; |
| 57 | + if (!ssc.Exec(host_, port_)) { |
| 58 | + return; |
| 59 | + } |
| 60 | + } |
| 61 | + } |
56 | 62 |
57 | | - // Always start model if not llamacpp |
58 | | - // If it is llamacpp, then check model status first |
59 | | - { |
60 | | - if ((mc.engine.find("llamacpp") == std::string::npos) || |
61 | | - !commands::ModelStatusCmd().IsLoaded(host_, port_, mc)) { |
62 | | - ModelStartCmd msc(host_, port_, mc); |
63 | | - if (!msc.Exec()) { |
64 | | - return; |
| 63 | + // Always start the model if the engine is not llamacpp;
| 64 | + // for llamacpp, check the model status first and start only if it is not loaded
| 65 | + { |
| 66 | + if ((mc.engine.find("llamacpp") == std::string::npos) || |
| 67 | + !commands::ModelStatusCmd().IsLoaded(host_, port_, mc)) { |
| 68 | + if (!ModelStartCmd().Exec(host_, port_, mc)) { |
| 69 | + return; |
| 70 | + } |
65 | 71 | } |
66 | 72 | } |
67 | | - } |
68 | 73 |
69 | | - // Chat |
70 | | - { |
71 | | - ChatCmd cc(host_, port_, mc); |
72 | | - cc.Exec(""); |
| 74 | + // Chat |
| 75 | + ChatCmd().Exec(host_, port_, mc, ""); |
| 76 | + } catch (const std::exception& e) { |
| 77 | + CLI_LOG("Failed to run model with ID '" + model_handle_ + "': " + e.what());
73 | 78 | } |
74 | 79 | } |
75 | 80 | }; // namespace commands |
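For orientation, here is a minimal usage sketch of this command as it might be driven from the CLI layer. The constructor signature below is an assumption inferred from the members `Exec()` uses (`host_`, `port_`, `model_handle_`), not the actual `run_cmd.h`:

```cpp
// Hypothetical sketch: the RunCmd constructor shape is assumed from the
// fields referenced in Exec() above; the real header may differ.
#include "run_cmd.h"

int main() {
  // Assumed ctor args: server host, server port, user-supplied model handle.
  commands::RunCmd cmd("127.0.0.1", 3928, "tinyllama");

  // Exec() resolves the model (downloading it if absent), installs the
  // engine if needed, starts the server when it is not running, loads the
  // model, and finally enters chat.
  cmd.Exec();
  return 0;
}
```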