 #include "httplib.h"
 
 #include "cortex_upd_cmd.h"
+#include "model_status_cmd.h"
+#include "server_start_cmd.h"
 #include "trantor/utils/Logger.h"
 #include "utils/logging_utils.h"
-#include "server_start_cmd.h"
 
 namespace commands {
 namespace {
@@ -45,29 +46,11 @@ void ChatCmd::Exec(std::string msg) { |
   }
 
   auto address = host_ + ":" + std::to_string(port_);
-  // Check if model is loaded
-  // TODO(sang) only llamacpp support modelstatus for now
-  if (mc_.engine.find("llamacpp") != std::string::npos) {
-    httplib::Client cli(address);
-    nlohmann::json json_data;
-    json_data["model"] = mc_.name;
-    json_data["engine"] = mc_.engine;
-
-    auto data_str = json_data.dump();
-
-    // TODO: move this to another message?
-    auto res = cli.Post("/inferences/server/modelstatus", httplib::Headers(),
-                        data_str.data(), data_str.size(), "application/json");
-    if (res) {
-      if (res->status != httplib::StatusCode::OK_200) {
-        CTL_ERR(res->body);
-        return;
-      }
-    } else {
-      auto err = res.error();
-      CTL_ERR("HTTP error: " << httplib::to_string(err));
-      return;
-    }
+  // Only llamacpp supports the model status check for now
+  if ((mc_.engine.find("llamacpp") != std::string::npos) &&
+      !commands::ModelStatusCmd().IsLoaded(host_, port_, mc_)) {
+    CLI_LOG("Model is not loaded yet!");
+    return;
   }
 
   // Some instruction for user here
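
For reference, the inline status check removed above is presumably what the new commands::ModelStatusCmd().IsLoaded(host_, port_, mc_) call now encapsulates. A minimal sketch of such a helper, reconstructed from the deleted code, could look like the following; the exact signature, the config::ModelConfig parameter type, and the error handling are assumptions taken from the call site, not the actual contents of model_status_cmd.h.

// Hypothetical sketch of ModelStatusCmd::IsLoaded, assuming it wraps the
// same /inferences/server/modelstatus request that ChatCmd::Exec previously
// issued inline. Types and behaviour are guesses based on the removed code.
#include "model_status_cmd.h"

#include "httplib.h"
#include "nlohmann/json.hpp"
#include "utils/logging_utils.h"

namespace commands {

bool ModelStatusCmd::IsLoaded(const std::string& host, int port,
                              const config::ModelConfig& mc) {
  httplib::Client cli(host + ":" + std::to_string(port));

  // Same request body the inline check used: model name plus engine.
  nlohmann::json json_data;
  json_data["model"] = mc.name;
  json_data["engine"] = mc.engine;
  auto data_str = json_data.dump();

  auto res = cli.Post("/inferences/server/modelstatus", httplib::Headers(),
                      data_str.data(), data_str.size(), "application/json");
  if (res) {
    // Treat any non-200 response as "model not loaded".
    if (res->status != httplib::StatusCode::OK_200) {
      return false;
    }
  } else {
    CTL_ERR("HTTP error: " << httplib::to_string(res.error()));
    return false;
  }
  return true;
}

}  // namespace commands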