This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit c12a78f

fix: run cmd (#1380)
1 parent 58ec4f9 commit c12a78f

File tree

1 file changed: +3 −3 lines changed

engine/commands/run_cmd.cc

Lines changed: 3 additions & 3 deletions
@@ -72,16 +72,16 @@ void RunCmd::Exec(bool chat_flag) {
   // If it is llamacpp, then check model status first
   {
     if ((mc.engine.find("llamacpp") == std::string::npos) ||
-        !commands::ModelStatusCmd().IsLoaded(host_, port_, model_handle_)) {
-      if (!ModelStartCmd().Exec(host_, port_, model_handle_)) {
+        !commands::ModelStatusCmd().IsLoaded(host_, port_, *model_id)) {
+      if (!ModelStartCmd().Exec(host_, port_, *model_id)) {
         return;
       }
     }
   }
 
   // Chat
   if (chat_flag) {
-    ChatCompletionCmd().Exec(host_, port_, model_handle_, mc, "");
+    ChatCompletionCmd().Exec(host_, port_, *model_id, mc, "");
   } else {
     CLI_LOG(*model_id << " model started successfully. Use `"
             << commands::GetCortexBinary() << " chat " << *model_id
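The fix is consistent across the hunk: inside RunCmd::Exec, the raw model_handle_ member is swapped for *model_id in the status check, the start call, and the chat call, so all three operate on the same resolved model. The dereference suggests model_id is something like a std::optional<std::string> produced by a resolution step earlier in the function. Below is a minimal sketch of that handle-vs-resolved-id pattern; ResolveModelId and the example ids are assumptions for illustration, not code from this commit.

#include <iostream>
#include <optional>
#include <string>

// Hypothetical resolver: maps a user-typed handle to the concrete model
// id, e.g. "llama3" -> "llama3:8b-gguf"; std::nullopt if unknown.
std::optional<std::string> ResolveModelId(const std::string& handle) {
  if (handle == "llama3") return "llama3:8b-gguf";
  return std::nullopt;
}

int main() {
  const std::string model_handle_ = "llama3";
  const auto model_id = ResolveModelId(model_handle_);
  if (!model_id) {
    std::cerr << "Unknown model: " << model_handle_ << '\n';
    return 1;
  }
  // Once resolved, downstream calls (status check, start, chat) should
  // all receive *model_id rather than the raw handle, as in the diff;
  // mixing the two could start one model and chat with another.
  std::cout << "Using resolved id: " << *model_id << '\n';
  return 0;
}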

0 commit comments