diff --git a/examples/llava/gemma3-cli.cpp b/examples/llava/gemma3-cli.cpp
index 3d56647506c2c..9186527af603f 100644
--- a/examples/llava/gemma3-cli.cpp
+++ b/examples/llava/gemma3-cli.cpp
@@ -24,7 +24,9 @@
 #include <signal.h>
 #endif
 
-static bool g_is_generating = false;
+// volatile, because these flags are written from the SIGINT handler
+static volatile bool g_is_generating = false;
+static volatile bool is_app_running = true;
 
 /**
  * Please note that this is NOT a production-ready stuff.
@@ -49,8 +51,10 @@ static void sigint_handler(int signo) {
             g_is_generating = false;
         } else {
             console::cleanup();
-            LOG("\nInterrupted by user\n");
-            _exit(130);
+            if (!is_app_running) {
+                _exit(1); // repeated interrupt while shutting down: force-quit
+            }
+            is_app_running = false;
         }
     }
 }
@@ -134,7 +138,7 @@ struct decode_embd_batch {
 
 static int generate_response(gemma3_context & ctx, common_sampler * smpl, int n_predict) {
     for (int i = 0; i < n_predict; i++) {
-        if (i > n_predict || !g_is_generating) {
+        if (i > n_predict || !g_is_generating || !is_app_running) {
             printf("\n");
             break;
         }
@@ -150,6 +154,11 @@ static int generate_response(gemma3_context & ctx, common_sampler * smpl, int n_
         printf("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
         fflush(stdout);
 
+        if (!is_app_running) {
+            printf("\n");
+            break;
+        }
+
         // eval the token
         common_batch_clear(ctx.batch);
         common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
@@ -172,6 +181,7 @@ static int eval_message(gemma3_context & ctx, common_chat_msg & msg, std::vector
     LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.prompt.c_str());
 
     for (auto & fname : images_fname) {
+        if (!is_app_running) return 0;
         mtmd_bitmap bitmap;
         if (mtmd_helper_bitmap_init_from_file(fname.c_str(), bitmap)) {
             LOG_ERR("Unable to load image %s\n", fname.c_str());
@@ -185,6 +195,9 @@ static int eval_message(gemma3_context & ctx, common_chat_msg & msg, std::vector
     text.add_special = add_bos;
     text.parse_special = true;
     mtmd_input_chunks chunks;
+
+    if (!is_app_running) return 0;
+
     int32_t res = mtmd_tokenize(ctx.ctx_vision.get(), chunks, text, bitmaps);
     if (res != 0) {
         LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
@@ -242,6 +255,8 @@ int main(int argc, char ** argv) {
 #endif
     }
 
+    if (!is_app_running) return 130;
+
     if (is_single_turn) {
         g_is_generating = true;
         if (params.prompt.find("<__image__>") == std::string::npos) {
@@ -253,7 +268,7 @@ int main(int argc, char ** argv) {
         if (eval_message(ctx, msg, params.image, true)) {
             return 1;
         }
-        if (generate_response(ctx, smpl, n_predict)) {
+        if (is_app_running && generate_response(ctx, smpl, n_predict)) {
             return 1;
         }
 
@@ -268,12 +283,13 @@ int main(int argc, char ** argv) {
         std::vector<std::string> images_fname;
         std::string content;
 
-        while (true) {
+        while (is_app_running) {
             g_is_generating = false;
             LOG("\n> ");
             console::set_display(console::user_input);
             std::string line;
             console::readline(line, false);
+            if (!is_app_running) break;
             console::set_display(console::reset);
             line = string_strip(line);
             if (line.empty()) {
@@ -301,6 +317,7 @@ int main(int argc, char ** argv) {
             msg.role = "user";
             msg.content = content;
             int ret = eval_message(ctx, msg, images_fname, is_first_msg);
+            if (!is_app_running) break;
             if (ret == 2) {
                 // non-fatal error
                 images_fname.clear();
@@ -317,7 +334,8 @@ int main(int argc, char ** argv) {
             content.clear();
             is_first_msg = false;
         }
-    }
+    }
+    if (!is_app_running) LOG("\nInterrupted by user\n");
     llama_perf_context_print(ctx.lctx);
-    return 0;
+    return is_app_running ? 0 : 130;
 }
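
For reviewers, the interrupt protocol the patch implements can be reduced to a minimal standalone sketch (illustration only, not part of the patch): the first Ctrl+C cancels the in-flight generation, the second requests a clean shutdown of the chat loop, and a further interrupt while shutdown is pending force-quits. The generation loop below is a hypothetical stand-in for `generate_response()`, and the sketch uses `volatile std::sig_atomic_t` rather than the patch's `volatile bool`, since that is the only object type the C++ standard guarantees may be written from an asynchronous signal handler (`volatile bool` works in practice on the platforms llama.cpp targets).

```cpp
#include <csignal>
#include <cstdlib>

// Only volatile std::sig_atomic_t access is guaranteed to be
// well-defined inside a signal handler by the C++ standard.
static volatile std::sig_atomic_t g_is_generating = 0;
static volatile std::sig_atomic_t is_app_running  = 1;

static void sigint_handler(int /*signo*/) {
    if (g_is_generating) {
        g_is_generating = 0;   // 1st Ctrl+C: stop the current generation only
    } else if (!is_app_running) {
        std::_Exit(1);         // interrupt while shutdown pending: force-quit
    } else {
        is_app_running = 0;    // 2nd Ctrl+C: request a clean shutdown
    }
}

int main() {
    std::signal(SIGINT, sigint_handler);
    g_is_generating = 1;
    // Hypothetical stand-in for generate_response(): emit one token per
    // iteration, re-checking both flags in between. The volatile reads
    // prevent the compiler from hoisting the conditions out of the loop.
    while (g_is_generating && is_app_running) {
        // ... sample and print one token ...
    }
    return is_app_running ? 0 : 130; // 130 = 128 + SIGINT, as in the patch
}
```

Note the design choice the patch makes on top of this pattern: instead of calling `_exit(130)` from inside the handler as before, it only calls `console::cleanup()` there and lets `main` return 130, so `llama_perf_context_print()` and destructors still run on a normal interrupt; the hard `_exit(1)` is reserved for a repeated interrupt when the app is already shutting down.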