This repository was archived by the owner on Jul 4, 2025. It is now read-only.
File tree: 1 file changed, +19 −0
lines changed Original file line number Diff line number Diff line change @@ -149,6 +149,15 @@ void llamaCPP::chatCompletion(
149149 const HttpRequestPtr &req,
150150 std::function<void (const HttpResponsePtr &)> &&callback) {
151151
152+ if (!model_loaded) {
153+ Json::Value jsonResp;
154+ jsonResp[" message" ] =
155+ " Model has not been loaded, please load model into nitro" ;
156+ auto resp = nitro_utils::nitroHttpJsonResponse (jsonResp);
157+ resp->setStatusCode (drogon::k409Conflict);
158+ callback (resp);
159+ }
160+
152161 const auto &jsonBody = req->getJsonObject ();
153162 std::string formatted_output = pre_prompt;
154163
@@ -338,6 +347,16 @@ void llamaCPP::loadModel(
338347 const HttpRequestPtr &req,
339348 std::function<void (const HttpResponsePtr &)> &&callback) {
340349
350+ if (model_loaded) {
351+ LOG_INFO << " model loaded" ;
352+ Json::Value jsonResp;
353+ jsonResp[" message" ] = " Model already loaded" ;
354+ auto resp = nitro_utils::nitroHttpJsonResponse (jsonResp);
355+ resp->setStatusCode (drogon::k409Conflict);
356+ callback (resp);
357+ return ;
358+ }
359+
341360 const auto &jsonBody = req->getJsonObject ();
342361
343362 gpt_params params;
You can’t perform that action at this time.
0 commit comments