From 746de169931f0f6263eab85c05c537b7af9a43be Mon Sep 17 00:00:00 2001
From: NITHIN V
Date: Wed, 9 Jul 2025 09:59:05 +0000
Subject: [PATCH] Improve error message when model file is missing

---
 llama_cpp/llama.py | 26 ++++++++++++++++++--------
 1 file changed, 18 insertions(+), 8 deletions(-)

diff --git a/llama_cpp/llama.py b/llama_cpp/llama.py
index 2e93670e6..56902130d 100644
--- a/llama_cpp/llama.py
+++ b/llama_cpp/llama.py
@@ -369,15 +369,25 @@ def __init__(
         if not os.path.exists(model_path):
             raise ValueError(f"Model path does not exist: {model_path}")
 
-        self._model = self._stack.enter_context(
-            contextlib.closing(
-                internals.LlamaModel(
-                    path_model=self.model_path,
-                    params=self.model_params,
-                    verbose=self.verbose,
+        try:
+            self._model = self._stack.enter_context(
+                contextlib.closing(
+                    internals.LlamaModel(
+                        path_model=self.model_path,
+                        params=self.model_params,
+                        verbose=self.verbose,
+                    )
                 )
-            )
-        )
+            )
+        except RuntimeError as e:
+            if "No such file or directory" in str(e):
+                raise FileNotFoundError(
+                    f"Model file not found at '{self.model_path}'. "
+                    "Make sure the .gguf model file exists at the given path."
+                ) from e
+            else:
+                raise
+
         # Override tokenizer
         self.tokenizer_ = tokenizer or LlamaTokenizer(self)
 