
Commit f219bd9

llama.cpp: import more defensively
1 parent: 7f23adc

2 files changed: +5 −4 lines


src/lmql/models/lmtp/backends/llama_cpp_model.py

+2 −1

@@ -2,14 +2,15 @@
 import sys
 
 import numpy as np
-from llama_cpp import Llama, LlamaTokenizer
 
 import lmql.utils.nputil as nputil
 from lmql.models.lmtp.backends.lmtp_model import (LMTPModel, LMTPModelResult,
                                                   TokenStreamer)
 
 class LlamaCppModel(LMTPModel):
     def __init__(self, model_identifier, **kwargs):
+        from llama_cpp import Llama
+
         self.model_identifier = model_identifier
         self.kwargs = kwargs
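
This hunk defers the llama_cpp import from module level to construction time, so merely importing the backend module no longer fails on machines without llama-cpp-python installed; an ImportError can only surface when the backend is actually instantiated. A minimal standalone sketch of this lazy-import pattern (the class and parameter names here are hypothetical, not taken from the LMQL codebase):

class LazyLlamaBackend:
    def __init__(self, model_path, **kwargs):
        # Importing inside __init__ instead of at module level means the
        # optional dependency is only required when someone constructs
        # this backend; the module itself stays importable regardless.
        from llama_cpp import Llama
        self.llm = Llama(model_path=model_path, **kwargs)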

src/lmql/models/lmtp/backends/lmtp_model.py

+3 −3

@@ -107,9 +107,9 @@ def wrapper(loader):
         import importlib
         if module_dependencies is not None:
             for module in module_dependencies:
-                try:
-                    importlib.import_module(module)
-                except ImportError:
+                # check without importing
+                spec = importlib.util.find_spec(module)
+                if spec is None:
                     def error_func(*args, **kwargs):
                         assert False, "To use the {} backend, please install the '{}' package.".format(name, module)
                     LMTPModel.registry[name] = error_func
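
This hunk replaces a real import attempt with importlib.util.find_spec, which asks the import system whether a module could be imported without actually executing it, so probing for a missing optional dependency stays cheap and side-effect free. A standalone sketch of that availability check (the is_available helper is illustrative, not part of the codebase):

import importlib.util  # importlib.util must be imported explicitly

def is_available(module_name):
    # find_spec only consults the import machinery's finders; the module
    # is never executed. For dotted names it can raise ModuleNotFoundError
    # when a parent package is missing, so guard against that too.
    try:
        return importlib.util.find_spec(module_name) is not None
    except ModuleNotFoundError:
        return False

Used the way the diff uses it, a backend whose dependency is absent gets a stub registered in its place:

if not is_available("llama_cpp"):
    def error_func(*args, **kwargs):
        assert False, "To use the llama.cpp backend, please install the 'llama_cpp' package."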
