diff --git a/ramalama/common.py b/ramalama/common.py
index 048b9e2b..605eabb9 100644
--- a/ramalama/common.py
+++ b/ramalama/common.py
@@ -26,19 +26,30 @@ DEFAULT_IMAGE = "quay.io/ramalama/ramalama"
 DEFAULT_IMAGE = "quay.io/ramalama/ramalama"
 
 
+_engine = ""
+
+
 def container_manager():
+    global _engine
+    if _engine != "":
+        if _engine == "None":
+            return None
+        return _engine
+
+    _engine = "None"
     engine = os.getenv("RAMALAMA_CONTAINER_ENGINE")
     if engine is not None:
-        return engine
+        _engine = engine
+        return _engine
 
     if available("podman"):
         if sys.platform != "darwin" or is_podman_machine_running_with_krunkit():
-            return "podman"
-
-        return None
+            _engine = "podman"
+            return _engine
 
     if available("docker"):
-        return "docker"
+        _engine = "docker"
+        return _engine
 
     return None
 
diff --git a/ramalama/model.py b/ramalama/model.py
index 802e2e0d..763809bb 100644
--- a/ramalama/model.py
+++ b/ramalama/model.py
@@ -188,11 +188,11 @@ def setup_container(self, args):
         conman_args += ["-p", f"{args.port}:{args.port}"]
 
         # Check for env var RAMALAMA_DEVICE to explicitly declare the GPU device path
-        device_override=0
+        device_override = 0
         gpu_device = os.environ.get("RAMALAMA_DEVICE")
         if gpu_device:
             conman_args += ["--device", gpu_device]
-            device_override=1
+            device_override = 1
         if device_override != 1:
             if (sys.platform == "darwin" and os.path.basename(args.engine) != "docker") or os.path.exists("/dev/dri"):
                 conman_args += ["--device", "/dev/dri"]
diff --git a/ramalama/ollama.py b/ramalama/ollama.py
index b80a8922..75378a64 100644
--- a/ramalama/ollama.py
+++ b/ramalama/ollama.py
@@ -64,10 +64,10 @@ def init_pull(repos, accept, registry_head, model_name, model_tag, models, model
 def in_existing_cache(model_name, model_tag):
     if not available("ollama"):
         return None
-    default_ollama_caches=[
+    default_ollama_caches = [
         os.path.join(os.environ['HOME'], '.ollama/models'),
         '/usr/share/ollama/.ollama/models',
-        f'C:\\Users\\{os.getlogin()}\\.ollama\\models'
+        f'C:\\Users\\{os.getlogin()}\\.ollama\\models',
     ]
 
     for cache_dir in default_ollama_caches:
@@ -79,10 +79,11 @@ def in_existing_cache(model_name, model_tag):
                 if layer["mediaType"] == "application/vnd.ollama.image.model":
                     layer_digest = layer["digest"]
                     ollama_digest_path = os.path.join(cache_dir, 'blobs', layer_digest)
-                    if os.path.exists(str(ollama_digest_path).replace(':','-')):
-                        return str(ollama_digest_path).replace(':','-')
+                    if os.path.exists(str(ollama_digest_path).replace(':', '-')):
+                        return str(ollama_digest_path).replace(':', '-')
     return None
 
+
 class Ollama(Model):
     def __init__(self, model):
         model = rm_until_substring(model, "ollama.com/library/")
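For reviewers unfamiliar with the caching idiom in the common.py hunk: container_manager() now memoizes its result in the module-level _engine, with the string "None" acting as a sentinel so that a probe that found no engine is cached too ("" means "not probed yet"). A minimal standalone sketch of the same pattern; the shutil.which()-based available() below is a stand-in assumption, not ramalama's implementation:

```python
import os
import shutil

_engine = ""  # "" = not probed yet; "None" = probed, no engine found


def available(cmd):
    # Stand-in for ramalama's available(): a plain PATH lookup.
    return shutil.which(cmd) is not None


def container_manager():
    global _engine
    if _engine != "":
        # Cached result; the "None" sentinel maps back to None.
        return None if _engine == "None" else _engine

    _engine = "None"
    engine = os.getenv("RAMALAMA_CONTAINER_ENGINE")
    if engine is not None:
        _engine = engine
        return _engine

    # Simplified probe order; the real function additionally gates podman
    # on macOS behind is_podman_machine_running_with_krunkit().
    for candidate in ("podman", "docker"):
        if available(candidate):
            _engine = candidate
            return _engine

    return None
```

A stdlib alternative would be functools.lru_cache(maxsize=None) (or functools.cache on Python 3.9+), which caches a None return directly and avoids the sentinel string; the hand-rolled global shown in the patch behaves equivalently.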
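On the ollama.py change: the replace(':', '-') is there because ollama stores blob files on disk with the ':' from the manifest digest replaced by '-'. A small illustration of the path mapping; the digest value is hypothetical:

```python
import os

# Digest as recorded in an ollama manifest layer (hypothetical value).
layer_digest = "sha256:0a1b2c3d4e5f"

cache_dir = os.path.expanduser("~/.ollama/models")
ollama_digest_path = os.path.join(cache_dir, "blobs", layer_digest)

# On disk, ollama names the blob with ':' replaced by '-':
#   ~/.ollama/models/blobs/sha256-0a1b2c3d4e5f
blob_file = ollama_digest_path.replace(":", "-")
print(blob_file, os.path.exists(blob_file))
```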