From 11fe7875fc5755a00e2c88a52fe3f13e03a85a63 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Tue, 4 Feb 2025 07:56:37 -0500 Subject: [PATCH] Honor RAMALAMA_IMAGE if set Currently on my cuda laptop, if I set RAMALAMA_IMAGE to something, ramalama ignores it and forces the cuda image. Signed-off-by: Daniel J Walsh --- ramalama/common.py | 3 ++- ramalama/model.py | 4 ++-- test/system/030-run.bats | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/ramalama/common.py b/ramalama/common.py index b5d63ca7..240f38b4 100644 --- a/ramalama/common.py +++ b/ramalama/common.py @@ -23,6 +23,7 @@ MNT_FILE = f"{MNT_DIR}/model.file" HTTP_RANGE_NOT_SATISFIABLE = 416 +DEFAULT_IMAGE = "quay.io/ramalama/ramalama" def container_manager(): engine = os.getenv("RAMALAMA_CONTAINER_ENGINE") @@ -156,7 +157,7 @@ def default_image(): if image: return image - return "quay.io/ramalama/ramalama" + return DEFAULT_IMAGE def genname(): diff --git a/ramalama/model.py b/ramalama/model.py index 24c27e52..29d7ad6a 100644 --- a/ramalama/model.py +++ b/ramalama/model.py @@ -4,7 +4,7 @@ from ramalama.common import ( container_manager, - default_image, + DEFAULT_IMAGE, exec_cmd, genname, run_cmd, @@ -106,7 +106,7 @@ def attempt_to_use_versioned(self, conman, image, vers, args): return False def _image(self, args): - if args.image != default_image(): + if args.image != DEFAULT_IMAGE: return args.image env_vars = get_env_vars() diff --git a/test/system/030-run.bats b/test/system/030-run.bats index 4f17d9b5..cc40bfd3 100755 --- a/test/system/030-run.bats +++ b/test/system/030-run.bats @@ -30,8 +30,8 @@ load helpers run_ramalama 1 --nocontainer run --name foobar tiny is "${lines[0]}" "Error: --nocontainer and --name options conflict. --name requires a container." 
"conflict between nocontainer and --name line" - RAMALAMA_IMAGE=${image} run_ramalama --dryrun run ${model} - is "$output" ".*${image}:latest llama-run" "verify image name" + RAMALAMA_IMAGE=${image}:1234 run_ramalama --dryrun run ${model} + is "$output" ".*${image}:1234 llama-run" "verify image name" else run_ramalama --dryrun run -c 4096 ${model} is "$output" 'llama-run -c 4096 --temp 0.8.*/path/to/model.*' "dryrun correct"