Stash output from container_manager #790

Merged · 1 commit · Feb 12, 2025

ramalama/common.py — 16 additions & 5 deletions

@@ -26,19 +26,30 @@
 DEFAULT_IMAGE = "quay.io/ramalama/ramalama"
 
 
+_engine = ""
+
+
 def container_manager():
+    global _engine
+    if _engine != "":
+        if _engine == "None":
+            return None

Collaborator: Empty string, "None" and None... Python is frustrating with all the various kinds of null :'(

+        return _engine
+
+    _engine = "None"
     engine = os.getenv("RAMALAMA_CONTAINER_ENGINE")
     if engine is not None:
-        return engine
+        _engine = engine
+        return _engine
 
     if available("podman"):
         if sys.platform != "darwin" or is_podman_machine_running_with_krunkit():
-            return "podman"
-
-        return None
+            _engine = "podman"
+            return _engine
 
     if available("docker"):
-        return "docker"
+        _engine = "docker"
+        return _engine
 
     return None
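
The review comment above is reacting to the three nulls the cache now juggles: "" means "not computed yet", the string "None" means "computed, no engine found", and the real None is what callers get back. As a hedged alternative sketch — not what this PR does — functools.cache (Python 3.9+) can do the stashing itself, and it happily caches a None return, so no string sentinels are needed. It assumes available() and is_podman_machine_running_with_krunkit() are importable from ramalama.common, where container_manager() lives:

import functools
import os
import sys

# Assumption: both helpers live in ramalama.common alongside container_manager().
from ramalama.common import available, is_podman_machine_running_with_krunkit


@functools.cache  # memoizes the first result, including a plain None
def container_manager():
    engine = os.getenv("RAMALAMA_CONTAINER_ENGINE")
    if engine is not None:
        return engine

    if available("podman"):
        if sys.platform != "darwin" or is_podman_machine_running_with_krunkit():
            return "podman"

    if available("docker"):
        return "docker"

    return None

The trade-off is that the memoization becomes invisible at the call site; the PR's explicit module-level _engine keeps the control flow in plain sight (and functools.cache would need cache_clear() to reset between tests).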

ramalama/model.py — 2 additions & 2 deletions

@@ -188,11 +188,11 @@ def setup_container(self, args):
         conman_args += ["-p", f"{args.port}:{args.port}"]
 
         # Check for env var RAMALAMA_DEVICE to explicitly declare the GPU device path
-        device_override=0
+        device_override = 0
         gpu_device = os.environ.get("RAMALAMA_DEVICE")
         if gpu_device:
             conman_args += ["--device", gpu_device]
-            device_override=1
+            device_override = 1
         if device_override != 1:
             if (sys.platform == "darwin" and os.path.basename(args.engine) != "docker") or os.path.exists("/dev/dri"):
                 conman_args += ["--device", "/dev/dri"]
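
Aside from the spacing fixes, this hunk sits in the RAMALAMA_DEVICE escape hatch: an explicit device path from the environment always wins, and only in its absence does setup_container() fall back to autodetection. A simplified standalone sketch of that precedence, with gpu_device_args() as a hypothetical helper name (the real code appends straight to conman_args):

import os


def gpu_device_args() -> list[str]:
    # An explicit RAMALAMA_DEVICE always takes precedence.
    gpu_device = os.environ.get("RAMALAMA_DEVICE")
    if gpu_device:
        return ["--device", gpu_device]
    # Fallback, simplified here to the /dev/dri probe; the real check also
    # special-cases darwin with a non-docker engine.
    if os.path.exists("/dev/dri"):
        return ["--device", "/dev/dri"]
    return []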

ramalama/ollama.py — 5 additions & 4 deletions

@@ -64,10 +64,10 @@ def init_pull(repos, accept, registry_head, model_name, model_tag, models, model
 def in_existing_cache(model_name, model_tag):
     if not available("ollama"):
         return None
-    default_ollama_caches=[
+    default_ollama_caches = [
         os.path.join(os.environ['HOME'], '.ollama/models'),
         '/usr/share/ollama/.ollama/models',
-        f'C:\\Users\\{os.getlogin()}\\.ollama\\models'
+        f'C:\\Users\\{os.getlogin()}\\.ollama\\models',
     ]
 
     for cache_dir in default_ollama_caches:
@@ -79,10 +79,11 @@ def in_existing_cache(model_name, model_tag):
                     if layer["mediaType"] == "application/vnd.ollama.image.model":
                         layer_digest = layer["digest"]
                         ollama_digest_path = os.path.join(cache_dir, 'blobs', layer_digest)
-                        if os.path.exists(str(ollama_digest_path).replace(':','-')):
-                            return str(ollama_digest_path).replace(':','-')
+                        if os.path.exists(str(ollama_digest_path).replace(':', '-')):
+                            return str(ollama_digest_path).replace(':', '-')
     return None
 
+
 class Ollama(Model):
     def __init__(self, model):
         model = rm_until_substring(model, "ollama.com/library/")
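
For context on the replace(':', '-') the second hunk tidies: an Ollama manifest records a layer digest as sha256:<hex>, but the blob on disk is named sha256-<hex>, so the path must be rewritten before the existence check. A minimal sketch of that mapping, with ollama_blob_path() as a hypothetical helper name:

import os


def ollama_blob_path(cache_dir: str, layer_digest: str) -> str:
    # "sha256:abc" in the manifest becomes the on-disk blob name "sha256-abc".
    return os.path.join(cache_dir, 'blobs', layer_digest.replace(':', '-'))


# e.g. ollama_blob_path('/usr/share/ollama/.ollama/models', 'sha256:abc123')
#      -> '/usr/share/ollama/.ollama/models/blobs/sha256-abc123'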