Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Parse https://ollama.com/library/ syntax #648

Merged
merged 1 commit into from
Feb 10, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion ramalama/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -967,7 +967,7 @@ def rm_cli(args):
def New(model, args):
if model.startswith("huggingface://") or model.startswith("hf://") or model.startswith("hf.co/"):
return Huggingface(model)
if model.startswith("ollama"):
if model.startswith("ollama://") or "ollama.com/library/" in model:
return Ollama(model)
if model.startswith("oci://") or model.startswith("docker://"):
return OCI(model, args.engine)
Expand Down
7 changes: 3 additions & 4 deletions ramalama/huggingface.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import pathlib
import urllib.request
from ramalama.common import available, run_cmd, exec_cmd, download_file, verify_checksum, perror
from ramalama.model import Model
from ramalama.model import Model, rm_until_substring

missing_huggingface = """
Optional: Huggingface models require the huggingface-cli module.
Expand Down Expand Up @@ -33,9 +33,8 @@ def fetch_checksum_from_api(url):

class Huggingface(Model):
def __init__(self, model):
model = model.removeprefix("huggingface://")
model = model.removeprefix("hf://")
model = model.removeprefix("hf.co/")
model = rm_until_substring(model, "hf.co/")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why are you dropping huggingface://?

Copy link
Collaborator Author

@ericcurtin ericcurtin Feb 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's not dropped, at this point of the code, we have already verified/validated the start of the protocol is fine, so we do a more generic:

model = rm_until_substring(model, "://")

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Right thanks.

Just need tests.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if model.startswith("huggingface://") or model.startswith("hf://") or model.startswith("hf.co/"):

model = rm_until_substring(model, "://")
super().__init__(model)
self.type = "huggingface"
split = self.model.rsplit("/", 1)
Expand Down
9 changes: 9 additions & 0 deletions ramalama/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -486,3 +486,12 @@ def distinfo_volume():
return ""

return f"-v{path}:/usr/share/ramalama/{dist_info}:ro"


def rm_until_substring(model, substring):
    """Strip everything up to and including the first occurrence of *substring*.

    Used to normalize model references, e.g. turning
    "https://ollama.com/library/smollm:135m" into "smollm:135m" or
    "ollama://tinyllama" into "tinyllama".

    Args:
        model: The model reference string (URL or prefixed name).
        substring: The marker to search for (e.g. "://").

    Returns:
        The remainder of *model* after the first *substring*, or *model*
        unchanged when *substring* does not occur in it.
    """
    pos = model.find(substring)
    if pos == -1:
        return model

    # Plain slicing — no need to rebuild the string character by character.
    return model[pos + len(substring):]
6 changes: 4 additions & 2 deletions ramalama/ollama.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import urllib.request
import json
from ramalama.common import run_cmd, verify_checksum, download_file
from ramalama.model import Model
from ramalama.model import Model, rm_until_substring


def fetch_manifest_data(registry_head, model_tag, accept):
Expand Down Expand Up @@ -60,7 +60,9 @@ def init_pull(repos, accept, registry_head, model_name, model_tag, models, model

class Ollama(Model):
def __init__(self, model):
super().__init__(model.removeprefix("ollama://"))
model = rm_until_substring(model, "ollama.com/library/")
model = rm_until_substring(model, "://")
super().__init__(model)
self.type = "Ollama"

def _local(self, args):
Expand Down
12 changes: 4 additions & 8 deletions ramalama/url.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,13 @@
import os
from ramalama.common import download_file
from ramalama.model import Model
from ramalama.model import Model, rm_until_substring
from urllib.parse import urlparse


class URL(Model):
def __init__(self, model):
self.type = ""
for prefix in ["file", "http", "https"]:
if model.startswith(f"{prefix}://"):
self.type = prefix
model = model.removeprefix(f"{prefix}://")
break

self.type = urlparse(model).scheme
model = rm_until_substring(model, "://")
super().__init__(model)
split = self.model.rsplit("/", 1)
self.directory = split[0].removeprefix("/") if len(split) > 1 else ""
Expand Down
12 changes: 6 additions & 6 deletions test/system/050-pull.bats
Original file line number Diff line number Diff line change
Expand Up @@ -14,15 +14,15 @@ load setup_suite
@test "ramalama pull ollama" {
run_ramalama pull tiny
run_ramalama rm tiny
run_ramalama pull ollama://tinyllama
run_ramalama pull https://ollama.com/library/smollm:135m
run_ramalama list
is "$output" ".*ollama://tinyllama" "image was actually pulled locally"
is "$output" ".*ollama://smollm:135m" "image was actually pulled locally"

RAMALAMA_TRANSPORT=ollama run_ramalama pull tinyllama:1.1b
run_ramalama pull ollama://tinyllama:1.1b
RAMALAMA_TRANSPORT=ollama run_ramalama pull smollm:360m
run_ramalama pull ollama://smollm:360m
run_ramalama list
is "$output" ".*ollama://tinyllama:1.1b" "image was actually pulled locally"
run_ramalama rm ollama://tinyllama ollama://tinyllama:1.1b
is "$output" ".*ollama://smollm:360m" "image was actually pulled locally"
run_ramalama rm ollama://smollm:135m ollama://smollm:360m

random_image_name=i_$(safename)
run_ramalama 1 pull ${random_image_name}
Expand Down
Loading