diff --git a/docs/ramalama-bench.1.md b/docs/ramalama-bench.1.md
index 87d51acf..c94435f3 100644
--- a/docs/ramalama-bench.1.md
+++ b/docs/ramalama-bench.1.md
@@ -28,6 +28,9 @@ URL support means if a model is on a web site or even on your local system, you
 #### **--help**, **-h**
 show this help message and exit
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 ## DESCRIPTION
 Benchmark specified AI Model.
 
diff --git a/docs/ramalama-convert.1.md b/docs/ramalama-convert.1.md
index 19fad843..bf09ec05 100644
--- a/docs/ramalama-convert.1.md
+++ b/docs/ramalama-convert.1.md
@@ -16,6 +16,9 @@ The model can be from RamaLama model storage in Huggingface, Ollama, or local mo
 #### **--help**, **-h**
 Print usage message
 
+#### **--network-mode**=*none*
+sets the configuration for network namespaces when handling RUN instructions
+
 #### **--type**=*raw* | *car*
 type of OCI Model Image to convert.
 
diff --git a/docs/ramalama-run.1.md b/docs/ramalama-run.1.md
index e0a69bd6..6fdc205b 100644
--- a/docs/ramalama-run.1.md
+++ b/docs/ramalama-run.1.md
@@ -37,6 +37,9 @@ show this help message and exit
 #### **--name**, **-n**
 name of the container to run the Model in
 
+#### **--network-mode**=*none*
+set the network mode for the container
+
 #### **--seed**=
 Specify seed rather than using random seed model interaction
 
diff --git a/docs/ramalama-serve.1.md b/docs/ramalama-serve.1.md
index 49840ef7..fede1be3 100644
--- a/docs/ramalama-serve.1.md
+++ b/docs/ramalama-serve.1.md
@@ -64,6 +64,9 @@ IP address for llama.cpp to listen on.
 #### **--name**, **-n**
 Name of the container to run the Model in.
 
+#### **--network-mode**=*default*
+set the network mode for the container
+
 #### **--port**, **-p**
 port for AI Model server to listen on
 
diff --git a/ramalama/cli.py b/ramalama/cli.py
index 0b3577b4..5f4de783 100644
--- a/ramalama/cli.py
+++ b/ramalama/cli.py
@@ -379,6 +379,12 @@ def bench_cli(args):
 
 def bench_parser(subparsers):
     parser = subparsers.add_parser("bench", aliases=["benchmark"], help="benchmark specified AI Model")
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=bench_cli)
 
@@ -600,6 +606,13 @@ def convert_parser(subparsers):
 Model "car" includes base image with the model stored in a /models subdir.
 Model "raw" contains the model and a link file model.file to it stored at /.""",
     )
+    # https://docs.podman.io/en/latest/markdown/podman-build.1.html#network-mode-net
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="sets the configuration for network namespaces when handling RUN instructions",
+    )
     parser.add_argument("SOURCE")  # positional argument
     parser.add_argument("TARGET")  # positional argument
     parser.set_defaults(func=convert_cli)
@@ -717,6 +730,15 @@ def _run(parser):
 def run_parser(subparsers):
     parser = subparsers.add_parser("run", help="run specified AI Model as a chatbot")
     _run(parser)
+    # Disable network access by default, and give the option to pass any supported network mode into
+    # podman if needed:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="none",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.add_argument(
         "ARGS", nargs="*", help="Overrides the default prompt, and the output is returned without entering the chatbot"
     )
@@ -742,6 +764,17 @@ def serve_parser(subparsers):
     parser.add_argument(
         "-p", "--port", default=config.get('port', "8080"), help="port for AI Model server to listen on"
     )
+    # --network-mode=default lets the container listen on localhost, and is an option that's compatible
+    # with podman and docker. It should use the bridge driver for rootful podman, the pasta driver for
+    # rootless podman, and the bridge driver for docker:
+    # https://docs.podman.io/en/latest/markdown/podman-run.1.html#network-mode-net
+    # https://docs.docker.com/engine/network/#drivers
+    parser.add_argument(
+        "--network-mode",
+        type=str,
+        default="default",
+        help="set the network mode for the container",
+    )
     parser.add_argument("MODEL")  # positional argument
     parser.set_defaults(func=serve_cli)
 
diff --git a/ramalama/model.py b/ramalama/model.py
index 710a27ac..6417090b 100644
--- a/ramalama/model.py
+++ b/ramalama/model.py
@@ -153,6 +153,7 @@ def setup_container(self, args):
             "-i",
             "--label",
             "RAMALAMA",
+            f"--network={args.network_mode}",
             "--security-opt=label=disable",
             "--name",
             name,
diff --git a/ramalama/oci.py b/ramalama/oci.py
index 6f5e50ff..096b1062 100644
--- a/ramalama/oci.py
+++ b/ramalama/oci.py
@@ -174,7 +174,19 @@ def build(self, source, target, args):
         else:
             c.write(model_raw)
         imageid = (
-            run_cmd([self.conman, "build", "--no-cache", "-q", "-f", containerfile.name, contextdir], debug=args.debug)
+            run_cmd(
+                [
+                    self.conman,
+                    "build",
+                    "--no-cache",
+                    f"--network={args.network_mode}",
+                    "-q",
+                    "-f",
+                    containerfile.name,
+                    contextdir,
+                ],
+                debug=args.debug,
+            )
             .stdout.decode("utf-8")
             .strip()