Mirror of https://github.com/containers/ramalama.git (synced 2026-02-05 06:46:39 +01:00)
Add global logger and use it in the existing code.
Add a global logger that can be used to print messages to stderr. Replace all perror calls in debug paths with logger.debug calls, which removes the extra debug argument that previously had to be passed around, since the logging module now decides what to print based on the configured level.

Signed-off-by: Ales Musil <amusil@redhat.com>
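An illustrative sketch of the pattern this commit introduces (the argparse wiring here is hypothetical and only stands in for the real CLI; configure_logger and logger come from the new ramalama/logger.py shown below):

    import argparse

    from ramalama.logger import configure_logger, logger

    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()

    # Configure once, early; cli.py does this in post_parse_setup() below.
    configure_logger("DEBUG" if args.debug else "WARNING")

    # Elsewhere in the code base, log without threading a debug flag around;
    # the message reaches stderr only when the DEBUG level is configured.
    logger.debug("exec_cmd: podman inspect quay.io/ramalama/ramalama:latest")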
.flake8 (2 changed lines)

@@ -1,4 +1,4 @@
 [flake8]
 max-line-length = 120
 # E203,E221,E231 conflict with black formatting
-extend-ignore = E203,E221,E231,E702
+extend-ignore = E203,E221,E231,E702,F824
@@ -54,7 +54,7 @@ def initialize_args():

     parser = initialize_parser()
     parsed_args = parser.parse_args()
-    port = get_available_port_if_any(False)
+    port = get_available_port_if_any()

     return parsed_args, port
@@ -24,6 +24,7 @@ import ramalama.rag
 from ramalama import engine
 from ramalama.common import accel_image, exec_cmd, get_accel, get_cmd_with_wrapper, perror
 from ramalama.config import CONFIG
+from ramalama.logger import configure_logger, logger
 from ramalama.migrate import ModelStoreImport
 from ramalama.model import MODEL_TYPES
 from ramalama.model_factory import ModelFactory

@@ -270,6 +271,8 @@ def post_parse_setup(args):
     if hasattr(args, "runtime_args"):
         args.runtime_args = shlex.split(args.runtime_args)

+    configure_logger("DEBUG" if args.debug else "WARNING")
+

 def login_parser(subparsers):
     parser = subparsers.add_parser("login", help="login to remote registry")

@@ -726,8 +729,7 @@ def push_cli(args):
         m = ModelFactory(target, args).create_oci()
         m.push(source_model, args)
     except Exception as e1:
-        if args.debug:
-            print(e1)
+        logger.debug(e1)
         raise e
@@ -3,7 +3,6 @@
 import glob
 import hashlib
 import json
-import logging
 import os
 import platform
 import random

@@ -19,10 +18,9 @@ from typing import List

 import ramalama.console as console
 from ramalama.http_client import HttpClient
+from ramalama.logger import logger
 from ramalama.version import version

-logging.basicConfig(level=logging.WARNING, format="%(asctime)s - %(levelname)s - %(message)s")
-
 MNT_DIR = "/mnt/models"
 MNT_FILE = f"{MNT_DIR}/model.file"
 MNT_MMPROJ_FILE = f"{MNT_DIR}/mmproj.file"
@@ -134,9 +132,8 @@ def quoted(arr):
     return " ".join(['"' + element + '"' if ' ' in element else element for element in arr])


-def exec_cmd(args, debug=False, stdout2null=False, stderr2null=False):
-    if debug:
-        perror("exec_cmd: ", quoted(args))
+def exec_cmd(args, stdout2null=False, stderr2null=False):
+    logger.debug(f"exec_cmd: {quoted(args)}")

     if stdout2null:
         with open(os.devnull, 'w') as devnull:

@@ -153,7 +150,7 @@ def exec_cmd(args, debug=False, stdout2null=False, stderr2null=False):
         raise


-def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_all=False, debug=False):
+def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_all=False):
     """
     Run the given command arguments.

@@ -163,13 +160,11 @@ def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_
     stdout: standard output configuration
     ignore_stderr: if True, ignore standard error
     ignore_all: if True, ignore both standard output and standard error
-    debug: if True, print debug information
     """
-    if debug:
-        perror("run_cmd: ", quoted(args))
-        perror(f"Working directory: {cwd}")
-        perror(f"Ignore stderr: {ignore_stderr}")
-        perror(f"Ignore all: {ignore_all}")
+    logger.debug(f"run_cmd: {quoted(args)}")
+    logger.debug(f"Working directory: {cwd}")
+    logger.debug(f"Ignore stderr: {ignore_stderr}")
+    logger.debug(f"Ignore all: {ignore_all}")

     serr = None
     if ignore_all or ignore_stderr:

@@ -180,8 +175,7 @@ def run_cmd(args, cwd=None, stdout=subprocess.PIPE, ignore_stderr=False, ignore_
         sout = subprocess.DEVNULL

     result = subprocess.run(args, check=True, cwd=cwd, stdout=sout, stderr=serr)
-    if debug:
-        print("Command finished with return code:", result.returncode)
+    logger.debug(f"Command finished with return code: {result.returncode}")

     return result
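A hedged caller-side sketch of the run_cmd() signature change above (assumes podman is installed; the command is only an example): tracing is no longer requested per call with debug=, it is switched on globally through the logger level.

    from ramalama.common import run_cmd
    from ramalama.logger import configure_logger

    # Before this commit: run_cmd(["podman", "ps"], debug=args.debug)
    configure_logger("DEBUG")           # enable command tracing globally instead
    result = run_cmd(["podman", "ps"])  # "run_cmd: podman ps" plus cwd/stderr flags go to stderr
    print(result.returncode)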
@@ -675,16 +669,16 @@ def accel_image(config, args):
         image += "-rag"

     vers = minor_release()
-    if args.container and attempt_to_use_versioned(conman, image, vers, args.quiet, args.debug):
+    if args.container and attempt_to_use_versioned(conman, image, vers, args.quiet):
         return f"{image}:{vers}"

     return f"{image}:latest"


-def attempt_to_use_versioned(conman, image, vers, quiet, debug):
+def attempt_to_use_versioned(conman, image, vers, quiet):
     try:
         # check if versioned image exists locally
-        if run_cmd([conman, "inspect", f"{image}:{vers}"], ignore_all=True, debug=debug):
+        if run_cmd([conman, "inspect", f"{image}:{vers}"], ignore_all=True):
             return True

     except Exception:

@@ -694,7 +688,7 @@ def attempt_to_use_versioned(conman, image, vers, quiet, debug):
        # attempt to pull the versioned image
        if not quiet:
            print(f"Attempting to pull {image}:{vers} ...")
-       run_cmd([conman, "pull", f"{image}:{vers}"], ignore_stderr=True, debug=debug)
+       run_cmd([conman, "pull", f"{image}:{vers}"], ignore_stderr=True)
        return True

     except Exception:
@@ -1,6 +1,7 @@
-import logging
 import os

+from ramalama.logger import logger
+

 def is_locale_utf8():
     """Check if the system locale is UTF-8."""

@@ -23,16 +24,16 @@ EMOJI = os.getenv("RAMALAMA_FORCE_EMOJI", '').lower() == "true" or supports_emoj

 # Define emoji-aware logging messages
 def log_message(level, msg, emoji_msg):
-    return f"{emoji_msg} {msg}" if EMOJI else f"[{level}] {msg}"
+    return f"{emoji_msg} {msg}" if EMOJI else f"{msg}"


 def error(msg):
-    logging.error(log_message("ERROR", msg, "❌"))
+    logger.error(log_message("ERROR", msg, "❌"))


 def warning(msg):
-    logging.warning(log_message("WARNING", msg, "⚠️"))
+    logger.warning(log_message("WARNING", msg, "⚠️"))


 def info(msg):
-    logging.info(log_message("INFO", msg, "ℹ️"))
+    logger.info(log_message("INFO", msg, "ℹ️"))
@@ -33,7 +33,6 @@ class Engine:
         self.add_tty_option()
         self.handle_podman_specifics()
         self.add_detach_option()
-        self.debug = args.debug

     def add_label(self, label):
         self.add(["--label", label])

@@ -161,10 +160,10 @@ class Engine:
         dry_run(self.exec_args)

     def run(self):
-        run_cmd(self.exec_args, debug=self.debug)
+        run_cmd(self.exec_args)

     def exec(self):
-        exec_cmd(self.exec_args, debug=self.debug)
+        exec_cmd(self.exec_args)


 def dry_run(args):
@@ -194,7 +193,7 @@ def images(args):
         conman_args += [f"--format={args.format}"]

     try:
-        output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+        output = run_cmd(conman_args).stdout.decode("utf-8").strip()
         if output == "":
             return []
         return output.split("\n")

@@ -219,7 +218,7 @@ def containers(args):
         conman_args += [f"--format={args.format}"]

     try:
-        output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+        output = run_cmd(conman_args).stdout.decode("utf-8").strip()
         if output == "":
             return []
         return output.split("\n")

@@ -235,7 +234,7 @@ def info(args):

     conman_args = [conman, "info", "--format", "json"]
     try:
-        output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+        output = run_cmd(conman_args).stdout.decode("utf-8").strip()
         if output == "":
             return []
         return json.loads(output)

@@ -260,7 +259,7 @@ def stop_container(args, name):

     conman_args += [name]
     try:
-        run_cmd(conman_args, ignore_stderr=ignore_stderr, debug=args.debug)
+        run_cmd(conman_args, ignore_stderr=ignore_stderr)
     except subprocess.CalledProcessError:
         if args.ignore and conman == "docker":
             return
@@ -6,6 +6,7 @@ import tempfile
 import urllib.request

 from ramalama.common import available, download_and_verify, exec_cmd, generate_sha256, perror, run_cmd, verify_checksum
+from ramalama.logger import logger
 from ramalama.model import Model
 from ramalama.model_store import SnapshotFile, SnapshotFileType
 from ramalama.ollama_repo_utils import repo_pull

@@ -263,8 +264,7 @@ class Huggingface(Model):
             try:
                 return self.hf_pull(args, model_path, directory_path)
             except Exception as exc:
-                if args.debug:
-                    perror(f"failed to hf_pull: {exc}")
+                logger.debug(f"failed to hf_pull: {exc}")
                 pass
         raise KeyError(f"Failed to pull model: {str(previous_exception)}")
@@ -358,7 +358,7 @@ class Huggingface(Model):

     def hf_pull(self, args, model_path, directory_path):
         conman_args = ["huggingface-cli", "download", "--local-dir", directory_path, self.model]
-        run_cmd(conman_args, debug=args.debug)
+        run_cmd(conman_args)

         relative_target_path = os.path.relpath(directory_path, start=os.path.dirname(model_path))
         pathlib.Path(model_path).unlink(missing_ok=True)

@@ -405,13 +405,12 @@ class Huggingface(Model):
                 "--local-dir",
                 os.path.join(args.store, "repos", "huggingface", self.directory),
             ],
-            debug=args.debug,
         )
         return proc.stdout.decode("utf-8")

     def exec(self, cmd_args, args):
         try:
-            exec_cmd(cmd_args, debug=args.debug)
+            exec_cmd(cmd_args)
         except FileNotFoundError as e:
             print(f"{str(e).strip()}\n{missing_huggingface}")
@@ -452,7 +451,7 @@ class Huggingface(Model):

         return snapshot_hash, files

-    def _pull_with_model_store(self, args, debug: bool = False):
+    def _pull_with_model_store(self, args):
         name, tag, organization = self.extract_model_identifiers()
         hash, cached_files, all = self.store.get_cached_files(tag)
         if all:

@@ -477,8 +476,7 @@ class Huggingface(Model):
                 try:
                     self.store.remove_snapshot(tag)
                 except Exception as exc:
-                    if args.debug:
-                        perror(f"ignoring failure to remove snapshot: {exc}")
+                    logger.debug(f"ignoring failure to remove snapshot: {exc}")
                     # ignore any error when removing snapshot
                     pass
                 raise e

@@ -486,14 +484,13 @@ class Huggingface(Model):
             if not self.hf_cli_available:
                 perror("URL pull failed and huggingface-cli not available")
                 raise KeyError(f"Failed to pull model: {str(e)}")
-            if args.debug:
-                perror(f"ignoring failure to get file list: {e}")
+            logger.debug(f"ignoring failure to get file list: {e}")

             # Create temporary directory for downloading via huggingface-cli
             with tempfile.TemporaryDirectory() as tempdir:
                 model = f"{organization}/{name}"
                 conman_args = ["huggingface-cli", "download", "--local-dir", tempdir, model]
-                run_cmd(conman_args, debug=debug)
+                run_cmd(conman_args)

                 snapshot_hash, files = self._collect_cli_files(tempdir)
                 self.store.new_snapshot(tag, snapshot_hash, files)
ramalama/logger.py (new file, 24 lines)

@@ -0,0 +1,24 @@
+import logging
+import sys
+import typing
+
+logger = logging.getLogger("ramalama")
+
+
+def configure_logger(verbosity="WARNING") -> None:
+    global logger
+
+    lvl = logging.WARNING
+    if verbosity in ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"):
+        lvl = typing.cast(int, getattr(logging, verbosity))
+
+    logger.setLevel(lvl)
+
+    fmt = "%(asctime)s - %(levelname)s - %(message)s"
+    datefmt = "%Y-%m-%d %H:%M:%S"
+    formatter = logging.Formatter(fmt, datefmt)
+
+    handler = logging.StreamHandler(sys.stderr)
+    handler.setLevel(lvl)
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
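A minimal, hedged illustration of the module above (assuming the ramalama package is importable): the handler writes to stderr, so diagnostic output stays separate from normal program output and can be redirected independently.

    from ramalama.logger import configure_logger, logger

    configure_logger("DEBUG")

    print("normal output")             # stdout, unchanged by the logger
    logger.debug("diagnostic detail")  # stderr, timestamped, emitted only at DEBUG level

    # For example, keeping only the diagnostics:
    #   python example.py > out.txt 2> debug.log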
@@ -26,6 +26,7 @@ from ramalama.console import EMOJI
 from ramalama.engine import Engine, dry_run
 from ramalama.gguf_parser import GGUFInfoParser
 from ramalama.kube import Kube
+from ramalama.logger import logger
 from ramalama.model_inspect import GGUFModelInfo, ModelInfoBase
 from ramalama.model_store import ModelStore
 from ramalama.quadlet import Quadlet

@@ -606,7 +607,7 @@ class Model(ModelBase):
             if args.dryrun:
                 dry_run(exec_args)
                 return
-            exec_cmd(exec_args, debug=args.debug)
+            exec_cmd(exec_args)
         except FileNotFoundError as e:
             if args.container:
                 raise NotImplementedError(

@@ -616,7 +617,7 @@ class Model(ModelBase):

     def serve(self, args, quiet=False):
         self.validate_args(args)
-        args.port = compute_serving_port(args.port, args.debug, quiet)
+        args.port = compute_serving_port(args.port, quiet)
         model_path = self.get_model_path(args)
         if is_split_file_model(model_path):
             mnt_file = MNT_DIR + '/' + self.mnt_path
@@ -713,13 +714,12 @@ def compute_ports() -> list:
     return [first_port] + ports


-def get_available_port_if_any(debug: bool) -> int:
+def get_available_port_if_any() -> int:
     ports = compute_ports()
     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
         chosen_port = 0
         for target_port in ports:
-            if debug:
-                print(f"Checking if {target_port} is available")
+            logger.debug(f"Checking if {target_port} is available")
             try:
                 s.bind(('localhost', target_port))
             except OSError:

@@ -730,13 +730,13 @@ def get_available_port_if_any(debug: bool) -> int:
     return chosen_port


-def compute_serving_port(port: str, debug: bool, quiet=False) -> str:
+def compute_serving_port(port: str, quiet=False) -> str:
     # user probably specified a custom port, don't override the choice
     if port != "" and port != str(DEFAULT_PORT):
         return port

     # otherwise compute a random serving port in the range
-    target_port = get_available_port_if_any(debug)
+    target_port = get_available_port_if_any()

     if target_port == 0:
         raise IOError("no available port could be detected. Please ensure you have enough free ports.")
@@ -1,4 +1,3 @@
-import logging
 import os
 import shutil
 import urllib

@@ -13,8 +12,7 @@ import ramalama.go2jinja as go2jinja
 import ramalama.oci
 from ramalama.common import download_file, generate_sha256, verify_checksum
 from ramalama.gguf_parser import GGUFInfoParser, GGUFModelInfo
-
-LOGGER = logging.getLogger(__name__)
+from ramalama.logger import logger


 def sanitize_filename(filename: str) -> str:
@@ -446,7 +444,7 @@ class ModelStore:

             if file.should_verify_checksum:
                 if not verify_checksum(dest_path):
-                    LOGGER.info(f"Checksum mismatch for blob {dest_path}, retrying download ...")
+                    logger.info(f"Checksum mismatch for blob {dest_path}, retrying download ...")
                     os.remove(dest_path)
                     file.download(dest_path, self.get_snapshot_directory(snapshot_hash))
                     if not verify_checksum(dest_path):

@@ -477,7 +475,7 @@ class ModelStore:
                     ]
                     self.update_snapshot(model_tag, snapshot_hash, files)
                 except Exception as ex:
-                    LOGGER.debug(f"Failed to convert Go Template to Jinja: {ex}")
+                    logger.debug(f"Failed to convert Go Template to Jinja: {ex}")
                 return
             if file.type == SnapshotFileType.Model:
                 model_file = file

@@ -512,7 +510,7 @@ class ModelStore:
                 LocalSnapshotFile(jinja_template, "chat_template_converted", SnapshotFileType.ChatTemplate)
             )
         except Exception as ex:
-            LOGGER.debug(f"Failed to convert Go Template to Jinja: {ex}")
+            logger.debug(f"Failed to convert Go Template to Jinja: {ex}")

         self.update_snapshot(model_tag, snapshot_hash, files)

@@ -553,9 +551,9 @@ class ModelStore:
         try:
             if os.path.exists(blob_path) and Path(self.base_path) in blob_path.parents:
                 os.remove(blob_path)
-                LOGGER.debug(f"Removed blob for '{snapshot_file_path}'")
+                logger.debug(f"Removed blob for '{snapshot_file_path}'")
         except Exception as ex:
-            LOGGER.error(f"Failed to remove blob file '{blob_path}': {ex}")
+            logger.error(f"Failed to remove blob file '{blob_path}': {ex}")

     def remove_snapshot(self, model_tag: str):
         ref_file = self.get_ref_file(model_tag)
@@ -6,6 +6,7 @@ import urllib.request

 from ramalama.common import available, download_and_verify, exec_cmd, perror, run_cmd, verify_checksum
 from ramalama.huggingface import HuggingfaceCLIFile, HuggingfaceRepository
+from ramalama.logger import logger
 from ramalama.model import Model
 from ramalama.model_store import SnapshotFileType
 from ramalama.ollama_repo_utils import repo_pull

@@ -163,7 +164,7 @@ class ModelScope(Model):

     def ms_pull(self, args, model_path, directory_path):
         conman_args = ["modelscope", "download", "--local_dir", directory_path, self.model]
-        run_cmd(conman_args, debug=args.debug)
+        run_cmd(conman_args)

         relative_target_path = os.path.relpath(directory_path, start=os.path.dirname(model_path))
         pathlib.Path(model_path).unlink(missing_ok=True)

@@ -209,13 +210,12 @@ class ModelScope(Model):
                 "--local_dir",
                 os.path.join(args.store, "repos", "modelscope", self.directory),
             ],
-            debug=args.debug,
         )
         return proc.stdout.decode("utf-8")

     def exec(self, cmd_args, args):
         try:
-            exec_cmd(cmd_args, debug=args.debug)
+            exec_cmd(cmd_args)
         except FileNotFoundError as e:
             print(f"{str(e).strip()}\n{missing_modelscope}")

@@ -256,7 +256,7 @@ class ModelScope(Model):

         return snapshot_hash, files

-    def _pull_with_model_store(self, args, debug: bool = False):
+    def _pull_with_model_store(self, args):
         name, tag, organization = self.extract_model_identifiers()
         hash, cached_files, all = self.store.get_cached_files(tag)
         if all:

@@ -278,8 +278,7 @@ class ModelScope(Model):
                 try:
                     self.store.remove_snapshot(tag)
                 except Exception as exc:
-                    if args.debug:
-                        perror(f"ignoring failure to remove snapshot: {exc}")
+                    logger.debug(f"ignoring failure to remove snapshot: {exc}")
                     # ignore any error when removing snapshot
                     pass
                 raise e

@@ -292,7 +291,7 @@ class ModelScope(Model):
             with tempfile.TemporaryDirectory() as tempdir:
                 model = f"{organization}/{name}"
                 conman_args = ["modelscope", "download", "--local_dir", tempdir, model]
-                run_cmd(conman_args, debug=debug)
+                run_cmd(conman_args)

                 snapshot_hash, files = self._collect_cli_files(tempdir)
                 self.store.new_snapshot(tag, snapshot_hash, files)
@@ -38,7 +38,7 @@ def list_manifests(args):
             ' }}, "ID":"{{ .ID }}"},'
         ),
     ]
-    output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+    output = run_cmd(conman_args).stdout.decode("utf-8").strip()
     if output == "":
         return []

@@ -54,7 +54,7 @@ def list_manifests(args):
             "inspect",
             manifest["ID"],
         ]
-        output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+        output = run_cmd(conman_args).stdout.decode("utf-8").strip()

        if output == "":
            continue

@@ -98,7 +98,7 @@ def list_models(args):
         "--format",
         formatLine,
     ]
-    output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+    output = run_cmd(conman_args).stdout.decode("utf-8").strip()
     if output == "":
         return []

@@ -111,7 +111,7 @@ def list_models(args):
     # grab the size from the inspect command
     for model in models:
         conman_args = [conman, "image", "inspect", model["id"], "--format", "{{.Size}}"]
-        output = run_cmd(conman_args, debug=args.debug).stdout.decode("utf-8").strip()
+        output = run_cmd(conman_args).stdout.decode("utf-8").strip()
         # convert the number value from the string output
         model["size"] = int(output)
         # drop the id from the model

@@ -150,12 +150,12 @@ class OCI(Model):
             conman_args.append("--password-stdin")
         if args.REGISTRY:
             conman_args.append(args.REGISTRY.removeprefix(prefix))
-        return exec_cmd(conman_args, debug=args.debug)
+        return exec_cmd(conman_args)

     def logout(self, args):
         conman_args = [self.conman, "logout"]
         conman_args.append(self.model)
-        return exec_cmd(conman_args, debug=args.debug)
+        return exec_cmd(conman_args)

     def _target_decompose(self, model):
         # Remove the prefix and extract target details
@@ -247,7 +247,6 @@ RUN rm -rf /{model_name}-f16.gguf /models/{model_name}
         imageid = (
             run_cmd(
                 build_cmd,
-                debug=args.debug,
             )
             .stdout.decode("utf-8")
             .strip()

@@ -262,7 +261,7 @@ RUN rm -rf /{model_name}-f16.gguf /models/{model_name}
             imageid,
             target,
         ]
-        run_cmd(cmd_args, debug=args.debug)
+        run_cmd(cmd_args)

     def _create_manifest_without_attributes(self, target, imageid, args):
         # Create manifest list for target with imageid

@@ -273,7 +272,7 @@ RUN rm -rf /{model_name}-f16.gguf /models/{model_name}
             target,
             imageid,
         ]
-        run_cmd(cmd_args, debug=args.debug)
+        run_cmd(cmd_args)

     def _create_manifest(self, target, imageid, args):
         if not engine_supports_manifest_attributes(args.engine):

@@ -287,7 +286,7 @@ RUN rm -rf /{model_name}-f16.gguf /models/{model_name}
             target,
             imageid,
         ]
-        run_cmd(cmd_args, debug=args.debug)
+        run_cmd(cmd_args)

         # Annotate manifest list
         cmd_args = [

@@ -303,12 +302,12 @@ RUN rm -rf /{model_name}-f16.gguf /models/{model_name}
             target,
             imageid,
         ]
-        run_cmd(cmd_args, stdout=None, debug=args.debug)
+        run_cmd(cmd_args, stdout=None)

     def _convert(self, source_model, args):
         print(f"Converting {source_model.store.base_path} to {self.store.base_path} ...")
         try:
-            run_cmd([self.conman, "manifest", "rm", self.model], ignore_stderr=True, stdout=None, debug=args.debug)
+            run_cmd([self.conman, "manifest", "rm", self.model], ignore_stderr=True, stdout=None)
         except subprocess.CalledProcessError:
             pass
         print(f"Building {self.model} ...")
@@ -340,7 +339,7 @@ Tagging build instead"""
         if source != target:
             self._convert(source_model, args)
         try:
-            run_cmd(conman_args, debug=args.debug)
+            run_cmd(conman_args)
         except subprocess.CalledProcessError as e:
             perror(f"Failed to push OCI {target} : {e}")
             raise e

@@ -359,7 +358,7 @@ Tagging build instead"""
         if args.authfile:
             conman_args.extend([f"--authfile={args.authfile}"])
         conman_args.extend([self.model])
-        run_cmd(conman_args, debug=args.debug, ignore_stderr=self.ignore_stderr)
+        run_cmd(conman_args, ignore_stderr=self.ignore_stderr)
         return MNT_FILE

     def _registry_reference(self):

@@ -397,10 +396,10 @@ Tagging build instead"""

         try:
             conman_args = [self.conman, "manifest", "rm", self.model]
-            run_cmd(conman_args, debug=args.debug, ignore_stderr=self.ignore_stderr)
+            run_cmd(conman_args, ignore_stderr=self.ignore_stderr)
         except subprocess.CalledProcessError:
             conman_args = [self.conman, "rmi", f"--force={args.ignore}", self.model]
-            run_cmd(conman_args, debug=args.debug, ignore_stderr=self.ignore_stderr)
+            run_cmd(conman_args, ignore_stderr=self.ignore_stderr)

     def exists(self, args):
         try:

@@ -415,7 +414,7 @@ Tagging build instead"""

         conman_args = [self.conman, "image", "inspect", self.model]
         try:
-            run_cmd(conman_args, debug=args.debug, ignore_stderr=self.ignore_stderr)
+            run_cmd(conman_args, ignore_stderr=self.ignore_stderr)
             return self.model
         except Exception:
             return None
@@ -7,6 +7,7 @@ from urllib.parse import urlparse
 from ramalama.common import accel_image, get_accel_env_vars, run_cmd, set_accel_env_vars
 from ramalama.config import CONFIG
 from ramalama.engine import Engine
+from ramalama.logger import logger


 class Rag:

@@ -33,8 +34,7 @@ COPY {src} /vector.db
         # Open the file for writing.
         with open(containerfile.name, 'w') as c:
             c.write(cfile)
-        if args.debug:
-            print(f"\nContainerfile: {containerfile.name}\n{cfile}")
+        logger.debug(f"\nContainerfile: {containerfile.name}\n{cfile}")
         exec_args = [
             args.engine,
             "build",

@@ -50,7 +50,6 @@ COPY {src} /vector.db
         imageid = (
             run_cmd(
                 exec_args,
-                debug=args.debug,
             )
             .stdout.decode("utf-8")
             .strip()
@@ -89,7 +89,7 @@ class TestEngine(unittest.TestCase):
     def test_stop_container(self, mock_run_cmd):
         args = Namespace(engine="podman", debug=False, ignore=False)
         stop_container(args, "test-container")
-        mock_run_cmd.assert_called_with(["podman", "stop", "-t=0", "test-container"], ignore_stderr=False, debug=False)
+        mock_run_cmd.assert_called_with(["podman", "stop", "-t=0", "test-container"], ignore_stderr=False)

     def test_dry_run(self):
         with patch('sys.stdout') as mock_stdout: