Mirror of https://github.com/containers/ramalama.git (synced 2026-02-05 06:46:39 +01:00)
Merge pull request #2267 from telemaco/e2e-pytest-convert-cmd
Add e2e pytest test for convert command
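For context, a minimal sketch of the workflow these end-to-end tests exercise, written with plain subprocess calls. The model and image names are illustrative and taken from the test parameters further down; this is not code from the patch itself.

import json
import subprocess

# Convert a model from another transport (here Hugging Face) into a local OCI
# artifact, re-quantizing it on the way with --gguf Q4_0.
subprocess.check_call([
    "ramalama", "convert", "--gguf", "Q4_0",
    "hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    "oci://quay.io/ramalama/tiny-q4-0",
])

# The converted artifact should then show up in the store listing ...
models = json.loads(subprocess.check_output(["ramalama", "list", "--json"], text=True))
assert any(m["name"] == "oci://quay.io/ramalama/tiny-q4-0:latest" for m in models)

# ... and can be removed again after the check.
subprocess.check_call(["ramalama", "rm", "quay.io/ramalama/tiny-q4-0"])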
@@ -146,7 +146,7 @@ class OCI(Transport):
         gguf_dir = None
         if getattr(args, "gguf", None):
             perror("Converting to gguf ...")
-            gguf_dir = tempfile.TemporaryDirectory(prefix="RamaLama_convert_", delete=False)
+            gguf_dir = tempfile.TemporaryDirectory(prefix="RamaLama_convert_")
             contextdir = gguf_dir.name
             model_file_name = self._convert_to_gguf(gguf_dir, source_model, args)
             content = self._gguf_containerfile(model_file_name, args)
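The one-line change above drops the delete=False keyword argument, which tempfile.TemporaryDirectory only accepts on Python 3.12 and later; without it the temporary GGUF build context is cleaned up automatically again. A minimal standalone sketch of that cleanup behaviour (an illustration, not code from the patch):

import tempfile
from pathlib import Path

# Without delete=False the directory is removed when cleanup() runs, when the
# object is garbage collected, or at interpreter exit.
tmp = tempfile.TemporaryDirectory(prefix="RamaLama_convert_")
contextdir = Path(tmp.name)
(contextdir / "model.gguf").write_bytes(b"placeholder")  # illustrative file
print(contextdir.exists())   # True while the build context is in use
tmp.cleanup()
print(contextdir.exists())   # False: directory and its contents are gone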
test/e2e/test_convert.py (new file, 165 lines added)
@@ -0,0 +1,165 @@
import json
import re
import sys
from pathlib import Path
from subprocess import STDOUT, CalledProcessError
from test.conftest import skip_if_container, skip_if_docker, skip_if_no_container
from test.e2e.utils import RamalamaExecWorkspace

import pytest


@pytest.mark.e2e
def test_convert_custom_gguf_config():
    config = """
    [ramalama]
    gguf_quantization_mode="Q5_0"
    """

    with RamalamaExecWorkspace(config=config) as ctx:
        result = ctx.check_output(["ramalama", "convert", "--help"])
        assert re.search("GGUF quantization format. If specified without value, Q5_0 is used", result)
@pytest.mark.e2e
@skip_if_docker
@pytest.mark.parametrize(
    "in_model, out_model, extra_params, expected",
    [
        # fmt: off
        pytest.param(
            Path("aimodel"), "foobar", None,
            "oci://localhost/foobar:latest",
            id="file://{workspace_dir}/aimodel -> foobar",
            marks=[skip_if_no_container,
                   pytest.mark.xfail(sys.platform.startswith("win"), reason="windows path formatting")]
        ),
        pytest.param(
            Path("aimodel"), "oci://foobar", None,
            "oci://localhost/foobar:latest",
            id="file://{workspace_dir}/aimodel -> oci://foobar",
            marks=[skip_if_no_container,
                   pytest.mark.xfail(sys.platform.startswith("win"), reason="windows path formatting")]
        ),
        pytest.param(
            "tiny", "oci://quay.io/ramalama/tiny", None,
            "oci://quay.io/ramalama/tiny:latest",
            id="tiny -> oci://quay.io/ramalama/tiny",
            marks=[skip_if_no_container, skip_if_docker]
        ),
        pytest.param(
            "ollama://tinyllama", "oci://quay.io/ramalama/tinyllama", None,
            "oci://quay.io/ramalama/tinyllama:latest",
            id="ollama://tinyllama -> oci://quay.io/ramalama/tinyllama",
            marks=[skip_if_no_container, skip_if_docker]
        ),
        pytest.param(
            "hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0", "oci://quay.io/ramalama/tinyllama", None,
            "oci://quay.io/ramalama/tinyllama:latest",
            id="hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0 -> oci://quay.io/ramalama/tinyllama",
            marks=[skip_if_no_container, skip_if_docker]
        ),
        pytest.param(
            "hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0", "oci://quay.io/ramalama/tiny-q4-0",
            ["--gguf", "Q4_0"],
            "oci://quay.io/ramalama/tiny-q4-0:latest",
            id="hf://TinyLlama/TinyLlama-1.1B-Chat-v1.0 -> oci://quay.io/ramalama/tiny-q4-0 (--gguf Q4_0)",
            marks=[skip_if_no_container, skip_if_docker]
        ),
        # fmt: on
    ],
)
def test_convert(in_model, out_model, extra_params, expected):
    with RamalamaExecWorkspace() as ctx:
        ramalama_cli = ["ramalama", "--store", ctx.storage_dir]

        # Ensure a local model file exists if a Path was provided
        if isinstance(in_model, Path):
            in_model_path = Path(ctx.workspace_dir) / in_model
            in_model_path.parent.mkdir(parents=True, exist_ok=True)
            with in_model_path.open("w") as f:
                f.write("hello ai model!")
            in_model = in_model_path.as_uri()

        # Run the convert command
        ramalama_convert_cli = ramalama_cli + ["convert"]
        if extra_params:
            ramalama_convert_cli += extra_params

        try:
            ctx.check_call(ramalama_convert_cli + [in_model, out_model])

            # Get the list of models in the store
            model_list = json.loads(ctx.check_output(ramalama_cli + ["list", "--json"]))

            # Check that the converted model is listed under the expected name
            model_name_list = [model["name"] for model in model_list]
            assert expected in model_name_list
        finally:
            # Clean up the created image
            ctx.check_call(ramalama_cli + ["rm", expected.replace("oci://", "")])
@pytest.mark.e2e
@pytest.mark.parametrize(
    "in_model, out_model, expected_exit_code, expected",
    [
        # fmt: off
        pytest.param(
            None, None, 2, ".*ramalama convert: error: the following arguments are required: SOURCE, TARGET",
            id="raise error if no models",
            marks=[skip_if_no_container]
        ),
        pytest.param(
            "tiny", None, 2, ".*ramalama convert: error: the following arguments are required: TARGET",
            id="raise error if target model is missing",
            marks=[skip_if_no_container]
        ),
        pytest.param(
            "bogus", "foobar", 22, ".*Error: Manifest for bogus:latest was not found in the Ollama registry",
            id="raise error if model doesn't exist",
            marks=[skip_if_no_container]
        ),
        pytest.param(
            "oci://quay.io/ramalama/smollm:135m", "oci://foobar", 22,
            "Error: converting from an OCI based image oci://quay.io/ramalama/smollm:135m is not supported",
            id="raise error when models are oci (not supported)",
            marks=[skip_if_no_container]
        ),
        pytest.param(
            "file://{workspace_dir}/aimodel", "ollama://foobar", 22,
            "Error: ollama://foobar invalid: Only OCI Model types supported",
            id="raise error when target model is ollama and source model is not OCI",
            marks=[skip_if_no_container]
        ),
        pytest.param(
            "tiny", "quay.io/ramalama/foobar", 22,
            "Error: convert command cannot be run with the --nocontainer option.",
            id="raise error when --nocontainer flag",
            marks=[skip_if_container]
        ),
        # fmt: on
    ],
)
def test_convert_errors(in_model, out_model, expected_exit_code, expected):
    with RamalamaExecWorkspace() as ctx:
        # Ensure a local model file exists if a file:// source is provided
        if in_model and in_model.startswith("file://"):
            in_model = in_model.format(workspace_dir=ctx.workspace_dir)
            in_model_path = Path(in_model.replace("file://", ""))
            in_model_path.parent.mkdir(parents=True, exist_ok=True)
            with in_model_path.open("w") as f:
                f.write("hello ai model!")

        ramalama_convert_cli = ["ramalama", "convert", in_model, out_model]

        # Drop None entries when a model argument is intentionally missing
        ramalama_convert_cli = list(filter(None, ramalama_convert_cli))

        # Run ramalama convert and expect it to fail
        with pytest.raises(CalledProcessError) as exc_info:
            ctx.check_output(ramalama_convert_cli, stderr=STDOUT)

        # Check the exit code and the error message
        assert exc_info.value.returncode == expected_exit_code
        assert re.search(expected, exc_info.value.output.decode("utf-8"))
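A note on the expected values asserted in test_convert above: they suggest that a bare or short OCI target is normalized with a default localhost registry and a :latest tag before it appears in ramalama list. A small sketch of that naming rule, inferred only from the test expectations and not taken from RamaLama's implementation:

# Inferred naming rule, reconstructed from the expected values above.
def expected_oci_name(target: str) -> str:
    name = target.removeprefix("oci://")
    if "/" not in name:
        name = f"localhost/{name}"          # assumed default registry
    if ":" not in name.rsplit("/", 1)[-1]:
        name = f"{name}:latest"             # assumed default tag
    return f"oci://{name}"


assert expected_oci_name("foobar") == "oci://localhost/foobar:latest"
assert expected_oci_name("oci://foobar") == "oci://localhost/foobar:latest"
assert expected_oci_name("oci://quay.io/ramalama/tiny") == "oci://quay.io/ramalama/tiny:latest"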
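Assuming the repository's pytest configuration registers the e2e marker applied to every test in this file, the new tests can be selected by file and marker. An illustrative programmatic runner:

# Illustrative runner: select the new convert end-to-end tests by marker.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-m", "e2e", "-v", "test/e2e/test_convert.py"]))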