1
0
mirror of https://github.com/containers/ramalama.git synced 2026-02-05 15:47:26 +01:00

Work around race condition in test/e2e/test_serve.py::test_serve_and_stop

Signed-off-by: Oliver Walsh <owalsh@redhat.com>
This commit is contained in:
Oliver Walsh
2026-01-24 00:47:25 +00:00
parent 2748d16aee
commit a7c2a4e4fe

View File

@@ -6,6 +6,7 @@ import platform
import random
import re
import string
import time
from contextlib import contextmanager
from pathlib import Path
from subprocess import STDOUT, CalledProcessError
@@ -297,6 +298,10 @@ def test_serve_and_stop(shared_ctx, test_model):
# Serve Container1
ctx.check_call(["ramalama", "serve", "--name", container1_id, "--detach", test_model])
# FIXME: race condition — chat can fail to connect if llama.cpp isn't ready yet; just sleep a little for now
time.sleep(10)
try:
info = json.loads(ctx.check_output(["ramalama", "info"]))
full_model_name = info["Shortnames"]["Names"][test_model]