Work around race condition in test/e2e/test_serve.py::test_serve_and_stop
Signed-off-by: Oliver Walsh <owalsh@redhat.com>
@@ -6,6 +6,7 @@ import platform
 import random
 import re
 import string
+import time
 from contextlib import contextmanager
 from pathlib import Path
 from subprocess import STDOUT, CalledProcessError
@@ -297,6 +298,10 @@ def test_serve_and_stop(shared_ctx, test_model):
 
     # Serve Container1
     ctx.check_call(["ramalama", "serve", "--name", container1_id, "--detach", test_model])
+
+    # FIXME: race-condition, chat can fail to connect if llama.cpp isn't ready, just sleep a little for now
+    time.sleep(10)
+
     try:
         info = json.loads(ctx.check_output(["ramalama", "info"]))
         full_model_name = info["Shortnames"]["Names"][test_model]
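
As the FIXME notes, the fixed sleep is a stopgap. A more robust fix would poll the server until it reports ready rather than waiting a fixed interval. A minimal sketch of such a helper, assuming the llama.cpp server behind `ramalama serve` exposes a /health endpoint on port 8080 (the endpoint path and port are assumptions for illustration, not taken from this commit):

import time
import urllib.error
import urllib.request


def wait_for_server(url="http://127.0.0.1:8080/health", timeout=60):
    # Poll until the endpoint answers 200, i.e. the server is up and the
    # model is loaded. The /health path and the port are assumptions;
    # adjust the URL to whatever port the container actually publishes.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with urllib.request.urlopen(url, timeout=5) as resp:
                if resp.status == 200:
                    return
        except (urllib.error.URLError, OSError):
            pass  # not listening yet, or still loading the model
        time.sleep(1)
    raise TimeoutError(f"server at {url} not ready after {timeout}s")

The test could then call wait_for_server() right after the serve command in place of time.sleep(10), proceeding as soon as the server is ready and failing fast with a clear error if it never comes up.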