Mirror of https://github.com/containers/ramalama.git, synced 2026-02-05 06:46:39 +01:00.
Add a new "bats" container which is configured to run the bats tests. The container supports running the standard bats test suite (container-in-container) as well as the "--nocontainer" tests. Add two new Makefile targets for running the bats container via podman. Signed-off-by: Mike Bonnet <mikeb@redhat.com>
25 lines
898 B
Docker
25 lines
898 B
Docker
# Base image for the bats test container.
FROM quay.io/fedora/fedora:42

# HOME and XDG_RUNTIME_DIR are redirected to /tmp and the podman storage
# driver is forced to vfs — presumably so podman can run inside this
# container regardless of the invoking UID or overlayfs availability.
# NOTE(review): confirm against the entrypoint/test-suite expectations.
ENV HOME=/tmp \
    XDG_RUNTIME_DIR=/tmp \
    STORAGE_DRIVER=vfs

# Tests are expected to run from /src (created automatically by WORKDIR).
WORKDIR /src

# entrypoint.sh is installed by the COPY step near the end of this file.
ENTRYPOINT ["/usr/bin/entrypoint.sh"]
# Test and runtime dependencies for the bats suite. git-core, cmake,
# gcc-c++ and curl-devel are needed to build llama.cpp/llama-bench below.
# The dnf cache is cleaned in the same layer to keep the image small.
# (The original embedded a "#" comment inside the line continuation, which
# the Dockerfile parser strips but which is fragile; it is folded into
# this header comment instead, and the package list is sorted.)
RUN dnf -y install \
        bats \
        cmake \
        curl-devel \
        gcc-c++ \
        git-core \
        httpd-tools \
        iproute \
        jq \
        make \
        ollama \
        openssl \
        podman \
        python3-huggingface-hub \
    && dnf -y clean all

# Restore the file capabilities on shadow-utils binaries (newuidmap /
# newgidmap) that get lost during image builds — the standard fix so
# rootless podman works inside a container (see podman-in-podman docs).
RUN rpm --restore shadow-utils
# Build and install llama.cpp from upstream, then delete the source tree
# in the same layer so only the installed artifacts remain in the image.
# NOTE(review): an unpinned --depth=1 clone of the default branch makes
# this layer non-reproducible; consider pinning a release tag or commit.
# NOTE(review): -DGGML_CMAKE_BUILD_TYPE looks unusual — the standard CMake
# variable is CMAKE_BUILD_TYPE; confirm the intended flag upstream.
RUN git clone --depth=1 https://github.com/ggml-org/llama.cpp && \
    pushd llama.cpp && \
    cmake -B build \
          -DGGML_NATIVE=OFF \
          -DGGML_RPC=ON \
          -DGGML_CCACHE=OFF \
          -DGGML_CMAKE_BUILD_TYPE=Release \
          -DLLAMA_CURL=ON \
          -DCMAKE_INSTALL_PREFIX=/usr && \
    cmake --build build --config Release --parallel $(nproc) && \
    cmake --install build && \
    popd && rm -rf llama.cpp
# Install the container entrypoint (referenced by ENTRYPOINT above) and
# the podman configuration for container-in-container test runs.
COPY container-images/bats/entrypoint.sh /usr/bin
COPY container-images/bats/containers.conf /etc/containers

# Make subordinate-ID maps writable by any UID so rootless podman can be
# configured for arbitrary users at runtime. NOTE(review): world-writable
# /etc/subuid and /etc/subgid is acceptable only for a throwaway test
# image — never do this in a production image.
RUN chmod a+rw /etc/subuid /etc/subgid