ci: add vulkan docker image (#3644)

Signed-off-by: rare-magma <rare-magma@posteo.eu>
This commit is contained in:
Nuno 2026-02-09 11:33:06 +01:00 committed by GitHub
parent 052066c4f7
commit 764482c317
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 35 additions and 1 deletions

View File

@ -0,0 +1,20 @@
# syntax=docker/dockerfile:1
# Build stage: compile whisper.cpp with the Vulkan GGML backend enabled.
FROM ubuntu:24.04 AS build
WORKDIR /app
# Vulkan headers (libvulkan-dev) and the shader compiler (glslc) are required
# to build the GGML Vulkan backend; clean apt caches in the same layer (DL3009).
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        cmake \
        git \
        glslc \
        libvulkan-dev \
        wget \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
# NOTE: build context is the repo root's parent per this repo's .devops convention,
# matching the sibling main-*.Dockerfile images — do not change to `COPY . .`.
COPY .. .
# Build the binaries and fetch the base.en model, with Vulkan enabled.
RUN make base.en CMAKE_ARGS="-DGGML_VULKAN=1"

# Runtime stage: only the Vulkan loader and a Mesa ICD are needed to run;
# curl/wget/git are kept so the model download scripts work inside the container.
FROM ubuntu:24.04 AS runtime
WORKDIR /app
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        cmake \
        curl \
        ffmpeg \
        git \
        libsdl2-dev \
        libvulkan1 \
        mesa-vulkan-drivers \
        wget \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
COPY --from=build /app /app
# Expose the built binaries (whisper-cli, whisper-server, ...) on PATH.
ENV PATH=/app/build/bin:$PATH
# Shell-form dispatch entrypoint, consistent with the other whisper.cpp images:
# users pass the full command as a single string argument.
ENTRYPOINT [ "bash", "-c" ]

View File

@ -22,6 +22,7 @@ jobs:
- { tag: "main-musa", dockerfile: ".devops/main-musa.Dockerfile", platform: "linux/amd64" }
- { tag: "main-intel", dockerfile: ".devops/main-intel.Dockerfile", platform: "linux/amd64" }
- { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
- { tag: "main-vulkan", dockerfile: ".devops/main-vulkan.Dockerfile", platform: "linux/amd64" }
steps:
- name: Check out the repo

View File

@ -443,11 +443,12 @@ ffmpeg -i samples/jfk.wav jfk.opus
### Images
We have two Docker images available for this project:
We have multiple Docker images available for this project:
1. `ghcr.io/ggml-org/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
2. `ghcr.io/ggml-org/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
3. `ghcr.io/ggml-org/whisper.cpp:main-musa`: Same as `main` but compiled with MUSA support. (platforms: `linux/amd64`)
4. `ghcr.io/ggml-org/whisper.cpp:main-vulkan`: Same as `main` but compiled with Vulkan support. (platforms: `linux/amd64`)
### Usage
@ -456,15 +457,27 @@ We have two Docker images available for this project:
docker run -it --rm \
-v path/to/models:/models \
whisper.cpp:main "./models/download-ggml-model.sh base /models"
# transcribe an audio file
docker run -it --rm \
-v path/to/models:/models \
-v path/to/audios:/audios \
whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav"
# transcribe an audio file in samples folder
docker run -it --rm \
-v path/to/models:/models \
whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f ./samples/jfk.wav"
# run the web server
docker run -it --rm -p "8080:8080" \
-v path/to/models:/models \
  whisper.cpp:main "whisper-server --host 0.0.0.0 -m /models/ggml-base.bin"
# run the bench tool on the small.en model using 4 threads
docker run -it --rm \
-v path/to/models:/models \
whisper.cpp:main "whisper-bench -m /models/ggml-small.en.bin -t 4"
```
## Installing with Conan