ci: add vulkan docker image (#3644)
Signed-off-by: rare-magma <rare-magma@posteo.eu>
parent 052066c4f7
commit 764482c317
.devops/main-vulkan.Dockerfile
@ -0,0 +1,20 @@
FROM ubuntu:24.04 AS build
WORKDIR /app

RUN apt-get update && \
    apt-get install -y build-essential wget cmake git libvulkan-dev glslc \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY .. .
RUN make base.en CMAKE_ARGS="-DGGML_VULKAN=1"

FROM ubuntu:24.04 AS runtime
WORKDIR /app

RUN apt-get update && \
    apt-get install -y curl ffmpeg libsdl2-dev wget cmake git libvulkan1 mesa-vulkan-drivers \
    && rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*

COPY --from=build /app /app
ENV PATH=/app/build/bin:$PATH
ENTRYPOINT [ "bash", "-c" ]
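Assuming the image is built from the repository root (the local tag below is illustrative, not one the commit defines), a minimal build-and-check sketch for this Dockerfile:

```shell
# build the Vulkan image from the repo root, pointing at the new Dockerfile
docker build -f .devops/main-vulkan.Dockerfile -t whisper.cpp:main-vulkan .

# the ENTRYPOINT is `bash -c`, so the image takes a single command string;
# this lists the binaries the runtime stage puts on PATH
docker run --rm whisper.cpp:main-vulkan "ls /app/build/bin"
```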
@ -22,6 +22,7 @@ jobs:
          - { tag: "main-musa", dockerfile: ".devops/main-musa.Dockerfile", platform: "linux/amd64" }
          - { tag: "main-intel", dockerfile: ".devops/main-intel.Dockerfile", platform: "linux/amd64" }
          - { tag: "main-cuda", dockerfile: ".devops/main-cuda.Dockerfile", platform: "linux/amd64" }
+         - { tag: "main-vulkan", dockerfile: ".devops/main-vulkan.Dockerfile", platform: "linux/amd64" }

    steps:
      - name: Check out the repo
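Assuming the workflow forwards `matrix.tag`, `matrix.dockerfile`, and `matrix.platform` to its Docker build step the same way as the existing entries, the new matrix line corresponds roughly to this invocation (a sketch, not the workflow's literal step):

```shell
# approximate CI build command implied by the new matrix entry
docker buildx build \
  --platform linux/amd64 \
  -f .devops/main-vulkan.Dockerfile \
  -t ghcr.io/ggml-org/whisper.cpp:main-vulkan \
  .
```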
README.md
@ -443,11 +443,12 @@ ffmpeg -i samples/jfk.wav jfk.opus

### Images

-We have two Docker images available for this project:
+We have multiple Docker images available for this project:

1. `ghcr.io/ggml-org/whisper.cpp:main`: This image includes the main executable file as well as `curl` and `ffmpeg`. (platforms: `linux/amd64`, `linux/arm64`)
2. `ghcr.io/ggml-org/whisper.cpp:main-cuda`: Same as `main` but compiled with CUDA support. (platforms: `linux/amd64`)
3. `ghcr.io/ggml-org/whisper.cpp:main-musa`: Same as `main` but compiled with MUSA support. (platforms: `linux/amd64`)
+4. `ghcr.io/ggml-org/whisper.cpp:main-vulkan`: Same as `main` but compiled with Vulkan support. (platforms: `linux/amd64`)

### Usage
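Any of the tags listed above can be pulled from the GitHub container registry before use; for example, the new Vulkan image:

```shell
# fetch the Vulkan-enabled image published by CI
docker pull ghcr.io/ggml-org/whisper.cpp:main-vulkan
```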
@ -456,15 +457,27 @@ We have two Docker images available for this project:
```shell
# download the base model and persist it in the mounted models folder
docker run -it --rm \
  -v path/to/models:/models \
  whisper.cpp:main "./models/download-ggml-model.sh base /models"

# transcribe an audio file
docker run -it --rm \
  -v path/to/models:/models \
  -v path/to/audios:/audios \
  whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav"

# transcribe an audio file in the samples folder
docker run -it --rm \
  -v path/to/models:/models \
  whisper.cpp:main "whisper-cli -m /models/ggml-base.bin -f ./samples/jfk.wav"

# run the web server (bind to 0.0.0.0 so the published port is reachable from the host)
docker run -it --rm -p "8080:8080" \
  -v path/to/models:/models \
  whisper.cpp:main "whisper-server --host 0.0.0.0 -m /models/ggml-base.bin"

# run the bench tool on the small.en model using 4 threads
docker run -it --rm \
  -v path/to/models:/models \
  whisper.cpp:main "whisper-bench -m /models/ggml-small.en.bin -t 4"
```
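The same pattern applies to the Vulkan image, with one extra requirement: the container needs access to the host's GPU device nodes for Mesa's Vulkan driver to find a device. A minimal sketch (the `--device /dev/dri` mapping is an assumption about a typical Linux host setup, not part of this commit):

```shell
# transcribe using the Vulkan image; /dev/dri exposes the host GPU inside the container
docker run -it --rm \
  --device /dev/dri \
  -v path/to/models:/models \
  -v path/to/audios:/audios \
  ghcr.io/ggml-org/whisper.cpp:main-vulkan "whisper-cli -m /models/ggml-base.bin -f /audios/jfk.wav"
```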
## Installing with Conan