Fix llama-server and llama-bench #14
Workflow: build.yml (trigger: pull_request)
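For context, a minimal sketch of what a pull-request-triggered CMake build in `.github/workflows/build.yml` might look like. The repository's actual workflow defines many more jobs, matrices, and path filters than this, so the job name, steps, and options below are illustrative assumptions only.

```yaml
# Minimal sketch (not the repository's actual build.yml):
# run a single CMake build and the test suite on every pull request.
name: build

on:
  pull_request:

jobs:
  ubuntu-cpu-cmake:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Configure
        run: cmake -B build
      - name: Build
        run: cmake --build build --config Release
      - name: Test
        run: ctest --test-dir build --output-on-failure
```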
| Job | Duration |
| --- | --- |
| ios-xcode-build | 0s |
| macOS-latest-cmake-arm64 | 1m 1s |
| macOS-latest-cmake-x64 | 54s |
| macOS-latest-cmake-arm64-webgpu | 1m 5s |
| ubuntu-latest-llguidance | 57s |
| ubuntu-latest-cmake-rpc | 57s |
| ubuntu-24-cmake-vulkan-deb | 58s |
| ubuntu-24-cmake-vulkan | 51s |
| ubuntu-24-cmake-webgpu | 45s |
| ubuntu-22-cmake-hip | 46s |
| ubuntu-22-cmake-musa | 46s |
| ubuntu-22-cmake-sycl | 57s |
| ubuntu-22-cmake-sycl-fp16 | 58s |
| ubuntu-24-cmake-openvino | 0s |
| build-linux-cross / ubuntu-24-riscv64-cpu-cross | 0s |
| build-linux-cross / debian-13-loongarch64-cpu-cross | 0s |
| build-linux-cross / debian-13-loongarch64-vulkan-cross | 0s |
| build-linux-cross / ubuntu-24-riscv64-cpu-spacemit-ime-cross | 0s |
| build-cmake-pkg / linux | 0s |
| macOS-latest-cmake-ios | 0s |
| macOS-latest-cmake-tvos | 0s |
| macOS-latest-cmake-visionos | 0s |
| ubuntu-latest-cmake-cuda | 0s |
| windows-latest-cmake-sycl | 0s |
| windows-latest-cmake-hip | 0s |
| android-build | 0s |
| ggml-ci-x64-cpu-low-perf | 59s |
| ggml-ci-arm64-cpu-low-perf | 0s |
| ggml-ci-x64-cpu-high-perf | 51s |
| ggml-ci-arm64-cpu-high-perf | 0s |
| ggml-ci-arm64-cpu-high-perf-sve | 0s |
| ggml-ci-x64-nvidia-cuda | 0s |
| ggml-ci-x64-nvidia-vulkan-cm | 0s |
| ggml-ci-x64-nvidia-vulkan-cm2 | 0s |
| ggml-ci-x64-cpu-amx | 0s |
| ggml-ci-mac-metal | 0s |
| ggml-ci-mac-vulkan | 0s |
| ggml-ci-arm64-cpu-kleidiai | 44s |
Matrix jobs:
- openEuler-latest-cmake-cann
- ubuntu-cpu-cmake
- ubuntu-latest-cmake-sanitizer
- windows-2022-cmake-cuda
- windows-latest-cmake
- windows-msys2
- macOS-latest-swift
Annotations: 76 errors, 2 warnings, and 4 notices