Commit b36cf4c

slaren authored and ggerganov committed
llama : allow building all tests on windows when not using shared libs (ggml-org#13980)
* llama : allow building all tests on windows when not using shared libraries
* add static windows build to ci
* tests : enable debug logs for test-chat

Co-authored-by: Georgi Gerganov <[email protected]>
1 parent d9cd982 commit b36cf4c
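The core of the change is the guard visible in tests/CMakeLists.txt below: test targets are now built on Windows as well, as long as BUILD_SHARED_LIBS is off. The likely reason is that these tests link against internal symbols that a Windows DLL does not export, whereas a static build leaves everything linkable. A minimal sketch of the pattern (illustrative only; the real file registers many more targets):

    if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
        # Tests that need internal (non-exported) symbols: buildable
        # everywhere except Windows shared-library configurations.
        llama_build_and_test(test-chat.cpp)
        llama_build(test-tokenizer-1-bpe.cpp)
    endif()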

File tree: 2 files changed (+40, -123 lines)

* .github/workflows/build.yml
* tests/CMakeLists.txt

.github/workflows/build.yml

Lines changed: 11 additions & 58 deletions
@@ -5,43 +5,10 @@ on:
   push:
     branches:
       - master
-    paths: [
-      '.github/workflows/build.yml',
-      '.github/workflows/build-linux-cross.yml',
-      '.github/workflows/build-cmake-pkg.yml',
-      '**/CMakeLists.txt',
-      '**/.cmake',
-      '**/*.h',
-      '**/*.hpp',
-      '**/*.c',
-      '**/*.cpp',
-      '**/*.cu',
-      '**/*.cuh',
-      '**/*.swift',
-      '**/*.m',
-      '**/*.metal',
-      '**/*.comp'
-    ]
-
+    paths: ['.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']
   pull_request:
     types: [opened, synchronize, reopened]
-    paths: [
-      '.github/workflows/build.yml',
-      '.github/workflows/build-linux-cross.yml',
-      '.github/workflows/build-cmake-pkg.yml',
-      '**/CMakeLists.txt',
-      '**/.cmake',
-      '**/*.h',
-      '**/*.hpp',
-      '**/*.c',
-      '**/*.cpp',
-      '**/*.cu',
-      '**/*.cuh',
-      '**/*.swift',
-      '**/*.m',
-      '**/*.metal',
-      '**/*.comp'
-    ]
+    paths: ['.github/workflows/build.yml', '.github/workflows/build-linux-cross.yml', '**/CMakeLists.txt', '**/.cmake', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.cuh', '**/*.swift', '**/*.m', '**/*.metal', '**/*.comp']
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
@@ -84,8 +51,7 @@ jobs:
             -DCMAKE_BUILD_RPATH="@loader_path" \
             -DLLAMA_FATAL_WARNINGS=ON \
             -DGGML_METAL_USE_BF16=ON \
-            -DGGML_METAL_EMBED_LIBRARY=OFF \
-            -DGGML_METAL_SHADER_DEBUG=ON \
+            -DGGML_METAL_EMBED_LIBRARY=ON \
             -DGGML_RPC=ON
           cmake --build build --config Release -j $(sysctl -n hw.logicalcpu)
 
@@ -340,7 +306,6 @@ jobs:
         id: cmake_test
         run: |
           cd build
-          export GGML_VK_VISIBLE_DEVICES=0
           # This is using llvmpipe and runs slower than other backends
           ctest -L main --verbose --timeout 3600
 
@@ -512,9 +477,6 @@ jobs:
   build-linux-cross:
     uses: ./.github/workflows/build-linux-cross.yml
 
-  build-cmake-pkg:
-    uses: ./.github/workflows/build-cmake-pkg.yml
-
   macOS-latest-cmake-ios:
     runs-on: macos-latest
 
@@ -665,7 +627,7 @@ jobs:
       ./build-xcframework.sh
 
   windows-msys2:
-    runs-on: windows-2025
+    runs-on: windows-latest
 
     strategy:
       fail-fast: false
@@ -715,33 +677,27 @@ jobs:
           cmake --build build --config ${{ matrix.build }} -j $(nproc)
 
   windows-latest-cmake:
-    runs-on: windows-2025
+    runs-on: windows-latest
 
     env:
       OPENBLAS_VERSION: 0.3.23
       SDE_VERSION: 9.33.0-2024-01-07
-      VULKAN_VERSION: 1.4.313.2
+      VULKAN_VERSION: 1.4.309.0
 
     strategy:
       matrix:
         include:
           - build: 'cpu-x64 (static)'
-            arch: 'x64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DBUILD_SHARED_LIBS=OFF'
           - build: 'openblas-x64'
-            arch: 'x64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"'
           - build: 'vulkan-x64'
-            arch: 'x64'
-            defines: '-DCMAKE_BUILD_TYPE=Release -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON'
+            defines: '-DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_VULKAN=ON'
           - build: 'llvm-arm64'
-            arch: 'arm64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON'
           - build: 'llvm-arm64-opencl-adreno'
-            arch: 'arm64'
             defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/opencl-arm64-release" -DGGML_OPENCL=ON -DGGML_OPENCL_USE_ADRENO_KERNELS=ON'
           # - build: 'kompute-x64'
-          #   arch: 'x64'
           #   defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/x64-windows-llvm.cmake -DGGML_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DGGML_RPC=ON -DGGML_BACKEND_DL=ON -DGGML_CPU_ALL_VARIANTS=ON -DGGML_OPENMP=OFF -DGGML_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON'
 
     steps:
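The 'cpu-x64 (static)' matrix entry above corresponds to the static Windows build referenced in the commit message; its -DBUILD_SHARED_LIBS=OFF is what allows the full test suite to compile on that runner. As a refresher on how this cache variable behaves (a generic CMake sketch, not code from this repository): an add_library() call without an explicit STATIC/SHARED keyword takes its type from BUILD_SHARED_LIBS.

    cmake_minimum_required(VERSION 3.14)
    project(demo C)

    # With -DBUILD_SHARED_LIBS=OFF this becomes a static library
    # (libdemo.a / demo.lib); with ON it becomes a shared one.
    add_library(demo demo.c)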
@@ -779,7 +735,7 @@ jobs:
         id: get_vulkan
         if: ${{ matrix.build == 'kompute-x64' || matrix.build == 'vulkan-x64' }}
        run: |
-          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/vulkansdk-windows-X64-${env:VULKAN_VERSION}.exe"
+          curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe"
           & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install
           Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}"
           Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin"
@@ -812,8 +768,6 @@ jobs:
       - name: libCURL
         id: get_libcurl
         uses: ./.github/actions/windows-setup-curl
-        with:
-          architecture: ${{ matrix.arch == 'x64' && 'win64' || 'win64a' }}
 
       - name: Build
         id: cmake_build
@@ -823,7 +777,6 @@ jobs:
           cmake -S . -B build ${{ matrix.defines }} `
             -DCURL_LIBRARY="$env:CURL_PATH/lib/libcurl.dll.a" -DCURL_INCLUDE_DIR="$env:CURL_PATH/include"
           cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS}
-          cp $env:CURL_PATH/bin/libcurl-*.dll build/bin/Release
 
       - name: Add libopenblas.dll
         id: add_libopenblas_dll
@@ -834,7 +787,7 @@ jobs:
 
       - name: Test
         id: cmake_test
-        if: ${{ matrix.arch == 'x64' }}
+        if: ${{ matrix.build != 'llvm-arm64' && matrix.build != 'llvm-arm64-opencl-adreno' }}
         run: |
           cd build
           ctest -L main -C Release --verbose --timeout 900
@@ -939,7 +892,7 @@ jobs:
           cmake --build build --config Release
 
   windows-latest-cmake-sycl:
-    runs-on: windows-2022
+    runs-on: windows-latest
 
     defaults:
       run:
@@ -973,7 +926,7 @@ jobs:
 
   windows-latest-cmake-hip:
     if: ${{ github.event.inputs.create_release != 'true' }}
-    runs-on: windows-2022
+    runs-on: windows-latest
 
     steps:
       - name: Clone

tests/CMakeLists.txt

Lines changed: 29 additions & 65 deletions
@@ -42,34 +42,6 @@ function(llama_test target)
     set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
 endfunction()
 
-function(llama_test_cmd target)
-    include(CMakeParseArguments)
-    set(options)
-    set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
-    set(multiValueArgs ARGS)
-    cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
-
-    if (NOT DEFINED LLAMA_TEST_LABEL)
-        set(LLAMA_TEST_LABEL "main")
-    endif()
-    if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
-        set(LLAMA_TEST_WORKING_DIRECTORY .)
-    endif()
-    if (DEFINED LLAMA_TEST_NAME)
-        set(TEST_NAME ${LLAMA_TEST_NAME})
-    else()
-        set(TEST_NAME ${target})
-    endif()
-
-    add_test(
-        NAME ${TEST_NAME}
-        WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
-        COMMAND ${target}
-        ${LLAMA_TEST_ARGS})
-
-    set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
-endfunction()
-
 # Builds and runs a test source file.
 # Optional args:
 # - NAME: name of the executable & test target (defaults to the source file name without extension)
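The llama_test_cmd() helper removed above registered an arbitrary command as a test; the surviving llama_test() path is what the CI relies on. Both helpers tag tests with a LABELS property, which is what the workflow's "ctest -L main" invocations filter on. A self-contained sketch of that mechanism (generic CMake/CTest; the my_test target is hypothetical):

    enable_testing()
    add_executable(my_test my_test.c)

    # Register the binary with CTest and tag it, mirroring what
    # llama_test() does via set_property(... LABELS ...).
    add_test(NAME my_test COMMAND my_test)
    set_property(TEST my_test PROPERTY LABELS main)

    # Then "ctest -L main" (or "ctest -L main -C Release --timeout 900",
    # as in the workflow above) runs only tests labeled "main".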
@@ -111,31 +83,25 @@ endfunction()
 # build test-tokenizer-0 target once and add many tests
 llama_build(test-tokenizer-0.cpp)
 
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
-llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)
-
-if (NOT WIN32)
-    llama_test_cmd(
-        ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh
-        NAME test-tokenizers-ggml-vocabs
-        WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-        ARGS https://huggingface.co/ggml-org/vocabs ${PROJECT_SOURCE_DIR}/models/ggml-vocabs
-    )
-endif()
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-command-r.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-qwen2.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
+
+# TODO: missing HF tokenizer for this model in convert_hf_to_gguf_update.py, see https://github.com/ggml-org/llama.cpp/pull/13847
+# llama_test(test-tokenizer-0 NAME test-tokenizer-0-nomic-bert-moe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-nomic-bert-moe.gguf)
 
 if (LLAMA_LLGUIDANCE)
-    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
+    llama_build_and_test(test-grammar-llguidance.cpp ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
 endif ()
 
 if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
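Most of this file's churn replaces ${PROJECT_SOURCE_DIR} with ${CMAKE_CURRENT_SOURCE_DIR}/.. in model paths. The two resolve identically when llama.cpp is the top-level project, but they can diverge when the tests directory is processed from an outer build, because PROJECT_SOURCE_DIR tracks the nearest enclosing project() call while CMAKE_CURRENT_SOURCE_DIR is always the directory of the CMakeLists.txt currently being processed. A small sketch of the distinction (paths are hypothetical):

    # Processed as llama.cpp/tests/CMakeLists.txt:
    #   CMAKE_CURRENT_SOURCE_DIR    -> .../llama.cpp/tests
    #   CMAKE_CURRENT_SOURCE_DIR/.. -> .../llama.cpp   (repo root, always)
    #   PROJECT_SOURCE_DIR          -> source dir of the nearest project()
    #                                  call, possibly an outer superproject
    message(STATUS "tests dir: ${CMAKE_CURRENT_SOURCE_DIR}")
    message(STATUS "project:   ${PROJECT_SOURCE_DIR}")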
@@ -147,8 +113,8 @@ if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
     llama_build_and_test(test-chat.cpp)
     # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
     if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
-        llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
-        target_include_directories(test-json-schema-to-grammar PRIVATE ${PROJECT_SOURCE_DIR}/tools/server)
+        llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
+        target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../tools/server)
     endif()
 
     if (NOT GGML_BACKEND_DL)
@@ -161,20 +127,20 @@ if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
     llama_build(test-tokenizer-1-bpe.cpp)
 
     # TODO: disabled due to slowness
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-aquila.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-neox.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf --ignore-merges)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
-    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
+    #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
 
     # build test-tokenizer-1-spm target once and add many tests
     llama_build(test-tokenizer-1-spm.cpp)
 
-    llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
-    #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-baichuan.gguf)
+    llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
+    #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
 
     # llama_build_and_test(test-double-float.cpp) # SLOW
 endif()
@@ -185,8 +151,6 @@ llama_build_and_test(test-json-partial.cpp)
 llama_build_and_test(test-log.cpp)
 llama_build_and_test(test-regex-partial.cpp)
 
-llama_build_and_test(test-thread-safety.cpp ARGS -hf ggml-org/models -hff tinyllamas/stories15M-q4_0.gguf -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4)
-
 # this fails on windows (github hosted runner) due to curl DLL not found (exit code 0xc0000135)
 if (NOT WIN32)
     llama_build_and_test(test-arg-parser.cpp)
