25 | 25 | - 'tests/e2e/embedding/**' |
26 | 26 |
27 | 27 | jobs: |
28 | | - embedding-cli-tests: |
| 28 | + embedding-cli-tests-linux: |
29 | 29 | runs-on: ubuntu-latest |
| 30 | + env: |
| 31 | + LLAMA_CACHE: tmp # stable path for cache |
| 32 | + EMBD_TEST_DEBUG: "1" |
30 | 33 |
31 | 34 | steps: |
| 35 | + - uses: actions/checkout@v4 |
| 36 | + with: { fetch-depth: 0 } |
| 37 | + |
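| | + # cache both the default llama.cpp download dir and the LLAMA_CACHE "tmp" dir;
| | + # bump the "-v1" suffix in the key to invalidate a stale model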
| 38 | + - name: Restore model cache |
| 39 | + uses: actions/cache@v4 |
| 40 | + with: |
| 41 | + path: | |
| 42 | + ~/.cache/llama.cpp |
| 43 | + tmp |
| 44 | + key: hf-${{ runner.os }}-embeddinggemma-300M-q4_0-v1 |
| 45 | + restore-keys: | |
| 46 | + hf-${{ runner.os }}- |
| 47 | + hf- |
| 48 | +
32 | 49 | - name: Install system deps |
33 | 50 | run: | |
34 | 51 | sudo apt-get update |
35 | 52 | sudo apt-get -y install \ |
36 | | - build-essential \ |
37 | | - cmake \ |
38 | | - curl \ |
39 | | - libcurl4-openssl-dev \ |
40 | | - python3-pip |
41 | | -
42 | | - - name: Checkout repository |
43 | | - uses: actions/checkout@v4 |
44 | | - with: |
45 | | - fetch-depth: 0 |
| 53 | + build-essential cmake curl libcurl4-openssl-dev python3-pip |
46 | 54 |
47 | 55 | - name: Set up Python |
48 | 56 | uses: actions/setup-python@v5 |
49 | | - with: |
50 | | - python-version: '3.11' |
| 57 | + with: { python-version: '3.11' } |
51 | 58 |
52 | 59 | - name: Install Python deps |
53 | 60 | run: | |
54 | | - pip install -r requirements.txt || echo "No extra requirements found" |
55 | | - pip install pytest |
| 61 | + python -m pip install -r requirements.txt || echo "No extra requirements found" |
| 62 | + python -m pip install pytest numpy pytest-timeout |
56 | 63 |
57 | 64 | - name: Build llama-embedding |
58 | 65 | run: | |
59 | | - cmake -B build \ |
60 | | - -DCMAKE_BUILD_TYPE=Release |
| 66 | + cmake -B build -DCMAKE_BUILD_TYPE=Release |
61 | 67 | cmake --build build --target llama-embedding -j $(nproc) |
62 | 68 |
63 | | - - name: Run embedding tests |
| 69 | + - name: Pre-download tiny model (retry x3 on network) |
| 70 | + run: | |
| 71 | + set -e |
| 72 | + tries=0 |
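| | + # the here-string feeds one line of stdin so the CLI downloads the model,
| | + # embeds a single input, and exits; "until" retries only on a non-zero exit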
| 73 | + until ./build/bin/llama-embedding \ |
| 74 | + -hfr ggml-org/embeddinggemma-300M-qat-q4_0-GGUF \ |
| 75 | + -hff embeddinggemma-300M-qat-Q4_0.gguf \ |
| 76 | + --ctx-size 16 --embd-output-format json --no-warmup --threads 1 --seed 42 <<< "ok"; do |
| 77 | + tries=$((tries+1)) |
| 78 | + if [ $tries -ge 3 ]; then |
| 79 | + echo "Pre-download failed after $tries attempts" |
| 80 | + exit 1 |
| 81 | + fi |
| 82 | + echo "Retrying download ($tries/3)..." |
| 83 | + sleep 3 |
| 84 | + done |
| 85 | +
| 86 | + - name: Run embedding tests (30s per-test cap) |
| 87 | + shell: bash |
| 88 | + run: | |
| 89 | + set -o pipefail |
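| | + # pipefail propagates pytest's exit code through "tee", so failures still fail the job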
| 90 | + pytest -v tests/e2e/embedding \ |
| 91 | + --timeout=30 \ |
| 92 | + --durations=10 \ |
| 93 | + --junitxml=pytest-report.xml | tee pytest-output.txt |
| 94 | +
| 95 | + - name: Upload test artifacts |
| 96 | + if: always() |
| 97 | + uses: actions/upload-artifact@v4 |
| 98 | + with: |
| 99 | + name: linux-embedding-tests |
| 100 | + path: | |
| 101 | + pytest-output.txt |
| 102 | + pytest-report.xml |
| 103 | +
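| | + # explicit save sub-action so the model is cached even when earlier steps fail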
| 104 | + - name: Save model cache |
| 105 | + if: always() |
| 106 | + uses: actions/cache/save@v4
| 107 | + with: |
| 108 | + path: | |
| 109 | + ~/.cache/llama.cpp |
| 110 | + tmp |
| 111 | + key: hf-${{ runner.os }}-embeddinggemma-300M-q4_0-v1 |
| 112 | + |
| 113 | + embedding-cli-tests-windows: |
| 114 | + runs-on: windows-latest |
| 115 | + continue-on-error: true |
| 116 | + env: |
| 117 | + LLAMA_CACHE: tmp |
| 118 | + EMBD_TEST_DEBUG: "1" |
| 119 | + |
| 120 | + steps: |
| 121 | + - uses: actions/checkout@v4 |
| 122 | + - uses: actions/setup-python@v5 |
| 123 | + with: { python-version: '3.11' } |
| 124 | + |
| 125 | + # --- vcpkg plain bootstrap (no actions, no submodules) --- |
| 126 | + - name: Bootstrap vcpkg |
| 127 | + shell: pwsh |
| 128 | + run: | |
| 129 | + $env:VCPKG_ROOT = "$env:RUNNER_TEMP\vcpkg" |
| 130 | + git clone https://github.com/microsoft/vcpkg $env:VCPKG_ROOT |
| 131 | + & "$env:VCPKG_ROOT\bootstrap-vcpkg.bat" -disableMetrics |
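| | + # persist VCPKG_ROOT to GITHUB_ENV so later steps see it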
| 132 | + echo "VCPKG_ROOT=$env:VCPKG_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append |
| 133 | +
| 134 | + - name: Install curl with OpenSSL via vcpkg |
| 135 | + shell: pwsh |
| 136 | + run: | |
| 137 | + & "$env:VCPKG_ROOT\vcpkg.exe" install 'curl[openssl]:x64-windows'
| 138 | +
| 139 | + - name: Restore model cache |
| 140 | + uses: actions/cache@v4 |
| 141 | + with: |
| 142 | + path: | |
| 143 | + ~/.cache/llama.cpp
| 144 | + tmp |
| 145 | + key: hf-${{ runner.os }}-embeddinggemma-300M-q4_0-v1 |
| 146 | + restore-keys: | |
| 147 | + hf-${{ runner.os }}- |
| 148 | + hf- |
| 149 | +
| 150 | + - name: Install Python deps |
| 151 | + run: python -m pip install pytest numpy
| 152 | + |
| 153 | + - name: Configure & Build (Release) |
| 154 | + shell: pwsh |
| 155 | + run: | |
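| | + # MSVC is a multi-config generator: CMAKE_BUILD_TYPE is ignored at configure
| | + # time, which is why --config Release is passed to the build step below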
| 156 | + cmake -B build -DCMAKE_BUILD_TYPE=Release ` |
| 157 | + -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_ROOT\scripts\buildsystems\vcpkg.cmake" |
| 158 | + cmake --build build --target llama-embedding --config Release -j 2 |
| 159 | +
| 160 | + - name: Pre-download tiny model (retry x3) |
| 161 | + shell: bash |
| 162 | + run: | |
| 163 | + set -e |
| 164 | + tries=0 |
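| | + # multi-config MSVC builds place the binary under build/bin/Release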
| 165 | + until ./build/bin/Release/llama-embedding.exe \ |
| 166 | + -hfr ggml-org/embeddinggemma-300M-qat-q4_0-GGUF \ |
| 167 | + -hff embeddinggemma-300M-qat-Q4_0.gguf \ |
| 168 | + --ctx-size 16 --embd-output-format json --no-warmup --threads 1 --seed 42 <<< "ok"; do |
| 169 | + tries=$((tries+1)) |
| 170 | + if [ $tries -ge 3 ]; then |
| 171 | + echo "Pre-download failed after $tries attempts"; exit 1 |
| 172 | + fi |
| 173 | + echo "Retrying download ($tries/3)..."; sleep 3 |
| 174 | + done |
| 175 | +
| 176 | + - name: Run smoke tests |
| 177 | + shell: bash |
| 178 | + run: | |
| 179 | + pytest -q tests/e2e/embedding -k raw_vs_json_consistency |
| 180 | +
| 183 | + embedding-cli-tests-macos: |
| 184 | + runs-on: macos-latest |
| 185 | + continue-on-error: true |
| 186 | + env: |
| 187 | + LLAMA_CACHE: tmp |
| 188 | + EMBD_TEST_DEBUG: "1" |
| 189 | + steps: |
| 190 | + - uses: actions/checkout@v4 |
| 191 | + - uses: actions/setup-python@v5 |
| 192 | + with: { python-version: '3.11' } |
| 193 | + |
| 194 | + - name: Install Python deps |
| 195 | + run: python -m pip install pytest numpy
| 196 | + |
| 197 | + - name: Build |
| 198 | + run: | |
| 199 | + cmake -B build -DCMAKE_BUILD_TYPE=Release |
| 200 | + cmake --build build --target llama-embedding -j 3 |
| 201 | +
| 202 | + - name: Pre-download tiny model (retry x3) |
| 203 | + run: | |
| 204 | + set -e |
| 205 | + tries=0 |
| 206 | + until ./build/bin/llama-embedding \ |
| 207 | + -hfr ggml-org/embeddinggemma-300M-qat-q4_0-GGUF \ |
| 208 | + -hff embeddinggemma-300M-qat-Q4_0.gguf \ |
| 209 | + --ctx-size 16 --embd-output-format json --no-warmup --threads 1 --seed 42 <<< "ok"; do |
| 210 | + tries=$((tries+1)) |
| 211 | + if [ $tries -ge 3 ]; then |
| 212 | + echo "Pre-download failed after $tries attempts"; exit 1 |
| 213 | + fi |
| 214 | + echo "Retrying download ($tries/3)..."; sleep 3 |
| 215 | + done |
| 216 | +
| 217 | + - name: Warm cache & run a tiny smoke |
64 | 218 | run: | |
65 | | - pytest -v tests/e2e/embedding |
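| | + # sanity-check that the binary launches before invoking pytest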
| 219 | + ./build/bin/llama-embedding --help >/dev/null 2>&1 |
| 220 | + pytest -q tests/e2e/embedding -k raw_vs_json_consistency |