diff --git a/.github/workflows/workflow_finetune.yml b/.github/workflows/workflow_finetune.yml
index 181f46df4..5a8e32720 100644
--- a/.github/workflows/workflow_finetune.yml
+++ b/.github/workflows/workflow_finetune.yml
@@ -11,10 +11,10 @@ on:
         default: '10.1.2.13:5000/llmray-build'
       http_proxy:
         type: string
-        default: 'http://proxy-chain.intel.com:911'
+        default: 'http://10.24.221.149:911'
       https_proxy:
         type: string
-        default: 'http://proxy-chain.intel.com:911'
+        default: 'http://10.24.221.149:911'
       runner_config_path:
         type: string
         default: '/home/ci/llm-ray-actions-runner'
diff --git a/.github/workflows/workflow_inference.yml b/.github/workflows/workflow_inference.yml
index 8b3ac1d56..03269a4a4 100644
--- a/.github/workflows/workflow_inference.yml
+++ b/.github/workflows/workflow_inference.yml
@@ -11,10 +11,10 @@ on:
         default: '10.1.2.13:5000/llmray-build'
       http_proxy:
         type: string
-        default: 'http://proxy-chain.intel.com:911'
+        default: 'http://10.24.221.149:911'
       https_proxy:
         type: string
-        default: 'http://proxy-chain.intel.com:911'
+        default: 'http://10.24.221.149:911'
       runner_config_path:
         type: string
         default: '/home/ci/llm-ray-actions-runner'
diff --git a/dev/docker/Dockerfile.vllm b/dev/docker/Dockerfile.vllm
index 3b4bd00c8..e4eb63d06 100644
--- a/dev/docker/Dockerfile.vllm
+++ b/dev/docker/Dockerfile.vllm
@@ -38,6 +38,3 @@ RUN --mount=type=cache,target=/root/.cache/pip pip install -e .[cpu] -f https://
 RUN --mount=type=cache,target=/root/.cache/pip \
     source /opt/conda/bin/activate base && ./install-vllm-cpu.sh
 
-# TODO: workaround, remove this when fixed in vllm-cpu upstream
-RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install xformers
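
For context, a minimal sketch of how `workflow_call` inputs like the proxy defaults patched above are typically consumed inside the called workflow. This is not taken from this PR; the job name, runner label, and build step are hypothetical:

    # Hypothetical consumer of the inputs above (job and step names are
    # illustrative, not from workflow_finetune.yml itself).
    jobs:
      finetune:
        runs-on: self-hosted
        env:
          # Uses the IP-based proxy default when the caller passes nothing.
          http_proxy: ${{ inputs.http_proxy }}
          https_proxy: ${{ inputs.https_proxy }}
        steps:
          - name: Build behind the proxy
            run: |
              docker build \
                --build-arg http_proxy=${{ inputs.http_proxy }} \
                --build-arg https_proxy=${{ inputs.https_proxy }} \
                -t llmray-build:ci .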