@@ -2,71 +2,117 @@ name: CI
 
 on:
   push:
-    branches: [ main, dev, ci-multi ]
+    branches: [main, dev, experimental, ci-multi]
   pull_request:
-    branches: [ main, dev, ci-multi ]
+    branches: [main, dev, experimental, ci-multi]
 
 jobs:
   test:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-24.04
     timeout-minutes: 180
 
+    env:
+      PYTHON_VERSION: "3.9"
+      OLLAMA_VERSION: "0.18.0"
+      OLLAMA_HOST: "127.0.0.1:11434"
+      OLLAMA_CONTEXT_LENGTH: "4000"
+      OLLAMA_MODELS: "/usr/share/ollama/.ollama/models"
+
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-
-      # 1) Restore any cached Ollama data (~2 GB)
-      - name: Restore Ollama cache
-        uses: actions/cache@v4
-        with:
-          path: ~/.ollama
-          key: qwen3-4b-gguf-v1
-
-      # 2) Install Ollama
-      - name: Install Ollama
-        run: |
-          curl -fsSL https://ollama.com/install.sh | sh
-
-      # 3) Drop-in override to bump context window to 4k tokens
-      - name: Configure Ollama for 4K context
-        run: |
-          sudo mkdir -p /etc/systemd/system/ollama.service.d
-          sudo tee /etc/systemd/system/ollama.service.d/override.conf << 'EOF'
-          [Service]
-          ExecStart=
-          ExecStart=/usr/local/bin/ollama serve --num_ctx 4000
-          EOF
-          sudo systemctl daemon-reload
-
-      # 4) Enable & start the systemd-managed Ollama daemon
-      - name: Enable & start Ollama
-        run: |
-          sudo systemctl enable --now ollama
-
-      # 5) Pull the phi4-mini:3.8b model (uses cache if present)
-      - name: Pull phi4-mini:3.8b model
-        run: ollama pull phi4-mini:3.8b
-
-      # 6) Set up Python & install dependencies
-      - uses: actions/setup-python@v5
-        with: { python-version: "3.9" }
-      - name: Install Python deps
-        run: |
-          pip install -e .
-          pip install pytest datasets numpy
-
-      # 7) Point LiteLLM/OpenAI to our local Ollama server
-      - name: Configure LLM env
-        run: |
-          echo "OPENAI_API_KEY=ollama" >> $GITHUB_ENV
-          echo "OPENAI_API_BASE=http://localhost:11434/v1" >> $GITHUB_ENV
-          echo "TRACE_LITELLM_MODEL=openai/phi4-mini:3.8b" >> $GITHUB_ENV
-
-      # 8) Run all Trace unit tests
-      - name: Run unit tests
-        run: pytest tests/unit_tests/
-
-      # 9) Run basic tests for each optimizer (some will fail due to the small LLM model chosen for free GitHub CI)
-      - name: Run optimizers test suite
-        run: pytest tests/llm_optimizers_tests/test_optimizer.py || true
-        continue-on-error: true
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Restore Ollama model cache
+        uses: actions/cache@v4
+        with:
+          path: /usr/share/ollama/.ollama/models
+          key: ollama-${{ runner.os }}-${{ env.OLLAMA_VERSION }}-phi4-mini-3.8b-v2
+          restore-keys: |
+            ollama-${{ runner.os }}-${{ env.OLLAMA_VERSION }}-
+            ollama-${{ runner.os }}-
+
+      - name: Install Ollama
+        run: |
+          curl -fsSL https://ollama.com/install.sh | OLLAMA_VERSION=${OLLAMA_VERSION} sh
+
+      - name: Prepare Ollama directories
+        run: |
+          sudo mkdir -p /usr/share/ollama/.ollama/models
+          sudo chown -R ollama:ollama /usr/share/ollama
+
+      - name: Configure Ollama service
+        run: |
+          sudo mkdir -p /etc/systemd/system/ollama.service.d
+          sudo tee /etc/systemd/system/ollama.service.d/override.conf > /dev/null <<EOF
+          [Service]
+          Environment="OLLAMA_HOST=${OLLAMA_HOST}"
+          Environment="OLLAMA_CONTEXT_LENGTH=${OLLAMA_CONTEXT_LENGTH}"
+          Environment="OLLAMA_MODELS=${OLLAMA_MODELS}"
+          EOF
+          sudo systemctl daemon-reload
+          sudo systemctl enable ollama
+          sudo systemctl restart ollama
+
+      - name: Verify Ollama service started
+        shell: bash
+        run: |
+          set -euxo pipefail
+          sudo systemctl status ollama --no-pager || true
+          sudo journalctl -u ollama -n 100 --no-pager || true
+
+      - name: Wait for Ollama to become ready
+        shell: bash
+        run: |
+          set -euo pipefail
+
+          for i in $(seq 1 60); do
+            if curl -fsS "http://${OLLAMA_HOST}/api/tags" > /dev/null; then
+              echo "Ollama is ready"
+              break
+            fi
+
+            if [ "$i" -eq 60 ]; then
+              echo "Ollama failed to become ready"
+              sudo systemctl status ollama --no-pager || true
+              sudo journalctl -u ollama -n 200 --no-pager || true
+              exit 1
+            fi
+
+            echo "Waiting for Ollama... attempt $i/60"
+            sleep 2
+          done
+
+      - name: Pull phi4-mini:3.8b model
+        run: |
+          ollama pull phi4-mini:3.8b
+
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install Python deps
+        run: |
+          python -m pip install --upgrade pip
+          pip install -e .
+          pip install pytest datasets numpy
+
+      - name: Configure LLM env
+        run: |
+          echo "OPENAI_API_KEY=ollama" >> "$GITHUB_ENV"
+          echo "OPENAI_API_BASE=http://${OLLAMA_HOST}/v1" >> "$GITHUB_ENV"
+          echo "TRACE_LITELLM_MODEL=openai/phi4-mini:3.8b" >> "$GITHUB_ENV"
+
+      - name: Run unit tests
+        run: pytest tests/unit_tests/
+
+      - name: Run optimizers test suite
+        run: pytest tests/llm_optimizers_tests/test_optimizer.py || true
+        continue-on-error: true
+
+      - name: Dump Ollama logs on failure
+        if: failure()
+        run: |
+          sudo systemctl status ollama --no-pager || true
+          sudo journalctl -u ollama -n 300 --no-pager || true
+          sudo ls -R /usr/share/ollama/.ollama || true