[Test] Start Llama tests on GPU #898

Workflow file for this run

name: Unit tests

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  workflow_dispatch:

jobs:
  unit-tests:
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
        model: ["gpt2cpu", "phi2cpu", "hfllama7b"]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          pip install -e .[test]
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Install model-specific dependencies
        run: |
          pip install llama-cpp-python
      - name: Run tests (except server)
        run: |
          pytest --cov=guidance --cov-report=xml --cov-report=term-missing --selected_model ${{ matrix.model }} -m "not server" ./tests/
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
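
Note that --selected_model in the test step is not a built-in pytest flag; it has to be registered by the test suite itself. Below is a minimal sketch of how such an option is typically wired up in a conftest.py, assuming the standard pytest_addoption hook; the fixture name selected_model_name and the default value are illustrative assumptions, not taken from this repository.

# conftest.py -- a minimal sketch of registering the custom --selected_model option
import pytest

def pytest_addoption(parser):
    # Register the flag the workflow passes as: --selected_model ${{ matrix.model }}
    parser.addoption(
        "--selected_model",
        action="store",
        default="gpt2cpu",  # assumed default for local runs
        help="Model the test suite should load (e.g. gpt2cpu, phi2cpu, hfllama7b).",
    )

@pytest.fixture(scope="session")
def selected_model_name(pytestconfig):
    # Tests request this fixture to know which matrix model to instantiate.
    return pytestconfig.getoption("--selected_model")

With this in place, the same command the workflow runs can be reproduced locally, for example: pytest --selected_model hfllama7b -m "not server" ./tests/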