Pre-compiled end-to-end GPU driver validation
Signed-off-by: shiva kumar <[email protected]>
shivakunv committed Aug 26, 2024
1 parent 404a65b commit ffafae0
Showing 18 changed files with 341 additions and 29 deletions.
112 changes: 112 additions & 0 deletions .github/workflows/ci-precompiled.yaml
@@ -0,0 +1,112 @@
# Copyright 2024 NVIDIA CORPORATION
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Pre-Compiled End-to-end tests

on:
workflow_run:
workflows: [image]
types:
- completed
branches:
- e2etestdriver_no
workflow_dispatch:

pull_request:
types:
- opened
- synchronize
branches:
# - main
# - release-*
- e2etestdriver
push:
branches:
# - main
# - release-*
- e2etestdriver

jobs:
e2e-tests-nvidiadriver:
# strategy:
# matrix:
# flavor:
# - aws
# - azure
# - generic
# - nvidia
# - oracle
runs-on: ubuntu-latest

steps:
- name: Check out code
uses: actions/checkout@v4

- name: Set up Holodeck
uses: NVIDIA/[email protected]
env:
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SSH_KEY: ${{ secrets.AWS_SSH_KEY }}
with:
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_ssh_key: ${{ secrets.AWS_SSH_KEY }}
holodeck_config: "tests/holodeck.yaml"

- name: Get public dns name
id: get_public_dns_name
uses: mikefarah/yq@master
with:
cmd: yq '.status.properties[] | select(.name == "public-dns-name") | .value' /github/workspace/.cache/holodeck.yaml

- name: Set and Calculate test vars
run: |
echo "instance_hostname=ubuntu@${{ steps.get_public_dns_name.outputs.result }}" >> $GITHUB_ENV
echo "private_key=${{ github.workspace }}/key.pem" >> $GITHUB_ENV
echo "${{ secrets.AWS_SSH_KEY }}" > ${{ github.workspace }}/key.pem && chmod 400 ${{ github.workspace }}/key.pem
echo "COMMIT_SHORT_SHA=${GITHUB_SHA:0:8}" >> $GITHUB_ENV
DRIVER_VERSIONS=$(grep '^DRIVER_VERSIONS ?=' versions.mk | awk -F' ?= ' '{print $2}')
echo "DRIVER_VERSIONS=$DRIVER_VERSIONS" >> $GITHUB_ENV
echo "PRIVATE_REGISTRY=ghcr.io" >> $GITHUB_ENV
- name: Precompiled e2e test - upgrade kernel and validate GPU driver
env:
# multiple space-separated target scripts can be passed via TARGET_SCRIPT
TARGET_SCRIPT: "./tests/cases/precompiled-nvidia-driver.sh ./tests/cases/nvidia-driver.sh"
OPERATOR_OPTIONS: "--set driver.repository=${{ env.PRIVATE_REGISTRY }}/nvidia --set driver.usePrecompiled=true"
run: |
rc=0
for driver_version in ${DRIVER_VERSIONS}; do
echo "Running e2e for DRIVER_VERSION=$driver_version"
# TODO: change below to KERNEL_FLAVOR
DRIVER_BRANCH=$(echo "${driver_version}" | cut -d '.' -f 1)
DRIVER_VERSION="${DRIVER_BRANCH}"
status=0
./tests/precompile-task_executor.sh "${TARGET_SCRIPT}" "${DRIVER_VERSION}" "${OPERATOR_OPTIONS}" || status=$?
if [ $status -ne 0 ]; then
echo "Precompiled e2e validation failed for driver branch $DRIVER_VERSION and kernel flavor $KERNEL_FLAVOR with status $status"
rc=$status
fi
done
./tests/scripts/pull.sh /tmp/logs logs
exit $rc
- name: Archive test logs
if: ${{ failure() }}
uses: actions/upload-artifact@v4
with:
name: nvidiadriver-Precompiled-e2e-test-logs
path: ./logs/
retention-days: 15
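
Note: the "Set and Calculate test vars" step above derives DRIVER_VERSIONS from versions.mk. A minimal sketch of that extraction, assuming a conventional "VAR ?= value" entry in versions.mk (the file itself is not part of this diff):

# versions.mk is assumed to contain a line like:
#   DRIVER_VERSIONS ?= 535.183.06 550.90.07
# grep anchors on the variable name; awk splits on " ?= " so that $2
# carries the space-separated list of driver versions.
DRIVER_VERSIONS=$(grep '^DRIVER_VERSIONS ?=' versions.mk | awk -F' ?= ' '{print $2}')
echo "${DRIVER_VERSIONS}"   # -> 535.183.06 550.90.07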
32 changes: 24 additions & 8 deletions .github/workflows/ci.yaml
@@ -20,7 +20,21 @@ on:
types:
- completed
branches:
- main
- e2etestdriver

pull_request:
types:
- opened
- synchronize
branches:
# - main
# - release-*
- e2etestdriver
push:
branches:
# - main
# - release-*
- e2etestdriver

jobs:
e2e-tests-nvidiadriver:
@@ -41,36 +55,38 @@ jobs:
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_ssh_key: ${{ secrets.AWS_SSH_KEY }}
holodeck_config: "tests/holodeck.yaml"

- name: Get public dns name
id: get_public_dns_name
uses: mikefarah/yq@master
with:
cmd: yq '.status.properties[] | select(.name == "public-dns-name") | .value' /github/workspace/.cache/holodeck.yaml
cmd: yq '.status.properties[] | select(.name == "public-dns-name") | .value' /github/workspace/.cache/holodeck.yaml

- name: Set and Calculate test vars
run: |
echo "instance_hostname=ubuntu@${{ steps.get_public_dns_name.outputs.result }}" >> $GITHUB_ENV
echo "private_key=${{ github.workspace }}/key.pem" >> $GITHUB_ENV
echo "${{ secrets.AWS_SSH_KEY }}" > ${{ github.workspace }}/key.pem && chmod 400 ${{ github.workspace }}/key.pem
echo "COMMIT_SHORT_SHA=${GITHUB_SHA:0:8}" >> $GITHUB_ENV
DRIVER_VERSIONS=$(grep '^DRIVER_VERSIONS ?=' versions.mk | awk -F' ?= ' '{print $2}')
echo "DRIVER_VERSIONS=$DRIVER_VERSIONS" >> $GITHUB_ENV
echo "PRIVATE_REGISTRY=ghcr.io" >> $GITHUB_ENV
- name: Validate gpu driver
env:
TEST_CASE: "./tests/cases/nvidia-driver.sh"
OPERATOR_OPTIONS: "--set driver.repository=${{ env.PRIVATE_REGISTRY }}/nvidia"
run: |
echo "${{ secrets.AWS_SSH_KEY }}" > ${private_key} && chmod 400 ${private_key}
rc=0
for driver_version in ${DRIVER_VERSIONS}; do
echo "Running e2e for DRIVER_VERSION=$driver_version"
./tests/ci-run-e2e.sh ${TEST_CASE} ${COMMIT_SHORT_SHA}-${driver_version} || status=$?
status=0
./tests/ci-run-e2e.sh "${TEST_CASE}" "${COMMIT_SHORT_SHA}-${driver_version}" "${OPERATOR_OPTIONS}" || status=$?
if [ $status -ne 0 ]; then
echo "e2e validation failed for driver version $driver_version with status $status"
rc=$status
fi
done
source ./tests/scripts/.definitions.sh
./tests/scripts/pull.sh /tmp/logs logs
exit $rc
@@ -80,4 +96,4 @@ jobs:
with:
name: nvidiadriver-e2e-test-logs
path: ./logs/
retention-days: 15
retention-days: 15
10 changes: 6 additions & 4 deletions .github/workflows/image.yaml
@@ -21,12 +21,14 @@ on:
- opened
- synchronize
branches:
- main
- release-*
# - main
# - release-*
- e2etestdriver
push:
branches:
- main
- release-*
# - main
# - release-*
- e2etestdriver

jobs:
image:
8 changes: 8 additions & 0 deletions tests/cases/precompiled-nvidia-driver.sh
@@ -0,0 +1,8 @@
#! /bin/bash
# This test case runs the operator installation / test case with the default options.

SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../scripts && pwd )"
source "${SCRIPTS_DIR}"/.definitions.sh

#upgrade kernel
"${SCRIPTS_DIR}"/upgrade-kernel.sh
10 changes: 5 additions & 5 deletions tests/ci-run-e2e.sh
@@ -2,14 +2,14 @@

set -xe

if [[ $# -ne 2 ]]; then
echo "TEST_CASE TARGET_DRIVER_VERSION are required"
if [[ $# -ne 3 ]]; then
echo "TEST_CASE TARGET_DRIVER_VERSION OPERATOR_OPTIONS are required"
exit 1
fi

export TEST_CASE=${1}
export TARGET_DRIVER_VERSION=${2}

export TEST_CASE="${1}"
export TARGET_DRIVER_VERSION="${2}"
export OPERATOR_OPTIONS="${3}"

TEST_DIR="$(pwd)/tests"

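
Note: with the new required third argument, an invocation matching what ci.yaml now does looks like this (the version tag and options are illustrative):

./tests/ci-run-e2e.sh ./tests/cases/nvidia-driver.sh \
    "a1b2c3d4-550.90.07" \
    "--set driver.repository=ghcr.io/nvidia"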
1 change: 1 addition & 0 deletions tests/local.sh
@@ -24,4 +24,5 @@ remote SKIP_PREREQUISITES="${SKIP_PREREQUISITES}" ./tests/scripts/prerequisites.
remote \
PROJECT="${PROJECT}" \
TARGET_DRIVER_VERSION="${TARGET_DRIVER_VERSION}" \
OPERATOR_OPTIONS=\"${OPERATOR_OPTIONS}\" \
${TEST_CASE}
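
Note: the escaped quotes around OPERATOR_OPTIONS are what keep the option string intact across the SSH hop, since remote() forwards its arguments to a shell on the target and the command line is parsed twice. A sketch of the effect, assuming remote.sh ultimately wraps something like ssh host "cd ${PROJECT} && $*":

# After local expansion, the remote shell receives (values illustrative):
#   OPERATOR_OPTIONS="--set driver.repository=ghcr.io/nvidia" ./tests/cases/nvidia-driver.sh
# Without the \" escaping, the remote shell would word-split the options:
# the assignment would end at the first space and the remaining flags would
# be parsed as stray words of the command.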
32 changes: 32 additions & 0 deletions tests/precompile-task_executor.sh
@@ -0,0 +1,32 @@
#!/bin/bash

set -xe

if [[ $# -ne 3 ]]; then
echo "TARGET_SCRIPT TARGET_DRIVER_VERSION OPERATOR_OPTIONS are required"
exit 1
fi

export TARGET_SCRIPT="${1}"
export TARGET_DRIVER_VERSION="${2}"
export OPERATOR_OPTIONS="${3}"

SCRIPT_DIR="$(pwd)/tests/scripts"

IFS=' ' read -r -a array <<< "$TARGET_SCRIPT"
# execute each target script, wait, then re-establish the SSH connection
for target_script in "${array[@]}"; do
echo "target_script: $target_script"
status=0
./tests/ci-run-e2e.sh "${target_script}" "${TARGET_DRIVER_VERSION}" "${OPERATOR_OPTIONS}" || status=$?
# On the target system, every script/test case exits with code 1 on error.
# A reboot, however, drops the SSH connection and can make the remote
# command fail with some other non-zero code, which would otherwise abort
# the whole job. We therefore treat only exit code 1 as fatal: a reboot
# never produces exit code 1, so reboot-induced disconnects are tolerated.
if [ $status -eq 1 ]; then
echo "e2e validation failed"
exit 1
fi
sleep 30
${SCRIPT_DIR}/remote_retry.sh
done
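
Note: remote_retry.sh itself is not part of this diff; below is a minimal sketch of what such a reconnect helper could look like, assuming the instance_hostname and private_key variables exported by the workflow and the SYSTEM_ONLINE_CHECK_TIMEOUT default added to .definitions.sh further down.

#!/bin/bash
# Poll SSH until the rebooted instance accepts connections or the timeout hits.
start=$(date +%s)
until ssh -i "${private_key}" -o ConnectTimeout=5 -o StrictHostKeyChecking=no \
        "${instance_hostname}" true; do
    if [ $(( $(date +%s) - start )) -ge "${SYSTEM_ONLINE_CHECK_TIMEOUT:-900}" ]; then
        echo "instance did not come back online within ${SYSTEM_ONLINE_CHECK_TIMEOUT:-900}s"
        exit 1
    fi
    sleep 10
done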
8 changes: 6 additions & 2 deletions tests/scripts/.definitions.sh
@@ -14,8 +14,6 @@ CASES_DIR="$( cd "${TEST_DIR}/cases" && pwd )"

: ${TEST_NAMESPACE:="test-operator"}

: ${PRIVATE_REGISTRY:="ghcr.io"}

: ${HELM_NVIDIA_REPO:="https://helm.ngc.nvidia.com/nvidia"}

: ${TARGET_DRIVER_VERSION:="550.90.07"}
@@ -24,3 +22,9 @@ CASES_DIR="$( cd "${TEST_DIR}/cases" && pwd )"
: ${POD_STATUS_TIME_OUT:="2m"}

: ${LOG_DIR:="/tmp/logs"}

: ${OPERATOR_OPTIONS:="--set driver.repository=ghcr.io/nvidia"}
: ${SYSTEM_ONLINE_CHECK_TIMEOUT:="900"}

: ${BASE_TARGET:="jammy"}
: ${GRUB_FILE:="/boot/grub/grub.cfg"}
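
Note: every entry here uses the ": ${VAR:=default}" idiom, which assigns the value only when the variable is unset or empty, so any of these defaults can be overridden from the environment, e.g.:

# Override the defaults per-run (values illustrative):
BASE_TARGET=focal GRUB_FILE=/boot/grub2/grub.cfg ./tests/cases/nvidia-driver.sh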
4 changes: 4 additions & 0 deletions tests/scripts/.local.sh
@@ -3,3 +3,7 @@
function remote() {
${SCRIPT_DIR}/remote.sh "cd ${PROJECT} && "$@""
}

function remote_retry() {
${SCRIPT_DIR}/remote_retry.sh
}
4 changes: 0 additions & 4 deletions tests/scripts/.rsync-excludes

This file was deleted.

2 changes: 2 additions & 0 deletions tests/scripts/.rsync-includes
@@ -0,0 +1,2 @@
tests/
tests/***
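
Note: in rsync filter syntax, "tests/" matches the directory itself and "tests/***" matches the directory plus everything beneath it. A sketch of an invocation that would use this include file together with a catch-all exclude (the calling script is not shown in this diff):

# Copy only the tests tree to the remote instance:
rsync -av --include-from=tests/scripts/.rsync-includes --exclude='*' \
    ./ "${instance_hostname}:${PROJECT}/"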
2 changes: 1 addition & 1 deletion tests/scripts/end-to-end-nvidia-driver.sh
@@ -11,7 +11,7 @@ ${SCRIPT_DIR}/install-operator.sh

"${SCRIPT_DIR}"/verify-operator.sh

echo "--------------Verification completed for GPU Operator, uninstalling the operator--------------"
echo "--------------Verification completed for GPU Operator, uninstalling the GPU operator--------------"

${SCRIPT_DIR}/uninstall-operator.sh ${TEST_NAMESPACE} "gpu-operator"

18 changes: 18 additions & 0 deletions tests/scripts/findkernelversion.sh
@@ -0,0 +1,18 @@
#!/bin/bash

if [[ "${SKIP_INSTALL}" == "true" ]]; then
echo "Skipping install: SKIP_INSTALL=${SKIP_INSTALL}"
exit 0
fi

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${SCRIPT_DIR}"/.definitions.sh

export REGCTL_VERSION=v0.4.7
mkdir -p bin
curl -sSLo bin/regctl https://github.com/regclient/regclient/releases/download/${REGCTL_VERSION}/regctl-linux-amd64
chmod a+x bin/regctl
export PATH=$(pwd)/bin:${PATH}
DRIVER_BRANCH=$(echo "${TARGET_DRIVER_VERSION}" | cut -d '.' -f 1)
KERNEL_FLAVOR=$(uname -r | awk -F'-' '{print $3}')
regctl image get-file ghcr.io/nvidia/driver:base-${BASE_TARGET}-${KERNEL_FLAVOR}-${DRIVER_BRANCH} /var/kernel_version.txt ${LOG_DIR}/kernel_version.txt || true
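
Note: the KERNEL_FLAVOR extraction assumes Ubuntu-style kernel release strings, where the flavor is the third dash-separated field of uname -r:

# Illustrative values:
#   5.15.0-1045-aws   -> KERNEL_FLAVOR=aws
#   6.5.0-44-generic  -> KERNEL_FLAVOR=generic
KERNEL_FLAVOR=$(uname -r | awk -F'-' '{print $3}')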
12 changes: 8 additions & 4 deletions tests/scripts/install-operator.sh
@@ -5,10 +5,14 @@ if [[ "${SKIP_INSTALL}" == "true" ]]; then
exit 0
fi

echo "Checking current kernel version..."
CURRENT_KERNEL=$(uname -r)
echo "Current kernel version: $CURRENT_KERNEL"

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source ${SCRIPT_DIR}/.definitions.sh

OPERATOR_OPTIONS="${OPERATOR_OPTIONS} --set driver.repository=${PRIVATE_REGISTRY}/nvidia --set driver.version=${TARGET_DRIVER_VERSION}"
OPERATOR_OPTIONS="${OPERATOR_OPTIONS} --set driver.version=${TARGET_DRIVER_VERSION}"

# add helm driver repo
helm repo add nvidia ${HELM_NVIDIA_REPO} && helm repo update
@@ -17,8 +21,8 @@ helm repo add nvidia ${HELM_NVIDIA_REPO} && helm repo update
kubectl create namespace "${TEST_NAMESPACE}"

# Run the helm install command
echo "OPERATOR_OPTIONS: $OPERATOR_OPTIONS"
${HELM} install gpu-operator nvidia/gpu-operator \
echo "OPERATOR_OPTIONS: ${OPERATOR_OPTIONS}"
eval ${HELM} install gpu-operator nvidia/gpu-operator \
-n "${TEST_NAMESPACE}" \
${OPERATOR_OPTIONS} \
"${OPERATOR_OPTIONS}" \
--wait
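
Note: the switch to eval is what lets one OPERATOR_OPTIONS string carry several --set flags: the shell's first parse keeps the quoted string as a single word, and eval's re-parse splits it back into separate helm arguments. A quick standalone way to see the difference (values illustrative):

OPERATOR_OPTIONS='--set driver.repository=ghcr.io/nvidia --set driver.usePrecompiled=true'
printf '%s\n' "${OPERATOR_OPTIONS}"        # one line: the options as a single argument
eval "printf '%s\n' ${OPERATOR_OPTIONS}"   # four lines: each flag split into its own argument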
