diff --git a/templates/al2/provisioners/install-worker.sh b/templates/al2/provisioners/install-worker.sh index 127fa0919..5b715a5a7 100644 --- a/templates/al2/provisioners/install-worker.sh +++ b/templates/al2/provisioners/install-worker.sh @@ -135,6 +135,10 @@ fi ################################################################################ sudo mv "${WORKING_DIR}/runtime.slice" /etc/systemd/system/runtime.slice +# this unit is safe to have regardless of variant because it will not run if +# the required binaries are not present. +sudo mv "${WORKING_DIR}/set-nvidia-clocks.service" /etc/systemd/system/set-nvidia-clocks.service +sudo systemctl enable set-nvidia-clocks.service ############################################################################### ### Containerd setup ########################################################## diff --git a/templates/al2/runtime/set-nvidia-clocks.service b/templates/al2/runtime/set-nvidia-clocks.service new file mode 100755 index 000000000..96b7f4e19 --- /dev/null +++ b/templates/al2/runtime/set-nvidia-clocks.service @@ -0,0 +1,14 @@ +[Unit] +Description=Configure NVIDIA GPU clock rate +After=nvidia-persistenced.service +Requires=nvidia-persistenced.service + +ConditionPathExists=/usr/bin/nvidia-smi + +[Service] +Type=oneshot +ExecStart=/usr/bin/set-nvidia-clocks +RemainAfterExit=true + +[Install] +WantedBy=multi-user.target diff --git a/templates/al2023/provisioners/install-nvidia-driver.sh b/templates/al2023/provisioners/install-nvidia-driver.sh index f18bd4bfe..75fd7e5f1 100755 --- a/templates/al2023/provisioners/install-nvidia-driver.sh +++ b/templates/al2023/provisioners/install-nvidia-driver.sh @@ -119,7 +119,6 @@ archive-proprietary-kmod ################################################################################ sudo mv ${WORKING_DIR}/gpu/nvidia-kmod-load.sh /etc/eks/ -sudo mv ${WORKING_DIR}/gpu/set-nvidia-clocks.sh /etc/eks/ sudo mv ${WORKING_DIR}/gpu/nvidia-kmod-load.service 
/etc/systemd/system/nvidia-kmod-load.service sudo mv ${WORKING_DIR}/gpu/set-nvidia-clocks.service /etc/systemd/system/set-nvidia-clocks.service sudo systemctl daemon-reload diff --git a/templates/al2023/runtime/gpu/set-nvidia-clocks.service b/templates/al2023/runtime/gpu/set-nvidia-clocks.service index f778bd12b..96b7f4e19 100755 --- a/templates/al2023/runtime/gpu/set-nvidia-clocks.service +++ b/templates/al2023/runtime/gpu/set-nvidia-clocks.service @@ -3,9 +3,11 @@ Description=Configure NVIDIA GPU clock rate After=nvidia-persistenced.service Requires=nvidia-persistenced.service +ConditionPathExists=/usr/bin/nvidia-smi + [Service] Type=oneshot -ExecStart=/etc/eks/set-nvidia-clocks.sh +ExecStart=/usr/bin/set-nvidia-clocks RemainAfterExit=true [Install] diff --git a/templates/al2023/runtime/gpu/set-nvidia-clocks.sh b/templates/al2023/runtime/gpu/set-nvidia-clocks.sh deleted file mode 100755 index 78baefc52..000000000 --- a/templates/al2023/runtime/gpu/set-nvidia-clocks.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o xtrace - -if ! gpu-ami-util has-nvidia-devices; then - echo >&2 "no NVIDIA devices are present, nothing to do!" 
- exit 0 -fi - -# gpu boost clock -sudo nvidia-smi -pm 1 # set persistence mode -sudo nvidia-smi --auto-boost-default=0 - -GPUNAME=$(nvidia-smi -L | head -n1) -echo >&2 "INFO: GPU name: $GPUNAME" - -# set application clock to maximum -if [[ $GPUNAME == *"A100"* ]]; then - nvidia-smi -ac 1215,1410 -elif [[ $GPUNAME == *"V100"* ]]; then - nvidia-smi -ac 877,1530 -elif [[ $GPUNAME == *"T4"* ]]; then - nvidia-smi -ac 5001,1590 -elif [[ $GPUNAME == *"H100"* ]]; then - nvidia-smi -ac 2619,1980 -elif [[ $GPUNAME == *"A10G"* ]]; then - nvidia-smi -ac 6251,1695 -elif [[ $GPUNAME == *"L4"* ]]; then - nvidia-smi -ac 6251,2040 -elif [[ $GPUNAME == *"L40S"* ]]; then - nvidia-smi -ac 9001,2520 -else - echo "unsupported gpu" -fi diff --git a/templates/shared/runtime/bin/set-nvidia-clocks b/templates/shared/runtime/bin/set-nvidia-clocks new file mode 100755 index 000000000..f43383c77 --- /dev/null +++ b/templates/shared/runtime/bin/set-nvidia-clocks @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o nounset + +# nvidia-smi is required for this script. +if ! SMI_CHECK_OUTPUT=$(nvidia-smi -q 2>&1); then + echo >&2 "ERROR: nvidia-smi check failed!" + echo >&2 "$SMI_CHECK_OUTPUT" + exit 1 +fi + +# it's generally recommended to manually set clocks using max performance in +# order to get predictable performance. +# see: https://developer.nvidia.com/blog/advanced-api-performance-setstablepowerstate/ +# see: https://developer.nvidia.com/blog/increase-performance-gpu-boost-k80-autoboost/ + +# persist device power states so that you don't incur long start up delay when +# initializing new contexts on a CUDA GPU. 
+sudo nvidia-smi --persistence-mode=1 + +# query the highest speed for both memory and graphics GPU clocks +# NOTE: sudo permissions are not required for queries +MEMORY_CLOCK=$(nvidia-smi --query-supported-clocks=memory --format=csv,noheader,nounits | head -n 1 | tr -d '\n') +GRAPHICS_CLOCK=$(nvidia-smi --query-supported-clocks=graphics --format=csv,noheader,nounits | head -n 1 | tr -d '\n') + +# disable automatic clock boosts and specify desired maximum clock values +sudo nvidia-smi --auto-boost-default=0 +sudo nvidia-smi --applications-clocks "${MEMORY_CLOCK},${GRAPHICS_CLOCK}" diff --git a/templates/test/Dockerfile b/templates/test/Dockerfile index a7dd78bd4..06581a152 100644 --- a/templates/test/Dockerfile +++ b/templates/test/Dockerfile @@ -17,5 +17,6 @@ COPY al2/runtime/kubelet-kubeconfig /var/lib/kubelet/kubeconfig COPY al2/runtime/ecr-credential-provider-config.json /etc/eks/image-credential-provider/config.json COPY test/entrypoint.sh /entrypoint.sh COPY al2/runtime/bin/* /usr/bin/ +COPY shared/runtime/bin/* /usr/bin/ COPY test/mocks/ /sbin/ ENTRYPOINT ["/entrypoint.sh"]