diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml
index b15bb62..68cabe4 100644
--- a/.github/workflows/docker-build.yaml
+++ b/.github/workflows/docker-build.yaml
@@ -4,7 +4,7 @@ name: docker-build
 # Only build base when any of the files in the base directory are modified
 on:
   push:
-    branches: [master, build_jetson]
+    branches: [master, two_x86]
     tags:
       - '*'
   workflow_dispatch:
@@ -20,9 +20,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
+        bare_or_nvda: [nvda, bare]
         platform:
           - linux/amd64
           - linux/arm64
+        exclude:
+          - platform: linux/arm64
+            bare_or_nvda: bare

     steps:
       -
@@ -30,11 +34,13 @@ jobs:
         uses: jlumbroso/free-disk-space@main
         with:
           tool-cache: true
+
       -
         name: Prepare
         run: |
           platform=${{ matrix.platform }}
-          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
+          bare_or_nvda=${{ matrix.bare_or_nvda }}
+          echo "PLATFORM_PAIR=${platform//\//-}-${bare_or_nvda}" >> $GITHUB_ENV
       -
         name: Checkout
         uses: actions/checkout@v4
@@ -42,22 +48,35 @@ jobs:
         name: Set platform-specific build-args
         id: set_build_args
         run: |
-          export $(cat build.bash | grep UPSTREAM_X86_64 | sed 's/^export //')
-          export $(cat build.bash | grep UPSTREAM_AARCH64 | sed 's/^export //')
+          # Read the upstream images from build.bash
+          export $(cat build.bash | grep UPSTREAM_X86_64_NVDA | sed 's/^export //')
+          export $(cat build.bash | grep UPSTREAM_ARM64_NVDA | sed 's/^export //')
+          export $(cat build.bash | grep UPSTREAM_X86_64_BARE | sed 's/^export //')
+          export $(cat build.bash | grep UPSTREAM_ARM64_BARE | sed 's/^export //')
           if [ "${{ matrix.platform }}" = "linux/amd64" ]; then
-            echo "upstream_image=$UPSTREAM_X86_64" >> $GITHUB_ENV
+            if [ "${{ matrix.bare_or_nvda }}" = "bare" ]; then
+              echo "upstream_image=$UPSTREAM_X86_64_BARE" >> $GITHUB_ENV
+            else
+              echo "upstream_image=$UPSTREAM_X86_64_NVDA" >> $GITHUB_ENV
+            fi
           elif [ "${{ matrix.platform }}" = "linux/arm64" ]; then
-            echo "upstream_image=$UPSTREAM_AARCH64" >> $GITHUB_ENV
+            if [ "${{ matrix.bare_or_nvda }}" = "bare" ]; then
+              echo "upstream_image=$UPSTREAM_ARM64_BARE" >> $GITHUB_ENV
+            else
+              echo "upstream_image=$UPSTREAM_ARM64_NVDA" >> $GITHUB_ENV
+            fi
           else
             echo "Architecture not supported"
             exit 1
           fi
+          echo "Architecture: ${{ matrix.platform }}"
+          echo "Bare or nvda option: ${{ matrix.bare_or_nvda }}"
       -
         name: Docker meta
         id: meta
         uses: docker/metadata-action@v5
         with:
-          images: ${{ env.REGISTRY_IMAGE }}
+          images: ${{ env.REGISTRY_IMAGE }}-${{ matrix.bare_or_nvda }}
       -
         name: Set up QEMU
         uses: docker/setup-qemu-action@v3
@@ -78,8 +97,11 @@ jobs:
           context: "{{defaultContext}}:dcist-master"
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
-          build-args: upstream_image=${{ env.upstream_image }}
-          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
+          build-args: |
+            ARCHITECTURE=${{ matrix.platform }}
+            UPSTREAM_IMAGE=${{ env.upstream_image }}
+            BARE_OR_NVDA=${{ matrix.bare_or_nvda }}
+          outputs: type=image,name=${{ env.REGISTRY_IMAGE }}-${{ matrix.bare_or_nvda }},push-by-digest=true,name-canonical=true,push=true
       -
         name: Export digest
         run: |
@@ -94,8 +116,16 @@ jobs:
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1
+      -
+        name: display files
+        run: |
+          ls /tmp/digests
   merge:
     runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        bare_or_nvda: [nvda, bare]
     needs:
       - build
     steps:
       -
         name: Download digests
         uses: actions/download-artifact@v4
         with:
           path: /tmp/digests
-          pattern: digests-*
+          pattern: digests-*-${{ matrix.bare_or_nvda }}
           merge-multiple: true
+      -
+        name: display files
+        run: |
+          ls /tmp/digests
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
@@ -114,7 +148,7 @@ jobs:
         id: meta
         uses: docker/metadata-action@v5
         with:
-          images: ${{ env.REGISTRY_IMAGE }}
+          images: ${{ env.REGISTRY_IMAGE }}-${{ matrix.bare_or_nvda }}
       -
         name: Login to Docker Hub
         uses: docker/login-action@v3
@@ -126,8 +160,8 @@ jobs:
         working-directory: /tmp/digests
         run: |
           docker buildx imagetools create $(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "$DOCKER_METADATA_OUTPUT_JSON") \
-            $(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
+            $(printf '${{ env.REGISTRY_IMAGE }}-${{ matrix.bare_or_nvda }}@sha256:%s ' *)
       -
         name: Inspect image
         run: |
-          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
+          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}-${{ matrix.bare_or_nvda }}:${{ steps.meta.outputs.version }}
diff --git a/build.bash b/build.bash
index 2544f1d..38f0819 100755
--- a/build.bash
+++ b/build.bash
@@ -20,16 +20,19 @@ set -eo pipefail

-# Upstream images for x86_64 and aarch64
-# For x86_64, use the CUDA 11.4.3 image
-# For aarch64, use the L4T PyTorch image
+# Upstream images for x86_64 and arm64, for both Nvidia and non-Nvidia
+# For x86_64 and arm64 non-Nvidia, use the Ubuntu 20.04 image
+# For x86_64 Nvidia, use the CUDA 11.4.3 image
+# For arm64 Nvidia, use the L4T PyTorch image
 # Important: do not modify the variable name as it is used by the Github action
 # to build the image
-UPSTREAM_X86_64=nvcr.io/nvidia/cuda:11.4.3-devel-ubuntu20.04
-UPSTREAM_AARCH64=nvcr.io/nvidia/l4t-pytorch:r35.1.0-pth1.13-py3
+UPSTREAM_X86_64_NVDA=nvcr.io/nvidia/cuda:11.4.3-devel-ubuntu20.04
+UPSTREAM_ARM64_NVDA=nvcr.io/nvidia/l4t-pytorch:r35.1.0-pth1.13-py3
+UPSTREAM_X86_64_BARE=ubuntu:20.04
+UPSTREAM_ARM64_BARE=ubuntu:20.04

 # Check that the current user has UID 1000.
-if [ $(id -u) -ne 1000 ]
+if [ "$(id -u)" -ne 1000 ]
 then
   echo "ERROR: This script must be run with UID and GID of 1000."
echo " Current UID: $(id -u), current GID: $(id -g)" @@ -43,36 +46,51 @@ then elif [ $# -eq 1 ] then # No image type is provided, build x86 by default - upstream=$UPSTREAM_X86_64 + echo "Building x86_64 image by default" + upstream=$UPSTREAM_X86_64_BARE architecture="x86_64" + bare_or_nvda="bare" elif [ $# -eq 2 ] then - # The second argument should be x86_64 or aarch64 + # The second argument should be x86_64_nvda arm64_nvda or x86_64 if [ "$2" = "x86_64" ] then - upstream=$UPSTREAM_X86_64 - elif [ "$2" = "aarch64" ] + upstream=$UPSTREAM_X86_64_BARE + bare_or_nvda="bare" + architecture="x86_64" + elif [ "$2" = "x86_64_nvda" ] then - upstream=$UPSTREAM_AARCH64 + upstream=$UPSTREAM_X86_64_NVDA + bare_or_nvda="nvda" + architecture="x86_64" + elif [ "$2" = "arm64" ] + then + echo "Architecture not supported" + exit 1 + elif [ "$2" = "arm64_nvda" ] + then + upstream=$UPSTREAM_ARM64_NVDA + bare_or_nvda="nvda" + achitecture="amd64" else - echo "Usage: $0 directory-name [x86_64|aarch64]" + echo "Usage: $0 directory-name [x86_64|x86_64_nvda|arm64_nvda]" exit 1 fi - architecture=$2 else - echo "Usage: $0 directory-name [x86_64|aarch64]" + echo "Usage: $0 directory-name [x86_64|x86_64_nvda|arm64_nvda]" exit 1 fi # Create the image name and tag user_id=$(id -u) -image_name=$(basename "$1") +image_name="$(basename "$1")-$bare_or_nvda" revision=$(git describe --tags --long) image_plus_tag=kumarrobotics/$image_name:$revision # Print image name in purple echo -e "\033[0;35mBuilding $image_plus_tag" echo -e "Architecture: $architecture" +echo -e "Bare or NVDA: $bare_or_nvda" echo -e "Upstream image: $upstream\n\033[0m" # get path to current directory @@ -87,8 +105,10 @@ fi # Build the image docker build --rm -t $image_plus_tag \ --build-arg user_id=$user_id \ - --build-arg upstream_image=$upstream \ - -f $DIR/$image_name/Dockerfile . + --build-arg UPSTREAM_IMAGE=$upstream \ + --build-arg ARCHITECTURE=$architecture \ + --build-arg BARE_OR_NVDA=$bare_or_nvda \ + -f $DIR/$1/Dockerfile . echo "Built $image_plus_tag and tagged as $image_name:latest" # Create "latest" tag diff --git a/dcist-master/Dockerfile b/dcist-master/Dockerfile index 6d865c8..d7a3b96 100644 --- a/dcist-master/Dockerfile +++ b/dcist-master/Dockerfile @@ -1,7 +1,9 @@ # The upstream image depends on the architecture, and it is defined when # building the image. 
-ARG upstream_image
-FROM $upstream_image
+ARG UPSTREAM_IMAGE
+FROM $UPSTREAM_IMAGE
+ARG ARCHITECTURE
+ARG BARE_OR_NVDA

 #Run the frontend first so it doesn't throw an error later
 RUN apt-get update \
@@ -108,8 +110,20 @@ RUN sudo apt-get install -y \
 # Install geographiclib_datasets required by mavros
 RUN cd /opt/ros/noetic/lib/mavros/ && sudo ./install_geographiclib_datasets.sh

-# Install pytorch only if it is not installed (avoid re-installing in Jetson)
-RUN sudo sh -c 'pip3 freeze | grep torch || pip3 install torch==1.8.2+cu111 torchvision==0.9.2+cu111 torchaudio==0.8.2 -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html'
+# Install pytorch for the appropriate architecture (ARCHITECTURE is linux/amd64 or linux/arm64 in CI, x86_64 or arm64 from build.bash)
+RUN if [ "$BARE_OR_NVDA" = "nvda" ] ; then \
+        if [ "$ARCHITECTURE" = "linux/amd64" ] || [ "$ARCHITECTURE" = "x86_64" ] ; then \
+            echo "nvda - x86_64"; sudo pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cu111 ; \
+        else \
+            echo "nvda - arm64" ; echo "Skipping pytorch installation for Jetson"; \
+        fi; \
+    else \
+        if [ "$ARCHITECTURE" = "linux/amd64" ] || [ "$ARCHITECTURE" = "x86_64" ] ; then \
+            echo "bare - x86_64"; sudo pip3 install torch==1.8.2 torchvision==0.9.2 torchaudio==0.8.2 --extra-index-url https://download.pytorch.org/whl/lts/1.8/cpu ; \
+        else \
+            echo "Architecture not supported"; exit 1; \
+        fi; \
+    fi

 # Install dependencies for MOCHA
 RUN sudo sh -c 'pip3 install zmq rospkg defusedxml utm'
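
Usage sketch: assuming the dcist-master image directory shown in this diff, local invocations of the updated build.bash would look roughly like the commands below; resulting images are tagged kumarrobotics/<directory>-<bare|nvda>:$(git describe --tags --long), while CI builds go through the workflow matrix above.

    # Bare x86_64 build (ubuntu:20.04 upstream, CPU-only PyTorch); also the default when no type is given
    ./build.bash dcist-master x86_64

    # CUDA-enabled x86_64 build (nvcr.io/nvidia/cuda:11.4.3-devel-ubuntu20.04 upstream)
    ./build.bash dcist-master x86_64_nvda

    # Jetson / arm64 build (L4T PyTorch upstream; the Dockerfile skips the PyTorch install)
    ./build.bash dcist-master arm64_nvda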