diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f56bf10..41d605d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,60 +1,82 @@
-# Automatically build multi-architectural tagged container images and push them to DockerHub
-# https://github.com/FNNDSC/cookiecutter-chrisapp/wiki/Automatic-Builds
+# Continuous integration testing for ChRIS Plugin.
+# https://github.com/FNNDSC/python-chrisapp-template/wiki/Continuous-Integration
 #
-# - targeted platforms: x86_64, PowerPC64, ARM64
-# - master is built as fnndsc/pl-pfdorun:latest
-# - tagged commits are built as fnndsc/pl-pfdorun:<tag>
-# - tagged commits are also uploaded to chrisstore.co
-#
-# In order to use this workflow, see
-# https://github.com/FNNDSC/cookiecutter-chrisapp/wiki/Automatic-Builds#steps-to-enable
+# - on push and PR: run pytest
+# - on push to main: build and push container images as ":latest"
+# - on push to semver tag: build and push container image with tag and
+#   upload plugin description to https://chrisstore.co

-name: ci
+name: build

 on:
   push:
-    # we have to guess what the name of the default branch is
-    branches: [ master, main, trunk ]
-    tags: [ '**' ]
+    branches: [ main ]
+    tags:
+      - "v?[0-9]+.[0-9]+.[0-9]+*"
   pull_request:
-    branches: [ master, main, trunk ]
+    branches: [ main ]

 jobs:
   test:
-    # delete the line below to enable automatic testing
-    if: false
-    runs-on: ubuntu-20.04
+    name: Unit tests
+    if: false  # delete this line to enable automatic testing
+    runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v2
-      - name: build
-        run: docker build -t "${GITHUB_REPOSITORY,,}" .
-      - name: nose tests
-        run: docker run "${GITHUB_REPOSITORY,,}" nosetests
+      - uses: actions/checkout@v3
+      - uses: docker/setup-buildx-action@v2
+      - name: Cache Docker layers
+        uses: actions/cache@v3
+        with:
+          path: /tmp/.buildx-cache
+          key: ${{ runner.os }}-buildx-${{ github.sha }}
+          restore-keys: |
+            ${{ runner.os }}-buildx-
+      - name: Build
+        uses: docker/build-push-action@v3
+        with:
+          build-args: extras_require=dev
+          context: .
+          load: true
+          push: false
+          tags: "localhost/local/app:dev"
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
+      - name: Run pytest
+        run: |
+          docker run -v "$GITHUB_WORKSPACE:/app:ro" -w /app localhost/local/app:dev \
+            pytest -o cache_dir=/tmp/pytest

-  publish:
+  build:
+    name: Build
     if: github.event_name == 'push' || github.event_name == 'release'
-    runs-on: ubuntu-20.04
-
-    # we want to both push the build to DockerHub, but also
-    # keep a local copy so that we can run
-    #
-    #     docker run fnndsc/pl-app app --json > App.json
-    #
-    # buildx currently does not support multiple output locations,
-    # neither can multi-architectural builds be loaded into docker.
-    # Here we use a local registry to cache the build.
-    services:
-      registry:
-        image: registry:2
-        ports:
-          - 5000:5000
+    # needs: [ test ]  # uncomment to require passing tests
+    runs-on: ubuntu-22.04
     steps:
+      - name: Stop docker
+        run: sudo systemctl stop docker
+      - name: Clean docker data
+        run: |
+          sudo rm -rf /var/lib/docker
+          sudo mkdir /var/lib/docker
+      - name: Maximize build space
+        uses: easimon/maximize-build-space@6ae56c86ea8db291ae39f62352a412c36ab8179b
+        with:
+          root-reserve-mb: 8192  # space needed for logs
+          swap-size-mb: 1  # must be >0
+          build-mount-path: /var/lib/docker
+          remove-dotnet: 'true'
+          remove-android: 'true'
+          remove-haskell: 'true'
+          remove-codeql: 'true'
+          remove-docker-images: 'false'
+      - name: Start docker
+        run: sudo systemctl start docker
       - name: Get git tag
         id: git_info
         if: startsWith(github.ref, 'refs/tags/')
-        run: echo "::set-output name=tag::${GITHUB_REF##*/}"
-      - name: Decide image tag name
+        run: echo "tag=${GITHUB_REF##*/}" >> $GITHUB_OUTPUT
+      - name: Get project info
         id: determine
         env:
           git_tag: ${{ steps.git_info.outputs.tag }}
@@ -62,51 +84,101 @@ jobs:
           repo="${GITHUB_REPOSITORY,,}"  # to lower case
           # if build triggered by tag, use tag name
           tag="${git_tag:-latest}"
+
+          # if tag is a version number prefixed by 'v', remove the 'v'
+          if [[ "$tag" =~ ^v[0-9].* ]]; then
+            tag="${tag:1}"
+          fi
+
           dock_image=$repo:$tag
           echo $dock_image
-          echo "::set-output name=dock_image::$dock_image"
-          echo "::set-output name=repo::$repo"
-      - uses: actions/checkout@v2
+          echo "dock_image=$dock_image" >> $GITHUB_OUTPUT
+          echo "repo=$repo" >> $GITHUB_OUTPUT

-      # QEMU is for emulating non-x86_64 platforms
-      - uses: docker/setup-qemu-action@v1
-      # buildx is the next-generation docker image builder
-      - uses: docker/setup-buildx-action@v1
+      - uses: actions/checkout@v3
+      # QEMU is used for non-x86_64 builds
+      - uses: docker/setup-qemu-action@v2
+      # buildx adds additional features to docker build
+      - uses: docker/setup-buildx-action@v2
         with:
           driver-opts: network=host
-      # save some time during rebuilds
+      # cache slightly improves rebuild time
       - name: Cache Docker layers
-        uses: actions/cache@v2
+        uses: actions/cache@v3
         with:
           path: /tmp/.buildx-cache
           key: ${{ runner.os }}-buildx-${{ github.sha }}
           restore-keys: |
             ${{ runner.os }}-buildx-
+
+      # Here, we want to do the docker build twice:
+      # The first build is loaded into the local Docker daemon for testing.
+      # The second build pushes to Docker Hub and ghcr.io
+      - name: Build (local only)
+        uses: docker/build-push-action@v3
+        id: docker_build
+        with:
+          context: .
+          file: ./Dockerfile
+          tags: localhost/${{ steps.determine.outputs.dock_image }}
+          load: true
+          cache-from: type=local,src=/tmp/.buildx-cache
+          cache-to: type=local,dest=/tmp/.buildx-cache
+      # If you have a directory called examples/incoming/ and examples/outgoing/, then
+      # run your ChRIS plugin with no parameters, and assert that it creates all the files
+      # which are expected. File contents are not compared.
+      - name: Run examples
+        id: run_examples
+        run: |
+          if ! [ -d 'examples/incoming/' ] || ! [ -d 'examples/outgoing/' ]; then
+            echo "No examples."
+            exit 0
+          fi
+
+          dock_image=localhost/${{ steps.determine.outputs.dock_image }}
+          output_dir=$(mktemp -d)
+          cmd=$(docker image inspect -f '{{ (index .Config.Cmd 0) }}' $dock_image)
+          docker run --rm -u "$(id -u):$(id -g)" \
+            -v "$PWD/examples/incoming:/incoming:ro" \
+            -v "$output_dir:/outgoing:rw" \
+            $dock_image $cmd /incoming /outgoing
+
+          for expected_file in $(find examples/outgoing -type f); do
+            fname="${expected_file##*/}"
+            out_path="$output_dir/$fname"
+            printf "Checking output %s exists..." "$out_path"
+            if [ -f "$out_path" ]; then
+              echo "ok"
+            else
+              echo "not found"
+              exit 1
+            fi
+          done
+
       - name: Login to DockerHub
         id: dockerhub_login
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
-
+
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
        with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GITHUB_TOKEN }}
-
       - name: Build and push
-        uses: docker/build-push-action@v2
-        id: docker_build
+        uses: docker/build-push-action@v3
+        if: github.event_name == 'push' || github.event_name == 'release'
         with:
           context: .
           file: ./Dockerfile
           tags: |
-            ${{ steps.determine.outputs.dock_image }}
-            localhost:5000/${{ steps.determine.outputs.dock_image }}
+            docker.io/${{ steps.determine.outputs.dock_image }}
             ghcr.io/${{ steps.determine.outputs.dock_image }}
-          platforms: linux/amd64
+          # if non-x86_64 architectures are supported, add them here
+          platforms: linux/amd64  #,linux/arm64,linux/ppc64le
           push: true
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache
@@ -116,47 +188,16 @@ jobs:
         run: |
           repo=${{ steps.determine.outputs.repo }}
           dock_image=${{ steps.determine.outputs.dock_image }}
-          docker pull localhost:5000/$dock_image
-          docker tag localhost:5000/$dock_image $dock_image
-          script=$(docker inspect --format '{{ (index .Config.Cmd 0) }}' $dock_image)
-          json="$(docker run --rm $dock_image $script --json)"
-          jq <<< "$json"  # pretty print in log
-          echo "::set-output name=json::$json"
-          echo "::set-output name=title::$(jq -r '.title' <<< "$json")"
+          docker run --rm localhost/$dock_image chris_plugin_info > /tmp/description.json
+          jq < /tmp/description.json  # pretty print in log
+          echo "title=$(jq -r '.title' < /tmp/description.json)" >> $GITHUB_OUTPUT
+
       - name: Update DockerHub description
-        uses: peter-evans/dockerhub-description@v2
+        uses: peter-evans/dockerhub-description@v3
         continue-on-error: true  # it is not crucial that this works
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_PASSWORD }}
           short-description: ${{ steps.pluginmeta.outputs.title }}
-          readme-filepath: ./README.rst
-          repository: ${{ steps.determine.outputs.repo }}
-
-      - name: Upload to ChRIS Store
-        if: "!endsWith(steps.determine.outputs.dock_image, ':latest')"
-        run: |
-          repo=${{ steps.determine.outputs.repo }}
-          dock_image=${{ steps.determine.outputs.dock_image }}
-          plname="$(sed 's/^.*\///' <<< $repo)" && echo "name=$plname"
-          descriptor_file=$(mktemp --suffix .json)
-          cat > $descriptor_file <<< '${{ steps.pluginmeta.outputs.json }}'
-          res=$(
-            curl -s -u "${{ secrets.CHRIS_STORE_USER }}" "https://chrisstore.co/api/v1/plugins/" \
-              -H 'Accept:application/vnd.collection+json' \
-              -F "name=$plname" \
-              -F "dock_image=$dock_image" \
-              -F "descriptor_file=@$descriptor_file" \
-              -F "public_repo=https://github.com/${{ github.repository }}"
-          )
-          success=$?
-          echo "::debug::$res"
-          if [ "$success" = "0" ]; then
-            href="$(jq -r '.collection.items[0].href' <<< "$res")"
-            echo $href
-            echo "::set-output name=pluginurl::$href"
-          else
-            echo "::error ::Failed upload to ChRIS Store"
-            echo "$res"
-            exit $success
-          fi
+          readme-filepath: ./README.md
+          repository: ${{ steps.determine.outputs.repo }}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 4b0dd93..d1a308f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -26,13 +26,13 @@
 #FROM fnndsc/ubuntu-python3:latest
 FROM tensorflow/tensorflow:latest-gpu-py3
 MAINTAINER fnndsc "dev@babymri.org"
+WORKDIR /usr/local/src

-ENV APPROOT="/usr/src/gpu_test"
-COPY ["gpu_test","${APPROOT}"]
-COPY ["requirements.txt", "${APPROOT}"]
-
-WORKDIR $APPROOT
+COPY requirements.txt .
 RUN pip install --upgrade pip
 RUN pip install -r requirements.txt
-ENTRYPOINT ["python3"]
-CMD ["gpu_test.py", "--help"]
+
+COPY . .
+RUN pip install .
+
+CMD ["gpu_test", "--help"]
diff --git a/gpu_test/__main__.py b/gpu_test/__main__.py
new file mode 100644
index 0000000..5a66ebb
--- /dev/null
+++ b/gpu_test/__main__.py
@@ -0,0 +1,10 @@
+from gpu_test.gpu_test import GPU_test
+
+
+def main():
+    chris_app = GPU_test()
+    chris_app.launch()
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/gpu_test/gpu_test.py b/gpu_test/gpu_test.py
index 38225c2..a1d8181 100755
--- a/gpu_test/gpu_test.py
+++ b/gpu_test/gpu_test.py
@@ -103,26 +103,17 @@ class GPU_test(ChrisApp):
     """
     An app to check the available GPUs.
     """
-    AUTHORS                 = 'Sandip Samal (sandip.samal@childrens.harvard.edu)'
-    SELFPATH                = os.path.dirname(os.path.abspath(__file__))
-    SELFEXEC                = os.path.basename(__file__)
-    EXECSHELL               = 'python3'
-    TITLE                   = 'An application to check the presence of GPU in the system'
-    CATEGORY                = 'N/A'
-    TYPE                    = 'ds'
-    DESCRIPTION             = 'An app to check the available GPUs'
-    DOCUMENTATION           = '...'
-    VERSION                 = '0.2.5'
-    ICON                    = ''  # url of an icon image
-    LICENSE                 = 'Opensource (MIT)'
-    MAX_NUMBER_OF_WORKERS   = 1  # Override with integer value
-    MIN_NUMBER_OF_WORKERS   = 1  # Override with integer value
-    MAX_CPU_LIMIT           = ''  # Override with millicore value as string, e.g. '2000m'
-    MIN_CPU_LIMIT           = '3000m'  # Override with millicore value as string, e.g. '2000m'
-    MAX_MEMORY_LIMIT        = ''  # Override with string, e.g. '1Gi', '2000Mi'
-    MIN_MEMORY_LIMIT        = '32000Mi'  # Override with string, e.g. '1Gi', '2000Mi'
-    MIN_GPU_LIMIT           = 0  # Override with the minimum number of GPUs, as an integer, for your plugin
-    MAX_GPU_LIMIT           = 0  # Override with the maximum number of GPUs, as an integer, for your plugin
+    PACKAGE                 = __package__
+    TITLE                   = 'Shows GPUs available in the compute env'
+    CATEGORY                = 'GPU'
+    TYPE                    = 'ds'
+    ICON                    = ''  # url of an icon image
+    MIN_NUMBER_OF_WORKERS   = 1  # Override with the minimum number of workers as int
+    MAX_NUMBER_OF_WORKERS   = 1  # Override with the maximum number of workers as int
+    MIN_CPU_LIMIT           = 2000  # Override with millicore value as int (1000 millicores == 1 CPU core)
+    MIN_MEMORY_LIMIT        = 32000  # Override with memory MegaByte (MB) limit as int
+    MIN_GPU_LIMIT           = 0  # Override with the minimum number of GPUs as int
+    MAX_GPU_LIMIT           = 0  # Override with the maximum number of GPUs as int

     # Use this dictionary structure to provide key-value output descriptive information
     # that may be useful for the next downstream plugin. For example:
@@ -166,9 +157,3 @@ def show_man_page(self):
         """
         Print the app's man page.
""" print(Gstr_synopsis) - - -# ENTRYPOINT -if __name__ == "__main__": - chris_app = GPU_test() - chris_app.launch() diff --git a/requirements.txt b/requirements.txt index 2512a0c..e03b486 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,2 @@ -chrisapp +chrisapp~=2.4.0 torch diff --git a/setup.py b/setup.py index 52d366c..f2bb031 100755 --- a/setup.py +++ b/setup.py @@ -1,37 +1,27 @@ - -import sys -import os - - -# Make sure we are running python3.5+ -if 10 * sys.version_info[0] + sys.version_info[1] < 35: - sys.exit("Sorry, only Python 3.5+ is supported.") - - +from os import path from setuptools import setup - -def readme(): - print("Current dir = %s" % os.getcwd()) - print(os.listdir()) - with open('README.rst') as f: - return f.read() +with open(path.join(path.dirname(path.abspath(__file__)), 'README.rst')) as f: + readme = f.read() setup( - name = 'gpu_test', - # for best practices make this version the same as the VERSION class variable - # defined in your ChrisApp-derived Python class - version = '0.2.5', - description = 'An app to check the available GPUs', - long_description = readme(), - author = 'Sandip Samal', - author_email = 'sandip.samal@childrens.harvard.edu', - url = '...', - packages = ['gpu_test'], - install_requires = ['chrisapp', 'pudb'], - test_suite = 'nose.collector', - tests_require = ['nose'], - scripts = ['gpu_test/gpu_test.py'], - license = 'MIT', - zip_safe = False - ) + name = 'gpu_test', + version = '1.0.0', + description = 'An app to check GPU availability', + long_description = readme, + author = 'Sandip Samal', + author_email = 'sandip.samal@childrens.harvard.edu', + url = 'https://github.com/FNNDSC/pl-pfdicom_tagSub', + packages = ['gpu_test'], + install_requires = ['chrisapp'], + test_suite = 'nose.collector', + tests_require = ['nose'], + license = 'MIT', + zip_safe = False, + python_requires = '>=3.6', + entry_points = { + 'console_scripts': [ + 'gpu_test = gpu_test.__main__:main' + ] + } +) \ No newline at end of file