From 777d8893efcc59d46b5b720747a4909e84e475d0 Mon Sep 17 00:00:00 2001
From: Karthik Prasad
Date: Mon, 4 Nov 2024 07:41:55 -0800
Subject: [PATCH] Delete CircleCI configs since GitHub Actions CI is now live

Summary: Cleanup following #620

Differential Revision: D53202231
---
 .circleci/config.yml        | 517 ------------------------------------
 .circleci/flake8_config.ini | 119 ---------
 2 files changed, 636 deletions(-)
 delete mode 100644 .circleci/config.yml
 delete mode 100644 .circleci/flake8_config.ini

diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index 99f1f43f..00000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,517 +0,0 @@
-version: 2.1
-
-# -------------------------------------------------------------------------------------
-# Commands
-# -------------------------------------------------------------------------------------
-
-commands:
-
-  py_3_9_setup:
-    description: "Install and switch to Python 3.9; also install pip and pytest."
-    steps:
-      - run:
-          name: "Setup Python v3.9 environment"
-          command: |
-            cd /opt/circleci/.pyenv && git pull && cd -
-            pyenv install -s 3.9.4
-            pyenv global 3.9.4
-            pyenv local 3.9.4
-            pyenv versions
-            echo "In venv: $(pyenv local) - $(python -V), $(pip -V)"
-            sudo "$(which python)" -m pip install --upgrade pip
-            sudo "$(which python)" -m pip install pytest
-            sudo "$(which python)" -m pip install coverage
-            sudo "$(which python)" -m pip install coveralls
-
-  run_nvidia_smi:
-    description: "Prints GPU capabilities from nvidia-smi"
-    steps:
-      - run:
-          name: "Run Nvidia-SMI"
-          command: |
-            nvidia-smi
-
-  pip_dev_install:
-    description: "Install dependencies via pip, including extra deps. Also supports more options, such as building on top of PyTorch nightly."
-    parameters:
-      args:
-        type: string
-        default: ""
-    steps:
-      - run:
-          name: "Install dependencies via pip"
-          command: ./scripts/install_via_pip.sh << parameters.args >>
-
-  lint_flake8:
-    description: "Lint with flake8"
-    steps:
-      - run:
-          name: "Lint with flake8"
-          command: flake8 --config ./.circleci/flake8_config.ini
-
-  lint_black:
-    description: "Lint with black"
-    steps:
-      - run:
-          name: "Lint with black"
-          command: black --check --diff --color .
-
-  isort:
-    description: "Check import order with isort"
-    steps:
-      - run:
-          name: "Check import order with isort"
-          command: isort -v -l 88 -o opacus --lines-after-imports 2 -m 3 --trailing-comma --check-only .
-
-  unit_tests:
-    description: "Run unit tests"
-    steps:
-      - run:
-          name: "Unit tests & doctests"
-          no_output_timeout: 1h
-          command: |
-            mkdir unittest-reports
-            coverage run -m pytest --doctest-modules -p conftest --junitxml=unittest-reports/junit.xml opacus
-            coverage report -i -m
-
-      - store_test_results:
-          path: unittest-reports
-      - store_artifacts:
-          path: unittest-reports
-
-  command_unit_tests_multi_gpu:
-    description: "Run multi gpu unit tests"
-    steps:
-      - run:
-          name: "Unit test multi_gpu"
-          no_output_timeout: 1h
-          command: |
-            mkdir unittest-multigpu-reports
-            coverage run -m unittest opacus.tests.multigpu_gradcheck.GradientComputationTest.test_gradient_correct
-            coverage report -i -m
-
-  coveralls_upload_parallel:
-    description: "upload coverage to coveralls"
-    steps:
-      - run:
-          name: "coveralls upload"
-          no_output_timeout: 5m
-          command: |
-            pip install coveralls --user
-            COVERALLS_PARALLEL=true COVERALLS_FLAG_NAME="${CIRCLE_JOB}" coveralls
-
-  mnist_integration_test:
-    description: "Runs MNIST example end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-    steps:
-      - run:
-          name: MNIST example
-          command: |
-            mkdir -p runs/mnist/data
-            mkdir -p runs/mnist/test-reports
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            python examples/mnist.py --lr 0.25 --sigma 0.7 -c 1.5 --batch-size 64 --epochs 1 --data-root runs/mnist/data --n-runs 1 --device << parameters.device >>
-            python -c "import torch; accuracy = torch.load('run_results_mnist_0.25_0.7_1.5_64_1.pt'); exit(0) if (accuracy[0]>0.78 and accuracy[0]<0.95) else exit(1)"
-          when: always
-      - store_test_results:
-          path: runs/mnist/test-reports
-      - store_artifacts:
-          path: runs/mnist/test-reports
-
-  mnist_lightning_integration_test:
-    description: "Runs MNIST-Lightning example end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-    steps:
-      - run:
-          name: MNIST-Lightning example
-          command: |
-            mkdir -p runs/mnist/data
-            mkdir -p runs/mnist/test-reports
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            python examples/mnist_lightning.py fit --trainer.accelerator << parameters.device >> --model.lr 0.25 --model.sigma 0.7 --model.max_per_sample_grad_norm 1.5 --model.sample_rate 0.004 --trainer.max_epochs 1 --data.data_dir runs/mnist/data --data.sample_rate 0.004
-            python -c "import torch; exit(0)"
-          when: always
-      - store_test_results:
-          path: runs/mnist-lightning/test-reports
-      - store_artifacts:
-          path: runs/mnist-lightning/test-reports
-
-  cifar10_integration_test:
-    description: "Runs CIFAR10 example end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-    steps:
-      - run:
-          name: CIFAR10 example
-          command: |
-            mkdir -p runs/cifar10/data
-            mkdir -p runs/cifar10/logs
-            mkdir -p runs/cifar10/test-reports
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            pip install tensorboard
-            python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device << parameters.device >>
-            python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
-            python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device << parameters.device >> --grad_sample_mode no_op
-            python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
-          when: always
-      - store_test_results:
-          path: runs/cifar10/test-reports
-      - store_artifacts:
-          path: runs/cifar10/test-reports
-
-  dcgan_integration_test:
-    description: "Runs dcgan example end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-    steps:
-      - run:
-          name: dcgan example
-          command: |
-            mkdir -p runs/dcgan/data
-            mkdir -p runs/dcgan/test-reports
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            python examples/dcgan.py --lr 2e-4 --sigma 0.7 -c 1.5 --batch-size 32 --epochs 1 --data-root runs/dcgan/data --device << parameters.device >>
-          when: always
-      - store_test_results:
-          path: runs/dcgan/test-reports
-      - store_artifacts:
-          path: runs/dcgan/test-reports
-
-  imdb_integration_test:
-    description: "Runs imdb example end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-    steps:
-      - run:
-          name: imdb example
-          command: |
-            mkdir -p runs/imdb/data
-            mkdir -p runs/imdb/test-reports
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            pip install --user datasets transformers
-            python examples/imdb.py --lr 0.02 --sigma 1.0 -c 1.0 --batch-size 64 --max-sequence-length 256 --epochs 2 --data-root runs/imdb/data --device << parameters.device >>
-            python -c "import torch; accuracy = torch.load('run_results_imdb_classification.pt'); exit(0) if (accuracy>0.54 and accuracy<0.66) else exit(1)"
-          when: always
-      - store_test_results:
-          path: runs/imdb/test-reports
-      - store_artifacts:
-          path: runs/imdb/test-reports
-
-  charlstm_integration_test:
-    description: "Runs charlstm example end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-    steps:
-      - run:
-          name: charlstm example
-          command: |
-            mkdir -p runs/charlstm/data
-            wget https://download.pytorch.org/tutorial/data.zip -O runs/charlstm/data/data.zip
-            unzip runs/charlstm/data/data.zip -d runs/charlstm/data
-            rm runs/charlstm/data/data.zip
-            mkdir -p runs/charlstm/test-reports
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            pip install scikit-learn
-            python examples/char-lstm-classification.py --epochs=20 --learning-rate=2.0 --hidden-size=128 --delta=8e-5 --batch-size 400 --n-layers=1 --sigma=1.0 --max-per-sample-grad-norm=1.5 --data-root="runs/charlstm/data/data/names/" --device=<< parameters.device >> --test-every 5
-            python -c "import torch; accuracy = torch.load('run_results_chr_lstm_classification.pt'); exit(0) if (accuracy>0.60 and accuracy<0.80) else exit(1)"
-          when: always
-      - store_test_results:
-          path: runs/charlstm/test-reports
-      - store_artifacts:
-          path: runs/charlstm/test-reports
-
-  benchmark_layers_integration_test:
-    description: "Runs benchmark end to end"
-    parameters:
-      device:
-        default: "cpu"
-        type: string
-      layers:
-        type: string
-      grad_sample_modes:
-        default: "baseline hooks"
-        type: string
-      report_column:
-        default: "hooks/baseline"
-        type: string
-      runtime_ratio_threshold:
-        type: string
-      memory_ratio_threshold:
-        type: string
-    steps:
-      - run:
-          name: benchmarks
-          command: |
-            mkdir -p benchmarks/results/raw
-            echo "Using $(python -V) ($(which python))"
-            echo "Using $(pip -V) ($(which pip))"
-            python benchmarks/run_benchmarks.py --batch_size 16 --layers << parameters.layers >> --config_file ./benchmarks/config.json --root ./benchmarks/results/raw/ --cont
-            IFS=$' ';layers=(<< parameters.layers >>); rm -rf /tmp/report_layers; mkdir -p /tmp/report_layers; IFS=$'\n'; files=`( echo "${layers[*]}" ) | sed 's/.*/.\/benchmarks\/results\/raw\/&*/'`
-            cp -v ${files[@]} /tmp/report_layers
-            report_id=`IFS=$'-'; echo "${layers[*]}"`
-            python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.csv --format csv
-            python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.pkl --format pkl
-
-            python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric runtime --threshold << parameters.runtime_ratio_threshold >> --column << parameters.report_column >>
-          when: always
-      - store_artifacts:
-          path: benchmarks/results/
-# -------------------------------------------------------------------------------------
-# Jobs
-# -------------------------------------------------------------------------------------
-
-jobs:
-
-  lint_py39_torch_release:
-    docker:
-      - image: cimg/python:3.9
-    steps:
-      - checkout
-      - pip_dev_install
-      - lint_flake8
-      - lint_black
-      - isort
-
-  unittest_py38_torch_release:
-    docker:
-      - image: cimg/python:3.8
-    steps:
-      - checkout
-      - pip_dev_install
-      - unit_tests
-
-  unittest_py39_torch_release:
-    docker:
-      - image: cimg/python:3.9
-    steps:
-      - checkout
-      - pip_dev_install
-      - unit_tests
-
-  unittest_py39_torch_nightly:
-    docker:
-      - image: cimg/python:3.9
-    steps:
-      - checkout
-      - pip_dev_install:
-          args: "-n"
-      - unit_tests
-
-  prv_accountant_values:
-    docker:
-      - image: cimg/python:3.9
-    steps:
-      - checkout
-      - py_3_9_setup
-      - pip_dev_install
-      - run:
-          name: "Unit test prv accountant"
-          no_output_timeout: 1h
-          command: |
-            python -m unittest opacus.tests.prv_accountant
-
-  integrationtest_py39_torch_release_cpu:
-    docker:
-      - image: cimg/python:3.9
-    steps:
-      - checkout
-      - py_3_9_setup
-      - pip_dev_install
-      - mnist_integration_test:
-          device: "cpu"
-
-  integrationtest_py39_torch_release_cuda:
-    machine:
-      resource_class: gpu.nvidia.small.multi
-      image: linux-cuda-12:default
-    steps:
-      - checkout
-      - py_3_9_setup
-      - pip_dev_install
-      - run_nvidia_smi
-      - mnist_integration_test:
-          device: "cuda"
-      - cifar10_integration_test:
-          device: "cuda"
-      - imdb_integration_test:
-          device: "cuda"
-      - charlstm_integration_test:
-          device: "cuda"
-      - dcgan_integration_test:
-          device: "cuda"
-
-  micro_benchmarks_py39_torch_release_cuda:
-    machine:
-      resource_class: gpu.nvidia.small.multi
-      image: linux-cuda-12:default
-    steps:
-      - checkout
-      - py_3_9_setup
-      - pip_dev_install
-      - run_nvidia_smi
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "groupnorm instancenorm layernorm"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "3.0"
-          memory_ratio_threshold: "1.6"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "linear"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "3.6"
-          memory_ratio_threshold: "13.0"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "mha dpmha"
-          report_column: "dp_baseline/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "3.0"
-          memory_ratio_threshold: "1.6"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "mha dpmha"
-          report_column: "dp_hooks/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "3.5"
-          memory_ratio_threshold: "2.0"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "gru dpgru"
-          report_column: "dp_baseline/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "55.2"
-          memory_ratio_threshold: "1.2"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "gru dpgru"
-          report_column: "dp_hooks/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "140"
-          memory_ratio_threshold: "1.6"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "lstm dplstm"
-          report_column: "dp_baseline/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "48.6"
-          memory_ratio_threshold: "1.2"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "lstm dplstm"
-          report_column: "dp_hooks/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "126.0"
-          memory_ratio_threshold: "1.8"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "rnn dprnn"
-          report_column: "dp_baseline/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "21.4"
-          memory_ratio_threshold: "1.2"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "rnn dprnn"
-          report_column: "dp_hooks/baseline"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "98.5"
-          memory_ratio_threshold: "1.2"
-      - benchmark_layers_integration_test:
-          device: "cuda"
-          layers: "embedding"
-          grad_sample_modes: "baseline hooks"
-          runtime_ratio_threshold: "8.0"
-          memory_ratio_threshold: "15.0"
-
-  unittest_multi_gpu:
-    machine:
-      resource_class: gpu.nvidia.medium.multi
-      image: linux-cuda-12:default
-    steps:
-      - checkout
-      - py_3_9_setup
-      - pip_dev_install
-      - run_nvidia_smi
-      - command_unit_tests_multi_gpu
-
-  finish_coveralls_parallel:
-    docker:
-      - image: cimg/python:3.9
-    steps:
-      - run:
-          name: "finish coveralls parallel"
-          no_output_timeout: 5m
-          command: |
-            pip install coveralls --user
-            coveralls --finish
-
-
-aliases:
-
-  - &exclude_ghpages
-    branches:
-      ignore:
-        - gh-pages
-
-# -------------------------------------------------------------------------------------
-# Workflows
-# -------------------------------------------------------------------------------------
-
-workflows:
-  commit:
-    when:
-      not:
-        equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
-    jobs:
-      - lint_py39_torch_release:
-          filters: *exclude_ghpages
-      - unittest_py38_torch_release:
-          filters: *exclude_ghpages
-      - unittest_py39_torch_release:
-          filters: *exclude_ghpages
-      - unittest_py39_torch_nightly:
-          filters: *exclude_ghpages
-      - unittest_multi_gpu:
-          filters: *exclude_ghpages
-      - integrationtest_py39_torch_release_cpu:
-          filters: *exclude_ghpages
-      - integrationtest_py39_torch_release_cuda:
-          filters: *exclude_ghpages
-      - prv_accountant_values:
-          filters: *exclude_ghpages
-
-  nightly:
-    when:
-      equal: [ scheduled_pipeline, << pipeline.trigger_source >> ]
-    jobs:
-      - unittest_py39_torch_nightly:
-          filters: *exclude_ghpages
-      - integrationtest_py39_torch_release_cpu:
-          filters: *exclude_ghpages
-      - integrationtest_py39_torch_release_cuda:
-          filters: *exclude_ghpages
-      - lint_py39_torch_release:
-          filters: *exclude_ghpages
-      - micro_benchmarks_py39_torch_release_cuda:
-          filters: *exclude_ghpages
diff --git a/.circleci/flake8_config.ini b/.circleci/flake8_config.ini
deleted file mode 100644
index 988ed7f3..00000000
--- a/.circleci/flake8_config.ini
+++ /dev/null
@@ -1,119 +0,0 @@
-[flake8]
-select = B,C,E,F,P,W,B9
-max-line-length = 80
-# Main Explanation Docs: https://github.com/grantmcconnaughey/Flake8Rules
-ignore =
-    # Black conflicts and overlaps.
-    # Found in https://github.com/psf/black/issues/429
-    # B950: Line too long. (Use `arc lint`'s LINEWRAP instead)
-    B950,
-    # E111: Indentation is not a multiple of four.
-    E111,
-    # E115: Expected an indented block (comment).
-    E115,
-    # E117: Over-indented.
-    E117,
-    # E121: Continuation line under-indented for hanging indent.
-    E121,
-    # E122: Continuation line missing indentation or outdented.
-    E122,
-    # E123: Closing bracket does not match indentation of opening bracket's line.
-    E123,
-    # E124: Closing bracket does not match visual indentation.
-    E124,
-    # E125: Continuation line with same indent as next logical line.
-    E125,
-    # E126: Continuation line over-indented for hanging indent.
-    E126,
-    # E127: Continuation line over-indented for visual indent.
-    E127,
-    # E128: Continuation line under-indented for visual indent.
-    E128,
-    # E129: Visually indented line with same indent as next logical line.
-    E129,
-    # E201: Whitespace after '('.
-    E201,
-    # E202: Whitespace before ')'.
-    E202,
-    # E203: Whitespace before ':'.
-    E203,
-    # E221: Multiple spaces before operator.
-    E221,
-    # E222: Multiple spaces after operator.
-    E222,
-    # E225: Missing whitespace around operator.
-    E225,
-    # E226: Missing whitespace around arithmetic operator.
-    E226,
-    # E227: Missing whitespace around bitwise or shift operator.
-    E227,
-    # E231: Missing whitespace after ',', ';', or ':'.
-    E231,
-    # E241: Multiple spaces after ','.
-    E241,
-    # E251: Unexpected spaces around keyword / parameter equals.
-    E251,
-    # E261: At least two spaces before inline comment.
-    E261,
-    # E262: Inline comment should start with '# '.
-    E262,
-    # E265: Block comment should start with '# '.
-    E265,
-    # E271: Multiple spaces after keyword.
-    E271,
-    # E272: Multiple spaces before keyword.
-    E272,
-    # E301: Expected 1 blank line, found 0.
-    E301,
-    # E302: Expected 2 blank lines, found 0.
-    E302,
-    # E303: Too many blank lines (3).
-    E303,
-    # E305: Expected 2 blank lines after end of function or class.
-    E305,
-    # E306: Expected 1 blank line before a nested definition.
-    E306,
-    # E501: Line too long (82 > 79 characters).
-    E501,
-    # E502: The backslash is redundant between brackets.
-    E502,
-    # E701: Multiple statements on one line (colon).
-    E701,
-    # E702: Multiple statements on one line (semicolon).
-    E702,
-    # E703: Statement ends with a semicolon.
-    E703,
-    # E704: Multiple statements on one line (def).
-    E704,
-    # W291: Trailing whitespace.
-    W291,
-    # W292: No newline at end of file.
-    W292,
-    # W293: Blank line contains whitespace.
-    W293,
-    # W391: Blank line at end of file.
-    W391,
-
-    # Too opinionated.
-    # E265: Block comment should start with '# '.
-    E265,
-    # E266: Too many leading '#' for block comment.
-    E266,
-    # E402: Module level import not at top of file.
-    E402,
-    # E722: Do not use bare except, specify exception instead. (Duplicate of B001)
-    E722,
-    # F811: Redefinition of unused name from line n.
-    F811,
-    # P207: (Duplicate of B003)
-    P207,
-    # P208: (Duplicate of C403)
-    P208,
-    # W503: Line break occurred before a binary operator.
-    W503
-
-exclude =
-    .hg,
-    __pycache__,
-
-max-complexity = 12
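
Editor's note: the deleted CircleCI jobs above are superseded by GitHub Actions workflows introduced in #620, which live under .github/workflows/ in the repository. For readers unfamiliar with the migration target, the sketch below shows how the lint_py39_torch_release and unittest_py39_torch_release jobs map onto GitHub Actions syntax. This is an illustrative sketch only, not the merged workflow: the workflow name, job names, action versions, and flake8 invocation are assumptions; only install_via_pip.sh and the lint/test commands are taken from the config deleted above.

# Hypothetical GitHub Actions equivalent (sketch, not the workflow added in #620)
name: CI (sketch)

on:
  push:
    branches-ignore:
      - gh-pages
  pull_request:

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.9"
      - name: Install dependencies
        run: ./scripts/install_via_pip.sh
      - name: Lint
        # flake8 config location is assumed; the deleted .circleci/flake8_config.ini no longer exists
        run: |
          flake8 .
          black --check --diff --color .
          isort -l 88 -o opacus --lines-after-imports 2 -m 3 --trailing-comma --check-only .

  unittest:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.9"
      - name: Install dependencies
        run: ./scripts/install_via_pip.sh
      - name: Unit tests & doctests
        run: |
          mkdir unittest-reports
          coverage run -m pytest --doctest-modules -p conftest --junitxml=unittest-reports/junit.xml opacus
          coverage report -i -m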