diff --git a/.github/workflows/ci_cpu.yml b/.github/workflows/ci_cpu.yml new file mode 100644 index 00000000..bc2aedc3 --- /dev/null +++ b/.github/workflows/ci_cpu.yml @@ -0,0 +1,170 @@ +name: CI_CPU + +on: + push: + branches: + - main + pull_request: + branches: + - main + schedule: + - cron: '4 4 * * *' # This schedule runs the nightly job every night at 4:04AM + + +jobs: + ########### LINT ############## + lint_py39_torch_release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 black isort + ./scripts/install_via_pip.sh + - name: Lint with flake8 + run: flake8 --config ./.github/workflows/flake8_config.ini + - name: Lint with black + run: black --check --diff --color . + - name: Check import order with isort + run: isort -v -l 88 -o opacus --lines-after-imports 2 -m 3 --trailing-comma --check-only . 
+ + ########### UNIT TESTS ############## + unittest_py38_torch_release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest coverage coveralls + ./scripts/install_via_pip.sh + - name: Run unit tests + run: | + mkdir unittest-py38-release-reports + coverage run -m pytest --doctest-modules -p conftest --junitxml=unittest-py38-release-reports/junit.xml opacus + coverage report -i -m + - name: Store test results + uses: actions/upload-artifact@v2 + with: + name: unittest-py38-release-reports + path: unittest-py38-release-reports + + unittest_py39_torch_release: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest coverage coveralls + ./scripts/install_via_pip.sh + - name: Run unit tests + run: | + mkdir unittest-py39-release-reports + coverage run -m pytest --doctest-modules -p conftest --junitxml=unittest-py39-release-reports/junit.xml opacus + coverage report -i -m + - name: Store test results + uses: actions/upload-artifact@v2 + with: + name: unittest-py39-release-reports + path: unittest-py39-release-reports + + prv_accountant_values: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + ./scripts/install_via_pip.sh + - name: Run prv accountant unit tests + run: | + python -m unittest opacus.tests.prv_accountant + + ########### NIGHTLY TEST ############## + unittest_py39_torch_nightly: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'schedule' }} + 
steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest coverage coveralls + ./scripts/install_via_pip.sh -n + - name: Run unit tests + run: | + mkdir unittest-py39-nightly-reports + python -m pytest --doctest-modules -p conftest --junitxml=unittest-py39-nightly-reports/junit.xml opacus + - name: Store test results + uses: actions/upload-artifact@v2 + with: + name: unittest-py39-nightly-reports + path: unittest-py39-nightly-reports + + ########### INTEGRATION TEST ############## + integrationtest_py39_torch_release_cpu: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest coverage coveralls + ./scripts/install_via_pip.sh + - name: Run MNIST integration test (CPU) + run: | + mkdir -p runs/mnist/data + mkdir -p runs/mnist/test-reports + coverage run examples/mnist.py --lr 0.25 --sigma 0.7 -c 1.5 --batch-size 64 --epochs 1 --data-root runs/mnist/data --n-runs 1 --device cpu + python -c "import torch; accuracy = torch.load('run_results_mnist_0.25_0.7_1.5_64_1.pt'); exit(0) if (accuracy[0]>0.78 and accuracy[0]<0.95) else exit(1)" + coverage report -i -m + - name: Store test results + uses: actions/upload-artifact@v2 + with: + name: mnist-cpu-reports + path: runs/mnist/test-reports + + ######## FINISH COVERALLS ########## + finish_coveralls_parallel: + needs: [unittest_py38_torch_release, unittest_py39_torch_release, integrationtest_py39_torch_release_cpu] + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Finish Coveralls Parallel + uses: coverallsapp/github-action@v2 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + parallel-finished: 
true diff --git a/.github/workflows/ci_gpu.yml b/.github/workflows/ci_gpu.yml new file mode 100644 index 00000000..90dca200 --- /dev/null +++ b/.github/workflows/ci_gpu.yml @@ -0,0 +1,174 @@ +name: CI_GPU + +on: + push: + branches: + - main + pull_request: + branches: + - main + +jobs: + unittest_multi_gpu: + runs-on: linux.4xlarge.nvidia.gpu + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + + - name: Install dependencies + run: | + ./scripts/install_via_pip.sh -c + + - name: Run multi-GPU unit tests + run: | + nvidia-smi + nvcc --version + python -m unittest opacus.tests.multigpu_gradcheck.GradientComputationTest.test_gradient_correct + + + integrationtest_py39_torch_release_cuda: + runs-on: ubuntu-latest + container: + # https://hub.docker.com/r/nvidia/cuda + image: nvidia/cuda:12.3.1-base-ubuntu22.04 + options: --gpus all + env: + TZ: 'UTC' + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest coverage coveralls + ./scripts/install_via_pip.sh -c + + - name: Install CUDA toolkit and cuDNN + run: | + apt-get update + apt-get install -y --no-install-recommends \ + cuda-toolkit-11-1 \ + libcudnn8=8.1.1.33-1+cuda11.1 \ + libcudnn8-dev=8.1.1.33-1+cuda11.1 + + - name: Run MNIST integration test (CUDA) + run: | + mkdir -p runs/mnist/data + mkdir -p runs/mnist/test-reports + python examples/mnist.py --lr 0.25 --sigma 0.7 -c 1.5 --batch-size 64 --epochs 1 --data-root runs/mnist/data --n-runs 1 --device cuda + python -c "import torch; accuracy = torch.load('run_results_mnist_0.25_0.7_1.5_64_1.pt'); exit(0) if (accuracy[0]>0.78 and accuracy[0]<0.95) else exit(1)" + + - name: Store MNIST test results + uses: actions/upload-artifact@v2 + with: + name: mnist-gpu-reports + path: runs/mnist/test-reports 
+ + - name: Run CIFAR10 integration test (CUDA) + run: | + mkdir -p runs/cifar10/data + mkdir -p runs/cifar10/logs + mkdir -p runs/cifar10/test-reports + pip install tensorboard + python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda + python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)" + python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda --grad_sample_mode no_op + python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)" + + - name: Store CIFAR10 test results + uses: actions/upload-artifact@v2 + with: + name: cifar10-gpu-reports + path: runs/cifar10/test-reports + + - name: Run IMDb integration test (CUDA) + run: | + mkdir -p runs/imdb/data + mkdir -p runs/imdb/test-reports + pip install --user datasets transformers + python examples/imdb.py --lr 0.02 --sigma 1.0 -c 1.0 --batch-size 64 --max-sequence-length 256 --epochs 2 --data-root runs/imdb/data --device cuda + python -c "import torch; accuracy = torch.load('run_results_imdb_classification.pt'); exit(0) if (accuracy>0.54 and accuracy<0.66) else exit(1)" + + - name: Store IMDb test results + uses: actions/upload-artifact@v2 + with: + name: imdb-gpu-reports + path: runs/imdb/test-reports + + - name: Run charlstm integration test (CUDA) + run: | + mkdir -p runs/charlstm/data + wget https://download.pytorch.org/tutorial/data.zip -O runs/charlstm/data/data.zip + unzip runs/charlstm/data/data.zip -d runs/charlstm/data + rm runs/charlstm/data/data.zip + mkdir -p runs/charlstm/test-reports + pip install scikit-learn + python examples/char-lstm-classification.py --epochs=20 --learning-rate=2.0 --hidden-size=128 --delta=8e-5 --batch-size 400 
--n-layers=1 --sigma=1.0 --max-per-sample-grad-norm=1.5 --data-root="runs/charlstm/data/data/names/" --device cuda --test-every 5 + python -c "import torch; accuracy = torch.load('run_results_chr_lstm_classification.pt'); exit(0) if (accuracy>0.60 and accuracy<0.80) else exit(1)" + + - name: Store test results + uses: actions/upload-artifact@v2 + with: + name: charlstm-gpu-reports + path: runs/charlstm/test-reports + + micro_benchmarks_py39_torch_release_cuda: + runs-on: ubuntu-latest + needs: [integrationtest_py39_torch_release_cuda] + container: + # https://hub.docker.com/r/nvidia/cuda + image: nvidia/cuda:12.3.1-base-ubuntu22.04 + options: --gpus all + env: + TZ: 'UTC' + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest coverage coveralls + ./scripts/install_via_pip.sh + + - name: Install CUDA toolkit and cuDNN + run: | + apt-get update + apt-get install -y --no-install-recommends \ + cuda-toolkit-11-1 \ + libcudnn8=8.1.1.33-1+cuda11.1 \ + libcudnn8-dev=8.1.1.33-1+cuda11.1 + + - name: Run benchmark integration tests (CUDA) + run: | + mkdir -p benchmarks/results/raw + python benchmarks/run_benchmarks.py --batch_size 16 --layers "groupnorm instancenorm layernorm" --config_file ./benchmarks/config.json --root ./benchmarks/results/raw/ --cont + IFS=$' ';layers=("groupnorm" "instancenorm" "layernorm"); rm -rf /tmp/report_layers; mkdir -p /tmp/report_layers; IFS=$'\n'; files=`( echo "${layers[*]}" ) | sed 's/.*/.\/benchmarks\/results\/raw\/&*/'` + cp -v ${files[@]} /tmp/report_layers + report_id=`IFS=$'-'; echo "${layers[*]}"` + python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.csv --format csv + python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path 
benchmarks/results/report-${report_id}.pkl --format pkl + python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric runtime --threshold 3.0 --column "hooks/baseline" + python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric memory --threshold 1.6 --column "hooks/baseline" + + - name: Store artifacts + uses: actions/upload-artifact@v2 + with: + name: benchmarks-reports + path: benchmarks/results/ diff --git a/.github/workflows/flake8_config.ini b/.github/workflows/flake8_config.ini new file mode 100644 index 00000000..988ed7f3 --- /dev/null +++ b/.github/workflows/flake8_config.ini @@ -0,0 +1,119 @@ +[flake8] +select = B,C,E,F,P,W,B9 +max-line-length = 80 +# Main Explanation Docs: https://github.com/grantmcconnaughey/Flake8Rules +ignore = + # Black conflicts and overlaps. + # Found in https://github.com/psf/black/issues/429 + # B950: Line too long. (Use `arc lint`'s LINEWRAP instead) + B950, + # E111: Indentation is not a multiple of four. + E111, + # E115: Expected an indented block (comment). + E115, + # E117: Over-indented. + E117, + # E121: Continuation line under-indented for hanging indent. + E121, + # E122: Continuation line missing indentation or outdented. + E122, + # E123: Closing bracket does not match indentation of opening bracket's line. + E123, + # E124: Closing bracket does not match visual indentation. + E124, + # E125: Continuation line with same indent as next logical line. + E125, + # E126: Continuation line over-indented for hanging indent. + E126, + # E127: Continuation line over-indented for visual indent. + E127, + # E128: Continuation line under-indented for visual indent. + E128, + # E129: Visually indented line with same indent as next logical line. + E129, + # E201: Whitespace after '('. + E201, + # E202: Whitespace before ')'. + E202, + # E203: Whitespace before ':'. + E203, + # E221: Multiple spaces before operator. 
+ E221, + # E222: Multiple spaces after operator. + E222, + # E225: Missing whitespace around operator. + E225, + # E226: Missing whitespace around arithmetic operator. + E226, + # E227: Missing whitespace around bitwise or shift operator. + E227, + # E231: Missing whitespace after ',', ';', or ':'. + E231, + # E241: Multiple spaces after ','. + E241, + # E251: Unexpected spaces around keyword / parameter equals. + E251, + # E261: At least two spaces before inline comment. + E261, + # E262: Inline comment should start with '# '. + E262, + # E265: Block comment should start with '# '. + E265, + # E271: Multiple spaces after keyword. + E271, + # E272: Multiple spaces before keyword. + E272, + # E301: Expected 1 blank line, found 0. + E301, + # E302: Expected 2 blank lines, found 0. + E302, + # E303: Too many blank lines (3). + E303, + # E305: Expected 2 blank lines after end of function or class. + E305, + # E306: Expected 1 blank line before a nested definition. + E306, + # E501: Line too long (82 > 79 characters). + E501, + # E502: The backslash is redundant between brackets. + E502, + # E701: Multiple statements on one line (colon). + E701, + # E702: Multiple statements on one line (semicolon). + E702, + # E703: Statement ends with a semicolon. + E703, + # E704: Multiple statements on one line (def). + E704, + # W291: Trailing whitespace. + W291, + # W292: No newline at end of file. + W292, + # W293: Blank line contains whitespace. + W293, + # W391: Blank line at end of file. + W391, + + # Too opinionated. + # E265: Block comment should start with '# '. + E265, + # E266: Too many leading '#' for block comment. + E266, + # E402: Module level import not at top of file. + E402, + # E722: Do not use bare except, specify exception instead. (Duplicate of B001) + E722, + # F811: Redefinition of unused name from line n. + F811, + # P207: (Duplicate of B003) + P207, + # P208: (Duplicate of C403) + P208, + # W503: Line break occurred before a binary operator. 
+ W503 + +exclude = + .hg, + __pycache__, + +max-complexity = 12 diff --git a/examples/dcgan.py b/examples/dcgan.py index c1065f12..585e145a 100644 --- a/examples/dcgan.py +++ b/examples/dcgan.py @@ -201,7 +201,7 @@ def __init__(self, ngpu): nn.ReLU(True), # state size. (ngf) x 32 x 32 nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), - nn.Tanh() + nn.Tanh(), # state size. (nc) x 64 x 64 ) diff --git a/opacus/accountants/analysis/prv/domain.py b/opacus/accountants/analysis/prv/domain.py index 5e86502d..220dbbd9 100644 --- a/opacus/accountants/analysis/prv/domain.py +++ b/opacus/accountants/analysis/prv/domain.py @@ -12,6 +12,7 @@ class Domain: Stores relevant information about the domain on which PRVs are discretized, and includes a few convenience methods for manipulating it. """ + t_min: float t_max: float size: int diff --git a/opacus/layers/dp_rnn.py b/opacus/layers/dp_rnn.py index 5f5c6819..405ec70b 100644 --- a/opacus/layers/dp_rnn.py +++ b/opacus/layers/dp_rnn.py @@ -405,9 +405,12 @@ def forward( for direction, (cell, h0, c0) in directions: # apply single direction layer (with dropout) out_layer, h, c = self.forward_layer( - x - if layer == 0 - else output, # [T, B, D/H/2H] / tuple T x [B, D/H/2H] + ( + x + if layer == 0 + else output + # [T, B, D/H/2H] / tuple T x [B, D/H/2H] + ), h0, # [B, H] c0, batch_sizes, diff --git a/opacus/privacy_engine.py b/opacus/privacy_engine.py index 0ca6811b..555769d0 100644 --- a/opacus/privacy_engine.py +++ b/opacus/privacy_engine.py @@ -536,9 +536,9 @@ def save_checkpoint( if noise_scheduler is not None: checkpoint_dict["noise_scheduler_state_dict"] = noise_scheduler.state_dict() if grad_clip_scheduler is not None: - checkpoint_dict[ - "grad_clip_scheduler_state_dict" - ] = grad_clip_scheduler.state_dict() + checkpoint_dict["grad_clip_scheduler_state_dict"] = ( + grad_clip_scheduler.state_dict() + ) torch.save(checkpoint_dict, path, **(torch_save_kwargs or {})) diff --git a/opacus/tests/grad_samples/conv2d_test.py 
b/opacus/tests/grad_samples/conv2d_test.py index b0deae3f..d1178506 100644 --- a/opacus/tests/grad_samples/conv2d_test.py +++ b/opacus/tests/grad_samples/conv2d_test.py @@ -90,9 +90,9 @@ def test_conv2d( # Test 'convolution as a backward' GSM # 'convolution as a backward' doesn't support padding=same conv2d_gsm = GradSampleModule.GRAD_SAMPLERS[nn.Conv2d] - GradSampleModule.GRAD_SAMPLERS[ - nn.Conv2d - ] = convolution2d_backward_as_a_convolution + GradSampleModule.GRAD_SAMPLERS[nn.Conv2d] = ( + convolution2d_backward_as_a_convolution + ) self.run_test( x, conv, diff --git a/opacus/tests/privacy_engine_test.py b/opacus/tests/privacy_engine_test.py index 022acfa3..f2ed1a32 100644 --- a/opacus/tests/privacy_engine_test.py +++ b/opacus/tests/privacy_engine_test.py @@ -87,9 +87,11 @@ def _init_vanilla_training( ): model = self._init_model() optimizer = torch.optim.SGD( - model.parameters() - if not opt_exclude_frozen - else [p for p in model.parameters() if p.requires_grad], + ( + model.parameters() + if not opt_exclude_frozen + else [p for p in model.parameters() if p.requires_grad] + ), lr=self.LR, momentum=0, ) @@ -112,9 +114,11 @@ def _init_private_training( model = self._init_model() model = PrivacyEngine.get_compatible_module(model) optimizer = torch.optim.SGD( - model.parameters() - if not opt_exclude_frozen - else [p for p in model.parameters() if p.requires_grad], + ( + model.parameters() + if not opt_exclude_frozen + else [p for p in model.parameters() if p.requires_grad] + ), lr=self.LR, momentum=0, )