Fix github action CI for GPU (#625)
Summary: Pull Request resolved: #625

Differential Revision: D53292685
HuanyuZhang authored and facebook-github-bot committed Sep 4, 2024
1 parent 36f7a34 commit 116f75a
Showing 2 changed files with 135 additions and 124 deletions.
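
In short: the multi-GPU unit-test job is commented out for now (no runner with multiple GPUs is available), the micro-benchmark job and several integration steps are likewise disabled, and the remaining integration-test job runs directly on a `4-core-ubuntu-gpu-t4` GPU runner instead of an `ubuntu-latest` host with an `nvidia/cuda` container. A minimal sketch of the surviving job's header after this commit (the full file follows in the diff below):

```yaml
# Sketch only; see the diff below for the complete workflow.
integrationtest_py39_torch_release_cuda:
  runs-on: 4-core-ubuntu-gpu-t4
  steps:
    - name: Checkout
      uses: actions/checkout@v2
    - name: Set up Python
      uses: actions/setup-python@v2
      with:
        python-version: '3.9'
```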
257 changes: 134 additions & 123 deletions .github/workflows/ci_gpu.yml
@@ -8,48 +8,59 @@ on:
branches:
- main

unittest_multi_gpu:
runs-on: linux.4xlarge.nvidia.gpu
steps:
- name: Checkout
uses: actions/checkout@v2
jobs:
# unittest_multi_gpu:
# # Temporarily disable the test since there is no server having multiple GPUs.
# runs-on: 4-core-ubuntu-gpu-t4
# steps:
# - name: Checkout
# uses: actions/checkout@v2

- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.9
# - name: Display Python version
# run: python3 -c "import sys; print(sys.version)"

- name: Install dependencies
run: |
./scripts/install_via_pip.sh -c
# - name: Set up Python
# uses: actions/setup-python@v2
# with:
# python-version: '3.x'

- name: Run multi-GPU unit tests
run: |
nvidia-smi
nvcc --version
python -m unittest opacus.tests.multigpu_gradcheck.GradientComputationTest.test_gradient_correct
# - name: Install dependencies
# run: |
# python -m pip install --upgrade pip
# ./scripts/install_via_pip.sh -c

# - name: Run multi-GPU unit tests
# run: |
# python3 -m unittest opacus.tests.multigpu_gradcheck.GradientComputationTest.test_gradient_correct


integrationtest_py39_torch_release_cuda:
runs-on: ubuntu-latest
container:
# https://hub.docker.com/r/nvidia/cuda
image: nvidia/cuda:12.3.1-base-ubuntu22.04
options: --gpus all
env:
TZ: 'UTC'
# runs-on: linux.4xlarge.nvidia.gpu
# runs-on: ${{ matrix.os }}
runs-on: 4-core-ubuntu-gpu-t4
# strategy:
# matrix:
# os: [linux.4xlarge.nvidia.gpu]
# python-version: [3.8]
# cuda-tag: ["cu11"]
# container:
# # https://hub.docker.com/r/nvidia/cuda
# image: nvidia/cuda:12.3.1-base-ubuntu22.04
# options: --gpus all
# env:
# TZ: 'UTC'
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.9
python-version: '3.9'

- name: Install dependencies
run: |
python -m pip install --upgrade pip
python3 -m pip install --upgrade pip
pip install pytest coverage coveralls
./scripts/install_via_pip.sh -c
@@ -74,100 +85,100 @@ on:
name: mnist-gpu-reports
path: runs/mnist/test-reports

- name: Run CIFAR10 integration test (CUDA)
run: |
mkdir -p runs/cifar10/data
mkdir -p runs/cifar10/logs
mkdir -p runs/cifar10/test-reports
pip install tensorboard
python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda
python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda --grad_sample_mode no_op
python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
- name: Store CIFAR10 test results
uses: actions/upload-artifact@v2
with:
name: cifar10-gpu-reports
path: runs/cifar10/test-reports

- name: Run IMDb integration test (CUDA)
run: |
mkdir -p runs/imdb/data
mkdir -p runs/imdb/test-reports
pip install --user datasets transformers
python examples/imdb.py --lr 0.02 --sigma 1.0 -c 1.0 --batch-size 64 --max-sequence-length 256 --epochs 2 --data-root runs/imdb/data --device cuda
python -c "import torch; accuracy = torch.load('run_results_imdb_classification.pt'); exit(0) if (accuracy>0.54 and accuracy<0.66) else exit(1)"
- name: Store IMDb test results
uses: actions/upload-artifact@v2
with:
name: imdb-gpu-reports
path: runs/imdb/test-reports

- name: Run charlstm integration test (CUDA)
run: |
mkdir -p runs/charlstm/data
wget https://download.pytorch.org/tutorial/data.zip -O runs/charlstm/data/data.zip
unzip runs/charlstm/data/data.zip -d runs/charlstm/data
rm runs/charlstm/data/data.zip
mkdir -p runs/charlstm/test-reports
pip install scikit-learn
python examples/char-lstm-classification.py --epochs=20 --learning-rate=2.0 --hidden-size=128 --delta=8e-5 --batch-size 400 --n-layers=1 --sigma=1.0 --max-per-sample-grad-norm=1.5 --data-root="runs/charlstm/data/data/names/" --device cuda --test-every 5
python -c "import torch; accuracy = torch.load('run_results_chr_lstm_classification.pt'); exit(0) if (accuracy>0.60 and accuracy<0.80) else exit(1)"
- name: Store test results
uses: actions/upload-artifact@v2
with:
name: charlstm-gpu-reports
path: runs/charlstm/test-reports

micro_benchmarks_py39_torch_release_cuda:
runs-on: ubuntu-latest
needs: [integrationtest_py39_torch_release_cuda]
container:
# https://hub.docker.com/r/nvidia/cuda
image: nvidia/cuda:12.3.1-base-ubuntu22.04
options: --gpus all
env:
TZ: 'UTC'
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.9

- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install pytest coverage coveralls
./scripts/install_via_pip.sh
- name: Install CUDA toolkit and cuDNN
run: |
apt-get update
apt-get install -y --no-install-recommends \
cuda-toolkit-11-1 \
libcudnn8=8.1.1.33-1+cuda11.1 \
libcudnn8-dev=8.1.1.33-1+cuda11.1
- name: Run benchmark integration tests (CUDA)
run: |
mkdir -p benchmarks/results/raw
python benchmarks/run_benchmarks.py --batch_size 16 --layers "groupnorm instancenorm layernorm" --config_file ./benchmarks/config.json --root ./benchmarks/results/raw/ --cont
IFS=$' ';layers=("groupnorm" "instancenorm" "layernorm"); rm -rf /tmp/report_layers; mkdir -p /tmp/report_layers; IFS=$'\n'; files=`( echo "${layers[*]}" ) | sed 's/.*/.\/benchmarks\/results\/raw\/&*/'`
cp -v ${files[@]} /tmp/report_layers
report_id=`IFS=$'-'; echo "${layers[*]}"`
python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.csv --format csv
python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.pkl --format pkl
python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric runtime --threshold 3.0 --column "hooks/baseline"
python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric memory --threshold 1.6 --column "hooks/baseline"
- name: Store artifacts
uses: actions/upload-artifact@v2
with:
name: benchmarks-reports
path: benchmarks/results/
# - name: Run CIFAR10 integration test (CUDA)
# run: |
# mkdir -p runs/cifar10/data
# mkdir -p runs/cifar10/logs
# mkdir -p runs/cifar10/test-reports
# pip install tensorboard
# python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda
# python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
# python examples/cifar10.py --lr 0.1 --sigma 1.5 -c 10 --batch-size 2000 --epochs 10 --data-root runs/cifar10/data --log-dir runs/cifar10/logs --device cuda --grad_sample_mode no_op
# python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"

# - name: Store CIFAR10 test results
# uses: actions/upload-artifact@v2
# with:
# name: cifar10-gpu-reports
# path: runs/cifar10/test-reports

# - name: Run IMDb integration test (CUDA)
# run: |
# mkdir -p runs/imdb/data
# mkdir -p runs/imdb/test-reports
# pip install --user datasets transformers
# python examples/imdb.py --lr 0.02 --sigma 1.0 -c 1.0 --batch-size 64 --max-sequence-length 256 --epochs 2 --data-root runs/imdb/data --device cuda
# python -c "import torch; accuracy = torch.load('run_results_imdb_classification.pt'); exit(0) if (accuracy>0.54 and accuracy<0.66) else exit(1)"

# - name: Store IMDb test results
# uses: actions/upload-artifact@v2
# with:
# name: imdb-gpu-reports
# path: runs/imdb/test-reports

# - name: Run charlstm integration test (CUDA)
# run: |
# mkdir -p runs/charlstm/data
# wget https://download.pytorch.org/tutorial/data.zip -O runs/charlstm/data/data.zip
# unzip runs/charlstm/data/data.zip -d runs/charlstm/data
# rm runs/charlstm/data/data.zip
# mkdir -p runs/charlstm/test-reports
# pip install scikit-learn
# python examples/char-lstm-classification.py --epochs=20 --learning-rate=2.0 --hidden-size=128 --delta=8e-5 --batch-size 400 --n-layers=1 --sigma=1.0 --max-per-sample-grad-norm=1.5 --data-root="runs/charlstm/data/data/names/" --device cuda --test-every 5
# python -c "import torch; accuracy = torch.load('run_results_chr_lstm_classification.pt'); exit(0) if (accuracy>0.60 and accuracy<0.80) else exit(1)"

# - name: Store test results
# uses: actions/upload-artifact@v2
# with:
# name: charlstm-gpu-reports
# path: runs/charlstm/test-reports

# micro_benchmarks_py39_torch_release_cuda:
# runs-on: ubuntu-latest
# needs: [integrationtest_py39_torch_release_cuda]
# container:
# # https://hub.docker.com/r/nvidia/cuda
# image: nvidia/cuda:12.3.1-base-ubuntu22.04
# options: --gpus all
# env:
# TZ: 'UTC'
# steps:
# - name: Checkout
# uses: actions/checkout@v2

# - name: Set up Python
# uses: actions/setup-python@v2
# with:
# python-version: 3.9

# - name: Install dependencies
# run: |
# python -m pip install --upgrade pip
# pip install pytest coverage coveralls
# ./scripts/install_via_pip.sh

# - name: Install CUDA toolkit and cuDNN
# run: |
# apt-get update
# apt-get install -y --no-install-recommends \
# cuda-toolkit-11-1 \
# libcudnn8=8.1.1.33-1+cuda11.1 \
# libcudnn8-dev=8.1.1.33-1+cuda11.1

# - name: Run benchmark integration tests (CUDA)
# run: |
# mkdir -p benchmarks/results/raw
# python benchmarks/run_benchmarks.py --batch_size 16 --layers "groupnorm instancenorm layernorm" --config_file ./benchmarks/config.json --root ./benchmarks/results/raw/ --cont
# IFS=$' ';layers=("groupnorm" "instancenorm" "layernorm"); rm -rf /tmp/report_layers; mkdir -p /tmp/report_layers; IFS=$'\n'; files=`( echo "${layers[*]}" ) | sed 's/.*/.\/benchmarks\/results\/raw\/&*/'`
# cp -v ${files[@]} /tmp/report_layers
# report_id=`IFS=$'-'; echo "${layers[*]}"`
# python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.csv --format csv
# python benchmarks/generate_report.py --path-to-results /tmp/report_layers --save-path benchmarks/results/report-${report_id}.pkl --format pkl
# python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric runtime --threshold 3.0 --column "hooks/baseline"
# python benchmarks/check_threshold.py --report-path "./benchmarks/results/report-"$report_id".pkl" --metric memory --threshold 1.6 --column "hooks/baseline"

# - name: Store artifacts
# uses: actions/upload-artifact@v2
# with:
# name: benchmarks-reports
# path: benchmarks/results/
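
The integration steps above gate on accuracy with inline `python -c` one-liners. Written out, the CIFAR10 gate amounts to the following; the checkpoint name and bounds are copied from the step above, the rest is an illustrative sketch:

```sh
# Expanded form of:
#   python -c "import torch; model = torch.load('model_best.pth.tar'); exit(0) if (model['best_acc1']>0.4 and model['best_acc1']<0.49) else exit(1)"
python - <<'PY'
import sys
import torch

ckpt = torch.load("model_best.pth.tar")   # checkpoint written by examples/cifar10.py
acc = ckpt["best_acc1"]
sys.exit(0 if 0.4 < acc < 0.49 else 1)    # non-zero exit fails the CI step
PY
```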
2 changes: 1 addition & 1 deletion scripts/install_via_pip.sh
@@ -35,7 +35,7 @@ while getopts 'ncdv:' flag; do

curl -sL https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
sudo apt-get update && sudo apt-get install yarn
sudo apt-get -y update && sudo apt-get -y install yarn

# yarn needs terminal info
export TERM=xterm
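
The lone change to `scripts/install_via_pip.sh` adds `-y` so `apt-get` runs non-interactively and the CI job cannot stall on a confirmation prompt. The same pattern in isolation, as a sketch:

```sh
# Answer apt-get prompts automatically; a prompt would otherwise hang a non-interactive CI run.
sudo apt-get -y update
sudo apt-get -y install yarn
```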
