Skip to content

Commit

Permalink
Merge branch 'main' of https://github.com/intel-analytics/BigDL into fix_gptj
Browse files Browse the repository at this point in the history
  • Loading branch information
jenniew committed Feb 5, 2024
2 parents 26d09dd + 68b5cf0 commit 063d622
Show file tree
Hide file tree
Showing 46 changed files with 1,984 additions and 192 deletions.
29 changes: 19 additions & 10 deletions .github/workflows/llm-harness-evaluation.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ concurrency:
# Controls when the action will run.
on:
schedule:
- cron: "00 13 * * 5" # GMT time, 13:00 GMT == 21:00 China
- cron: "00 16 * * *" # GMT time, 16:00 GMT == 00:00 China
pull_request:
branches: [main]
paths:
Expand Down Expand Up @@ -39,6 +39,7 @@ on:
jobs:
llm-cpp-build:
uses: ./.github/workflows/llm-binary-build.yml
# Set the testing matrix based on the event (schedule, PR, or manual dispatch)
set-matrix:
runs-on: ubuntu-latest
outputs:
Expand All @@ -50,10 +51,11 @@ jobs:
- name: set-nightly-env
if: ${{github.event_name == 'schedule'}}
env:
NIGHTLY_MATRIX_MODEL_NAME: '["stablelm-3b-4e1t","Mistral-7B-v0.1"]'
NIGHTLY_MATRIX_TASK: '["truthfulqa", "arc"]'
NIGHTLY_MATRIX_PRECISION: '["mixed_fp4", "fp8"]'
NIGHTLY_LABELS: '["self-hosted", "llm", "accuracy"]'
NIGHTLY_MATRIX_MODEL_NAME: '["Llama2-7b-guanaco-dolphin-500", "falcon-7b-instruct-with-patch",
"Mistral-7B-v0.1", "mpt-7b-chat", "Baichuan2-7B-Chat-LLaMAfied", "stablelm-3b"]'
NIGHTLY_MATRIX_TASK: '["arc", "truthfulqa", "winogrande"]'
NIGHTLY_MATRIX_PRECISION: '["sym_int4", "fp8"]'
NIGHTLY_LABELS: '["self-hosted", "llm", "accuracy-nightly"]'
run: |
echo "model_name=$NIGHTLY_MATRIX_MODEL_NAME" >> $GITHUB_ENV
echo "precision=$NIGHTLY_MATRIX_PRECISION" >> $GITHUB_ENV
Expand All @@ -63,9 +65,9 @@ jobs:
- name: set-pr-env
if: ${{github.event_name == 'pull_request'}}
env:
PR_MATRIX_MODEL_NAME: '["stablelm-3b-4e1t"]'
PR_MATRIX_MODEL_NAME: '["stablelm-3b-4e1t", "Mistral-7B-v0.1"]'
PR_MATRIX_TASK: '["truthfulqa"]'
PR_MATRIX_PRECISION: '["mixed_fp4", "fp8"]'
PR_MATRIX_PRECISION: '["fp8"]'
PR_LABELS: '["self-hosted", "llm", "temp-arc01"]'
run: |
echo "model_name=$PR_MATRIX_MODEL_NAME" >> $GITHUB_ENV
Expand Down Expand Up @@ -122,6 +124,7 @@ jobs:
- name: Install dependencies
shell: bash
run: |
set -e
python -m pip install --upgrade pip
python -m pip install --upgrade setuptools==58.0.4
python -m pip install --upgrade wheel
Expand All @@ -132,7 +135,7 @@ jobs:
- name: Run LLM install (all) test
uses: ./.github/actions/llm/setup-llm-env
with:
extra-dependency: "xpu_2.0"
extra-dependency: "xpu_2.1"

- name: Install harness
working-directory: ${{ github.workspace }}/python/llm/dev/benchmark/harness/
Expand All @@ -154,7 +157,13 @@ jobs:
- name: Upgrade packages
shell: bash
run: |
pip install --upgrade transformers==4.34.0 datasets==2.14.6
pip install --upgrade datasets==2.14.6
if [ "${{ matrix.model_name }}" = "Mistral-7B-v0.1" ]; then
pip install --upgrade transformers==4.36
else
pip install --upgrade transformers==4.31
fi

- name: Run harness
shell: bash
Expand All @@ -166,7 +175,7 @@ jobs:
export HF_HOME=${HARNESS_HF_HOME}
export HF_DATASETS=$HARNESS_HF_HOME/datasets
export HF_DATASETS_CACHE=$HARNESS_HF_HOME/datasets
source $HOME/intel/oneapi/setvars.sh
source /opt/intel/oneapi/setvars.sh
python run_llb.py \
--model bigdl-llm \
--pretrained ${MODEL_PATH} \
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
name: LLM Test for Stable Version
name: ARC LLM Test for Stable Version

# Cancel previous runs in the PR when you push new commits
concurrency:
Expand Down Expand Up @@ -184,111 +184,3 @@ jobs:
cd ../../../test/benchmark
python -m pip install pandas==1.5.3
python csv_to_html.py -f $CSV_SAVE_PATH/fp8
llm-perf-regression-test-on-spr:
needs: llm-cpp-build
strategy:
fail-fast: false
matrix:
python-version: ["3.9"]
runs-on: [self-hosted, llm, spr01-perf]
env:
OMP_NUM_THREADS: 16
THREAD_NUM: 16
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}

- name: Install dependencies
shell: bash
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade wheel
python -m pip install --upgrade omegaconf
python -m pip install --upgrade pandas
python -m pip install --upgrade einops
python -m pip install --upgrade tiktoken
python -m pip install --upgrade transformers_stream_generator
- name: Download llm binary
uses: ./.github/actions/llm/download-llm-binary

- name: Run LLM install (all) test
uses: ./.github/actions/llm/setup-llm-env

- name: Test on cpu
shell: bash
run: |
mv python/llm/test/benchmark/stable-version-cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
cd python/llm/dev/benchmark/all-in-one
export http_proxy=${HTTP_PROXY}
export https_proxy=${HTTPS_PROXY}
source bigdl-llm-init -t
export OMP_NUM_THREADS=48
# hide time info
sed -i 's/str(end - st)/"xxxxxx"/g' run.py
python run.py
cp ./*.csv /models/stable_version_perf_regression_test_cpu/
cd ../../../test/benchmark
python -m pip install pandas==1.5.3
python csv_to_html.py -f /models/stable_version_perf_regression_test_cpu/ -b /models/stable_version_perf_regression_test_cpu/transformer_int4-results-1baseline.csv -t 5.0

llm-stress-test-on-spr:
needs: llm-perf-regression-test-on-spr
strategy:
fail-fast: false
matrix:
python-version: ["3.9"]
runs-on: [self-hosted, llm, spr01-perf]
env:
OMP_NUM_THREADS: 16
THREAD_NUM: 16
ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
steps:
- uses: actions/checkout@v3

- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}

- name: Install dependencies
shell: bash
run: |
python -m pip install --upgrade pip
python -m pip install --upgrade wheel
python -m pip install --upgrade omegaconf
python -m pip install --upgrade pandas
python -m pip install --upgrade einops
python -m pip install --upgrade tiktoken
python -m pip install --upgrade transformers_stream_generator
- name: Download llm binary
uses: ./.github/actions/llm/download-llm-binary

- name: Run LLM install (all) test
uses: ./.github/actions/llm/setup-llm-env

- name: Test on cpu
shell: bash
run: |
mv python/llm/test/benchmark/stable-version-cpu-stress-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
cd python/llm/dev/benchmark/all-in-one
export http_proxy=${HTTP_PROXY}
export https_proxy=${HTTPS_PROXY}
source bigdl-llm-init -t
export OMP_NUM_THREADS=48
# hide time info
sed -i 's/str(end - st)/"xxxxxx"/g' run-stress-test.py
python run-stress-test.py
cp ./*.csv /models/stable_version_stress_test_cpu/
cd ../../../test/benchmark
python -m pip install pandas==1.5.3
python csv_to_html.py -f /models/stable_version_stress_test_cpu/
129 changes: 129 additions & 0 deletions .github/workflows/llm_tests_for_stable_version_on_spr.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
# SPR CPU performance-regression and stress tests for the BigDL LLM stable
# version. The stress job runs only after the regression job succeeds.
# NOTE(review): indentation below reconstructed to standard GitHub Actions
# 2-space style — the captured source had lost all leading whitespace.
name: SPR LLM Test for Stable Version

# Cancel previous runs in the PR when you push new commits
concurrency:
  group: ${{ github.workflow }}-llm-performance-tests-${{ github.event.pull_request.number || github.run_id }}
  cancel-in-progress: true

# Controls when the action will run.
on:
  # pull_request:
  #   branches: [main]
  #   paths:
  #     - ".github/workflows/llm_performance_tests.yml"
  #     - "python/llm/test/benchmark/**"
  #     - "python/llm/dev/benchmark/all-in-one/**"
  workflow_dispatch:
  workflow_call:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # Build the llm.cpp binaries consumed by the download-llm-binary action below.
  llm-cpp-build:
    uses: ./.github/workflows/llm-binary-build.yml

  llm-perf-regression-test-on-spr:
    needs: llm-cpp-build
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9"]
    runs-on: [self-hosted, llm, spr01-perf]
    env:
      # Quoted so YAML keeps them as strings; GitHub Actions env values are
      # strings either way.
      OMP_NUM_THREADS: "16"
      THREAD_NUM: "16"
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
    steps:
      - uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel
          python -m pip install --upgrade omegaconf
          python -m pip install --upgrade pandas
          python -m pip install --upgrade einops
          python -m pip install --upgrade tiktoken
          python -m pip install --upgrade transformers_stream_generator
      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env

      - name: Test on cpu
        shell: bash
        run: |
          mv python/llm/test/benchmark/stable-version-cpu-perf-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
          cd python/llm/dev/benchmark/all-in-one
          export http_proxy=${HTTP_PROXY}
          export https_proxy=${HTTPS_PROXY}
          source bigdl-llm-init -t
          export OMP_NUM_THREADS=48
          # hide time info
          sed -i 's/str(end - st)/"xxxxxx"/g' run.py
          python run.py
          cp ./*.csv /models/stable_version_perf_regression_test_cpu/
          cd ../../../test/benchmark
          python -m pip install pandas==1.5.3
          # Compare against the int4 baseline CSV with a 5% regression threshold.
          python csv_to_html.py -f /models/stable_version_perf_regression_test_cpu/ -b /models/stable_version_perf_regression_test_cpu/transformer_int4-results-1baseline.csv -t 5.0

  llm-stress-test-on-spr:
    # Run the stress test only after the regression test passes on the same runner.
    needs: llm-perf-regression-test-on-spr
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9"]
    runs-on: [self-hosted, llm, spr01-perf]
    env:
      OMP_NUM_THREADS: "16"
      THREAD_NUM: "16"
      ANALYTICS_ZOO_ROOT: ${{ github.workspace }}
    steps:
      - uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        shell: bash
        run: |
          python -m pip install --upgrade pip
          python -m pip install --upgrade wheel
          python -m pip install --upgrade omegaconf
          python -m pip install --upgrade pandas
          python -m pip install --upgrade einops
          python -m pip install --upgrade tiktoken
          python -m pip install --upgrade transformers_stream_generator
      - name: Download llm binary
        uses: ./.github/actions/llm/download-llm-binary

      - name: Run LLM install (all) test
        uses: ./.github/actions/llm/setup-llm-env

      - name: Test on cpu
        shell: bash
        run: |
          mv python/llm/test/benchmark/stable-version-cpu-stress-test.yaml python/llm/dev/benchmark/all-in-one/config.yaml
          cd python/llm/dev/benchmark/all-in-one
          export http_proxy=${HTTP_PROXY}
          export https_proxy=${HTTPS_PROXY}
          source bigdl-llm-init -t
          export OMP_NUM_THREADS=48
          # hide time info
          sed -i 's/str(end - st)/"xxxxxx"/g' run-stress-test.py
          python run-stress-test.py
          cp ./*.csv /models/stable_version_stress_test_cpu/
          cd ../../../test/benchmark
          python -m pip install pandas==1.5.3
          python csv_to_html.py -f /models/stable_version_stress_test_cpu/
Loading

0 comments on commit 063d622

Please sign in to comment.