diff --git a/.github/workflows/postsubmit.yml b/.github/workflows/postsubmit.yml
index e7ad3b5bebc28..dc5d42d3e821f 100644
--- a/.github/workflows/postsubmit.yml
+++ b/.github/workflows/postsubmit.yml
@@ -231,3 +231,29 @@ jobs:
           TI_LIB_DIR=`python3 -c "import taichi;print(taichi.__path__[0])" | tail -1`
           TI_LIB_DIR="$TI_LIB_DIR/lib" ./build/taichi_cpp_tests
           ti test -vr2 -t4 -x
+
+  performance_monitoring:
+    name: Performance monitoring (NVGPU)
+    timeout-minutes: 60
+    runs-on: [self-hosted, x64, cuda, linux, benchmark]
+    steps:
+      - uses: actions/checkout@v2
+        with:
+          submodules: 'recursive'
+
+      - name: Build & Install
+        run: |
+          export PATH=$PATH:/usr/local/cuda/bin
+          .github/workflows/scripts/unix_build.sh
+        env:
+          LLVM_LIB_ROOT_DIR: /opt/taichi-llvm-10.0.0
+          LLVM_PATH: /opt/taichi-llvm-10.0.0/bin
+          LLVM_DIR: /opt/taichi-llvm-10.0.0/lib/cmake/llvm
+          CUDA_TOOLKIT_ROOT_DIR: /usr/local/cuda/
+          CI_SETUP_CMAKE_ARGS: -DTI_WITH_CUDA_TOOLKIT:BOOL=ON
+          BUILD_NUM_THREADS: 8
+          CXX: clang++-10
+
+      - name: Run benchmark
+        run: |
+          python3 benchmarks/misc/run.py /home/benchmarkbot/benchmark/
diff --git a/benchmarks/misc/membound.py b/benchmarks/misc/membound.py
index 55bfd3c9942e3..c2aedab9d1432 100644
--- a/benchmarks/misc/membound.py
+++ b/benchmarks/misc/membound.py
@@ -8,7 +8,7 @@
 test_cases = [fill, saxpy, reduction]
 test_archs = [ti.cuda]
 test_dtype = [ti.i32, ti.i64, ti.f32, ti.f64]
-test_dsize = [(4**i) * kibibyte for i in range(1, 11)] #[4KB,16KB...1GB]
+test_dsize = [(4**i) * kibibyte for i in range(1, 10)] #[4KB,16KB...256MB]
 test_repeat = 10
 results_evaluation = [geometric_mean]
 
diff --git a/benchmarks/misc/run.py b/benchmarks/misc/run.py
index 69fff53d82a5c..06406c9fd41fa 100644
--- a/benchmarks/misc/run.py
+++ b/benchmarks/misc/run.py
@@ -1,4 +1,9 @@
+import datetime
+import os
+import sys
+
 from membound import Membound
+from taichi.core import ti_core as _ti_core
 
 import taichi as ti
 
@@ -18,16 +23,30 @@ def run(self):
         for s in self.suites:
             s.run()
 
-    def write_md(self):
-        filename = f'performance_result.md'
-        with open(filename, 'w') as f:
+    def store_to_path(self, path_with_file_name='./performance_result.md'):
+        with open(path_with_file_name, 'w') as f:
             for arch in test_archs:
                 for s in self.suites:
                     lines = s.mdlines(arch)
                     for line in lines:
                         print(line, file=f)
 
+    def store_with_date_and_commit_id(self, file_dir='./'):
+        current_time = datetime.datetime.now().strftime("%Y%m%dd%Hh%Mm%Ss")
+        commit_hash = _ti_core.get_commit_hash()[:8]
+        file_name = f'perfresult_{current_time}_{commit_hash}.md'
+        path = os.path.join(file_dir, file_name)
+        print('Storing benchmark result to: ' + path)
+        self.store_to_path(path)
+
+
+def main():
+    file_dir = sys.argv[1] if len(sys.argv) > 1 else './'
+    p = PerformanceMonitoring()
+    p.run()
+    p.store_to_path() # for /benchmark
+    p.store_with_date_and_commit_id(file_dir) #for postsubmit
+
 
-p = PerformanceMonitoring()
-p.run()
-p.write_md()
+if __name__ == '__main__':
+    main()
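
For reference, a quick standalone sketch (not part of the patch) of what the test_dsize change in membound.py does, assuming kibibyte = 1024 as defined in the benchmark's utils module: the upper bound drops from 1 GB (range(1, 11)) to 256 MB (range(1, 10)), matching the updated comment.

    # Illustrative check only; kibibyte = 1024 is an assumption taken from the benchmark's utils.
    kibibyte = 1024

    old_sizes = [(4**i) * kibibyte for i in range(1, 11)]  # previous upper bound
    new_sizes = [(4**i) * kibibyte for i in range(1, 10)]  # upper bound after this patch

    assert old_sizes[-1] == 1024**3        # 1 GB
    assert new_sizes[-1] == 256 * 1024**2  # 256 MB
    # Prints: ['4KB', '16KB', '64KB'] ... 256MB
    print([f'{s // kibibyte}KB' for s in new_sizes[:3]], '...',
          f'{new_sizes[-1] // 1024**2}MB')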