Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merge from CTuning (Conda support in CM) #1051

Merged
merged 25 commits into from
Jan 15, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
660907d
Fixes for mlperf-inference
arjunsuresh Jan 8, 2024
c10c341
CM script to run mlperf-inference-intel added (WIP)
arjunsuresh Jan 8, 2024
62e60ea
Fix
arjunsuresh Jan 9, 2024
e2e002e
minor fix
gfursin Jan 9, 2024
258f1a6
cleaning universal benchmarking with loadgen
gfursin Jan 10, 2024
a12ab42
added automation recipe "cm generate_secret utils"
gfursin Jan 12, 2024
dd2a98b
Fixes for intel-mlperf-inference
arjunsuresh Jan 12, 2024
d0cb2a0
Create conda env script added
arjunsuresh Jan 12, 2024
210b663
Fixes for intel-mlperf-inference
arjunsuresh Jan 12, 2024
d78b516
Fixes for conda env
arjunsuresh Jan 12, 2024
3a5cac6
Fixes for intel-mlperf-inference
arjunsuresh Jan 12, 2024
e3ca2ef
Fixes llvm build for intel-mlperf-inference
arjunsuresh Jan 13, 2024
b3f2303
Fixes for intel-mlperf-inference
arjunsuresh Jan 13, 2024
3c0225d
Fixes
arjunsuresh Jan 13, 2024
8bfa2a3
Added onednn from src for intel-mlperf-inference
arjunsuresh Jan 13, 2024
89544be
Added transformers from src for intel-mlperf-inference
arjunsuresh Jan 13, 2024
0617cc3
Checkout full git history with SHA
arjunsuresh Jan 13, 2024
d8edaa3
Checkout full git history with SHA
arjunsuresh Jan 13, 2024
067c62d
Minor cleanup for intel-mlperf-inference
arjunsuresh Jan 13, 2024
99ccb49
Add libffi7 dep for install-pytorch-from-src
arjunsuresh Jan 13, 2024
a0ad37d
Add libffi7 dep for install-pytorch-from-src
arjunsuresh Jan 13, 2024
fdba76c
Add libffi7 dep for install-pytorch-from-src
arjunsuresh Jan 13, 2024
96cd7c8
Fixes for intel-mlperf-inference
arjunsuresh Jan 13, 2024
dd4d03c
Changes to run bert for intel-mlperf-inference
arjunsuresh Jan 13, 2024
24b5629
fixing cuda prebuilt installer
gfursin Jan 14, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion cm-mlops/automation/script/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -2363,6 +2363,8 @@ def _get_variation_groups(script, variations):

for k in variations:
variation = variations[k]
if not variation:
continue
if 'group' in variation:
if variation['group'] not in groups:
groups[variation['group']] = {}
Expand Down Expand Up @@ -4184,7 +4186,7 @@ def append_deps(deps, new_deps):
dep = deps[i]
dep_names = dep.get('names',[])
if len(dep_names)>0:
if set(new_dep_names) & set(dep_names):
if set(new_dep_names) == set(dep_names):
deps[i] = new_dep
existing = True
break
Expand Down
23 changes: 23 additions & 0 deletions cm-mlops/automation/utils/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -757,3 +757,26 @@ def list_files_recursively(self, i):
print (s)

return {'return':0}

##############################################################################
def generate_secret(self, i):
    """
    Generate a cryptographically strong, URL-safe secret
    (e.g. for web-app session/signing keys).

    Args:
        i (dict): CM input dictionary
          - (length) (int): number of random bytes used to build the
            token (default: 16). The returned string is longer than
            this because of the base64 (URL-safe) encoding.

    Returns:
        (CM return dict):

        secret (str): secret

        * return (int): return code == 0 if no error and >0 if error
        * (error) (str): error string if return>0
    """

    # 'secrets' (not 'random') is the stdlib module intended for
    # security-sensitive token generation.
    import secrets

    # CLI-provided inputs arrive as strings, so coerce explicitly.
    length = int(i.get('length', 16))

    s = secrets.token_urlsafe(length)

    # Print the secret so the utility is usable directly from the
    # command line, in addition to returning it programmatically.
    print (s)

    return {'return':0, 'secret': s}
1 change: 1 addition & 0 deletions cm-mlops/script/app-loadgen-generic-python/_cm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ input_mapping:
interop: CM_MLPERF_INTEROP
execmode: CM_MLPERF_EXEC_MODE
modelpath: CM_ML_MODEL_FILE_WITH_PATH
samples: CM_MLPERF_LOADGEN_SAMPLES

new_env_keys:
- CM_MLPERF_*
Expand Down
24 changes: 20 additions & 4 deletions cm-mlops/script/app-loadgen-generic-python/customize.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,27 +6,43 @@ def preprocess(i):

os_info = i['os_info']

if os_info['platform'] == 'windows':
return {'return':1, 'error': 'Windows is not supported in this script yet'}
# if os_info['platform'] == 'windows':
# return {'return':1, 'error': 'Windows is not supported in this script yet'}

env = i['env']

if 'CM_ML_MODEL_FILE_WITH_PATH' not in env:
return {'return': 1, 'error': 'Please select a variation specifying the model to run'}

env['CM_RUN_OPTS'] = ''
run_opts = ''
run_opts = env.get('CM_RUN_OPTS', '')

if 'CM_MLPERF_RUNNER' in env:
run_opts +=" -r "+env['CM_MLPERF_RUNNER']

if 'CM_MLPERF_CONCURRENCY' in env:
run_opts +=" --concurrency "+env['CM_MLPERF_CONCURRENCY']

if 'CM_MLPERF_EXECUTION_PROVIDER' in env:
run_opts +=" --ep "+env['CM_MLPERF_EXECUTION_PROVIDER']

if 'CM_MLPERF_INTRAOP' in env:
run_opts +=" --intraop "+env['CM_MLPERF_INTRAOP']

if 'CM_MLPERF_INTEROP' in env:
run_opts +=" --interop "+env['CM_MLPERF_INTEROP']

if 'CM_MLPERF_EXECMODE' in env:
run_opts +=" --execmode "+env['CM_MLPERF_EXECUTION_MODE']

if 'CM_MLPERF_LOADGEN_SAMPLES' in env:
run_opts +=" --samples "+env['CM_MLPERF_LOADGEN_SAMPLES']

env['CM_RUN_OPTS'] = run_opts

print ('')
print ('Assembled flags: {}'.format(run_opts))
print ('')

return {'return':0}

def postprocess(i):
Expand Down
4 changes: 4 additions & 0 deletions cm-mlops/script/app-loadgen-generic-python/run.bat
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
rem Native Windows wrapper for app-loadgen-generic-python.

rem Run src\main.py with the flags assembled in customize.py
rem (CM_RUN_OPTS) and the model path resolved by the CM dependencies.
%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\src\main.py %CM_RUN_OPTS% %CM_ML_MODEL_FILE_WITH_PATH%
rem Propagate a failing exit code back to CM.
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
18 changes: 14 additions & 4 deletions cm-mlops/script/app-loadgen-generic-python/src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@


LOADGEN_EXPECTED_QPS = 50
LOADGEN_SAMPLE_COUNT = 100
LOADGEN_DURATION_SEC = 10


Expand All @@ -35,14 +34,18 @@ def main(
execution_mode: str,
intraop_threads: int,
interop_threads: int,
samples: int
):

model_factory = ORTModelFactory(
model_path,
execution_provider,
execution_mode,
interop_threads,
intraop_threads,
)


model_dataset = ORTModelInputSampler(model_factory)

runner: ModelRunner = None
Expand Down Expand Up @@ -71,7 +74,7 @@ def main(
settings.scenario = mlperf_loadgen.TestScenario.Offline
settings.mode = mlperf_loadgen.TestMode.PerformanceOnly
settings.offline_expected_qps = LOADGEN_EXPECTED_QPS
settings.min_query_count = LOADGEN_SAMPLE_COUNT * 2
settings.min_query_count = samples * 2
settings.min_duration_ms = LOADGEN_DURATION_SEC * 1000
# Duration isn't enforced in offline mode
# Instead, it is used to determine total sample count via
Expand Down Expand Up @@ -99,8 +102,8 @@ def main(
harness = Harness(model_dataset, runner)
try:
query_sample_libary = mlperf_loadgen.ConstructQSL(
LOADGEN_SAMPLE_COUNT, # Total sample count
LOADGEN_SAMPLE_COUNT, # Num to load in RAM at a time
samples, # Total sample count
samples, # Num to load in RAM at a time
harness.load_query_samples,
harness.unload_query_samples,
)
Expand Down Expand Up @@ -171,6 +174,12 @@ def main(
choices=["sequential", "parallel"],
default="sequential",
)
parser.add_argument(
"--samples",
help="number of samples",
default=100,
type=int,
)

args = parser.parse_args()
main(
Expand All @@ -182,4 +191,5 @@ def main(
args.execmode,
args.intraop,
args.interop,
args.samples
)
21 changes: 20 additions & 1 deletion cm-mlops/script/app-mlperf-inference/_cm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -253,9 +253,28 @@ variations:
update_tags_from_env:
- CM_NVIDIA_HARNESS_GPU_VARIATION

intel:
group:
implementation
default_variations:
device: cpu
backend: pytorch
posthook_deps:
- names:
- intel
- intel-harness
- mlperf-inference-implementation
tags: reproduce,mlperf,inference,intel
skip_if_env:
CM_SKIP_RUN:
- yes

kilt:
group:
implementation
default_variations:
device: qaic
backend: glow
posthook_deps:
- names:
- kilt
Expand Down Expand Up @@ -478,7 +497,7 @@ variations:
CM_DATASET_SQUAD_VAL_PATH: "on"
tags: get,dataset,squad,language-processing
- skip_if_env:
CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH": "on"
CM_ML_MODEL_BERT_VOCAB_FILE_WITH_PATH: "on"
tags: get,dataset-aux,squad-vocab
post_deps:
- enable_if_env:
Expand Down
7 changes: 7 additions & 0 deletions cm-mlops/script/benchmark-object-detection-loadgen/run.bat
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
rem Native Windows wrapper for benchmark-object-detection-loadgen (work in progress).

echo "TBD"


rem NOTE(review): the doubled backslashes in the path look unintended
rem for a batch file (single \ is the normal separator) -- confirm the
rem resolved path %CM_TMP_CURRENT_SCRIPT_PATH%\\python\\main.py works.
%CM_PYTHON_BIN_WITH_PATH% %CM_TMP_CURRENT_SCRIPT_PATH%\\python\\main.py
rem Propagate a failing exit code back to CM.
IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL%
43 changes: 43 additions & 0 deletions cm-mlops/script/create-conda-env/_cm.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
{
"alias": "create-conda-env",
"automation_alias": "script",
"automation_uid": "5b4e0237da074764",
"cache": true,
"category": "DevOps automation",
"clean_files": [],
"deps": [
{
"tags": "detect,os"
},
{
"names": [
"conda"
],
"tags": "get,conda"
}
],
"new_env_keys": [
"+PATH",
"+LD_LIBRARY_PATH",
"CM_CONDA_PREFIX",
"CONDA_PREFIX",
"CM_CONDA_BIN_PATH",
"CM_CONDA_LIB_PATH"
],
"tags": [
"create",
"get",
"env",
"conda-env",
"conda-environment",
"create-conda-environment"
],
"uid": "e39e0b04c86a40f2",
"variations": {
"name.#": {
"env": {
"CM_CONDA_ENV_NAME": "#"
}
}
}
}
31 changes: 31 additions & 0 deletions cm-mlops/script/create-conda-env/customize.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
from cmind import utils
import os

def preprocess(i):
    """
    Validate inputs before creating the conda environment.

    Args:
        i (dict): CM script automation input; only i['env'] is used.

    Returns:
        (CM return dict):
        * return (int): 0 on success, >0 on error
        * (error) (str): error string if return>0
    """

    env = i['env']

    # The environment name must come from the "_name.<conda env name>"
    # variation (see _cm.json); without it there is nothing to create.
    if env.get('CM_CONDA_ENV_NAME', '') == '':
        return {'return':1, 'error': 'Please use "_name.<conda env name>" variation'}

    return {'return':0}

def postprocess(i):
    """
    Export the location of the freshly created conda environment.

    The current working directory (the CM cache entry the script ran
    in) is the conda prefix; record it together with its bin/lib
    sub-directories and prepend them to PATH / LD_LIBRARY_PATH.
    """

    env = i['env']

    prefix = os.getcwd()
    bin_dir = os.path.join(prefix, "bin")
    lib_dir = os.path.join(prefix, "lib")

    env.update({
        'CONDA_PREFIX': prefix,
        'CM_CONDA_PREFIX': prefix,
        'CM_CONDA_BIN_PATH': bin_dir,
        'CM_CONDA_LIB_PATH': lib_dir,
    })

    # '+'-prefixed keys are list-valued and get prepended to the
    # corresponding environment variables by the CM runtime.
    env['+PATH'] = [ bin_dir ]
    env['+LD_LIBRARY_PATH'] = [ lib_dir ]

    return {'return':0}
7 changes: 7 additions & 0 deletions cm-mlops/script/create-conda-env/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash

# Create a conda environment using the current directory (the CM cache
# entry) as its prefix; the conda binary comes from the get-conda dep.
cmd="${CM_CONDA_BIN_WITH_PATH} create -p ${PWD}"
# Echo the assembled command first so it shows up in CM logs.
echo "$cmd"
eval "$cmd"
# Propagate a non-zero exit code back to CM so the script fails visibly.
test $? -eq 0 || exit $?

16 changes: 14 additions & 2 deletions cm-mlops/script/get-conda/_cm.json
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,24 @@
],
"new_env_keys": [
"+PATH",
"CM_CONDA_PREFIX"
"+LD_LIBRARY_PATH",
"CM_CONDA_PREFIX",
"CONDA_PREFIX",
"CM_CONDA_BIN_PATH",
"CM_CONDA_BIN_WITH_PATH",
"CM_CONDA_LIB_PATH"
],
"tags": [
"get",
"conda",
"get-conda"
],
"uid": "6600115f41324c7b"
"uid": "6600115f41324c7b",
"variations": {
"name.#": {
"env": {
"CM_CONDA_PREFIX_NAME": "#"
}
}
}
}
Loading