diff --git a/test_tipc/test_inference_cpp.sh b/test_tipc/test_inference_cpp.sh
index d26954353e..4787f83093 100644
--- a/test_tipc/test_inference_cpp.sh
+++ b/test_tipc/test_inference_cpp.sh
@@ -64,10 +64,11 @@ function func_cpp_inference(){
set_infer_data=$(func_set_params "${cpp_image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${cpp_benchmark_key}" "${cpp_benchmark_value}")
set_batchsize=$(func_set_params "${cpp_batch_size_key}" "${batch_size}")
+ set_mkldnn=$(func_set_params "${cpp_use_mkldnn_key}" "${use_mkldnn}")
set_cpu_threads=$(func_set_params "${cpp_cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${cpp_infer_model_key}" "${_model_dir}")
set_infer_params1=$(func_set_params "${cpp_infer_key1}" "${cpp_infer_value1}")
- command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${cpp_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+ command="${_script} ${cpp_use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
diff --git a/test_tipc/test_inference_jeston.sh b/test_tipc/test_inference_jeston.sh
deleted file mode 100644
index 2fd76e1e9e..0000000000
--- a/test_tipc/test_inference_jeston.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-source test_tipc/common_func.sh
-source test_tipc/test_train_inference_python.sh
-
-FILENAME=$1
-# MODE be one of ['whole_infer']
-MODE=$2
-
-dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
-
-# parser params
-IFS=$'\n'
-lines=(${dataline})
-
-model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
-
-infer_model_dir_list=$(func_parser_value "${lines[3]}")
-infer_export_list=$(func_parser_value "${lines[4]}")
-infer_is_quant=$(func_parser_value "${lines[5]}")
-# parser inference
-inference_py=$(func_parser_value "${lines[6]}")
-use_gpu_key=$(func_parser_key "${lines[7]}")
-use_gpu_list=$(func_parser_value "${lines[7]}")
-use_mkldnn_key=$(func_parser_key "${lines[8]}")
-use_mkldnn_list=$(func_parser_value "${lines[8]}")
-cpu_threads_key=$(func_parser_key "${lines[9]}")
-cpu_threads_list=$(func_parser_value "${lines[9]}")
-batch_size_key=$(func_parser_key "${lines[10]}")
-batch_size_list=$(func_parser_value "${lines[10]}")
-use_trt_key=$(func_parser_key "${lines[11]}")
-use_trt_list=$(func_parser_value "${lines[11]}")
-precision_key=$(func_parser_key "${lines[12]}")
-precision_list=$(func_parser_value "${lines[12]}")
-infer_model_key=$(func_parser_key "${lines[13]}")
-image_dir_key=$(func_parser_key "${lines[14]}")
-infer_img_dir=$(func_parser_value "${lines[14]}")
-save_log_key=$(func_parser_key "${lines[15]}")
-benchmark_key=$(func_parser_key "${lines[16]}")
-benchmark_value=$(func_parser_value "${lines[16]}")
-infer_key1=$(func_parser_key "${lines[17]}")
-infer_value1=$(func_parser_value "${lines[17]}")
-
-
-LOG_PATH="./test_tipc/output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results_python.log"
-
-
-if [ ${MODE} = "whole_infer" ]; then
- GPUID=$3
- if [ ${#GPUID} -le 0 ];then
- env=" "
- else
- env="export CUDA_VISIBLE_DEVICES=${GPUID}"
- fi
- # set CUDA_VISIBLE_DEVICES
- eval $env
- export Count=0
- IFS="|"
- infer_run_exports=(${infer_export_list})
- infer_quant_flag=(${infer_is_quant})
- for infer_model in ${infer_model_dir_list[*]}; do
- # run export
- if [ ${infer_run_exports[Count]} != "null" ];then
- save_infer_dir=$(dirname $infer_model)
- set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
- set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
- export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
- echo ${infer_run_exports[Count]}
- echo $export_cmd
- eval $export_cmd
- status_export=$?
- status_check $status_export "${export_cmd}" "${status_log}"
- else
- save_infer_dir=${infer_model}
- fi
- #run inference
- is_quant=${infer_quant_flag[Count]}
- if [ ${MODE} = "klquant_infer" ]; then
- is_quant="True"
- fi
- func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
- Count=$(($Count + 1))
- done
-fi
-
diff --git a/test_tipc/test_inference_python.sh b/test_tipc/test_inference_python.sh
new file mode 100644
index 0000000000..27276d55b9
--- /dev/null
+++ b/test_tipc/test_inference_python.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+source test_tipc/common_func.sh
+#source test_tipc/test_train_inference_python.sh
+
+FILENAME=$1
+# MODE must be one of ['whole_infer']
+MODE=$2
+
+dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)
+
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+model_name=$(func_parser_value "${lines[1]}")
+python=$(func_parser_value "${lines[2]}")
+
+infer_model_dir_list=$(func_parser_value "${lines[3]}")
+infer_export_list=$(func_parser_value "${lines[4]}")
+infer_is_quant=$(func_parser_value "${lines[5]}")
+# parser inference
+inference_py=$(func_parser_value "${lines[6]}")
+use_gpu_key=$(func_parser_key "${lines[7]}")
+use_gpu_list=$(func_parser_value "${lines[7]}")
+use_mkldnn_key=$(func_parser_key "${lines[8]}")
+use_mkldnn_list=$(func_parser_value "${lines[8]}")
+cpu_threads_key=$(func_parser_key "${lines[9]}")
+cpu_threads_list=$(func_parser_value "${lines[9]}")
+batch_size_key=$(func_parser_key "${lines[10]}")
+batch_size_list=$(func_parser_value "${lines[10]}")
+use_trt_key=$(func_parser_key "${lines[11]}")
+use_trt_list=$(func_parser_value "${lines[11]}")
+precision_key=$(func_parser_key "${lines[12]}")
+precision_list=$(func_parser_value "${lines[12]}")
+infer_model_key=$(func_parser_key "${lines[13]}")
+image_dir_key=$(func_parser_key "${lines[14]}")
+infer_img_dir=$(func_parser_value "${lines[14]}")
+rec_model_key=$(func_parser_key "${lines[15]}")
+rec_model_value=$(func_parser_value "${lines[15]}")
+benchmark_key=$(func_parser_key "${lines[16]}")
+benchmark_value=$(func_parser_value "${lines[16]}")
+infer_key1=$(func_parser_key "${lines[17]}")
+infer_value1=$(func_parser_value "${lines[17]}")
+
+
+
+LOG_PATH="./test_tipc/output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results_python.log"
+
+
+function func_inference(){
+ IFS='|'
+ _python=$1
+ _script=$2
+ _model_dir=$3
+ _log_path=$4
+ _img_dir=$5
+ _flag_quant=$6
+ # inference
+ for use_gpu in ${use_gpu_list[*]}; do
+ if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
+ for use_mkldnn in ${use_mkldnn_list[*]}; do
+ if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
+ continue
+ fi
+ for threads in ${cpu_threads_list[*]}; do
+ for batch_size in ${batch_size_list[*]}; do
+ for precision in ${precision_list[*]}; do
+ if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
+ continue
+                        fi # skip when fp16 is enabled but mkldnn is disabled
+ if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
+ continue
+                        fi # skip quant-model inference when precision is not int8
+ set_precision=$(func_set_params "${precision_key}" "${precision}")
+
+ _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
+ set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+ set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+ set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+ set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
+ set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
+ set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+ set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
+ set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+ command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+ eval $command
+ last_status=${PIPESTATUS[0]}
+ eval "cat ${_save_log_path}"
+ status_check $last_status "${command}" "${status_log}"
+ done
+ done
+ done
+ done
+ elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
+ for use_trt in ${use_trt_list[*]}; do
+ for precision in ${precision_list[*]}; do
+ if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+ continue
+ fi
+ if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+ continue
+ fi
+ if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then
+ continue
+ fi
+ for batch_size in ${batch_size_list[*]}; do
+ _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+ set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
+ set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
+ set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+ set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
+ set_precision=$(func_set_params "${precision_key}" "${precision}")
+ set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+                    set_infer_params0=$(func_set_params "${rec_model_key}" "${rec_model_value}")
+ set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
+ command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
+ eval $command
+ last_status=${PIPESTATUS[0]}
+ eval "cat ${_save_log_path}"
+ status_check $last_status "${command}" "${status_log}"
+
+ done
+ done
+ done
+ else
+        echo "Currently, hardware other than CPU and GPU is not supported!"
+ fi
+ done
+}
+
+if [ ${MODE} = "whole_infer" ]; then
+ GPUID=$3
+ if [ ${#GPUID} -le 0 ];then
+ env=" "
+ else
+ env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+ fi
+ # set CUDA_VISIBLE_DEVICES
+ eval $env
+ export Count=0
+ IFS="|"
+ infer_run_exports=(${infer_export_list})
+ infer_quant_flag=(${infer_is_quant})
+ for infer_model in ${infer_model_dir_list[*]}; do
+ # run export
+ if [ ${infer_run_exports[Count]} != "null" ];then
+ save_infer_dir=$(dirname $infer_model)
+ set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
+ set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
+ export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key}"
+ echo ${infer_run_exports[Count]}
+ eval $export_cmd
+ status_export=$?
+ status_check $status_export "${export_cmd}" "${status_log}"
+ else
+ save_infer_dir=${infer_model}
+ fi
+ #run inference
+ is_quant=${infer_quant_flag[Count]}
+ if [ ${MODE} = "klquant_infer" ]; then
+ is_quant="True"
+ fi
+ func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+ Count=$(($Count + 1))
+ done
+fi
+
+
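For reference, the new script takes the same positional arguments as the other TIPC entry points (config file, mode, optional GPU id). A hypothetical invocation, with a placeholder config path:

    # The config file path below is illustrative only.
    bash test_tipc/test_inference_python.sh test_tipc/configs/<model>/inference_python.txt whole_infer 0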
diff --git a/test_tipc/test_serving.sh b/test_tipc/test_serving.sh
index c36935a60f..1318d012d4 100644
--- a/test_tipc/test_serving.sh
+++ b/test_tipc/test_serving.sh
@@ -10,7 +10,7 @@ lines=(${dataline})
# parser serving
model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
+python_list=$(func_parser_value "${lines[2]}")
trans_model_py=$(func_parser_value "${lines[3]}")
infer_model_dir_key=$(func_parser_key "${lines[4]}")
infer_model_dir_value=$(func_parser_value "${lines[4]}")
@@ -54,14 +54,15 @@ function func_serving(){
set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
set_image_dir=$(func_set_params "${image_dir_key}" "${image_dir_value}")
- trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+ python_list=(${python_list})
+ trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
eval $trans_model_cmd
cd ${serving_dir_value}
echo $PWD
unset https_proxy
unset http_proxy
- for python in ${python[*]}; do
- if [ ${python} = "cpp"]; then
+ for python in ${python_list[*]}; do
+ if [ ${python} = "cpp" ]; then
for use_gpu in ${web_use_gpu_list[*]}; do
if [ ${use_gpu} = "null" ]; then
web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
@@ -91,9 +92,6 @@ function func_serving(){
echo ${ues_gpu}
if [ ${use_gpu} = "null" ]; then
for use_mkldnn in ${web_use_mkldnn_list[*]}; do
- if [ ${use_mkldnn} = "False" ]; then
- continue
- fi
for threads in ${web_cpu_threads_list[*]}; do
set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
@@ -124,6 +122,9 @@ function func_serving(){
continue
fi
set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
+ if [ ${use_trt} = True ]; then
+ device_type=2
+ fi
set_precision=$(func_set_params "${web_precision_key}" "${precision}")
web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
eval $web_service_cmd
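The python -> python_list rename matters because the config value can list several runtimes separated by '|' (e.g. a cpp entry plus a Python interpreter); with IFS='|' the unquoted expansion splits it into an array whose first element drives trans_model_cmd. A small sketch of that pattern, with an illustrative value:

    IFS='|'
    python_list="cpp|python3.7"      # illustrative config value
    python_list=(${python_list})     # unquoted expansion splits on IFS
    echo "${python_list[0]}"         # -> cpp, used for trans_model_cmd
    for python in ${python_list[*]}; do
        echo "serving runtime: ${python}"
    done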
diff --git a/test_tipc/test_train_inference_python.sh b/test_tipc/test_train_inference_python.sh
index 7d03525652..b69c0f278f 100644
--- a/test_tipc/test_train_inference_python.sh
+++ b/test_tipc/test_train_inference_python.sh
@@ -90,36 +90,39 @@ infer_value1=$(func_parser_value "${lines[50]}")
# parser klquant_infer
if [ ${MODE} = "klquant_whole_infer" ]; then
- dataline=$(awk 'NR==1 NR==17{print}' $FILENAME)
+ dataline=$(awk 'NR==1, NR==17{print}' $FILENAME)
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
+ export_weight=$(func_parser_key "${lines[3]}")
+ save_infer_key=$(func_parser_key "${lines[4]}")
# parser inference model
- infer_model_dir_list=$(func_parser_value "${lines[3]}")
- infer_export_list=$(func_parser_value "${lines[4]}")
- infer_is_quant=$(func_parser_value "${lines[5]}")
+ infer_model_dir_list=$(func_parser_value "${lines[5]}")
+ infer_export_list=$(func_parser_value "${lines[6]}")
+ infer_is_quant=$(func_parser_value "${lines[7]}")
# parser inference
- inference_py=$(func_parser_value "${lines[6]}")
- use_gpu_key=$(func_parser_key "${lines[7]}")
- use_gpu_list=$(func_parser_value "${lines[7]}")
- use_mkldnn_key=$(func_parser_key "${lines[8]}")
- use_mkldnn_list=$(func_parser_value "${lines[8]}")
- cpu_threads_key=$(func_parser_key "${lines[9]}")
- cpu_threads_list=$(func_parser_value "${lines[9]}")
- batch_size_key=$(func_parser_key "${lines[10]}")
- batch_size_list=$(func_parser_value "${lines[10]}")
- use_trt_key=$(func_parser_key "${lines[11]}")
- use_trt_list=$(func_parser_value "${lines[11]}")
- precision_key=$(func_parser_key "${lines[12]}")
- precision_list=$(func_parser_value "${lines[12]}")
- infer_model_key=$(func_parser_key "${lines[13]}")
- image_dir_key=$(func_parser_key "${lines[14]}")
- infer_img_dir=$(func_parser_value "${lines[14]}")
- save_log_key=$(func_parser_key "${lines[15]}")
- benchmark_key=$(func_parser_key "${lines[16]}")
- benchmark_value=$(func_parser_value "${lines[16]}")
- infer_key1=$(func_parser_key "${lines[17]}")
- infer_value1=$(func_parser_value "${lines[17]}")
+ inference_py=$(func_parser_value "${lines[8]}")
+ use_gpu_key=$(func_parser_key "${lines[9]}")
+ use_gpu_list=$(func_parser_value "${lines[9]}")
+ use_mkldnn_key=$(func_parser_key "${lines[10]}")
+ use_mkldnn_list=$(func_parser_value "${lines[10]}")
+ cpu_threads_key=$(func_parser_key "${lines[11]}")
+ cpu_threads_list=$(func_parser_value "${lines[11]}")
+ batch_size_key=$(func_parser_key "${lines[12]}")
+ batch_size_list=$(func_parser_value "${lines[12]}")
+ use_trt_key=$(func_parser_key "${lines[13]}")
+ use_trt_list=$(func_parser_value "${lines[13]}")
+ precision_key=$(func_parser_key "${lines[14]}")
+ precision_list=$(func_parser_value "${lines[14]}")
+ infer_model_key=$(func_parser_key "${lines[15]}")
+ image_dir_key=$(func_parser_key "${lines[16]}")
+ infer_img_dir=$(func_parser_value "${lines[16]}")
+ save_log_key=$(func_parser_key "${lines[17]}")
+ save_log_value=$(func_parser_value "${lines[17]}")
+ benchmark_key=$(func_parser_key "${lines[18]}")
+ benchmark_value=$(func_parser_value "${lines[18]}")
+ infer_key1=$(func_parser_key "${lines[19]}")
+ infer_value1=$(func_parser_value "${lines[19]}")
fi
LOG_PATH="./test_tipc/output"
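The shifted lines[...] indices above come from the two extra key:value lines (export_weight, save_infer_key) now expected at positions 3 and 4 of the klquant config, which push every later field down by two. Each config line is split on ':'; a sketch of the parser helpers as assumed from test_tipc/common_func.sh:

    # Assumed helpers: "key:value" -> key and value respectively.
    function func_parser_key(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[0]}
    }
    function func_parser_value(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[1]}
    }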
@@ -157,10 +160,12 @@ function func_inference(){
set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
+ set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+ set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
- command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+ command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
@@ -189,8 +194,9 @@ function func_inference(){
set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
set_precision=$(func_set_params "${precision_key}" "${precision}")
set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
+ set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
- command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 "
+ command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
eval $command
last_status=${PIPESTATUS[0]}
eval "cat ${_save_log_path}"
@@ -235,7 +241,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
fi
#run inference
is_quant=${infer_quant_flag[Count]}
- if [ ${MODE} = "klquant_infer" ]; then
+ if [ ${MODE} = "klquant_whole_infer" ]; then
is_quant="True"
fi
func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
@@ -316,10 +322,6 @@ else
save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}"
fi
- # load pretrain from norm training if current trainer is pact or fpgm trainer
- if ([ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]) && [ ${nodes} -le 1 ]; then
- set_pretrain="${load_norm_train_model}"
- fi
set_save_model=$(func_set_params "${save_model_key}" "${save_log}")
if [ ${#gpu} -le 2 ];then # train with cpu or single gpu
@@ -335,10 +337,7 @@ else
status_check $? "${cmd}" "${status_log}"
set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}")
- # save norm trained models to set pretrain for pact training and fpgm training
- if [ ${trainer} = ${trainer_norm} ] && [ ${nodes} -le 1 ]; then
- load_norm_train_model=${set_eval_pretrain}
- fi
+
# run eval
if [ ${eval_py} != "null" ]; then
set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
diff --git a/tools/infer/predict_cls.py b/tools/infer/predict_cls.py
index a25cac2600..ab3f4b04f0 100755
--- a/tools/infer/predict_cls.py
+++ b/tools/infer/predict_cls.py
@@ -145,8 +145,6 @@ def main(args):
for ino in range(len(img_list)):
logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
cls_res[ino]))
- logger.info(
- "The predict time about text angle classify module is as follows: ")
if __name__ == "__main__":
diff --git a/tools/infer/utility.py b/tools/infer/utility.py
index bd9e14a657..21bbee098e 100644
--- a/tools/infer/utility.py
+++ b/tools/infer/utility.py
@@ -195,6 +195,7 @@ def create_predictor(args, mode, logger):
max_batch_size=args.max_batch_size,
min_subgraph_size=args.min_subgraph_size)
     # skip the minimum trt subgraph
+ use_dynamic_shape = True
if mode == "det":
min_input_shape = {
"x": [1, 3, 50, 50],
@@ -211,7 +212,7 @@ def create_predictor(args, mode, logger):
"nearest_interp_v2_0.tmp_0": [1, 256, 2, 2]
}
max_input_shape = {
- "x": [1, 3, 1280, 1280],
+ "x": [1, 3, 1536, 1536],
"conv2d_92.tmp_0": [1, 120, 400, 400],
"conv2d_91.tmp_0": [1, 24, 200, 200],
"conv2d_59.tmp_0": [1, 96, 400, 400],
@@ -260,19 +261,20 @@ def create_predictor(args, mode, logger):
max_input_shape.update(max_pact_shape)
opt_input_shape.update(opt_pact_shape)
elif mode == "rec":
+ if args.rec_algorithm != "CRNN":
+ use_dynamic_shape = False
min_input_shape = {"x": [1, 3, 32, 10]}
- max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1024]}
+ max_input_shape = {"x": [args.rec_batch_num, 3, 32, 1536]}
opt_input_shape = {"x": [args.rec_batch_num, 3, 32, 320]}
elif mode == "cls":
min_input_shape = {"x": [1, 3, 48, 10]}
max_input_shape = {"x": [args.rec_batch_num, 3, 48, 1024]}
opt_input_shape = {"x": [args.rec_batch_num, 3, 48, 320]}
else:
- min_input_shape = {"x": [1, 3, 10, 10]}
- max_input_shape = {"x": [1, 3, 512, 512]}
- opt_input_shape = {"x": [1, 3, 256, 256]}
- config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,
- opt_input_shape)
+ use_dynamic_shape = False
+ if use_dynamic_shape:
+ config.set_trt_dynamic_shape_info(
+ min_input_shape, max_input_shape, opt_input_shape)
else:
config.disable_gpu()
@@ -311,7 +313,10 @@ def create_predictor(args, mode, logger):
def get_infer_gpuid():
- cmd = "env | grep CUDA_VISIBLE_DEVICES"
+ if not paddle.fluid.core.is_compiled_with_rocm():
+ cmd = "env | grep CUDA_VISIBLE_DEVICES"
+ else:
+ cmd = "env | grep HIP_VISIBLE_DEVICES"
env_cuda = os.popen(cmd).readlines()
if len(env_cuda) == 0:
return 0
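The utility.py change gates set_trt_dynamic_shape_info behind a use_dynamic_shape flag: det, cls, and CRNN-based rec keep their tuned shape ranges, while non-CRNN recognizers and unrecognized modes now fall back to static shapes instead of the old hard-coded 512x512 range. Condensed into a standalone sketch (shape values copied from the diff; the helper name is hypothetical, and the large "det" branch is omitted for brevity):

    # config is a paddle.inference.Config with TensorRT enabled;
    # set_trt_dynamic_shape_info is the real Paddle Inference API.
    def configure_trt_shapes(config, mode, rec_algorithm="CRNN", rec_batch_num=6):
        use_dynamic_shape = True
        if mode == "rec":
            if rec_algorithm != "CRNN":
                use_dynamic_shape = False  # non-CRNN recognizers opt out
            min_shape = {"x": [1, 3, 32, 10]}
            max_shape = {"x": [rec_batch_num, 3, 32, 1536]}
            opt_shape = {"x": [rec_batch_num, 3, 32, 320]}
        elif mode == "cls":
            min_shape = {"x": [1, 3, 48, 10]}
            max_shape = {"x": [rec_batch_num, 3, 48, 1024]}
            opt_shape = {"x": [rec_batch_num, 3, 48, 320]}
        else:
            # unknown modes: static shapes ("det" keeps its own ranges
            # in the real code; omitted here)
            use_dynamic_shape = False
        if use_dynamic_shape:
            config.set_trt_dynamic_shape_info(min_shape, max_shape, opt_shape)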
diff --git a/tools/infer_det.py b/tools/infer_det.py
index bb2cca7362..1c679e0faf 100755
--- a/tools/infer_det.py
+++ b/tools/infer_det.py
@@ -53,6 +53,7 @@ def draw_det_res(dt_boxes, config, img, img_name, save_path):
logger.info("The detected Image saved in {}".format(save_path))
+@paddle.no_grad()
def main():
global_config = config['Global']
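The @paddle.no_grad() decorator disables gradient bookkeeping for the whole inference entry point, which lowers memory use; paddle.no_grad works both as a context manager and, as in this diff, a decorator. A minimal example:

    import paddle

    # No autograd graph is recorded inside the decorated function.
    @paddle.no_grad()
    def predict(model, x):
        return model(x)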