[Inference Optimize] Add Qwen2-VL inference optimize #820

Open · wants to merge 3 commits into base: develop
61 changes: 56 additions & 5 deletions paddlemix/examples/qwen2_vl/multi_image_infer.py
@@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

import paddle
from paddlenlp.transformers import Qwen2Tokenizer

from paddlemix.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration
@@ -21,7 +24,12 @@
process_vision_info,
)

MODEL_NAME = "Qwen/Qwen2-VL-2B-Instruct"
benchmark = True
warm_up = 3

# MODEL_NAME = "Qwen/Qwen2-VL-2B-Instruct"
MODEL_NAME = "Qwen/Qwen2-VL-7B-Instruct"

model = Qwen2VLForConditionalGeneration.from_pretrained(MODEL_NAME, dtype="bfloat16")

image_processor = Qwen2VLImageProcessor()
@@ -37,8 +45,14 @@
{
"role": "user",
"content": [
{"type": "image", "image": "paddlemix/demo_images/examples_image1.jpg"},
{"type": "image", "image": "paddlemix/demo_images/examples_image2.jpg"},
{
"type": "image",
"image": "/root/paddlejob/workspace/env_run/output/changwenbin/PaddleMIX/paddlemix/demo_images/examples_image1.jpg",
},
{
"type": "image",
"image": "/root/paddlejob/workspace/env_run/output/changwenbin/PaddleMIX/paddlemix/demo_images/examples_image2.jpg",
},
{"type": "text", "text": "Identify the similarities between these images."},
],
}
@@ -59,7 +73,44 @@
return_tensors="pd",
)

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

if warm_up > 0:
for _ in range(warm_up):
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle
if benchmark:
repeat_times = 10
sumtime = 0.0
for i in range(repeat_times):
paddle.device.synchronize()
starttime = datetime.datetime.now()

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

paddle.device.synchronize()
endtime = datetime.datetime.now()

duringtime = endtime - starttime
duringtime = duringtime.seconds * 1000 + duringtime.microseconds / 1000.0
sumtime += duringtime
print(f"Multi {MODEL_NAME} end to end time : ", duringtime, "ms")

paddle.device.cuda.empty_cache()
inference_global_mem = paddle.device.cuda.memory_reserved() / (1024**3)
print(f"Inference used CUDA memory : {inference_global_mem:.3f} GiB")

print(f"Multi {MODEL_NAME} ave end to end time : ", sumtime / repeat_times, "ms")

paddle.device.cuda.empty_cache()
inference_global_mem = paddle.device.cuda.memory_reserved() / (1024**3)
print(f"Inference used CUDA memory : {inference_global_mem:.3f} GiB")
cuda_mem_after_used = paddle.device.cuda.max_memory_allocated() / (1024**3)
print(f"Max used CUDA memory : {cuda_mem_after_used:.3f} GiB")
else:
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle


output_text = processor.batch_decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print("output_text:\n", output_text[0])
90 changes: 86 additions & 4 deletions paddlemix/examples/qwen2_vl/single_image_infer.py
@@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

import paddle
from paddlenlp.transformers import Qwen2Tokenizer

from paddlemix.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration
@@ -20,8 +23,32 @@
Qwen2VLProcessor,
process_vision_info,
)
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description="Use PaddleMIX to accelerate Qwen2-VL multimodal inference."
    )
parser.add_argument(
"--benchmark",
type=(lambda x: str(x).lower() in ["true", "1", "yes"]),
default=False,
help="if set to True, measure inference performance",
)
parser.add_argument(
"--inference_optimize",
type=(lambda x: str(x).lower() in ["true", "1", "yes"]),
default=False,
help="If set to True, all optimizations except Triton are enabled.",
)
return parser.parse_args()

args = parse_args()

MODEL_NAME = "Qwen/Qwen2-VL-2B-Instruct"
# MODEL_NAME = "Qwen/Qwen2-VL-7B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(MODEL_NAME, dtype="bfloat16")

image_processor = Qwen2VLImageProcessor()
@@ -32,14 +59,14 @@
# max_pixels = 1280*28*28 # 1003520
# processor = Qwen2VLProcessor(image_processor, tokenizer, min_pixels=min_pixels, max_pixels=max_pixels)


messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": "paddlemix/demo_images/examples_image1.jpg",
# "image": "paddlemix/demo_images/examples_image1.jpg",
"image": "/root/paddlejob/workspace/env_run/output/changwenbin/PaddleMIX/paddlemix/demo_images/examples_image1.jpg",
},
{"type": "text", "text": "Describe this image."},
],
@@ -61,7 +88,62 @@
return_tensors="pd",
)

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

if args.benchmark:
warm_up = 3
for _ in range(warm_up):
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle
repeat_times = 10
sumtime = 0.0
for i in range(repeat_times):
paddle.device.synchronize()
starttime = datetime.datetime.now()

paddle.device.synchronize()
import nvtx

generate_nvtx = nvtx.start_range(message="generate", color="green")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

paddle.device.synchronize()
nvtx.end_range(generate_nvtx)

paddle.device.synchronize()
endtime = datetime.datetime.now()

duringtime = endtime - starttime
duringtime = duringtime.seconds * 1000 + duringtime.microseconds / 1000.0
sumtime += duringtime
print(f"Single {MODEL_NAME} end to end time : ", duringtime, "ms")

paddle.device.cuda.empty_cache()
inference_global_mem = paddle.device.cuda.memory_reserved() / (1024**3)
print(f"Inference used CUDA memory : {inference_global_mem:.3f} GiB")

print(f"Single {MODEL_NAME} ave end to end time : ", sumtime / repeat_times, "ms")

paddle.device.cuda.empty_cache()
inference_global_mem = paddle.device.cuda.memory_reserved() / (1024**3)
print(f"Inference used CUDA memory : {inference_global_mem:.3f} GiB")
cuda_mem_after_used = paddle.device.cuda.max_memory_allocated() / (1024**3)
print(f"Max used CUDA memory : {cuda_mem_after_used:.3f} GiB")
else:
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

output_text = processor.batch_decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print("output_text:\n", output_text[0])
58 changes: 55 additions & 3 deletions paddlemix/examples/qwen2_vl/video_infer.py
@@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

import paddle
from paddlenlp.transformers import Qwen2Tokenizer

from paddlemix.models.qwen2_vl.modeling_qwen2_vl import Qwen2VLForConditionalGeneration
@@ -21,6 +24,10 @@
process_vision_info,
)

benchmark = True
warm_up = 3

# MODEL_NAME = "Qwen/Qwen2-VL-2B-Instruct"
MODEL_NAME = "Qwen/Qwen2-VL-7B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(MODEL_NAME, dtype="bfloat16")

@@ -37,7 +44,7 @@
"content": [
{
"type": "video",
"video": "paddlemix/demo_images/red-panda.mp4",
"video": "/root/paddlejob/workspace/env_run/output/changwenbin/PaddleMIX/paddlemix/demo_images/red-panda.mp4",
"max_pixels": 360 * 420,
"fps": 1.0,
},
@@ -58,8 +65,53 @@
padding=True,
return_tensors="pd",
)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

if warm_up > 0:
for _ in range(warm_up):
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle
if benchmark:
repeat_times = 10
sumtime = 0.0
for i in range(repeat_times):
paddle.device.synchronize()
starttime = datetime.datetime.now()

paddle.device.synchronize()
import nvtx

generate_nvtx = nvtx.start_range(message="generate", color="green")

# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle

paddle.device.synchronize()
nvtx.end_range(generate_nvtx)

paddle.device.synchronize()
endtime = datetime.datetime.now()

duringtime = endtime - starttime
duringtime = duringtime.seconds * 1000 + duringtime.microseconds / 1000.0
sumtime += duringtime
print(f"Video {MODEL_NAME} end to end time : ", duringtime, "ms")

paddle.device.cuda.empty_cache()
inference_global_mem = paddle.device.cuda.memory_reserved() / (1024**3)
print(f"Inference used CUDA memory : {inference_global_mem:.3f} GiB")

print(f"Video {MODEL_NAME} ave end to end time : ", sumtime / repeat_times, "ms")

paddle.device.cuda.empty_cache()
inference_global_mem = paddle.device.cuda.memory_reserved() / (1024**3)
print(f"Inference used CUDA memory : {inference_global_mem:.3f} GiB")
cuda_mem_after_used = paddle.device.cuda.max_memory_allocated() / (1024**3)
print(f"Max used CUDA memory : {cuda_mem_after_used:.3f} GiB")
else:
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=128) # already trimmed in paddle


output_text = processor.batch_decode(generated_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
print("output_text:\n", output_text[0])
2 changes: 1 addition & 1 deletion paddlemix/models/qwen2_vl/modeling_qwen2_vl.py
@@ -1610,4 +1610,4 @@ def prepare_inputs_for_generation(
"rope_deltas": rope_deltas, # [[-3504]]
}
)
return model_inputs
return model_inputs