From 7adf8c1b3213ec0573ccd7e3b3d05dc3c6c07a80 Mon Sep 17 00:00:00 2001 From: Stanley Winata <68087699+raikonenfnu@users.noreply.github.com> Date: Tue, 26 Nov 2024 23:46:24 -0800 Subject: [PATCH] [mlperf][pkgci] Update punet-fp8 with reduction dim as last dim (#19316) We have changes in sharktank that convert the reduction dim of the custom attention to be the fastest dimension. This makes it more uniform with the FP16 and canonical attention form and hopefully makes optimization get called more easily down the line with this. Additionally, this is to prefetch so that we do not break the coming sharktank/mlperf bots and runs. Signed-off-by: Stanley Winata --- .../external_test_suite/attention_and_matmul_spec_punet.mlir | 2 +- .../regression_suite/shark-test-suite-models/sdxl/test_unet.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir b/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir index 7b0944471990..16049fad2543 100644 --- a/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir +++ b/build_tools/pkgci/external_test_suite/attention_and_matmul_spec_punet.mlir @@ -76,7 +76,7 @@ transform.named_sequence @match_attention_f8(%attention: !transform.any_op {tran transform.iree.match.cast_compatible_type %in0 = tensor : !transform.any_value %config = transform.param.constant #iree_codegen.compilation_info< - lowering_config = #iree_gpu.lowering_config<{workgroup = [1, 1, 64, 0, 0, 0], reduction=[0, 0, 0, 0, 64, 0], promote_operands = [1, 2]}>, + lowering_config = #iree_gpu.lowering_config<{workgroup = [1, 1, 64, 0, 0, 0], reduction=[0, 0, 0, 0, 0, 64], promote_operands = [1, 2]}>, translation_info = #iree_codegen.translation_info