This repository has been archived by the owner on Dec 12, 2024. It is now read-only.

Integrate LLVM at llvm/llvm-project@d1a83ff
Browse files Browse the repository at this point in the history
Updates LLVM usage to match llvm/llvm-project@d1a83ff. Further updates
the StableHLO submodule to openxla/stablehlo@059319f.
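
With this StableHLO version, ops such as slice, dynamic_slice, pad, and
transpose carry their i64 index attributes as array<i64: ...>
(DenseI64ArrayAttr) instead of dense<...> : tensor<Nxi64>
(DenseIntElementsAttr), so the EmitC conversion patterns rebuild the dense
form before emitting call arguments. A minimal sketch of that bridging,
assuming the usual MLIR builtin attribute/type headers; the helper name
mirrors the one added in the diff below, and the transpose call site is
illustrative only:

    // DenseI64ArrayAttr converts to ArrayRef<int64_t>, so the new accessors
    // can be passed straight to this helper, which re-wraps the values in a
    // 1-D i64 tensor attribute for the emitted emitc.call_opaque arguments.
    DenseIntElementsAttr getI64ElementsAttr(ArrayRef<int64_t> values,
                                            MLIRContext *ctx) {
      auto ty = RankedTensorType::get({static_cast<int64_t>(values.size())},
                                      IntegerType::get(ctx, 64));
      return DenseIntElementsAttr::get(ty, values);
    }

    arguments.push_back(getI64ElementsAttr(transposeOp.getPermutationAttr(),
                                           transposeOp.getContext()));

This is also why the FileCheck expectations in the tests stay unchanged: the
lowering still materializes dense<...> : tensor<Nxi64> arguments from the new
array<i64: ...> inputs.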

Co-authored-by: Simon Camphausen <[email protected]>
marbre and simon-camp committed Jan 9, 2024
1 parent de724e7 commit 2aadf0e
Showing 5 changed files with 39 additions and 22 deletions.
2 changes: 1 addition & 1 deletion build_tools/llvm_version.txt
@@ -1 +1 @@
-511ba45a47d6f9e48ad364181830c9fb974135b2
+d1a83ff3e0274f26746e874d480c866bec3818d6
33 changes: 25 additions & 8 deletions lib/Conversion/StablehloToEmitC/StablehloToEmitC.cpp
@@ -36,6 +36,14 @@ DenseIntElementsAttr i64ElementsAttr(int64_t value, size_t count,
return DenseIntElementsAttr::get(ty, values);
}

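+// Wraps a list of i64 values in a 1-D DenseIntElementsAttr (tensor<Nxi64>);
+// used to rebuild the dense form from the array-based StableHLO accessors.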
+DenseIntElementsAttr getI64ElementsAttr(ArrayRef<int64_t> values,
+                                        MLIRContext *ctx) {
+  RankedTensorType ty = RankedTensorType::get(
+      {static_cast<int64_t>(values.size())}, IntegerType::get(ctx, 64));
+
+  return DenseIntElementsAttr::get(ty, values);
+}

SmallVector<Attribute, 2> indexSequence(int64_t n, MLIRContext *ctx) {
return llvm::to_vector<2>(
llvm::map_range(llvm::seq<int64_t>(0, n), [&ctx](int64_t i) -> Attribute {
@@ -309,11 +317,15 @@ class SliceOpConversion : public OpConversionPattern<stablehlo::SliceOp> {
SmallVector<Attribute, 2> arguments =
indexSequence(adaptor.getOperands().size(), sliceOp.getContext());

-arguments.push_back(sliceOp.getStartIndices());
-arguments.push_back(sliceOp.getLimitIndices());
-arguments.push_back(sliceOp.getStrides());
+arguments.push_back(getI64ElementsAttr(sliceOp.getStartIndicesAttr(),
+                                       sliceOp.getContext()));
+arguments.push_back(getI64ElementsAttr(sliceOp.getLimitIndicesAttr(),
+                                       sliceOp.getContext()));
+arguments.push_back(
+    getI64ElementsAttr(sliceOp.getStridesAttr(), sliceOp.getContext()));

ArrayAttr args = rewriter.getArrayAttr(arguments);

ArrayAttr templateArgs =
rewriter.getArrayAttr({TypeAttr::get(sliceOp.getResult().getType())});

@@ -344,7 +356,8 @@ class DynamicSliceOpConversion
SmallVector<Attribute, 2> arguments = indexSequence(
adaptor.getOperands().size(), dynamicSliceOp.getContext());

-arguments.push_back(dynamicSliceOp.getSliceSizes());
+arguments.push_back(getI64ElementsAttr(dynamicSliceOp.getSliceSizesAttr(),
+                                       dynamicSliceOp.getContext()));

ArrayAttr args = rewriter.getArrayAttr(arguments);

@@ -408,9 +421,12 @@ class PadOpConversion : public OpConversionPattern<stablehlo::PadOp> {
SmallVector<Attribute, 2> arguments =
indexSequence(adaptor.getOperands().size(), padOp.getContext());

-arguments.push_back(padOp.getEdgePaddingLow());
-arguments.push_back(padOp.getEdgePaddingHigh());
-arguments.push_back(padOp.getInteriorPadding());
+arguments.push_back(
+    getI64ElementsAttr(padOp.getEdgePaddingLowAttr(), padOp.getContext()));
+arguments.push_back(
+    getI64ElementsAttr(padOp.getEdgePaddingHighAttr(), padOp.getContext()));
+arguments.push_back(
+    getI64ElementsAttr(padOp.getInteriorPaddingAttr(), padOp.getContext()));

ArrayAttr args = rewriter.getArrayAttr(arguments);

@@ -443,7 +459,8 @@ class TransposeOpConversion
SmallVector<Attribute> arguments =
indexSequence(adaptor.getOperands().size(), transposeOp.getContext());

-arguments.push_back(transposeOp.getPermutation());
+arguments.push_back(getI64ElementsAttr(transposeOp.getPermutationAttr(),
+                                       transposeOp.getContext()));
ArrayAttr args = rewriter.getArrayAttr(arguments);

Type resultType = transposeOp.getResult().getType();
16 changes: 8 additions & 8 deletions test/Conversion/stablehlo-to-emitc.mlir
@@ -244,19 +244,19 @@ func.func @stablehlo_compare(%arg0: tensor<4xi32>, %arg1: tensor<4xi32>) -> tens

func.func @stablehlo_slice(%arg0: tensor<12xi32>, %arg1: tensor<8x7xi32>) -> tensor<4x3xi32> {
// CHECK: emitc.call_opaque "emitc::stablehlo::slice"(%arg0) {args = [0 : index, dense<0> : tensor<1xi64>, dense<1> : tensor<1xi64>, dense<1> : tensor<1xi64>], template_args = [tensor<1xi32>]} : (tensor<12xi32>) -> tensor<1xi32>
-%0 = "stablehlo.slice"(%arg0) {limit_indices = dense<1> : tensor<1xi64>, start_indices = dense<0> : tensor<1xi64>, strides = dense<1> : tensor<1xi64>} : (tensor<12xi32>) -> tensor<1xi32>
+%0 = "stablehlo.slice"(%arg0) {limit_indices = array<i64: 1>, start_indices = array<i64: 0>, strides = array<i64: 1>} : (tensor<12xi32>) -> tensor<1xi32>
// CHECK: emitc.call_opaque "emitc::stablehlo::slice"(%arg1) {args = [0 : index, dense<0> : tensor<2xi64>, dense<[4, 3]> : tensor<2xi64>, dense<1> : tensor<2xi64>], template_args = [tensor<4x3xi32>]} : (tensor<8x7xi32>) -> tensor<4x3xi32>
-%1 = "stablehlo.slice"(%arg1) {limit_indices = dense<[4, 3]> : tensor<2xi64>, start_indices = dense<0> : tensor<2xi64>, strides = dense<1> : tensor<2xi64>} : (tensor<8x7xi32>) -> tensor<4x3xi32>
+%1 = "stablehlo.slice"(%arg1) {limit_indices = array<i64: 4, 3>, start_indices = array<i64: 0, 0>, strides = array<i64: 1, 1>} : (tensor<8x7xi32>) -> tensor<4x3xi32>
return %1 : tensor<4x3xi32>
}

func.func @stablehlo_dynamic_slice(%arg0: tensor<12xi32>, %arg1: tensor<8x7xi32>) -> () {
%cst = "arith.constant"() {value = dense<1> : tensor<i64>} : () -> tensor<i64>
%cst_0 = "arith.constant"() {value = dense<3> : tensor<i64>} : () -> tensor<i64>
// CHECK: emitc.call_opaque "emitc::stablehlo::dynamic_slice"(%arg0, %cst) {args = [0 : index, 1 : index, dense<4> : tensor<1xi64>], template_args = [tensor<4xi32>]} : (tensor<12xi32>, tensor<i64>) -> tensor<4xi32>
-%0 = "stablehlo.dynamic_slice"(%arg0, %cst) {slice_sizes = dense<4> : tensor<1xi64>} : (tensor<12xi32>, tensor<i64>) -> tensor<4xi32>
+%0 = "stablehlo.dynamic_slice"(%arg0, %cst) {slice_sizes = array<i64: 4>} : (tensor<12xi32>, tensor<i64>) -> tensor<4xi32>
// CHECK: emitc.call_opaque "emitc::stablehlo::dynamic_slice"(%arg1, %cst, %cst_0) {args = [0 : index, 1 : index, 2 : index, dense<[4, 2]> : tensor<2xi64>], template_args = [tensor<4x2xi32>]} : (tensor<8x7xi32>, tensor<i64>, tensor<i64>) -> tensor<4x2xi32>
-%1 = "stablehlo.dynamic_slice"(%arg1, %cst, %cst_0) {slice_sizes = dense<[4, 2]> : tensor<2xi64>} : (tensor<8x7xi32>, tensor<i64>, tensor<i64>) -> tensor<4x2xi32>
+%1 = "stablehlo.dynamic_slice"(%arg1, %cst, %cst_0) {slice_sizes = array<i64: 4, 2>} : (tensor<8x7xi32>, tensor<i64>, tensor<i64>) -> tensor<4x2xi32>
return
}

@@ -348,9 +348,9 @@ func.func @stablehlo_dot(%arg0: tensor<512x512xf32>) -> tensor<512x512xf32> {
func.func @stablehlo_pad(%arg0: tensor<2x3xf32>, %arg1: tensor<f32>) -> tensor<4x7xf32> {
// CHECK: emitc.call_opaque "emitc::stablehlo::pad"(%arg0, %arg1) {args = [0 : index, 1 : index, dense<-1> : tensor<2xi64>, dense<1> : tensor<2xi64>, dense<2> : tensor<2xi64>], template_args = [tensor<4x7xf32>]} : (tensor<2x3xf32>, tensor<f32>) -> tensor<4x7xf32>
%0 = "stablehlo.pad"(%arg0, %arg1) {
-edge_padding_low = dense<-1> : tensor<2xi64>,
-edge_padding_high = dense<1> : tensor<2xi64>,
-interior_padding = dense<2> : tensor<2xi64>
+edge_padding_low = array<i64: -1, -1>,
+edge_padding_high = array<i64: 1, 1>,
+interior_padding = array<i64: 2, 2>
} : (tensor<2x3xf32>, tensor<f32>) -> tensor<4x7xf32>
return %0 : tensor<4x7xf32>
}
@@ -421,7 +421,7 @@ func.func @select_scalar_pred(%arg0: tensor<i1>, %arg1: tensor<2x3xi32>, %arg2:

func.func @stablehlo_transpose(%arg0: tensor<2x3x4xf32>) -> tensor<4x3x2xf32> {
// CHECK: emitc.call_opaque "emitc::stablehlo::transpose"(%arg0) {args = [0 : index, dense<[2, 1, 0]> : tensor<3xi64>], template_args = [tensor<4x3x2xf32>]} : (tensor<2x3x4xf32>) -> tensor<4x3x2xf32>
-%0 = "stablehlo.transpose"(%arg0) {permutation = dense<[2, 1, 0]> : tensor<3xi64>} : (tensor<2x3x4xf32>) -> tensor<4x3x2xf32>
+%0 = "stablehlo.transpose"(%arg0) {permutation = array<i64: 2, 1, 0>} : (tensor<2x3x4xf32>) -> tensor<4x3x2xf32>
return %0 : tensor<4x3x2xf32>
}

8 changes: 4 additions & 4 deletions test/MobileNetV2_FakeWeights_stablehlo.mlir
@@ -58,7 +58,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
%54 = stablehlo.convolution(%53, %39) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x112x112x16xf32>, tensor<1x1x16x96xf32>) -> tensor<1x112x112x96xf32>
%55 = "stablehlo.batch_norm_inference"(%54, %21, %21, %21, %21) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x112x112x96xf32>, tensor<96xf32>, tensor<96xf32>, tensor<96xf32>, tensor<96xf32>) -> tensor<1x112x112x96xf32>
%56 = "stablehlo.clamp"(%43, %55, %44) : (tensor<f32>, tensor<1x112x112x96xf32>, tensor<f32>) -> tensor<1x112x112x96xf32>
-%57 = "stablehlo.pad"(%56, %43) {edge_padding_high = dense<[0, 1, 1, 0]> : tensor<4xi64>, edge_padding_low = dense<0> : tensor<4xi64>, interior_padding = dense<0> : tensor<4xi64>} : (tensor<1x112x112x96xf32>, tensor<f32>) -> tensor<1x113x113x96xf32>
+%57 = "stablehlo.pad"(%56, %43) {edge_padding_high = array<i64: 0, 1, 1, 0>, edge_padding_low = array<i64: 0, 0, 0, 0>, interior_padding = array<i64: 0, 0, 0, 0>} : (tensor<1x112x112x96xf32>, tensor<f32>) -> tensor<1x113x113x96xf32>
%58 = stablehlo.convolution(%57, %7) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 96 : i64} : (tensor<1x113x113x96xf32>, tensor<3x3x1x96xf32>) -> tensor<1x56x56x96xf32>
%59 = "stablehlo.batch_norm_inference"(%58, %21, %21, %21, %21) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x56x56x96xf32>, tensor<96xf32>, tensor<96xf32>, tensor<96xf32>, tensor<96xf32>) -> tensor<1x56x56x96xf32>
%60 = "stablehlo.clamp"(%43, %59, %44) : (tensor<f32>, tensor<1x56x56x96xf32>, tensor<f32>) -> tensor<1x56x56x96xf32>
@@ -76,7 +76,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
%72 = stablehlo.convolution(%71, %35) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x56x56x24xf32>, tensor<1x1x24x144xf32>) -> tensor<1x56x56x144xf32>
%73 = "stablehlo.batch_norm_inference"(%72, %34, %34, %34, %34) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x56x56x144xf32>, tensor<144xf32>, tensor<144xf32>, tensor<144xf32>, tensor<144xf32>) -> tensor<1x56x56x144xf32>
%74 = "stablehlo.clamp"(%43, %73, %44) : (tensor<f32>, tensor<1x56x56x144xf32>, tensor<f32>) -> tensor<1x56x56x144xf32>
-%75 = "stablehlo.pad"(%74, %43) {edge_padding_high = dense<[0, 1, 1, 0]> : tensor<4xi64>, edge_padding_low = dense<0> : tensor<4xi64>, interior_padding = dense<0> : tensor<4xi64>} : (tensor<1x56x56x144xf32>, tensor<f32>) -> tensor<1x57x57x144xf32>
+%75 = "stablehlo.pad"(%74, %43) {edge_padding_high = array<i64: 0, 1, 1, 0>, edge_padding_low = array<i64: 0, 0, 0, 0>, interior_padding = array<i64: 0, 0, 0, 0>} : (tensor<1x56x56x144xf32>, tensor<f32>) -> tensor<1x57x57x144xf32>
%76 = stablehlo.convolution(%75, %6) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 144 : i64} : (tensor<1x57x57x144xf32>, tensor<3x3x1x144xf32>) -> tensor<1x28x28x144xf32>
%77 = "stablehlo.batch_norm_inference"(%76, %34, %34, %34, %34) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x28x28x144xf32>, tensor<144xf32>, tensor<144xf32>, tensor<144xf32>, tensor<144xf32>) -> tensor<1x28x28x144xf32>
%78 = "stablehlo.clamp"(%43, %77, %44) : (tensor<f32>, tensor<1x28x28x144xf32>, tensor<f32>) -> tensor<1x28x28x144xf32>
@@ -103,7 +103,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
%99 = stablehlo.convolution(%98, %30) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x28x28x32xf32>, tensor<1x1x32x192xf32>) -> tensor<1x28x28x192xf32>
%100 = "stablehlo.batch_norm_inference"(%99, %29, %29, %29, %29) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x28x28x192xf32>, tensor<192xf32>, tensor<192xf32>, tensor<192xf32>, tensor<192xf32>) -> tensor<1x28x28x192xf32>
%101 = "stablehlo.clamp"(%43, %100, %44) : (tensor<f32>, tensor<1x28x28x192xf32>, tensor<f32>) -> tensor<1x28x28x192xf32>
-%102 = "stablehlo.pad"(%101, %43) {edge_padding_high = dense<[0, 1, 1, 0]> : tensor<4xi64>, edge_padding_low = dense<0> : tensor<4xi64>, interior_padding = dense<0> : tensor<4xi64>} : (tensor<1x28x28x192xf32>, tensor<f32>) -> tensor<1x29x29x192xf32>
+%102 = "stablehlo.pad"(%101, %43) {edge_padding_high = array<i64: 0, 1, 1, 0>, edge_padding_low = array<i64: 0, 0, 0, 0>, interior_padding = array<i64: 0, 0, 0, 0>} : (tensor<1x28x28x192xf32>, tensor<f32>) -> tensor<1x29x29x192xf32>
%103 = stablehlo.convolution(%102, %5) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 192 : i64} : (tensor<1x29x29x192xf32>, tensor<3x3x1x192xf32>) -> tensor<1x14x14x192xf32>
%104 = "stablehlo.batch_norm_inference"(%103, %29, %29, %29, %29) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x14x14x192xf32>, tensor<192xf32>, tensor<192xf32>, tensor<192xf32>, tensor<192xf32>) -> tensor<1x14x14x192xf32>
%105 = "stablehlo.clamp"(%43, %104, %44) : (tensor<f32>, tensor<1x14x14x192xf32>, tensor<f32>) -> tensor<1x14x14x192xf32>
@@ -165,7 +165,7 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
%161 = stablehlo.convolution(%160, %20) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [1, 1], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x14x14x96xf32>, tensor<1x1x96x576xf32>) -> tensor<1x14x14x576xf32>
%162 = "stablehlo.batch_norm_inference"(%161, %19, %19, %19, %19) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x14x14x576xf32>, tensor<576xf32>, tensor<576xf32>, tensor<576xf32>, tensor<576xf32>) -> tensor<1x14x14x576xf32>
%163 = "stablehlo.clamp"(%43, %162, %44) : (tensor<f32>, tensor<1x14x14x576xf32>, tensor<f32>) -> tensor<1x14x14x576xf32>
-%164 = "stablehlo.pad"(%163, %43) {edge_padding_high = dense<[0, 1, 1, 0]> : tensor<4xi64>, edge_padding_low = dense<0> : tensor<4xi64>, interior_padding = dense<0> : tensor<4xi64>} : (tensor<1x14x14x576xf32>, tensor<f32>) -> tensor<1x15x15x576xf32>
+%164 = "stablehlo.pad"(%163, %43) {edge_padding_high = array<i64: 0, 1, 1, 0>, edge_padding_low = array<i64: 0, 0, 0, 0>, interior_padding = array<i64: 0, 0, 0, 0>} : (tensor<1x14x14x576xf32>, tensor<f32>) -> tensor<1x15x15x576xf32>
%165 = stablehlo.convolution(%164, %3) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {stride = [2, 2], pad = [[0, 0], [0, 0]], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 576 : i64} : (tensor<1x15x15x576xf32>, tensor<3x3x1x576xf32>) -> tensor<1x7x7x576xf32>
%166 = "stablehlo.batch_norm_inference"(%165, %19, %19, %19, %19) {epsilon = 1.000000e-03 : f32, feature_index = 3 : i64} : (tensor<1x7x7x576xf32>, tensor<576xf32>, tensor<576xf32>, tensor<576xf32>, tensor<576xf32>) -> tensor<1x7x7x576xf32>
%167 = "stablehlo.clamp"(%43, %166, %44) : (tensor<f32>, tensor<1x7x7x576xf32>, tensor<f32>) -> tensor<1x7x7x576xf32>
2 changes: 1 addition & 1 deletion third_party/stablehlo
Submodule stablehlo updated 289 files
