This repository has been archived by the owner on Dec 12, 2024. It is now read-only.

Integrate LLVM at llvm/llvm-project@cca9f9b7 #408

Merged 2 commits on Feb 12, 2024.
2 changes: 1 addition & 1 deletion build_tools/llvm_version.txt
@@ -1 +1 @@
-21830c913505b1fd2cf10e454253483180c7e10b
+cca9f9b78fc657c280f7e4024a552af43a315bdb
2 changes: 1 addition & 1 deletion scripts/requirements.txt
@@ -1,3 +1,3 @@
-tf-nightly==2.16.0.dev20231102
+tf-nightly==2.16.0.dev20240207
ml_dtypes==0.3.1
wrapt==1.14.1
10 changes: 5 additions & 5 deletions test/Conversion/stablehlo-to-emitc.mlir
@@ -333,8 +333,8 @@ func.func @stablehlo_conv(%arg0: tensor<3x2x4x3xf32>, %arg1 : tensor<2x2x3x4xf32
>,
feature_group_count = 1 : i64,
padding = dense<[[0, 1], [0, 1]]> : tensor<2x2xi64>,
-rhs_dilation = dense<[1, 2]> : tensor<2xi64>,
-window_strides = dense<[2, 1]> : tensor<2xi64>
+rhs_dilation = array<i64: 1, 2>,
+window_strides = array<i64: 2, 1>
} : (tensor<2x2x3x4xf32>, tensor<3x2x4x3xf32>) -> tensor<2x1x2x3xf32>
return %out : tensor<2x1x2x3xf32>
}
@@ -369,14 +369,14 @@ func.func @stablehlo_reduce(%arg0 : tensor<2x1000xf32>, %arg1 : tensor<f32>, %ar
^bb0(%arg4: tensor<f32>, %arg5: tensor<f32>):
%1 = stablehlo.add %arg4, %arg5 : tensor<f32>
"stablehlo.return"(%1) : (tensor<f32>) -> ()
-}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<2x1000xf32>, tensor<f32>) -> tensor<2xf32>
+}) {dimensions = array<i64: 1>} : (tensor<2x1000xf32>, tensor<f32>) -> tensor<2xf32>

// CHECK: emitc.call_opaque "emitc::stablehlo::reduce"(%arg2, %arg3) {args = [0 : index, 1 : index, dense<1> : tensor<1xi64>, @stablehlo_reduce_lambda_1], template_args = [tensor<2xi32>, 1]} : (tensor<2x1000xi32>, tensor<i32>) -> tensor<2xi32>
%1 = "stablehlo.reduce"(%arg2, %arg3) ({
^bb0(%arg4: tensor<i32>, %arg5: tensor<i32>):
%2 = stablehlo.maximum %arg4, %arg5 : tensor<i32>
"stablehlo.return"(%2) : (tensor<i32>) -> ()
-}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<2x1000xi32>, tensor<i32>) -> tensor<2xi32>
+}) {dimensions = array<i64: 1>} : (tensor<2x1000xi32>, tensor<i32>) -> tensor<2xi32>

// CHECK: emitc.call_opaque "emitc::stablehlo::reduce"(%arg0, %arg2, %arg1, %arg3) {args = [0 : index, 1 : index, 2 : index, 3 : index, dense<1> : tensor<1xi64>, @stablehlo_reduce_lambda_2], template_args = [tensor<2xf32>, tensor<2xi32>, 1]} : (tensor<2x1000xf32>, tensor<2x1000xi32>, tensor<f32>, tensor<i32>) -> (tensor<2xf32>, tensor<2xi32>)
%2:2 = stablehlo.reduce(%arg0 init: %arg1), (%arg2 init: %arg3) across dimensions = [1] : (tensor<2x1000xf32>, tensor<2x1000xi32>, tensor<f32>, tensor<i32>) -> (tensor<2xf32>, tensor<2xi32>)
@@ -397,7 +397,7 @@ func.func @stablehlo_reduce_window(%arg0 : tensor<2x114x114x64xf32>, %arg1 : ten
^bb0(%arg2: tensor<f32>, %arg3: tensor<f32>): // no predecessors
%516 = stablehlo.maximum %arg2, %arg3 : tensor<f32>
"stablehlo.return"(%516) : (tensor<f32>) -> ()
-}) {window_dimensions = dense<[1, 3, 3, 1]> : tensor<4xi64>, window_strides = dense<[1, 2, 2, 1]> : tensor<4xi64>} : (tensor<2x114x114x64xf32>, tensor<f32>) -> tensor<2x56x56x64xf32>
+}) {window_dimensions = array<i64: 1, 3, 3, 1>, window_strides = array<i64: 1, 2, 2, 1>} : (tensor<2x114x114x64xf32>, tensor<f32>) -> tensor<2x56x56x64xf32>

return %0 : tensor<2x56x56x64xf32>
}
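Note on the .mlir changes: each removed/added pair above swaps an integer-vector attribute from the old DenseIntElementsAttr spelling (dense<...> : tensor<Nxi64>) to the DenseI64ArrayAttr spelling (array<i64: ...>) that the newer StableHLO revision pulled in by this integrate expects; the attribute values themselves are unchanged. A minimal self-contained sketch of the new spelling, adapted from the reduce_window hunk above (the function name @attr_spelling is illustrative, not from this diff):

func.func @attr_spelling(%arg0 : tensor<2x114x114x64xf32>, %arg1 : tensor<f32>) -> tensor<2x56x56x64xf32> {
  // Old spelling: window_dimensions = dense<[1, 3, 3, 1]> : tensor<4xi64>
  // New spelling: the same values carried as a DenseI64ArrayAttr, no tensor type needed.
  %0 = "stablehlo.reduce_window"(%arg0, %arg1) ({
  ^bb0(%arg2: tensor<f32>, %arg3: tensor<f32>):
    %1 = stablehlo.maximum %arg2, %arg3 : tensor<f32>
    "stablehlo.return"(%1) : (tensor<f32>) -> ()
  }) {window_dimensions = array<i64: 1, 3, 3, 1>, window_strides = array<i64: 1, 2, 2, 1>} : (tensor<2x114x114x64xf32>, tensor<f32>) -> tensor<2x56x56x64xf32>
  return %0 : tensor<2x56x56x64xf32>
}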
6 changes: 3 additions & 3 deletions test/MobileNetV2_FakeWeights_stablehlo.mlir
@@ -204,23 +204,23 @@ module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, p
^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%210 = stablehlo.add %arg1, %arg2 : tensor<f32>
"stablehlo.return"(%210) : (tensor<f32>) -> ()
-}) {dimensions = dense<[1, 2]> : tensor<2xi64>} : (tensor<1x7x7x1280xf32>, tensor<f32>) -> tensor<1x1280xf32>
+}) {dimensions = array<i64: 1, 2>} : (tensor<1x7x7x1280xf32>, tensor<f32>) -> tensor<1x1280xf32>
%200 = stablehlo.divide %199, %1 : tensor<1x1280xf32>
%201 = "stablehlo.dot"(%200, %9) : (tensor<1x1280xf32>, tensor<1280x1000xf32>) -> tensor<1x1000xf32>
%202 = stablehlo.add %201, %0 : tensor<1x1000xf32>
%203 = "stablehlo.reduce"(%202, %45) ( {
^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%210 = stablehlo.maximum %arg1, %arg2 : tensor<f32>
"stablehlo.return"(%210) : (tensor<f32>) -> ()
-}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x1000xf32>, tensor<f32>) -> tensor<1xf32>
+}) {dimensions = array<i64: 1>} : (tensor<1x1000xf32>, tensor<f32>) -> tensor<1xf32>
%204 = "stablehlo.broadcast_in_dim"(%203) {broadcast_dimensions = array<i64: 0>} : (tensor<1xf32>) -> tensor<1x1000xf32>
%205 = stablehlo.subtract %202, %204 : tensor<1x1000xf32>
%206 = "stablehlo.exponential"(%205) : (tensor<1x1000xf32>) -> tensor<1x1000xf32>
%207 = "stablehlo.reduce"(%206, %43) ( {
^bb0(%arg1: tensor<f32>, %arg2: tensor<f32>): // no predecessors
%210 = stablehlo.add %arg1, %arg2 : tensor<f32>
"stablehlo.return"(%210) : (tensor<f32>) -> ()
-}) {dimensions = dense<1> : tensor<1xi64>} : (tensor<1x1000xf32>, tensor<f32>) -> tensor<1xf32>
+}) {dimensions = array<i64: 1>} : (tensor<1x1000xf32>, tensor<f32>) -> tensor<1xf32>
%208 = "stablehlo.broadcast_in_dim"(%207) {broadcast_dimensions = array<i64: 0>} : (tensor<1xf32>) -> tensor<1x1000xf32>
%209 = stablehlo.divide %206, %208 : tensor<1x1000xf32>
return %209 : tensor<1x1000xf32>
2 changes: 1 addition & 1 deletion third_party/stablehlo
Submodule stablehlo updated 418 files