
Integrate LLVM at llvm/llvm-project@6ae7b73 #392

Merged 3 commits on Nov 3, 2023
1 change: 1 addition & 0 deletions README.md
@@ -73,6 +73,7 @@ The `emitc-opt` tool supports the following options:

| option | description |
| :----------------------------------------- |:------------------------------------------------------------------------ |
+| `--convert-scf-to-emitc` | Convert SCF dialect to EmitC dialect, maintaining structured control flow |
| `--convert-stablehlo-region-ops-to-emitc` | Convert StableHLO operations containing regions to EmitC dialect. |
| `--convert-stablehlo-to-emitc` | Convert from StableHLO dialect to EmitC dialect. |
| `--convert-arith-to-emitc` | Convert arith dialect to EmitC dialect, replacing IndexCastOp. |
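
For illustration, a minimal sketch (editor's example, not part of this diff) of what the newly listed `--convert-scf-to-emitc` pass does on a result-less loop; the function name is made up, and the exact upstream output may differ in detail:

```mlir
// Input: structured control flow in the SCF dialect.
func.func @simple_loop(%lb: index, %ub: index, %step: index) {
  scf.for %i = %lb to %ub step %step {
    %sum = arith.addi %i, %i : index
  }
  return
}

// After `emitc-opt --convert-scf-to-emitc` the loop becomes an
// `emitc.for`, keeping the structured control flow intact.
func.func @simple_loop(%lb: index, %ub: index, %step: index) {
  emitc.for %i = %lb to %ub step %step {
    %sum = arith.addi %i, %i : index
  }
  return
}
```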
2 changes: 1 addition & 1 deletion build_tools/llvm_version.txt
@@ -1 +1 @@
-79e96b2457fe4e1586effc36aab657c508c122cf
+6ae7b735dbd50eb7ade1573a86d037a2943e679c
8 changes: 4 additions & 4 deletions docs/scf-op-coverage.md
@@ -4,10 +4,10 @@ SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-->
# SCF Op Coverage

-The table below shows the supported SCF ops.
+The table below shows the SCF ops supported via the **upstream** `--convert-scf-to-emitc` conversion.

| op | supported | comment |
| :-------------------- |:------------------:| :------ |
-| for | :white_check_mark: | via `emitc-translate` |
-| if | :white_check_mark: | via `emitc-translate` |
-| yield | :white_check_mark: | via `emitc-translate` |
+| for | :heavy_check_mark: | |
+| if | :heavy_check_mark: | |
+| yield | :white_check_mark: | only as part of lowering `for` and `if` ops |
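
To make the `yield` row concrete, a hedged sketch (editor's example, not from this diff; the names are hypothetical): `scf.yield` has no standalone EmitC counterpart, so the conversion absorbs it while lowering the enclosing op. For an `scf.if` with a result, the upstream pass roughly materializes the result as an `emitc.variable` and rewrites each `scf.yield` into an `emitc.assign` in the corresponding branch.

```mlir
// An scf.if whose result is produced by scf.yield in both branches;
// after --convert-scf-to-emitc the yields survive only as assignments
// to a variable that carries the if-result.
func.func @select(%cond: i1, %a: i32, %b: i32) -> i32 {
  %r = scf.if %cond -> (i32) {
    scf.yield %a : i32
  } else {
    scf.yield %b : i32
  }
  return %r : i32
}
```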
2 changes: 1 addition & 1 deletion scripts/generate_testscases.py
@@ -32,7 +32,7 @@ def generate_example(model_input):

dtype = model_input.dtype

-if dtype == np.float32:
+if dtype == "float32":
return np.random.uniform(low=0.0, high=1.0,
size=shape).astype(np.float32)
else:
4 changes: 2 additions & 2 deletions scripts/requirements.txt
@@ -1,3 +1,3 @@
-tf-nightly==2.15.0.dev20230828
-ml_dtypes==0.2.0
+tf-nightly==2.16.0.dev20231102
+ml_dtypes==0.3.1
wrapt==1.14.1
22 changes: 11 additions & 11 deletions test/Conversion/tosa-to-emitc.mlir
@@ -147,47 +147,47 @@ func.func @test_logical_left_shift(%arg0: tensor<13x21x1xi32>, %arg1: tensor<13x
// MulOp: no broadcast
func.func @test_mul10(%arg0: tensor<13x21x3xf32>, %arg1: tensor<13x21x3xf32>) -> tensor<13x21x3xf32> {
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %arg1) : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i32 } : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 0 : i8 } : (tensor<13x21x3xf32>, tensor<13x21x3xf32>) -> tensor<13x21x3xf32>
return %0 : tensor<13x21x3xf32>
}

// MulOp: First operand needs to be broadcasted
func.func @test_mul1(%arg0: tensor<13x1x3xi32>, %arg1: tensor<13x21x3xi32>) -> tensor<13x21x3xi32> {
// CHECK: emitc.call "emitc::broadcast_in_dim"(%arg0) {args = [0 : index, dense<[0, 1, 2]> : tensor<3xi64>], template_args = [tensor<13x21x3xi32>]} : (tensor<13x1x3xi32>) -> tensor<13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%0, %arg1) {args = [0 : index, 1 : index, 1 : i32]} : (tensor<13x21x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i32 } : (tensor<13x1x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%0, %arg1) {args = [0 : index, 1 : index, 1 : i8]} : (tensor<13x21x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i8 } : (tensor<13x1x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
return %0 : tensor<13x21x3xi32>
}

// MulOp: Second operand needs to be broadcasted
func.func @test_mul2(%arg0: tensor<13x21x3xi32>, %arg1: tensor<13x1x3xi32>) -> tensor<13x21x3xi32> {
// CHECK: emitc.call "emitc::broadcast_in_dim"(%arg1) {args = [0 : index, dense<[0, 1, 2]> : tensor<3xi64>], template_args = [tensor<13x21x3xi32>]} : (tensor<13x1x3xi32>) -> tensor<13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 1 : i32]} : (tensor<13x21x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i32 } : (tensor<13x21x3xi32>, tensor<13x1x3xi32>) -> tensor<13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 1 : i8]} : (tensor<13x21x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i8 } : (tensor<13x21x3xi32>, tensor<13x1x3xi32>) -> tensor<13x21x3xi32>
return %0 : tensor<13x21x3xi32>
}

// MulOp: Second operand needs to be broadcasted + expanded to two dimensions
func.func @test_mul3(%arg0: tensor<21x3xi32>, %arg1: tensor<3xi32>) -> tensor<21x3xi32> {
// CHECK: emitc.call "emitc::broadcast_in_dim"(%arg1) {args = [0 : index, dense<1> : tensor<1xi64>], template_args = [tensor<21x3xi32>]} : (tensor<3xi32>) -> tensor<21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 3 : i32]} : (tensor<21x3xi32>, tensor<21x3xi32>) -> tensor<21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 3 : i32 } : (tensor<21x3xi32>, tensor<3xi32>) -> tensor<21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 3 : i8]} : (tensor<21x3xi32>, tensor<21x3xi32>) -> tensor<21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 3 : i8 } : (tensor<21x3xi32>, tensor<3xi32>) -> tensor<21x3xi32>
return %0 : tensor<21x3xi32>
}

// MulOp: Second operand needs to be broadcasted + expanded to three dimensions
func.func @test_mul4(%arg0: tensor<13x21x3xi32>, %arg1: tensor<3xi32>) -> tensor<13x21x3xi32> {
// CHECK: emitc.call "emitc::broadcast_in_dim"(%arg1) {args = [0 : index, dense<2> : tensor<1xi64>], template_args = [tensor<13x21x3xi32>]} : (tensor<3xi32>) -> tensor<13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 1 : i32]} : (tensor<13x21x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i32 } : (tensor<13x21x3xi32>, tensor<3xi32>) -> tensor<13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 1 : i8]} : (tensor<13x21x3xi32>, tensor<13x21x3xi32>) -> tensor<13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 1 : i8 } : (tensor<13x21x3xi32>, tensor<3xi32>) -> tensor<13x21x3xi32>
return %0 : tensor<13x21x3xi32>
}

// MulOp: Second two dimensional operand needs to be broadcasted + expanded to four dimensions
func.func @test_mul5(%arg0: tensor<2x13x21x3xi32>, %arg1: tensor<21x3xi32>) -> tensor<2x13x21x3xi32> {
// CHECK: emitc.call "emitc::broadcast_in_dim"(%arg1) {args = [0 : index, dense<[2, 3]> : tensor<2xi64>], template_args = [tensor<2x13x21x3xi32>]} : (tensor<21x3xi32>) -> tensor<2x13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 5 : i32]} : (tensor<2x13x21x3xi32>, tensor<2x13x21x3xi32>) -> tensor<2x13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 5 : i32 } : (tensor<2x13x21x3xi32>, tensor<21x3xi32>) -> tensor<2x13x21x3xi32>
// CHECK: emitc.call "emitc::tosa::mul"(%arg0, %0) {args = [0 : index, 1 : index, 5 : i8]} : (tensor<2x13x21x3xi32>, tensor<2x13x21x3xi32>) -> tensor<2x13x21x3xi32>
%0 = "tosa.mul"(%arg0, %arg1) { shift = 5 : i8 } : (tensor<2x13x21x3xi32>, tensor<21x3xi32>) -> tensor<2x13x21x3xi32>
return %0 : tensor<2x13x21x3xi32>
}
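
Context for the test updates above (editor's note, not part of the diff): at this LLVM revision the `shift` attribute of `tosa.mul` is narrowed from `i32` to `i8`, apparently to match the TOSA specification, which is why every test and its FileCheck line swaps the attribute type. Schematically (`%a` and `%b` are placeholder values):

```mlir
// Before the bump: the shift amount carried as a 32-bit attribute.
%0 = "tosa.mul"(%a, %b) { shift = 1 : i32 } : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
// After the bump: the same op with the narrowed i8 attribute.
%1 = "tosa.mul"(%a, %b) { shift = 1 : i8 } : (tensor<4xi32>, tensor<4xi32>) -> tensor<4xi32>
```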
