diff --git a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td index dfb1b0382918..41ca1f5801dc 100644 --- a/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td +++ b/include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td @@ -2131,12 +2131,12 @@ def Torch_AtenDiv_ScalarOp : Torch_Op<"aten.div_.Scalar", [ }]; } -def Torch_AtenNeScalarOp : Torch_Op<"aten.ne.Scalar", [ +def Torch_AtenFmodScalarOp : Torch_Op<"aten.fmod.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, AnyTorchScalarType:$other @@ -2146,20 +2146,20 @@ def Torch_AtenNeScalarOp : Torch_Op<"aten.ne.Scalar", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenNeScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenFmodScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenNeScalarOp::print(OpAsmPrinter &printer) { + void AtenFmodScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenNe_ScalarOp : Torch_Op<"aten.ne_.Scalar", [ +def Torch_AtenFmod_ScalarOp : Torch_Op<"aten.fmod_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::ne_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::fmod_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, AnyTorchScalarType:$other @@ -2169,638 +2169,628 @@ def Torch_AtenNe_ScalarOp : Torch_Op<"aten.ne_.Scalar", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenNe_ScalarOp::parse(OpAsmParser &parser, OperationState &result) 
{ + ParseResult AtenFmod_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenNe_ScalarOp::print(OpAsmPrinter &printer) { + void AtenFmod_ScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenEqScalarOp : Torch_Op<"aten.eq.Scalar", [ +def Torch_AtenMaskedFillScalarOp : Torch_Op<"aten.masked_fill.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchTensorType:$mask, + AnyTorchScalarType:$value ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenEqScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenMaskedFillScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenEqScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenMaskedFillScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenEq_ScalarOp : Torch_Op<"aten.eq_.Scalar", [ +def Torch_AtenMaskedFill_ScalarOp : Torch_Op<"aten.masked_fill_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::eq_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::masked_fill_.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + Torch_NonValueTensorType:$mask, + AnyTorchScalarType:$value ); let results = (outs 
Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenEq_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenMaskedFill_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenEq_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenMaskedFill_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenGtScalarOp : Torch_Op<"aten.gt.Scalar", [ +def Torch_AtenClampOp : Torch_Op<"aten.clamp", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp : (Tensor, Scalar?, Scalar?) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchOptionalScalarType:$min, + AnyTorchOptionalScalarType:$max ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenGtScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenClampOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenGtScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenClampOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenGt_ScalarOp : Torch_Op<"aten.gt_.Scalar", [ +def Torch_AtenClamp_Op : Torch_Op<"aten.clamp_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::gt_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_ : (Tensor, Scalar?, 
Scalar?) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + AnyTorchOptionalScalarType:$min, + AnyTorchOptionalScalarType:$max ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenGt_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenClamp_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenGt_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenClamp_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenGeScalarOp : Torch_Op<"aten.ge.Scalar", [ +def Torch_AtenClampTensorOp : Torch_Op<"aten.clamp.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp.Tensor : (Tensor, Tensor?, Tensor?) 
-> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchOptionalTensorType:$min, + AnyTorchOptionalTensorType:$max ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenGeScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenClampTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenGeScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenClampTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenGe_ScalarOp : Torch_Op<"aten.ge_.Scalar", [ +def Torch_AtenClamp_TensorOp : Torch_Op<"aten.clamp_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::ge_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_.Tensor : (Tensor, Tensor?, Tensor?) 
-> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + AnyTorchOptionalNonValueTensorType:$min, + AnyTorchOptionalNonValueTensorType:$max ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenGe_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenClamp_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenGe_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenClamp_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenLtScalarOp : Torch_Op<"aten.lt.Scalar", [ +def Torch_AtenClampMinOp : Torch_Op<"aten.clamp_min", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_min : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchScalarType:$min ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLtScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenClampMinOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLtScalarOp::print(OpAsmPrinter &printer) { + void AtenClampMinOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLt_ScalarOp : Torch_Op<"aten.lt_.Scalar", [ +def Torch_AtenClampMin_Op : Torch_Op<"aten.clamp_min_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::lt_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let 
summary = "Generated op for `aten::clamp_min_ : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + AnyTorchScalarType:$min ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLt_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenClampMin_Op::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLt_ScalarOp::print(OpAsmPrinter &printer) { + void AtenClampMin_Op::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLeScalarOp : Torch_Op<"aten.le.Scalar", [ +def Torch_AtenClampMinTensorOp : Torch_Op<"aten.clamp_min.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::le.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_min.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchTensorType:$min ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLeScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenClampMinTensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLeScalarOp::print(OpAsmPrinter &printer) { + void AtenClampMinTensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLe_ScalarOp : Torch_Op<"aten.le_.Scalar", [ +def Torch_AtenClampMin_TensorOp : Torch_Op<"aten.clamp_min_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::le_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_min_.Tensor : 
(Tensor, Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + Torch_NonValueTensorType:$min ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLe_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenClampMin_TensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLe_ScalarOp::print(OpAsmPrinter &printer) { + void AtenClampMin_TensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenFmodScalarOp : Torch_Op<"aten.fmod.Scalar", [ +def Torch_AtenClampMaxOp : Torch_Op<"aten.clamp_max", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_max : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchScalarType:$max ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenFmodScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenClampMaxOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenFmodScalarOp::print(OpAsmPrinter &printer) { + void AtenClampMaxOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenFmod_ScalarOp : Torch_Op<"aten.fmod_.Scalar", [ +def Torch_AtenClampMax_Op : Torch_Op<"aten.clamp_max_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::fmod_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_max_ : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins 
Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + AnyTorchScalarType:$max ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenFmod_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenClampMax_Op::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenFmod_ScalarOp::print(OpAsmPrinter &printer) { + void AtenClampMax_Op::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenMaskedFillScalarOp : Torch_Op<"aten.masked_fill.Scalar", [ +def Torch_AtenClampMaxTensorOp : Torch_Op<"aten.clamp_max.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_max.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$mask, - AnyTorchScalarType:$value + AnyTorchTensorType:$max ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenMaskedFillScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenClampMaxTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenMaskedFillScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenClampMaxTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenMaskedFill_ScalarOp : Torch_Op<"aten.masked_fill_.Scalar", [ +def Torch_AtenClampMax_TensorOp : Torch_Op<"aten.clamp_max_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for 
`aten::masked_fill_.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::clamp_max_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$mask, - AnyTorchScalarType:$value + Torch_NonValueTensorType:$max ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenMaskedFill_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenClampMax_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenMaskedFill_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenClampMax_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenClampOp : Torch_Op<"aten.clamp", [ +def Torch_AtenLog2Op : Torch_Op<"aten.log2", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::clamp : (Tensor, Scalar?, Scalar?) 
-> (Tensor)`"; + let summary = "Generated op for `aten::log2 : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchOptionalScalarType:$min, - AnyTorchOptionalScalarType:$max + AnyTorchTensorType:$self ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLog2Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLog2Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClamp_Op : Torch_Op<"aten.clamp_", [ +def Torch_AtenLog2_Op : Torch_Op<"aten.log2_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::clamp_ : (Tensor, Scalar?, Scalar?) 
-> (Tensor)`"; + let summary = "Generated op for `aten::log2_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - AnyTorchOptionalScalarType:$min, - AnyTorchOptionalScalarType:$max + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClamp_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLog2_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClamp_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLog2_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClampTensorOp : Torch_Op<"aten.clamp.Tensor", [ +def Torch_AtenLog10Op : Torch_Op<"aten.log10", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::clamp.Tensor : (Tensor, Tensor?, Tensor?) 
-> (Tensor)`"; + let summary = "Generated op for `aten::log10 : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchOptionalTensorType:$min, - AnyTorchOptionalTensorType:$max + AnyTorchTensorType:$self ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLog10Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLog10Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClamp_TensorOp : Torch_Op<"aten.clamp_.Tensor", [ +def Torch_AtenLog10_Op : Torch_Op<"aten.log10_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::clamp_.Tensor : (Tensor, Tensor?, Tensor?) 
-> (Tensor)`"; + let summary = "Generated op for `aten::log10_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - AnyTorchOptionalNonValueTensorType:$min, - AnyTorchOptionalNonValueTensorType:$max + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClamp_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLog10_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClamp_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLog10_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClampMinOp : Torch_Op<"aten.clamp_min", [ +def Torch_AtenSqrtOp : Torch_Op<"aten.sqrt", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::clamp_min : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::sqrt : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchScalarType:$min + AnyTorchTensorType:$self ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMinOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenSqrtOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampMinOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenSqrtOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClampMin_Op : Torch_Op<"aten.clamp_min_", [ +def Torch_AtenSqrt_Op : Torch_Op<"aten.sqrt_", [ 
IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::clamp_min_ : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::sqrt_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - AnyTorchScalarType:$min + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMin_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenSqrt_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampMin_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenSqrt_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClampMinTensorOp : Torch_Op<"aten.clamp_min.Tensor", [ +def Torch_AtenLog1pOp : Torch_Op<"aten.log1p", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::clamp_min.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::log1p : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchTensorType:$min + AnyTorchTensorType:$self ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMinTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenLog1pOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampMinTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenLog1pOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def 
Torch_AtenClampMin_TensorOp : Torch_Op<"aten.clamp_min_.Tensor", [ +def Torch_AtenLog1p_Op : Torch_Op<"aten.log1p_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::clamp_min_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::log1p_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$min + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMin_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenLog1p_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampMin_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenLog1p_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClampMaxOp : Torch_Op<"aten.clamp_max", [ +def Torch_AtenLogitOp : Torch_Op<"aten.logit", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::clamp_max : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::logit : (Tensor, float?) 
-> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$max + AnyTorchOptionalFloatType:$eps ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMaxOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenLogitOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenClampMaxOp::print(OpAsmPrinter &printer) { + void AtenLogitOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenClampMax_Op : Torch_Op<"aten.clamp_max_", [ +def Torch_AtenLogit_Op : Torch_Op<"aten.logit_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::clamp_max_ : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::logit_ : (Tensor, float?) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$max + AnyTorchOptionalFloatType:$eps ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMax_Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenLogit_Op::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenClampMax_Op::print(OpAsmPrinter &printer) { + void AtenLogit_Op::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenClampMaxTensorOp : Torch_Op<"aten.clamp_max.Tensor", [ +def Torch_AtenRsqrtOp : Torch_Op<"aten.rsqrt", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::clamp_max.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::rsqrt : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchTensorType:$max + AnyTorchTensorType:$self 
); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMaxTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenRsqrtOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampMaxTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenRsqrtOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenClampMax_TensorOp : Torch_Op<"aten.clamp_max_.Tensor", [ +def Torch_AtenRsqrt_Op : Torch_Op<"aten.rsqrt_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::clamp_max_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::rsqrt_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$max + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenClampMax_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenRsqrt_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenClampMax_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenRsqrt_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenLog2Op : Torch_Op<"aten.log2", [ +def Torch_AtenAbsOp : Torch_Op<"aten.abs", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::log2 : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::abs : (Tensor) -> (Tensor)`"; let arguments = (ins 
AnyTorchTensorType:$self ); @@ -2809,20 +2799,20 @@ def Torch_AtenLog2Op : Torch_Op<"aten.log2", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLog2Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenAbsOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenLog2Op::print(OpAsmPrinter &printer) { + void AtenAbsOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenLog2_Op : Torch_Op<"aten.log2_", [ +def Torch_AtenAbs_Op : Torch_Op<"aten.abs_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::log2_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::abs_ : (Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self ); @@ -2831,21 +2821,21 @@ def Torch_AtenLog2_Op : Torch_Op<"aten.log2_", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLog2_Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenAbs_Op::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenLog2_Op::print(OpAsmPrinter &printer) { + void AtenAbs_Op::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenLog10Op : Torch_Op<"aten.log10", [ +def Torch_AtenReciprocalOp : Torch_Op<"aten.reciprocal", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::log10 : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::reciprocal : (Tensor) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self ); @@ -2854,20 +2844,20 @@ def Torch_AtenLog10Op : Torch_Op<"aten.log10", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLog10Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult 
AtenReciprocalOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenLog10Op::print(OpAsmPrinter &printer) { + void AtenReciprocalOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenLog10_Op : Torch_Op<"aten.log10_", [ +def Torch_AtenReciprocal_Op : Torch_Op<"aten.reciprocal_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::log10_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::reciprocal_ : (Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self ); @@ -2876,904 +2866,931 @@ def Torch_AtenLog10_Op : Torch_Op<"aten.log10_", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLog10_Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenReciprocal_Op::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenLog10_Op::print(OpAsmPrinter &printer) { + void AtenReciprocal_Op::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenSqrtOp : Torch_Op<"aten.sqrt", [ +def Torch_AtenBitwiseAndTensorOp : Torch_Op<"aten.bitwise_and.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::sqrt : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchTensorType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSqrtOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseAndTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return 
parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSqrtOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseAndTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenSqrt_Op : Torch_Op<"aten.sqrt_", [ +def Torch_AtenBitwiseAnd_TensorOp : Torch_Op<"aten.bitwise_and_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::sqrt_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_and_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + Torch_NonValueTensorType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSqrt_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseAnd_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSqrt_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseAnd_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLog1pOp : Torch_Op<"aten.log1p", [ +def Torch_AtenBitwiseAndScalarOp : Torch_Op<"aten.bitwise_and.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::log1p : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_and.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLog1pOp::parse(OpAsmParser &parser, OperationState &result) { - return 
parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseAndScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLog1pOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseAndScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLog1p_Op : Torch_Op<"aten.log1p_", [ +def Torch_AtenBitwiseAnd_ScalarOp : Torch_Op<"aten.bitwise_and_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::log1p_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_and_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLog1p_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseAnd_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLog1p_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseAnd_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLogitOp : Torch_Op<"aten.logit", [ +def Torch_AtenBitwiseOrTensorOp : Torch_Op<"aten.bitwise_or.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::logit : (Tensor, float?) 
-> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_or.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchOptionalFloatType:$eps + AnyTorchTensorType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLogitOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenBitwiseOrTensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLogitOp::print(OpAsmPrinter &printer) { + void AtenBitwiseOrTensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenLogit_Op : Torch_Op<"aten.logit_", [ +def Torch_AtenBitwiseOr_TensorOp : Torch_Op<"aten.bitwise_or_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::logit_ : (Tensor, float?) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_or_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchOptionalFloatType:$eps + Torch_NonValueTensorType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenLogit_Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenBitwiseOr_TensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenLogit_Op::print(OpAsmPrinter &printer) { + void AtenBitwiseOr_TensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenRsqrtOp : Torch_Op<"aten.rsqrt", [ +def Torch_AtenBitwiseXorTensorOp : Torch_Op<"aten.bitwise_xor.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::rsqrt : (Tensor) -> (Tensor)`"; + let summary = "Generated op for 
`aten::bitwise_xor.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchTensorType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenRsqrtOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseXorTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenRsqrtOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseXorTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenRsqrt_Op : Torch_Op<"aten.rsqrt_", [ +def Torch_AtenBitwiseXor_TensorOp : Torch_Op<"aten.bitwise_xor_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::rsqrt_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_xor_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + Torch_NonValueTensorType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenRsqrt_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseXor_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenRsqrt_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseXor_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenAbsOp : Torch_Op<"aten.abs", [ +def Torch_AtenBitwiseLeftShiftTensorOp : Torch_Op<"aten.bitwise_left_shift.Tensor", [ 
AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::abs : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_left_shift.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchTensorType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenAbsOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseLeftShiftTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenAbsOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseLeftShiftTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenAbs_Op : Torch_Op<"aten.abs_", [ +def Torch_AtenBitwiseLeftShift_TensorOp : Torch_Op<"aten.bitwise_left_shift_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::abs_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_left_shift_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + Torch_NonValueTensorType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenAbs_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseLeftShift_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenAbs_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseLeftShift_TensorOp::print(OpAsmPrinter 
&printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenReciprocalOp : Torch_Op<"aten.reciprocal", [ +def Torch_AtenBitwiseRightShiftTensorOp : Torch_Op<"aten.bitwise_right_shift.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::reciprocal : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_right_shift.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchTensorType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenReciprocalOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenBitwiseRightShiftTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenReciprocalOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseRightShiftTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenReciprocal_Op : Torch_Op<"aten.reciprocal_", [ +def Torch_AtenBitwiseRightShift_TensorOp : Torch_Op<"aten.bitwise_right_shift_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::reciprocal_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::bitwise_right_shift_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + Torch_NonValueTensorType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenReciprocal_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult 
AtenBitwiseRightShift_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenReciprocal_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenBitwiseRightShift_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenBitwiseAndTensorOp : Torch_Op<"aten.bitwise_and.Tensor", [ +def Torch_AtenThresholdOp : Torch_Op<"aten.threshold", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::bitwise_and.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other + AnyTorchScalarType:$threshold, + AnyTorchScalarType:$value ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseAndTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenThresholdOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenBitwiseAndTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenThresholdOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenBitwiseAnd_TensorOp : Torch_Op<"aten.bitwise_and_.Tensor", [ +def Torch_AtenThreshold_Op : Torch_Op<"aten.threshold_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::bitwise_and_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::threshold_ : (Tensor, Scalar, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other + AnyTorchScalarType:$threshold, + 
AnyTorchScalarType:$value ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseAnd_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenThreshold_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenBitwiseAnd_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenThreshold_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenBitwiseAndScalarOp : Torch_Op<"aten.bitwise_and.Scalar", [ +def Torch_AtenSquareOp : Torch_Op<"aten.square", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::bitwise_and.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::square : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchScalarType:$other + AnyTorchTensorType:$self ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseAndScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenSquareOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenBitwiseAndScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenSquareOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenBitwiseAnd_ScalarOp : Torch_Op<"aten.bitwise_and_.Scalar", [ +def Torch_AtenSquare_Op : Torch_Op<"aten.square_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::bitwise_and_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let 
summary = "Generated op for `aten::square_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseAnd_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenSquare_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenBitwiseAnd_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenSquare_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenBitwiseOrTensorOp : Torch_Op<"aten.bitwise_or.Tensor", [ +def Torch_AtenUnsqueezeOp : Torch_Op<"aten.unsqueeze", [ AllowsTypeRefinement, - HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::bitwise_or.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::unsqueeze : (Tensor, int) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other + Torch_IntType:$dim ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseOrTensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenUnsqueezeOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenBitwiseOrTensorOp::print(OpAsmPrinter &printer) { + void AtenUnsqueezeOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenBitwiseOr_TensorOp : Torch_Op<"aten.bitwise_or_.Tensor", [ +def Torch_AtenUnsqueeze_Op : Torch_Op<"aten.unsqueeze_", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for 
`aten::bitwise_or_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::unsqueeze_ : (Tensor, int) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other + Torch_IntType:$dim ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseOr_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenUnsqueeze_Op::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenBitwiseOr_TensorOp::print(OpAsmPrinter &printer) { + void AtenUnsqueeze_Op::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenBitwiseXorTensorOp : Torch_Op<"aten.bitwise_xor.Tensor", [ +def Torch_AtenZeroOp : Torch_Op<"aten.zero", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::bitwise_xor.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::zero : (Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self, - AnyTorchTensorType:$other + AnyTorchTensorType:$self ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseXorTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenZeroOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenBitwiseXorTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenZeroOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenBitwiseXor_TensorOp : Torch_Op<"aten.bitwise_xor_.Tensor", [ +def Torch_AtenZero_Op : Torch_Op<"aten.zero_", [ IsTrailingUnderscoreInplaceVariant, 
AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::bitwise_xor_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::zero_ : (Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other + Torch_NonValueTensorType:$self ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseXor_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenZero_Op::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 1, 1); } - void AtenBitwiseXor_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenZero_Op::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 1, 1); } }]; } -def Torch_AtenBitwiseLeftShiftTensorOp : Torch_Op<"aten.bitwise_left_shift.Tensor", [ +def Torch_AtenFillScalarOp : Torch_Op<"aten.fill.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::bitwise_left_shift.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::fill.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other + AnyTorchScalarType:$value ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseLeftShiftTensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenFillScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenBitwiseLeftShiftTensorOp::print(OpAsmPrinter &printer) { + void AtenFillScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenBitwiseLeftShift_TensorOp : 
Torch_Op<"aten.bitwise_left_shift_.Tensor", [ +def Torch_AtenFill_ScalarOp : Torch_Op<"aten.fill_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::bitwise_left_shift_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other + AnyTorchScalarType:$value ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseLeftShift_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenFill_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenBitwiseLeftShift_TensorOp::print(OpAsmPrinter &printer) { + void AtenFill_ScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenBitwiseRightShiftTensorOp : Torch_Op<"aten.bitwise_right_shift.Tensor", [ +def Torch_AtenFillTensorOp : Torch_Op<"aten.fill.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::bitwise_right_shift.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::fill.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other + AnyTorchTensorType:$value ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseRightShiftTensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenFillTensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenBitwiseRightShiftTensorOp::print(OpAsmPrinter &printer) { + void AtenFillTensorOp::print(OpAsmPrinter &printer) { 
printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenBitwiseRightShift_TensorOp : Torch_Op<"aten.bitwise_right_shift_.Tensor", [ +def Torch_AtenFill_TensorOp : Torch_Op<"aten.fill_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::bitwise_right_shift_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::fill_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other + Torch_NonValueTensorType:$value ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenBitwiseRightShift_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenFill_TensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenBitwiseRightShift_TensorOp::print(OpAsmPrinter &printer) { + void AtenFill_TensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenThresholdOp : Torch_Op<"aten.threshold", [ +def Torch_AtenDivTensorModeOp : Torch_Op<"aten.div.Tensor_mode", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::threshold : (Tensor, Scalar, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::div.Tensor_mode : (Tensor, Tensor, str?) 
-> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$threshold, - AnyTorchScalarType:$value + AnyTorchTensorType:$other, + AnyTorchOptionalStringType:$rounding_mode ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenThresholdOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenDivTensorModeOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenThresholdOp::print(OpAsmPrinter &printer) { + void AtenDivTensorModeOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 3, 1); } }]; + let hasCanonicalizer = 1; } -def Torch_AtenThreshold_Op : Torch_Op<"aten.threshold_", [ +def Torch_AtenDiv_TensorModeOp : Torch_Op<"aten.div_.Tensor_mode", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::threshold_ : (Tensor, Scalar, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::div_.Tensor_mode : (Tensor, Tensor, str?) 
-> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$threshold, - AnyTorchScalarType:$value + Torch_NonValueTensorType:$other, + AnyTorchOptionalStringType:$rounding_mode ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenThreshold_Op::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenDiv_TensorModeOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenThreshold_Op::print(OpAsmPrinter &printer) { + void AtenDiv_TensorModeOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenSquareOp : Torch_Op<"aten.square", [ +def Torch_AtenMulTensorOp : Torch_Op<"aten.mul.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::square : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchTensorType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSquareOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenMulTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSquareOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenMulTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; + let hasFolder = 1; + let hasCanonicalizer = 1; } -def Torch_AtenSquare_Op : Torch_Op<"aten.square_", [ +def Torch_AtenMul_TensorOp : Torch_Op<"aten.mul_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated 
op for `aten::square_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::mul_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + Torch_NonValueTensorType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSquare_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenMul_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSquare_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenMul_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenUnsqueezeOp : Torch_Op<"aten.unsqueeze", [ +def Torch_AtenAddTensorOp : Torch_Op<"aten.add.Tensor", [ AllowsTypeRefinement, + HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::unsqueeze : (Tensor, int) -> (Tensor)`"; + let summary = "Generated op for `aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - Torch_IntType:$dim + AnyTorchTensorType:$other, + AnyTorchScalarType:$alpha ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenUnsqueezeOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenAddTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenUnsqueezeOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenAddTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; + let hasFolder = 1; + let hasCanonicalizer = 1; } -def 
Torch_AtenUnsqueeze_Op : Torch_Op<"aten.unsqueeze_", [ +def Torch_AtenAdd_TensorOp : Torch_Op<"aten.add_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::unsqueeze_ : (Tensor, int) -> (Tensor)`"; + let summary = "Generated op for `aten::add_.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_IntType:$dim + Torch_NonValueTensorType:$other, + AnyTorchScalarType:$alpha ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenUnsqueeze_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenAdd_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenUnsqueeze_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenAdd_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenZeroOp : Torch_Op<"aten.zero", [ +def Torch_AtenSubTensorOp : Torch_Op<"aten.sub.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::zero : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; let arguments = (ins - AnyTorchTensorType:$self + AnyTorchTensorType:$self, + AnyTorchTensorType:$other, + AnyTorchScalarType:$alpha ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenZeroOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenSubTensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenZeroOp::print(OpAsmPrinter &printer) { - 
printDefaultTorchOp(printer, *this, 1, 1); + void AtenSubTensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; + let hasFolder = 1; + let hasCanonicalizer = 1; } -def Torch_AtenZero_Op : Torch_Op<"aten.zero_", [ +def Torch_AtenSub_TensorOp : Torch_Op<"aten.sub_.Tensor", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::zero_ : (Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::sub_.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; let arguments = (ins - Torch_NonValueTensorType:$self + Torch_NonValueTensorType:$self, + Torch_NonValueTensorType:$other, + AnyTorchScalarType:$alpha ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenZero_Op::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 1, 1); + ParseResult AtenSub_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenZero_Op::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 1, 1); + void AtenSub_TensorOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenFillScalarOp : Torch_Op<"aten.fill.Scalar", [ +def Torch_AtenAddScalarOp : Torch_Op<"aten.add.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::fill.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$value + AnyTorchScalarType:$other, + AnyTorchScalarType:$alpha ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenFillScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return 
parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenAddScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenFillScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenAddScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; + let hasCanonicalizer = 1; } -def Torch_AtenFill_ScalarOp : Torch_Op<"aten.fill_.Scalar", [ +def Torch_AtenAdd_ScalarOp : Torch_Op<"aten.add_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::fill_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::add_.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$value + AnyTorchScalarType:$other, + AnyTorchScalarType:$alpha ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenFill_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenAdd_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenFill_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenAdd_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenFillTensorOp : Torch_Op<"aten.fill.Tensor", [ +def Torch_AtenSubScalarOp : Torch_Op<"aten.sub.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::fill.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$value + AnyTorchScalarType:$other, + 
AnyTorchScalarType:$alpha ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenFillTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenSubScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenFillTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenSubScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; + let hasCanonicalizer = 1; } -def Torch_AtenFill_TensorOp : Torch_Op<"aten.fill_.Tensor", [ +def Torch_AtenSub_ScalarOp : Torch_Op<"aten.sub_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::fill_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::sub_.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$value + AnyTorchScalarType:$other, + AnyTorchScalarType:$alpha ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenFill_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 2, 1); + ParseResult AtenSub_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 3, 1); } - void AtenFill_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 2, 1); + void AtenSub_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 3, 1); } }]; } -def Torch_AtenDivTensorModeOp : Torch_Op<"aten.div.Tensor_mode", [ +def Torch_AtenMulScalarOp : Torch_Op<"aten.mul.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for 
`aten::div.Tensor_mode : (Tensor, Tensor, str?) -> (Tensor)`"; + let summary = "Generated op for `aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other, - AnyTorchOptionalStringType:$rounding_mode + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenDivTensorModeOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenMulScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenDivTensorModeOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenMulScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; let hasCanonicalizer = 1; } -def Torch_AtenDiv_TensorModeOp : Torch_Op<"aten.div_.Tensor_mode", [ +def Torch_AtenMul_ScalarOp : Torch_Op<"aten.mul_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::div_.Tensor_mode : (Tensor, Tensor, str?) 
-> (Tensor)`"; + let summary = "Generated op for `aten::mul_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other, - AnyTorchOptionalStringType:$rounding_mode + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenDiv_TensorModeOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenMul_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenDiv_TensorModeOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenMul_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenMulTensorOp : Torch_Op<"aten.mul.Tensor", [ +def Torch_AtenEqTensorOp : Torch_Op<"aten.eq.Tensor", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::mul.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, AnyTorchTensorType:$other @@ -3783,22 +3800,21 @@ def Torch_AtenMulTensorOp : Torch_Op<"aten.mul.Tensor", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenMulTensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenEqTensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenMulTensorOp::print(OpAsmPrinter &printer) { + void AtenEqTensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; let hasFolder = 1; - let hasCanonicalizer = 1; } -def Torch_AtenMul_TensorOp : Torch_Op<"aten.mul_.Tensor", [ +def Torch_AtenEq_TensorOp : Torch_Op<"aten.eq_.Tensor", [ 
IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::mul_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::eq_.Tensor : (Tensor, Tensor) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, Torch_NonValueTensorType:$other @@ -3808,223 +3824,213 @@ def Torch_AtenMul_TensorOp : Torch_Op<"aten.mul_.Tensor", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenMul_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenEq_TensorOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenMul_TensorOp::print(OpAsmPrinter &printer) { + void AtenEq_TensorOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenAddTensorOp : Torch_Op<"aten.add.Tensor", [ +def Torch_AtenLeScalarOp : Torch_Op<"aten.le.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::add.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::le.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenAddTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLeScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenAddTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLeScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; let hasFolder = 1; - let hasCanonicalizer = 1; } -def 
Torch_AtenAdd_TensorOp : Torch_Op<"aten.add_.Tensor", [ +def Torch_AtenLe_ScalarOp : Torch_Op<"aten.le_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::add_.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::le_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenAdd_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLe_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenAdd_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLe_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenSubTensorOp : Torch_Op<"aten.sub.Tensor", [ +def Torch_AtenLtScalarOp : Torch_Op<"aten.lt.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::sub.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSubTensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLtScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void 
AtenSubTensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLtScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; let hasFolder = 1; - let hasCanonicalizer = 1; } -def Torch_AtenSub_TensorOp : Torch_Op<"aten.sub_.Tensor", [ +def Torch_AtenLt_ScalarOp : Torch_Op<"aten.lt_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::sub_.Tensor : (Tensor, Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::lt_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSub_TensorOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenLt_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSub_TensorOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenLt_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenAddScalarOp : Torch_Op<"aten.add.Scalar", [ +def Torch_AtenGtScalarOp : Torch_Op<"aten.gt.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::add.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchScalarType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult 
AtenAddScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenGtScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenAddScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenGtScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; - let hasCanonicalizer = 1; + let hasFolder = 1; } -def Torch_AtenAdd_ScalarOp : Torch_Op<"aten.add_.Scalar", [ +def Torch_AtenGt_ScalarOp : Torch_Op<"aten.gt_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::add_.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::gt_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenAdd_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenGt_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenAdd_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenGt_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenSubScalarOp : Torch_Op<"aten.sub.Scalar", [ +def Torch_AtenGeScalarOp : Torch_Op<"aten.ge.Scalar", [ AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins 
AnyTorchTensorType:$self, - AnyTorchScalarType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSubScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenGeScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSubScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenGeScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; - let hasCanonicalizer = 1; + let hasFolder = 1; } -def Torch_AtenSub_ScalarOp : Torch_Op<"aten.sub_.Scalar", [ +def Torch_AtenGe_ScalarOp : Torch_Op<"aten.ge_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::sub_.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::ge_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - AnyTorchScalarType:$other, - AnyTorchScalarType:$alpha + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenSub_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { - return parseDefaultTorchOp(parser, result, 3, 1); + ParseResult AtenGe_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenSub_ScalarOp::print(OpAsmPrinter &printer) { - printDefaultTorchOp(printer, *this, 3, 1); + void AtenGe_ScalarOp::print(OpAsmPrinter &printer) { + printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenMulScalarOp : Torch_Op<"aten.mul.Scalar", [ +def Torch_AtenEqScalarOp : Torch_Op<"aten.eq.Scalar", [ 
AllowsTypeRefinement, HasValueSemantics, ReadOnly ]> { - let summary = "Generated op for `aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, AnyTorchScalarType:$other @@ -4034,21 +4040,21 @@ def Torch_AtenMulScalarOp : Torch_Op<"aten.mul.Scalar", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenMulScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenEqScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenMulScalarOp::print(OpAsmPrinter &printer) { + void AtenEqScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; - let hasCanonicalizer = 1; + let hasFolder = 1; } -def Torch_AtenMul_ScalarOp : Torch_Op<"aten.mul_.Scalar", [ +def Torch_AtenEq_ScalarOp : Torch_Op<"aten.eq_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::mul_.Scalar : (Tensor, Scalar) -> (Tensor)`"; + let summary = "Generated op for `aten::eq_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, AnyTorchScalarType:$other @@ -4058,58 +4064,58 @@ def Torch_AtenMul_ScalarOp : Torch_Op<"aten.mul_.Scalar", [ ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenMul_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenEq_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenMul_ScalarOp::print(OpAsmPrinter &printer) { + void AtenEq_ScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; } -def Torch_AtenEqTensorOp : Torch_Op<"aten.eq.Tensor", [ +def Torch_AtenNeScalarOp : Torch_Op<"aten.ne.Scalar", [ AllowsTypeRefinement, HasValueSemantics, 
ReadOnly ]> { - let summary = "Generated op for `aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins AnyTorchTensorType:$self, - AnyTorchTensorType:$other + AnyTorchScalarType:$other ); let results = (outs AnyTorchTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenEqTensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenNeScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenEqTensorOp::print(OpAsmPrinter &printer) { + void AtenNeScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; let hasFolder = 1; } -def Torch_AtenEq_TensorOp : Torch_Op<"aten.eq_.Tensor", [ +def Torch_AtenNe_ScalarOp : Torch_Op<"aten.ne_.Scalar", [ IsTrailingUnderscoreInplaceVariant, AllowsTypeRefinement ]> { - let summary = "Generated op for `aten::eq_.Tensor : (Tensor, Tensor) -> (Tensor)`"; + let summary = "Generated op for `aten::ne_.Scalar : (Tensor, Scalar) -> (Tensor)`"; let arguments = (ins Torch_NonValueTensorType:$self, - Torch_NonValueTensorType:$other + AnyTorchScalarType:$other ); let results = (outs Torch_NonValueTensorType:$result ); let hasCustomAssemblyFormat = 1; let extraClassDefinition = [{ - ParseResult AtenEq_TensorOp::parse(OpAsmParser &parser, OperationState &result) { + ParseResult AtenNe_ScalarOp::parse(OpAsmParser &parser, OperationState &result) { return parseDefaultTorchOp(parser, result, 2, 1); } - void AtenEq_TensorOp::print(OpAsmPrinter &printer) { + void AtenNe_ScalarOp::print(OpAsmPrinter &printer) { printDefaultTorchOp(printer, *this, 2, 1); } }]; diff --git a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp index 8a677b8ce058..a7bdddbc8d78 100644 --- a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp +++ 
b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp @@ -591,7 +591,7 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP( Value one = rewriter.create( loc, intTy, rewriter.getI64IntegerAttr(1)); Value lt = - rewriter.create(loc, boolTy, indices, zero); + rewriter.create(loc, boolTy, indices, zero); Value dim = rewriter.create(loc, intTy, data, index); Value add = rewriter.create(loc, indicesTy, diff --git a/lib/Dialect/Torch/IR/TorchOps.cpp b/lib/Dialect/Torch/IR/TorchOps.cpp index 03f39be9c806..9f10c8bce3ba 100644 --- a/lib/Dialect/Torch/IR/TorchOps.cpp +++ b/lib/Dialect/Torch/IR/TorchOps.cpp @@ -1481,6 +1481,197 @@ OpFoldResult AtenEqTensorOp::fold(FoldAdaptor adaptor) { return nullptr; } +//===----------------------------------------------------------------------===// +// AtenLeScalarOp +//===----------------------------------------------------------------------===// + +using ComparisonFoldFpOperator = std::function; +using ComparisonFoldIntOperator = std::function; + +static OpFoldResult comparisonScaleFolder(DenseElementsAttr lhs, Attribute rhs, + ValueTensorType resultTy, + ComparisonFoldFpOperator fpFolder, + ComparisonFoldIntOperator intFolder) { + constexpr int64_t kMaxFold = 16; + if (!lhs || !rhs || !resultTy) + return nullptr; + if (!resultTy.hasSizes() || !resultTy.hasDtype()) + return nullptr; + + for (auto size : resultTy.getSizes()) + if (size == Torch::kUnknownSize) + return nullptr; + + auto ctx = lhs.getContext(); + auto resultETy = resultTy.getDtype(); + auto tensorETy = cast(lhs.getType()).getElementType(); + if (lhs.isSplat()) { + if (auto intAttr = dyn_cast(rhs)) { + auto unsign = cast(tensorETy).isUnsigned(); + auto scalarAP = intAttr.getValue(); + auto tensorAP = lhs.getSplatValue().getValue(); + tensorAP = APInt( + scalarAP.getBitWidth(), + unsign ? 
tensorAP.getZExtValue() : tensorAP.getSExtValue(), !unsign); + auto resultBool = intFolder(tensorAP, scalarAP, unsign); + auto resultAP = IntegerAttr::get(IntegerType::get(ctx, 1), resultBool); + return DenseElementsAttr::get(resultTy.toBuiltinTensor().clone(resultETy), + resultAP); + } + + if (auto floatAttr = dyn_cast(rhs)) { + APFloat scalarAP = floatAttr.getValue(); + APFloat tensorAP = lhs.getSplatValue().getValue(); + auto resultBool = + fpFolder(tensorAP.convertToDouble(), scalarAP.convertToDouble()); + auto resultAP = IntegerAttr::get(IntegerType::get(ctx, 1), resultBool); + return DenseElementsAttr::get(resultTy.toBuiltinTensor().clone(resultETy), + resultAP); + } + return nullptr; + } + + int64_t count = 1; + for (auto size : resultTy.getSizes()) + count *= size; + + if (count > kMaxFold) + return nullptr; + + if (auto intAttr = dyn_cast(rhs)) { + auto unsign = cast(tensorETy).isUnsigned(); + llvm::SmallVector values; + for (auto tensorAP : lhs.getValues()) { + auto scalarAP = intAttr.getValue(); + tensorAP = APInt( + scalarAP.getBitWidth(), + unsign ? 
tensorAP.getZExtValue() : tensorAP.getSExtValue(), !unsign); + auto resultBool = intFolder(tensorAP, scalarAP, unsign); + values.push_back(resultBool); + } + return DenseElementsAttr::get(resultTy.toBuiltinTensor().clone(resultETy), + values); + } + + if (auto floatAttr = dyn_cast(rhs)) { + llvm::SmallVector values; + for (auto tensorAP : lhs.getValues()) { + APFloat scalarAP = floatAttr.getValue(); + auto resultBool = + fpFolder(tensorAP.convertToDouble(), scalarAP.convertToDouble()); + values.push_back(resultBool); + } + return DenseElementsAttr::get(resultTy.toBuiltinTensor().clone(resultETy), + values); + } + + return nullptr; +} + +OpFoldResult AtenLeScalarOp::fold(FoldAdaptor adaptor) { + auto self = dyn_cast_or_null(adaptor.getSelf()); + auto other = adaptor.getOther(); + auto resultTy = dyn_cast(getType()); + + auto fpFold = [](double lhs, double rhs) -> bool { return lhs <= rhs; }; + + auto intFold = [](APInt lhs, APInt rhs, bool unsign) -> bool { + return unsign ? lhs.ule(rhs) : lhs.sle(rhs); + }; + + return comparisonScaleFolder(self, other, resultTy, fpFold, intFold); +} + +//===----------------------------------------------------------------------===// +// AtenLtScalarOp +//===----------------------------------------------------------------------===// + +OpFoldResult AtenLtScalarOp::fold(FoldAdaptor adaptor) { + auto self = dyn_cast_or_null(adaptor.getSelf()); + auto other = adaptor.getOther(); + auto resultTy = dyn_cast(getType()); + + auto fpFold = [](double lhs, double rhs) -> bool { return lhs < rhs; }; + + auto intFold = [](APInt lhs, APInt rhs, bool unsign) -> bool { + return unsign ? 
lhs.ult(rhs) : lhs.slt(rhs); + }; + + return comparisonScaleFolder(self, other, resultTy, fpFold, intFold); +} + +//===----------------------------------------------------------------------===// +// AtenGtScalarOp +//===----------------------------------------------------------------------===// + +OpFoldResult AtenGtScalarOp::fold(FoldAdaptor adaptor) { + auto self = dyn_cast_or_null(adaptor.getSelf()); + auto other = adaptor.getOther(); + auto resultTy = dyn_cast(getType()); + + auto fpFold = [](double lhs, double rhs) -> bool { return lhs > rhs; }; + + auto intFold = [](APInt lhs, APInt rhs, bool unsign) -> bool { + return unsign ? lhs.ugt(rhs) : lhs.sgt(rhs); + }; + + return comparisonScaleFolder(self, other, resultTy, fpFold, intFold); +} + +//===----------------------------------------------------------------------===// +// AtenGeScalarOp +//===----------------------------------------------------------------------===// + +OpFoldResult AtenGeScalarOp::fold(FoldAdaptor adaptor) { + auto self = dyn_cast_or_null(adaptor.getSelf()); + auto other = adaptor.getOther(); + auto resultTy = dyn_cast(getType()); + + auto fpFold = [](double lhs, double rhs) -> bool { return lhs >= rhs; }; + + auto intFold = [](APInt lhs, APInt rhs, bool unsign) -> bool { + return unsign ? 
lhs.uge(rhs) : lhs.sge(rhs); + }; + + return comparisonScaleFolder(self, other, resultTy, fpFold, intFold); +} + +//===----------------------------------------------------------------------===// +// AtenEqScalarOp +//===----------------------------------------------------------------------===// + +OpFoldResult AtenEqScalarOp::fold(FoldAdaptor adaptor) { + auto self = dyn_cast_or_null(adaptor.getSelf()); + auto other = adaptor.getOther(); + auto resultTy = dyn_cast(getType()); + + auto fpFold = [](double lhs, double rhs) -> bool { return lhs == rhs; }; + + auto intFold = [](APInt lhs, APInt rhs, bool unsign) -> bool { + return lhs.eq(rhs); + }; + + return comparisonScaleFolder(self, other, resultTy, fpFold, intFold); +} + +//===----------------------------------------------------------------------===// +// AtenNeScalarOp +//===----------------------------------------------------------------------===// + +OpFoldResult AtenNeScalarOp::fold(FoldAdaptor adaptor) { + auto self = dyn_cast_or_null(adaptor.getSelf()); + auto other = adaptor.getOther(); + auto resultTy = dyn_cast(getType()); + + auto fpFold = [](double lhs, double rhs) -> bool { return lhs != rhs; }; + + auto intFold = [](APInt lhs, APInt rhs, bool unsign) -> bool { + return lhs.ne(rhs); + }; + + return comparisonScaleFolder(self, other, resultTy, fpFold, intFold); +} + //===----------------------------------------------------------------------===// // AtenFloorOp //===----------------------------------------------------------------------===// diff --git a/projects/pt1/e2e_testing/xfail_sets.py b/projects/pt1/e2e_testing/xfail_sets.py index d16a20893dbf..695b51c18c2c 100644 --- a/projects/pt1/e2e_testing/xfail_sets.py +++ b/projects/pt1/e2e_testing/xfail_sets.py @@ -1495,11 +1495,6 @@ "FlipNegativeIndexModule_basic", "HardsigmoidModule_basic", "HardsigmoidRandomModule_basic", - "IndexSelectDynamicInputSizeModule_basic", - "IndexSelectWholeDimensionModule_basic", - "IndexSelectWholeTensorModule_basic", - 
"IndexTensorStaticModule_basic", - "IndexTensorStaticNonContiguousWithNoneModule_basic", "PixelShuffleModuleStaticRank4Float32_basic", "ResNet18Module_basic", "SliceCopyEndGreaterThanDimSize_Module_basic", @@ -1998,24 +1993,15 @@ "NativeDropoutTrainModule_basic", "NativeDropoutTrainStaticShapeModule_basic", "ReduceProdDimIntFloatModule_basic", - "StdCorrectionAllDimReduceModule_basic", - "StdCorrectionKeepDimModule_basic", "StdCorrectionLargeInputModule_basic", "StdCorrectionModule_basic", "StdCorrectionNoneModule_basic", "StdDimNoneDimModule_basic", "StdUnbiasedModule_basic", - "VarCorrectionAllDimReduceModule_basic", - "VarCorrectionKeepDimModule_basic", "VarCorrectionLargeInputModule_basic", "VarCorrectionModule_basic", "VarCorrectionNoneModule_basic", - "VarDimAllDimReduceModule_basic", - "VarDimModule_basic", - "VarDimMultiDimModule_basic", "VarDimNoneDimModule_basic", - "VarDimSingleDimModule_basic", - "VarDimUnbiasedModule_basic", "VarMeanCorrectionNoneModule_basic", "VarMeanUnbiasedModule_basic", "VarUnbiasedModule_basic", @@ -2110,9 +2096,6 @@ "IndexTensorMultiInputOneDim_basic", "IndexTensorMultiInputThreeIndexers_basic", "IndexTensorMultiInput_basic", - "IndexTensorStaticContiguousWithNoneModule_basic", - "SelectIntModule_basic", - "SliceSingleIdxModule_basic", "ViewFlattenAndExpandModule_basic", "ViewSizeDimFollowedByCollapsedOnesModule_basic", "ViewSizeDimFollowedByExpandedOnesModule_basic", @@ -2151,7 +2134,6 @@ "FlattenDynamicModule_basic", "GluStaticModule_basic", "GroupNormModule_basic", - "IndexSelectDynamicModulebasic", "IndexTensorHackedTwinModule3dInput_basic", "IndexTensorHackedTwinModule_basic", "IndexTensorModule3dInput_basic", @@ -2169,11 +2151,5 @@ "TensorsStackPromoteDTypeModule_basic", } -if torch_version_for_comparison() < version.parse("2.3.0.dev"): - ONNX_XFAIL_SET = ONNX_XFAIL_SET | { - # ERROR: dtype (torch.float64) is not equal to golden dtype (torch.float32) - "ElementwiseWhereScalarModule_basic", - } - ONNX_CRASHING_SET = { } 
diff --git a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py index 2b0ec4aee1cb..ba41d4220e2f 100644 --- a/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py +++ b/projects/pt1/python/torch_mlir/jit_ir_importer/build_tools/torch_ods_gen.py @@ -301,12 +301,6 @@ def emit_with_mutating_variants(key, **kwargs): "aten::le.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::ne.Tensor : (Tensor, Tensor) -> (Tensor)", "aten::div.Scalar : (Tensor, Scalar) -> (Tensor)", - "aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)", - "aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)", - "aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)", - "aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)", - "aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)", - "aten::le.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::fmod.Scalar : (Tensor, Scalar) -> (Tensor)", "aten::masked_fill.Scalar : (Tensor, Tensor, Scalar) -> (Tensor)", "aten::clamp : (Tensor, Scalar?, Scalar?) 
-> (Tensor)", @@ -347,6 +341,12 @@ def emit_with_mutating_variants(key, **kwargs): emit_with_mutating_variants("aten::sub.Scalar : (Tensor, Scalar, Scalar) -> (Tensor)", has_canonicalizer=True) emit_with_mutating_variants("aten::mul.Scalar : (Tensor, Scalar) -> (Tensor)", has_canonicalizer=True) emit_with_mutating_variants("aten::eq.Tensor : (Tensor, Tensor) -> (Tensor)", has_folder=True) + emit_with_mutating_variants("aten::le.Scalar : (Tensor, Scalar) -> (Tensor)", has_folder=True) + emit_with_mutating_variants("aten::lt.Scalar : (Tensor, Scalar) -> (Tensor)", has_folder=True) + emit_with_mutating_variants("aten::gt.Scalar : (Tensor, Scalar) -> (Tensor)", has_folder=True) + emit_with_mutating_variants("aten::ge.Scalar : (Tensor, Scalar) -> (Tensor)", has_folder=True) + emit_with_mutating_variants("aten::eq.Scalar : (Tensor, Scalar) -> (Tensor)", has_folder=True) + emit_with_mutating_variants("aten::ne.Scalar : (Tensor, Scalar) -> (Tensor)", has_folder=True) emit_with_mutating_variants("aten::floor : (Tensor) -> (Tensor)", has_canonicalizer=True) emit_with_mutating_variants("aten::masked_fill.Tensor : (Tensor, Tensor, Tensor) -> (Tensor)", has_canonicalizer=True) diff --git a/projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py b/projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py index d9921d23d677..689fe182f57c 100644 --- a/projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py +++ b/projects/pt1/python/torch_mlir_e2e_test/test_suite/elementwise.py @@ -413,7 +413,7 @@ def __init__(self): ([-1, -1, -1], torch.float32, True), ]) def forward(self, a): - return torch.where(a > 0.5, 4.0, 8.0) + return torch.where(a > 0.5, 4.0, 8.0).to(torch.float) @register_test_case(module_factory=lambda: ElementwiseWhereScalarModule()) diff --git a/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir b/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir index d1f4307d4de6..9dceff316eaa 100644 --- 
a/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir +++ b/test/Conversion/TorchOnnxToTorch/simple_ops_g_to_p.mlir @@ -42,7 +42,7 @@ func.func @test_gather_nd(%arg0: !torch.vtensor<[3,4,5],f32>, %arg1: !torch.vten // CHECK: %[[AXIS:.+]] = torch.constant.int 0 // CHECK: %[[ZERO:.+]] = torch.constant.int 0 // CHECK: %[[ONE:.+]] = torch.constant.int 1 - // CHECK: %[[LT:.+]] = torch.aten.le.Scalar %arg1, %[[ZERO]] + // CHECK: %[[LT:.+]] = torch.aten.lt.Scalar %arg1, %[[ZERO]] // CHECK: %[[SZ:.+]] = torch.aten.size.int %arg0, %[[AXIS]] // CHECK: %[[ADD:.+]] = torch.aten.add.Scalar %arg1, %[[SZ]], %[[ONE]] // CHECK: %[[SEL:.+]] = torch.aten.where.self %[[LT]], %[[ADD]], %arg1 @@ -72,7 +72,7 @@ func.func @test_gather_scalar(%arg0: !torch.vtensor<[3,4,5],f32>, %arg1: !torch. // CHECK: %[[AXIS:.+]] = torch.constant.int 0 // CHECK: %[[ZERO:.+]] = torch.constant.int 0 // CHECK: %[[ONE:.+]] = torch.constant.int 1 - // CHECK: %[[LT:.+]] = torch.aten.le.Scalar %arg1, %[[ZERO]] + // CHECK: %[[LT:.+]] = torch.aten.lt.Scalar %arg1, %[[ZERO]] // CHECK: %[[SZ:.+]] = torch.aten.size.int %arg0, %[[AXIS]] // CHECK: %[[ADD:.+]] = torch.aten.add.Scalar %arg1, %[[SZ]], %[[ONE]] // CHECK: %[[SEL:.+]] = torch.aten.where.self %[[LT]], %[[ADD]], %arg1 diff --git a/test/Dialect/Torch/canonicalize.mlir b/test/Dialect/Torch/canonicalize.mlir index 2b5405b75197..a607365f4918 100644 --- a/test/Dialect/Torch/canonicalize.mlir +++ b/test/Dialect/Torch/canonicalize.mlir @@ -2708,3 +2708,128 @@ func.func @aten_cat_zero(%arg0 : !torch.vtensor<[4,5,6],f32>, %arg1 : !torch.vte %0 = torch.aten.cat %list, %dim : !torch.list<vtensor>, !torch.int -> !torch.vtensor<[4,5,6],f32> return %0 : !torch.vtensor<[4,5,6],f32> } + +// ----- + +// CHECK-LABEL: @aten_tensor_scalar_lt +func.func @aten_tensor_scalar_lt() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<true> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[CST]], %[[CST]] : 
!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> + %intTensor = torch.vtensor.literal(dense<1> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %fpTensor = torch.vtensor.literal(dense<1.0> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 2 + %fpScalar = torch.constant.float 2.0 + %intBool = torch.aten.lt.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.lt.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +} + + +// ----- + +// CHECK-LABEL: @aten_tensor_tensor_lt +func.func @aten_tensor_tensor_lt() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[UNSIGN:.+]] = torch.vtensor.literal(dense<true> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: %[[SIGNED:.+]] = torch.vtensor.literal(dense<[true, false, false, false]> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[UNSIGN]], %[[SIGNED]], %[[SIGNED]] + %intTensor = torch.vtensor.literal(dense<[127, -128, -127, -126]> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %uintTensor = torch.vtensor.literal(dense<[127, 128, 129, 130]> : tensor<4xui8>) : !torch.vtensor<[4],ui8> + %fpTensor = torch.vtensor.literal(dense<[127.0, 128.0, 129.0, 130.0]> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 128 + %fpScalar = torch.constant.float 128.0 + %intBool = torch.aten.lt.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %uintBool = torch.aten.lt.Scalar %uintTensor, %intScalar : !torch.vtensor<[4],ui8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.lt.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %uintBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +} + +// ----- + +// CHECK-LABEL: 
@aten_tensor_tensor_le +func.func @aten_tensor_tensor_le() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[UNSIGN:.+]] = torch.vtensor.literal(dense<true> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: %[[SIGNED:.+]] = torch.vtensor.literal(dense<[true, true, false, false]> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[UNSIGN]], %[[SIGNED]], %[[SIGNED]] + %intTensor = torch.vtensor.literal(dense<[127, -128, -127, -126]> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %uintTensor = torch.vtensor.literal(dense<[127, 128, 129, 130]> : tensor<4xui8>) : !torch.vtensor<[4],ui8> + %fpTensor = torch.vtensor.literal(dense<[127.0, 128.0, 129.0, 130.0]> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 128 + %fpScalar = torch.constant.float 128.0 + %intBool = torch.aten.le.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %uintBool = torch.aten.le.Scalar %uintTensor, %intScalar : !torch.vtensor<[4],ui8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.le.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %uintBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +} + + +// ----- + +// CHECK-LABEL: @aten_tensor_tensor_ge +func.func @aten_tensor_tensor_ge() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[UNSIGN:.+]] = torch.vtensor.literal(dense<false> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: %[[SIGNED:.+]] = torch.vtensor.literal(dense<[false, true, true, true]> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[UNSIGN]], %[[SIGNED]], %[[SIGNED]] + %intTensor = torch.vtensor.literal(dense<[127, -128, -127, -126]> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %uintTensor = torch.vtensor.literal(dense<[127, 128, 129, 130]> : tensor<4xui8>) : !torch.vtensor<[4],ui8> + %fpTensor = 
torch.vtensor.literal(dense<[127.0, 128.0, 129.0, 130.0]> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 128 + %fpScalar = torch.constant.float 128.0 + %intBool = torch.aten.ge.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %uintBool = torch.aten.ge.Scalar %uintTensor, %intScalar : !torch.vtensor<[4],ui8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.ge.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %uintBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +} + +// ----- + +// CHECK-LABEL: @aten_tensor_tensor_gt +func.func @aten_tensor_tensor_gt() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[UNSIGN:.+]] = torch.vtensor.literal(dense<false> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: %[[SIGNED:.+]] = torch.vtensor.literal(dense<[false, false, true, true]> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[UNSIGN]], %[[SIGNED]], %[[SIGNED]] + %intTensor = torch.vtensor.literal(dense<[127, -128, -127, -126]> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %uintTensor = torch.vtensor.literal(dense<[127, 128, 129, 130]> : tensor<4xui8>) : !torch.vtensor<[4],ui8> + %fpTensor = torch.vtensor.literal(dense<[127.0, 128.0, 129.0, 130.0]> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 128 + %fpScalar = torch.constant.float 128.0 + %intBool = torch.aten.gt.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %uintBool = torch.aten.gt.Scalar %uintTensor, %intScalar : !torch.vtensor<[4],ui8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.gt.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %uintBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +}
+ +// ----- + +// CHECK-LABEL: @aten_tensor_tensor_eq +func.func @aten_tensor_tensor_eq() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[UNSIGN:.+]] = torch.vtensor.literal(dense<false> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: %[[SIGNED:.+]] = torch.vtensor.literal(dense<[false, true, false, false]> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[UNSIGN]], %[[SIGNED]], %[[SIGNED]] + %intTensor = torch.vtensor.literal(dense<[127, -128, -127, -126]> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %uintTensor = torch.vtensor.literal(dense<[127, 128, 129, 130]> : tensor<4xui8>) : !torch.vtensor<[4],ui8> + %fpTensor = torch.vtensor.literal(dense<[127.0, 128.0, 129.0, 130.0]> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 128 + %fpScalar = torch.constant.float 128.0 + %intBool = torch.aten.eq.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %uintBool = torch.aten.eq.Scalar %uintTensor, %intScalar : !torch.vtensor<[4],ui8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.eq.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %uintBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +} + +// ----- + +// CHECK-LABEL: @aten_tensor_tensor_ne +func.func @aten_tensor_tensor_ne() -> (!torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>) { + // CHECK: %[[UNSIGN:.+]] = torch.vtensor.literal(dense<true> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: %[[SIGNED:.+]] = torch.vtensor.literal(dense<[true, false, true, true]> : tensor<4xi1>) : !torch.vtensor<[4],i1> + // CHECK: return %[[UNSIGN]], %[[SIGNED]], %[[SIGNED]] + %intTensor = torch.vtensor.literal(dense<[127, -128, -127, -126]> : tensor<4xsi8>) : !torch.vtensor<[4],si8> + %uintTensor = torch.vtensor.literal(dense<[127, 128, 129, 130]> : tensor<4xui8>) : 
!torch.vtensor<[4],ui8> + %fpTensor = torch.vtensor.literal(dense<[127.0, 128.0, 129.0, 130.0]> : tensor<4xf32>) : !torch.vtensor<[4],f32> + %intScalar = torch.constant.int 128 + %fpScalar = torch.constant.float 128.0 + %intBool = torch.aten.ne.Scalar %intTensor, %intScalar : !torch.vtensor<[4],si8>, !torch.int -> !torch.vtensor<[4],i1> + %uintBool = torch.aten.ne.Scalar %uintTensor, %intScalar : !torch.vtensor<[4],ui8>, !torch.int -> !torch.vtensor<[4],i1> + %fpBool = torch.aten.ne.Scalar %fpTensor, %fpScalar : !torch.vtensor<[4],f32>, !torch.float -> !torch.vtensor<[4],i1> + return %intBool, %uintBool, %fpBool : !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1>, !torch.vtensor<[4],i1> +}