@@ -645,6 +645,22 @@ func.func @torch.aten.ne.Tensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !
 // -----
 
+// CHECK-LABEL: func.func @torch.aten.logical_or$basic(
+// CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],i1>,
+// CHECK-SAME: %[[VAL_1:.*]]: !torch.vtensor<[?,?],i1>) -> !torch.vtensor<[?,?],i1> {
+// CHECK: %[[VAL_2:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],i1> -> tensor<?x?xi1>
+// CHECK: %[[VAL_3:.*]] = torch_c.to_builtin_tensor %[[VAL_1]] : !torch.vtensor<[?,?],i1> -> tensor<?x?xi1>
+// CHECK: %[[VAL_4:.*]] = tosa.logical_or %[[VAL_2]], %[[VAL_3]] : (tensor<?x?xi1>, tensor<?x?xi1>) -> tensor<?x?xi1>
+// CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor<?x?xi1> -> !torch.vtensor<[?,?],i1>
+// CHECK: return %[[VAL_5]] : !torch.vtensor<[?,?],i1>
+// CHECK: }
+func.func @torch.aten.logical_or$basic(%arg0: !torch.vtensor<[?,?],i1>, %arg1: !torch.vtensor<[?,?],i1>) -> !torch.vtensor<[?,?],i1> {
+  %0 = torch.aten.logical_or %arg0, %arg1 : !torch.vtensor<[?,?],i1>, !torch.vtensor<[?,?],i1> -> !torch.vtensor<[?,?],i1>
+  return %0 : !torch.vtensor<[?,?],i1>
+}
+
+// -----
+
 // CHECK-LABEL: func.func @forward(
 // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[3,4,2],f32>) -> !torch.vtensor<[3,2,4],f32> {
 // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[3,4,2],f32> -> tensor<3x4x2xf32>
@@ -1055,6 +1071,61 @@ func.func @torch.aten.Scalar$basic(%arg0: !torch.vtensor<[1,1,128,128],si64>) ->
   return %0 : !torch.vtensor<[1,1,128,128],si64>
 }
 
+// -----
+// CHECK-LABEL: func.func @torch.aten.slice.negative_start(
+// CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[4,65,256],f32>) -> !torch.vtensor<[4,16,256],f32> {
+// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[4,65,256],f32> -> tensor<4x65x256xf32>
+// CHECK: %[[VAL_2:.*]] = torch.constant.int 0
+// CHECK: %[[VAL_3:.*]] = torch.constant.int 1
+// CHECK: %[[VAL_4:.*]] = torch.constant.int 100
+// CHECK: %[[VAL_5:.*]] = torch.constant.int -16
+// CHECK: %[[VAL_6:.*]] = tosa.slice %[[VAL_1]] {size = array<i64: 4, 16, 256>, start = array<i64: 0, 49, 0>} : (tensor<4x65x256xf32>) -> tensor<4x16x256xf32>
+// CHECK: %[[VAL_7:.*]] = torch_c.from_builtin_tensor %[[VAL_6]] : tensor<4x16x256xf32> -> !torch.vtensor<[4,16,256],f32>
+// CHECK: return %[[VAL_7]] : !torch.vtensor<[4,16,256],f32>
+// CHECK: }
+func.func @torch.aten.slice.negative_start(%arg0: !torch.vtensor<[4,65,256],f32>) -> !torch.vtensor<[4,16,256],f32> {
+  %int0 = torch.constant.int 0
+  %int1 = torch.constant.int 1
+  %int100 = torch.constant.int 100
+  %int-16 = torch.constant.int -16
+  %0 = torch.aten.slice.Tensor %arg0, %int1, %int-16, %int100, %int1 : !torch.vtensor<[4,65,256],f32>, !torch.int, !torch.int, !torch.int, !torch.int -> !torch.vtensor<[4,16,256],f32>
+  return %0 : !torch.vtensor<[4,16,256],f32>
+}
+
+// -----
+// CHECK-LABEL: func.func @torch.aten.clamp.min_none(
+// CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[1,1,128,128],si64>) -> !torch.vtensor<[1,1,128,128],si64> {
+// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[1,1,128,128],si64> -> tensor<1x1x128x128xi64>
+// CHECK: %[[VAL_2:.*]] = torch.constant.int 0
+// CHECK: %[[VAL_3:.*]] = torch.constant.none
+// CHECK: %[[VAL_4:.*]] = tosa.clamp %[[VAL_1]] {max_fp = 0.000000e+00 : f32, max_int = 0 : i64, min_fp = -3.40282347E+38 : f32, min_int = -9223372036854775808 : i64} : (tensor<1x1x128x128xi64>) -> tensor<1x1x128x128xi64>
+// CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor<1x1x128x128xi64> -> !torch.vtensor<[1,1,128,128],si64>
+// CHECK: return %[[VAL_5]] : !torch.vtensor<[1,1,128,128],si64>
+// CHECK: }
+func.func @torch.aten.clamp.min_none(%arg0: !torch.vtensor<[1,1,128,128],si64>) -> !torch.vtensor<[1,1,128,128],si64> {
+  %int0 = torch.constant.int 0
+  %none = torch.constant.none
+  %0 = torch.aten.clamp %arg0, %none, %int0 : !torch.vtensor<[1,1,128,128],si64>, !torch.none, !torch.int -> !torch.vtensor<[1,1,128,128],si64>
+  return %0 : !torch.vtensor<[1,1,128,128],si64>
+}
+
+// -----
+// CHECK-LABEL: func.func @torch.aten.clamp.max_none(
+// CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[1,1,128,128],si64>) -> !torch.vtensor<[1,1,128,128],si64> {
+// CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[1,1,128,128],si64> -> tensor<1x1x128x128xi64>
+// CHECK: %[[VAL_2:.*]] = torch.constant.int 0
+// CHECK: %[[VAL_3:.*]] = torch.constant.none
+// CHECK: %[[VAL_4:.*]] = tosa.clamp %[[VAL_1]] {max_fp = 3.40282347E+38 : f32, max_int = 9223372036854775807 : i64, min_fp = 0.000000e+00 : f32, min_int = 0 : i64} : (tensor<1x1x128x128xi64>) -> tensor<1x1x128x128xi64>
+// CHECK: %[[VAL_5:.*]] = torch_c.from_builtin_tensor %[[VAL_4]] : tensor<1x1x128x128xi64> -> !torch.vtensor<[1,1,128,128],si64>
+// CHECK: return %[[VAL_5]] : !torch.vtensor<[1,1,128,128],si64>
+// CHECK: }
+func.func @torch.aten.clamp.max_none(%arg0: !torch.vtensor<[1,1,128,128],si64>) -> !torch.vtensor<[1,1,128,128],si64> {
+  %int0 = torch.constant.int 0
+  %none = torch.constant.none
+  %0 = torch.aten.clamp %arg0, %int0, %none : !torch.vtensor<[1,1,128,128],si64>, !torch.int, !torch.none -> !torch.vtensor<[1,1,128,128],si64>
+  return %0 : !torch.vtensor<[1,1,128,128],si64>
+}
+
 // -----
 // CHECK-LABEL: func.func @torch.aten.clamp(
 // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[1,1,128,128],si64>) -> !torch.vtensor<[1,1,128,128],si64> {