Commit 9b3ac0c
Update filecheck test patterns
Tombana committed Jun 15, 2024
1 parent f940562 commit 9b3ac0c
Showing 5 changed files with 51 additions and 45 deletions.
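FileCheck matches the printed IR textually, and newer MLIR revisions print an op's inherent attributes as properties wrapped in <{...}> instead of placing them in the plain attribute dictionary {...}; the patterns here are presumably being updated to stay in sync with such a toolchain. Every expected line that spells out those attributes therefore gains the surrounding angle brackets, while the attribute values themselves are unchanged. As an illustration, the lq.BMaxPool2d pattern from legalize-lce.mlir below changes from

    // TRANSLATE: %0 = "lq.BMaxPool2d"(%arg0) {filter_height = 2 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 2 : i32} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>

to

    // TRANSLATE: %0 = "lq.BMaxPool2d"(%arg0) <{filter_height = 2 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 2 : i32}> : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>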
larq_compute_engine/mlir/tests/bitpack-weights.mlir (2 changes: 1 addition & 1 deletion)
@@ -7,6 +7,6 @@ func.func @bitpack_bconv2d_filters(%arg0: tensor<256x32x32x1xi32>, %arg1: tensor
return %0 : tensor<256x30x30x16xf32>

// CHECK: %cst = arith.constant dense<0> : tensor<16x3x3x1xi32>
// CHECK: %0 = "lq.Bconv2d"(%arg0, %cst, %arg1, %arg2, %arg3) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<16x3x3x1xi32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
// CHECK: %0 = "lq.Bconv2d"(%arg0, %cst, %arg1, %arg2, %arg3) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<16x3x3x1xi32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
// CHECK-NEXT: return %0
}
larq_compute_engine/mlir/tests/const-fold.mlir (4 changes: 2 additions & 2 deletions)
@@ -8,8 +8,8 @@ func.func @quantize() -> (tensor<1x1x2x1xi32>, tensor<1x1x2x1xi32>) {
%1 = "lq.Quantize"(%neg) {} : (tensor<1x1x2x32xf32>) -> tensor<1x1x2x1xi32>
return %0, %1 : tensor<1x1x2x1xi32>, tensor<1x1x2x1xi32>

-// CHECK: %[[neg:.*]] = arith.constant dense<-1> : tensor<1x1x2x1xi32>
// CHECK: %[[pos:.*]] = arith.constant dense<0> : tensor<1x1x2x1xi32>
+// CHECK: %[[neg:.*]] = arith.constant dense<-1> : tensor<1x1x2x1xi32>
// CHECK: return %[[pos]], %[[neg]] : tensor<1x1x2x1xi32>, tensor<1x1x2x1xi32>
}

@@ -21,7 +21,7 @@ func.func @dequantize() -> (tensor<1x1x2x32xf32>, tensor<1x1x2x32xf32>) {
%1 = "lq.Dequantize"(%neg) {} : (tensor<1x1x2x1xi32>) -> tensor<1x1x2x32xf32>
return %0, %1 : tensor<1x1x2x32xf32>, tensor<1x1x2x32xf32>

-// CHECK: %[[neg:.*]] = arith.constant dense<-1.000000e+00> : tensor<1x1x2x32xf32>
// CHECK: %[[pos:.*]] = arith.constant dense<1.000000e+00> : tensor<1x1x2x32xf32>
+// CHECK: %[[neg:.*]] = arith.constant dense<-1.000000e+00> : tensor<1x1x2x32xf32>
// CHECK: return %[[pos]], %[[neg]] : tensor<1x1x2x32xf32>, tensor<1x1x2x32xf32>
}
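The pattern syntax in this file is unchanged; only the two CHECK lines for the folded constants are swapped, the diff indicating that the pos constant is now materialized before the neg one, and plain CHECK lines must match the output in sequence. The %[[pos:.*]] and %[[neg:.*]] forms are FileCheck substitution variables: the first occurrence (with the :.* pattern) captures the SSA name, and a later bare %[[pos]] or %[[neg]] on the return line must match that same name again. A minimal sketch of the mechanism, independent of this commit:

    // CHECK: %[[cst:.*]] = arith.constant dense<0> : tensor<2xi32>
    // CHECK: return %[[cst]] : tensor<2xi32>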
larq_compute_engine/mlir/tests/legalize-lce.mlir (4 changes: 2 additions & 2 deletions)
@@ -9,7 +9,7 @@ func.func @legalize_bconv2d(%arg0: tensor<256x32x32x1xi32>, %arg1: tensor<16x3x3
// CHECK: %0 = "tfl.custom"(%arg0, %arg1, %arg2, %arg3, %arg4) {custom_code = "LceBconv2d", custom_option = #tfl<const_bytes : "0x6368616E6E656C735F696E0064696C6174696F6E5F6865696768745F666163746F720064696C6174696F6E5F77696474685F666163746F720066757365645F61637469766174696F6E5F66756E6374696F6E007061645F76616C7565730070616464696E67007374726964655F686569676874007374726964655F776964746800088277614C3329221508010803010100000101010404040404040404102401">} : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
// CHECK-NEXT: return %0

// TRANSLATE: %0 = "lq.Bconv2d"(%arg0, %arg1, %arg2, %arg3, %arg4) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
// TRANSLATE: %0 = "lq.Bconv2d"(%arg0, %arg1, %arg2, %arg3, %arg4) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<16x3x3x3xf32>, tensor<16xf32>, tensor<16xf32>, none) -> tensor<256x30x30x16xf32>
// TRANSLATE-NEXT: return %0 : tensor<256x30x30x16xf32>
}

@@ -21,7 +21,7 @@ func.func @legalize_bmax_pool2d(%arg0: tensor<256x32x32x3xi32>) -> tensor<256x16
// CHECK: %0 = "tfl.custom"(%arg0) {custom_code = "LceBMaxPool2d", custom_option = #tfl<const_bytes : "0x70616464696E67007374726964655F7769647468007374726964655F6865696768740066696C7465725F77696474680066696C7465725F68656967687400050F1D412D3B050105020200020204040404040A2401">} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
// CHECK-NEXT: return %0

// TRANSLATE: %0 = "lq.BMaxPool2d"(%arg0) {filter_height = 2 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 2 : i32} : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
// TRANSLATE: %0 = "lq.BMaxPool2d"(%arg0) <{filter_height = 2 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 2 : i32}> : (tensor<256x32x32x3xi32>) -> tensor<256x16x16x3xi32>
// TRANSLATE-NEXT: return %0 : tensor<256x16x16x3xi32>
}

larq_compute_engine/mlir/tests/optimize.mlir (26 changes: 13 additions & 13 deletions)
@@ -109,7 +109,7 @@ func.func @fuse_relu_into_bconv2d(%arg0: tensor<256x32x32x1xi32>, %arg1: tensor<
%1 = "tfl.relu"(%0) : (tensor<256x30x30x16xf32>) -> tensor<256x30x30x16xf32>
return %1 : tensor<256x30x30x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: return %0
}

@@ -121,7 +121,7 @@ func.func @fuse_relu6_into_bconv2d(%arg0: tensor<256x32x32x1xi32>, %arg1: tensor
%1 = "tfl.relu6"(%0) : (tensor<256x30x30x16xf32>) -> tensor<256x30x30x16xf32>
return %1 : tensor<256x30x30x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU6", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU6", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: return %0
}

@@ -133,7 +133,7 @@ func.func @fuse_relu1_into_bconv2d(%arg0: tensor<256x32x32x1xi32>, %arg1: tensor
%1 = "tfl.relu_n1_to_1"(%0) : (tensor<256x30x30x16xf32>) -> tensor<256x30x30x16xf32>
return %1 : tensor<256x30x30x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU_N1_TO_1", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU_N1_TO_1", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: return %0
}

@@ -145,7 +145,7 @@ func.func @fuse_relu_into_bconv2d_padding_same_one(%arg0: tensor<256x32x32x1xi32
%1 = "tfl.relu"(%0) : (tensor<256x32x32x16xf32>) -> tensor<256x32x32x16xf32>
return %1 : tensor<256x32x32x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU", pad_values = 1 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "RELU", pad_values = 1 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: return %0
}

@@ -157,7 +157,7 @@ func.func @do_not_fuse_relu_into_bconv2d_padding_same_zero(%arg0: tensor<256x32x
%1 = "tfl.relu"(%0) : (tensor<256x32x32x16xf32>) -> tensor<256x32x32x16xf32>
return %1 : tensor<256x32x32x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: %1 = "tfl.relu"(%0)
// CHECK-NEXT: return %1
}
@@ -170,7 +170,7 @@ func.func @do_not_fuse_relu_into_bconv2d_no_post_activation_bias(%arg0: tensor<2
%1 = "tfl.relu"(%0) : (tensor<256x30x30x16xf32>) -> tensor<256x30x30x16xf32>
return %1 : tensor<256x30x30x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: %1 = "tfl.relu"(%0)
// CHECK-NEXT: return %1
}
@@ -183,7 +183,7 @@ func.func @do_not_fuse_relu_into_bconv2d_no_post_activation_multiplier(%arg0: te
%1 = "tfl.relu"(%0) : (tensor<256x30x30x16xf32>) -> tensor<256x30x30x16xf32>
return %1 : tensor<256x30x30x16xf32>

// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}
// CHECK: %0 = "lq.Bconv2d"(%arg0, %arg1, %cst, %cst_0, %arg2) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}>
// CHECK-NEXT: %1 = "tfl.relu"(%0)
// CHECK-NEXT: return %1
}
@@ -195,7 +195,7 @@ func.func @target_specific_reorder_maxpool_2d_quantize(%arg0: tensor<256x32x32x6
return %1 : tensor<256x16x8x3xi32>

// CHECK-ARM-NEXT: %0 = "lq.Quantize"(%arg0) : (tensor<256x32x32x65xf32>) -> tensor<256x32x32x3xi32>
// CHECK-ARM-NEXT: %1 = "lq.BMaxPool2d"(%0) {filter_height = 3 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 4 : i32} : (tensor<256x32x32x3xi32>) -> tensor<256x16x8x3xi32>
// CHECK-ARM-NEXT: %1 = "lq.BMaxPool2d"(%0) <{filter_height = 3 : i32, filter_width = 2 : i32, padding = "SAME", stride_height = 2 : i32, stride_width = 4 : i32}> : (tensor<256x32x32x3xi32>) -> tensor<256x16x8x3xi32>
// CHECK-ARM-NEXT: return %1

// CHECK-XCORE-NEXT: %0 = "tfl.max_pool_2d"(%arg0) {filter_height = 3 : i32, filter_width = 2 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 2 : i32, stride_w = 4 : i32} : (tensor<256x32x32x65xf32>) -> tensor<256x16x8x65xf32>
@@ -236,7 +236,7 @@ func.func @bitpack_activation_thresholds_with_negative_post_multipliers(%arg0: t
// Verify correct thresholds. These have been manually computed.
// CHECK-NEXT: %cst_0 = arith.constant dense<[0, 3, 2, 2, -2147483648, 2, 1, 2]> : tensor<8xi32>

// CHECK-NEXT: %1 = "lq.Bconv2d"(%arg0, %cst, %0, %0, %cst_0) {channels_in = 1 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 1 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<8x2x2x1xf32>, none, none, tensor<8xi32>) -> tensor<256x32x32x1xi32>
// CHECK-NEXT: %1 = "lq.Bconv2d"(%arg0, %cst, %0, %0, %cst_0) <{channels_in = 1 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 1 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<8x2x2x1xf32>, none, none, tensor<8xi32>) -> tensor<256x32x32x1xi32>
// CHECK-NEXT: return %1
}

@@ -250,7 +250,7 @@ func.func @bitpack_activations_valid_padding(%arg0: tensor<256x32x32x1xi32>) ->
%2 = "lq.Quantize"(%1) : (tensor<256x30x30x65xf32>) -> tensor<256x30x30x3xi32>
return %2 : tensor<256x30x30x3xi32>

// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %0, %0, %cst_0) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, none, none, tensor<65xi32>) -> tensor<256x30x30x3xi32>
// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %0, %0, %cst_0) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, none, none, tensor<65xi32>) -> tensor<256x30x30x3xi32>
// CHECK-NEXT: return %1
}

@@ -264,7 +264,7 @@ func.func @bitpack_activations_same_one_padding(%arg0: tensor<256x32x32x1xi32>)
%2 = "lq.Quantize"(%1) : (tensor<256x32x32x65xf32>) -> tensor<256x32x32x3xi32>
return %2 : tensor<256x32x32x3xi32>

// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %0, %0, %cst_0) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 1 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, none, none, tensor<65xi32>) -> tensor<256x32x32x3xi32>
// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %0, %0, %cst_0) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 1 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, none, none, tensor<65xi32>) -> tensor<256x32x32x3xi32>
// CHECK-NEXT: return %1
}

@@ -278,7 +278,7 @@ func.func @do_not_bitpack_activations_same_zero_padding(%arg0: tensor<256x32x32x
%2 = "lq.Quantize"(%1) : (tensor<256x32x32x65xf32>) -> tensor<256x32x32x3xi32>
return %2 : tensor<256x32x32x3xi32>

// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %cst_0, %cst_1, %0) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, tensor<65xf32>, tensor<65xf32>, none) -> tensor<256x32x32x65xf32>
// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %cst_0, %cst_1, %0) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "SAME", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, tensor<65xf32>, tensor<65xf32>, none) -> tensor<256x32x32x65xf32>
// CHECK-NEXT: %2 = "lq.Quantize"(%1) : (tensor<256x32x32x65xf32>) -> tensor<256x32x32x3xi32>
// CHECK-NEXT: return %2
}
@@ -293,7 +293,7 @@ func.func @do_not_bitpack_activations_multiple_uses(%arg0: tensor<256x32x32x1xi3
%2 = "lq.Quantize"(%1) : (tensor<256x30x30x65xf32>) -> tensor<256x30x30x3xi32>
return %1, %2: tensor<256x30x30x65xf32>, tensor<256x30x30x3xi32>

// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %cst_0, %cst_1, %0) {channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32} : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, tensor<65xf32>, tensor<65xf32>, none) -> tensor<256x30x30x65xf32>
// CHECK: %1 = "lq.Bconv2d"(%arg0, %cst, %cst_0, %cst_1, %0) <{channels_in = 3 : i32, dilation_height_factor = 1 : i32, dilation_width_factor = 1 : i32, fused_activation_function = "NONE", pad_values = 0 : i32, padding = "VALID", stride_height = 1 : i32, stride_width = 1 : i32}> : (tensor<256x32x32x1xi32>, tensor<65x3x3x3xf32>, tensor<65xf32>, tensor<65xf32>, none) -> tensor<256x30x30x65xf32>
// CHECK-NEXT: %2 = "lq.Quantize"(%1) : (tensor<256x30x30x65xf32>) -> tensor<256x30x30x3xi32>
// CHECK-NEXT: return %1, %2
}
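The RUN lines at the top of these test files sit outside the changed hunks, so they are not visible in this diff. A rough sketch of how such a FileCheck test is typically driven, where the tool name and pass flag below are assumptions for illustration rather than values taken from the repository:

    // RUN: lce-tf-opt %s -lce-optimize | FileCheck %s

Prefixes like CHECK-ARM, CHECK-XCORE and TRANSLATE would normally be selected by additional RUN lines passing a different --check-prefix to FileCheck.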