// module_tensor_version_pw_conv2d - Joejiong/buddy-mlir GitHub Wiki

#map0 = affine_map<(d0) -> (d0)>
#map1 = affine_map<(d0) -> (d0 ceildiv 256)>
module  {

  func private @print_memref_f32(memref<*xf32>)

  // Allocates a dynamically-shaped 4-D buffer of size (%s1 x %s2 x %s3 x %s4)
  // and initializes every element to %f before returning it.
  // The caller owns the buffer and is responsible for memref.dealloc.
  func @alloc_4d_filled_f32(%s1 : index, %s2 : index, %s3 : index, %s4 : index, %f : f32) -> memref<?x?x?x?xf32> {
    %mem = memref.alloc(%s1, %s2, %s3, %s4) : memref<?x?x?x?xf32>
    linalg.fill(%f, %mem) : f32, memref<?x?x?x?xf32>
    return %mem : memref<?x?x?x?xf32>
  }

  // Private f32 variant of the 2-D convolution so the pointwise kernel below
  // is self-contained (the sibling @conv_2d_tensor in this file is typed i32).
  func private @conv_2d_f32(%in: tensor<?x?xf32>, %flt: tensor<?x?xf32>, %out: tensor<?x?xf32>) -> tensor<?x?xf32> {
    %res = linalg.conv_2d
      ins  (%in, %flt : tensor<?x?xf32>, tensor<?x?xf32>)
      outs (%out : tensor<?x?xf32>) -> tensor<?x?xf32>
    return %res : tensor<?x?xf32>
  }

  // Pointwise (1x1) convolution, channel-by-spatial-map:
  // for each (batch, output-feature) pair, run a 2-D convolution per input
  // channel, accumulate the per-channel planes elementwise, then insert the
  // accumulated <OH x OW> plane into the output tensor.
  // Layouts (NHWC input / HWCF filter): input <N x H x W x C>,
  // filter <1 x 1 x C x F>, result <N x H x W x F>. H and W are preserved
  // because the kernel is 1x1 with stride 1 and no padding.
  func @pw_cbsm_conv2d_outer_func_tensor(%input: tensor<?x?x?x?xf32>, %filter: tensor<?x?x?x?xf32>) -> tensor<?x?x?x?xf32> {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c2 = arith.constant 2 : index
    %c3 = arith.constant 3 : index
    %zero = arith.constant 0.000000e+00 : f32

    // Filter is HWCF: dim 2 = input channels (C), dim 3 = output features (F).
    %KC = tensor.dim %filter, %c2 : tensor<?x?x?x?xf32> // FC
    %OF = tensor.dim %filter, %c3 : tensor<?x?x?x?xf32> // FF == OF

    // Output N/H/W follow the input for a 1x1, stride-1 convolution.
    %ON = tensor.dim %input, %c0 : tensor<?x?x?x?xf32> // N
    %OH = tensor.dim %input, %c1 : tensor<?x?x?x?xf32> // H
    %OW = tensor.dim %input, %c2 : tensor<?x?x?x?xf32> // W

    // Zero-initialized output value. Tensors are SSA values, so all updates
    // below are threaded through the loops with iter_args.
    %out_raw = linalg.init_tensor [%ON, %OH, %OW, %OF] : tensor<?x?x?x?xf32>
    %out_init = linalg.fill(%zero, %out_raw) : f32, tensor<?x?x?x?xf32> -> tensor<?x?x?x?xf32>

    %result = affine.for %on = 0 to %ON iter_args(%out_n = %out_init) -> tensor<?x?x?x?xf32> {  // on : 0..N (batch)
      %out_after_of = affine.for %of = 0 to %OF iter_args(%out_f = %out_n) -> tensor<?x?x?x?xf32> {  // of : 0..F
        // Fresh <OH x OW> accumulator for this (batch, feature) pair.
        %acc_raw = linalg.init_tensor [%OH, %OW] : tensor<?x?xf32>
        %acc_init = linalg.fill(%zero, %acc_raw) : f32, tensor<?x?xf32> -> tensor<?x?xf32>

        // Accumulate the 2-D convolution contribution of every input channel.
        %acc = affine.for %kc = 0 to %KC iter_args(%sum = %acc_init) -> tensor<?x?xf32> {  // kc : 0..C
          // input_inner = input[on, :, :, kc] (rank-reducing slice to 2-D).
          %input_inner = tensor.extract_slice %input[%on, 0, 0, %kc] [1, %OH, %OW, 1] [1, 1, 1, 1] : tensor<?x?x?x?xf32> to tensor<?x?xf32>
          // filter_inner = filter[0, 0, kc, of] viewed as a 1x1 2-D kernel,
          // then cast to the dynamic type expected by the conv helper.
          %filter_static = tensor.extract_slice %filter[0, 0, %kc, %of] [1, 1, 1, 1] [1, 1, 1, 1] : tensor<?x?x?x?xf32> to tensor<1x1xf32>
          %filter_inner = tensor.cast %filter_static : tensor<1x1xf32> to tensor<?x?xf32>

          // Zero-filled destination for this single-channel convolution.
          %conv_raw = linalg.init_tensor [%OH, %OW] : tensor<?x?xf32>
          %conv_init = linalg.fill(%zero, %conv_raw) : f32, tensor<?x?xf32> -> tensor<?x?xf32>
          %conv = call @conv_2d_f32(%input_inner, %filter_inner, %conv_init) : (tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>

          // Elementwise accumulation across input channels.
          %next = arith.addf %conv, %sum : tensor<?x?xf32>
          affine.yield %next : tensor<?x?xf32>
        }

        // Write the accumulated plane into output[on, :, :, of].
        %updated = tensor.insert_slice %acc into %out_f[%on, 0, 0, %of] [1, %OH, %OW, 1] [1, 1, 1, 1] : tensor<?x?xf32> into tensor<?x?x?x?xf32>
        affine.yield %updated : tensor<?x?x?x?xf32>
      }
      affine.yield %out_after_of : tensor<?x?x?x?xf32>
    }
    return %result : tensor<?x?x?x?xf32>
  }

  // Thin wrapper around linalg.conv_2d on i32 tensors: convolves %input with
  // %filter, accumulating on top of %output's values, and returns the result
  // as a new tensor SSA value.
  func @conv_2d_tensor(%input:  tensor<?x?xi32>,
               %filter: tensor<?x?xi32>,
               %output: tensor<?x?xi32>) -> tensor<?x?xi32> {
    %res = linalg.conv_2d
      ins  (%input, %filter : tensor<?x?xi32>, tensor<?x?xi32>)
      outs (%output : tensor<?x?xi32>) -> tensor<?x?xi32>
    return %res : tensor<?x?xi32>
  }
  // Entry point: runs two small pointwise-convolution examples and prints the
  // resulting buffers. Both examples use the same shapes:
  //   filter <1x1x1x3>, input <1x2x2x1>, output <1x2x2x3>.
  // NOTE(review): @conv_2d_nhwc_hwcf and @pw_cbsm_conv2d are not defined in
  // this file — presumably provided elsewhere in the project; confirm before
  // running this module standalone.
  func @main() {
    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    %c2 = arith.constant 2 : index
    %c3 = arith.constant 3 : index
    %c6 = arith.constant 6 : index
    %c8 = arith.constant 8 : index
    %f10 = arith.constant 10.00000e+00 : f32
    %val = arith.constant 2.00000e+00 : f32
    %zero = arith.constant 0.00000e+00 : f32

    // --- Reference: plain NHWC/HWCF conv2d ---
    // normal_conv2d_test
    // filter: 1,1,1,3 
    // in    : 1,2,2,1
    // out   : 1,2,2,3
    %filter2D_nhwc = call @alloc_4d_filled_f32(%c1, %c1, %c1, %c3, %val) :(index, index, index, index, f32) -> (memref<?x?x?x?xf32>)
    %in2D_nhwc = call @alloc_4d_filled_f32(%c1, %c2, %c2, %c1, %val) : (index, index, index, index, f32) -> (memref<?x?x?x?xf32>)
    %out2D_nhwc = call @alloc_4d_filled_f32(%c1, %c2, %c2, %c3, %zero) : (index, index, index, index, f32) -> (memref<?x?x?x?xf32>)

    // Poke one input element to 10.0 so the printed output is not uniform.
    memref.store %f10, %in2D_nhwc[%c0, %c0, %c1, %c0] : memref<?x?x?x?xf32>
    call @conv_2d_nhwc_hwcf(%in2D_nhwc, %filter2D_nhwc, %out2D_nhwc) : (memref<?x?x?x?xf32>, memref<?x?x?x?xf32>, memref<?x?x?x?xf32>) -> ()
    %out2D_nhwc_ = memref.cast %out2D_nhwc : memref<?x?x?x?xf32> to memref<*xf32>
    call @print_memref_f32(%out2D_nhwc_): (memref<*xf32>) -> ()
 
    // --- Under test: pointwise (channel-by-spatial-map) conv2d; should print
    // the same values as the reference above ---
    // pw_conv2d_test
    // filter: 1,1,1,3 
    // in    : 1,2,2,1
    // out   : 1,2,2,3
    %filter2D_nhwc_pw = call @alloc_4d_filled_f32(%c1, %c1, %c1, %c3, %val) :(index, index, index, index, f32) -> (memref<?x?x?x?xf32>)
    %in2D_nhwc_pw = call @alloc_4d_filled_f32(%c1, %c2, %c2, %c1, %val) : (index, index, index, index, f32) -> (memref<?x?x?x?xf32>)
    %out2D_nhwc_pw = call @alloc_4d_filled_f32(%c1, %c2, %c2, %c3, %zero) : (index, index, index, index, f32) -> (memref<?x?x?x?xf32>)

    // Same single-element perturbation as the reference run.
    memref.store %f10, %in2D_nhwc_pw[%c0, %c0, %c1, %c0] : memref<?x?x?x?xf32>
    call @pw_cbsm_conv2d(%in2D_nhwc_pw, %filter2D_nhwc_pw, %out2D_nhwc_pw) : (memref<?x?x?x?xf32>, memref<?x?x?x?xf32>, memref<?x?x?x?xf32>) -> ()
    %out2D_nhwc_pw_ = memref.cast %out2D_nhwc_pw : memref<?x?x?x?xf32> to memref<*xf32>
    call @print_memref_f32(%out2D_nhwc_pw_): (memref<*xf32>) -> ()

    // dealloc memref
    memref.dealloc %filter2D_nhwc : memref<?x?x?x?xf32>
    memref.dealloc %in2D_nhwc : memref<?x?x?x?xf32>
    memref.dealloc %out2D_nhwc : memref<?x?x?x?xf32>

    memref.dealloc %filter2D_nhwc_pw : memref<?x?x?x?xf32>
    memref.dealloc %in2D_nhwc_pw : memref<?x?x?x?xf32>
    memref.dealloc %out2D_nhwc_pw : memref<?x?x?x?xf32>
    return
  }
}
// (GitHub wiki scrape artifact removed — kept as a comment so the file parses)