How to use Tiny YOLO on TensorRT 3.0 through nvDecinfer_Detection in the DeepStream sample

I have converted tiny-yolo.cfg into tiny-yolo.prototxt and have handled the negative slope of the leaky ReLU by bypassing it with a ReLU + Scale + Eltwise combination. But I am still getting an error while the prototxt is parsed. The error is:

Parameter check failed at: Network.cpp::addScale::118, condition: shift.count == 0 || shift.count == weightCount
error parsing layer type Scale index 9
./run.sh: line 42: 4750 Segmentation fault (core dumped) …/bin/sample_detection -devID_display=${DISPLAY_GPU} -devID_infer=${INFER_GPU} -nChannels=${CHANNELS} -fileList=${FILE_LIST} -deployFile=${DEPLOY} -modelFile=${MODEL} -labelFile=${LABEL} -int8=1 -calibrationTableFile=${CALIBRATION} -tileWidth=${TILE_WIDTH} -tileHeight=${TILE_HEIGHT} -tilesInRow=${TILES_IN_ROW} -fullscreen=0 -gui=0 -endlessLoop=0

I am attaching the tiny-yolo.prototxt below; can you help me out?
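
For reference, the identity this replacement relies on (my own derivation, so please double-check it): a leaky ReLU with negative slope alpha can be rewritten as

leaky_relu(x) = (1 - alpha) * relu(x) + alpha * x

which is why each block below pairs the ReLU output with a Power-scaled copy of its input and merges the two branches with an Eltwise SUM.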

name: "tiny-yolo"
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 416
  dim: 416
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 16
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale1"
  type: "Scale"
  bottom: "bn1"
  top: "scale1"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "scale1"
  top: "relu1"
}
layer {
  name: "scale2"
  type: "Power"
  bottom: "scale1"
  top: "scale2"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise1"
  type: "Eltwise"
  bottom: "relu1"
  bottom: "scale2"
  top: "eltwise1"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "eltwise1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 32
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3"
  type: "Scale"
  bottom: "bn2"
  top: "scale3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "scale3"
  top: "relu2"
}
layer {
  name: "scale4"
  type: "Power"
  bottom: "scale3"
  top: "scale4"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise2"
  type: "Eltwise"
  bottom: "relu2"
  bottom: "scale4"
  top: "eltwise2"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool3"
  type: "Pooling"
  bottom: "eltwise2"
  top: "pool3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "eltwise2"
  top: "conv4"
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn4"
  type: "BatchNorm"
  bottom: "conv4"
  top: "bn4"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale5"
  type: "Scale"
  bottom: "bn4"
  top: "scale5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "scale5"
  top: "relu3"
}
layer {
  name: "scale6"
  type: "Power"
  bottom: "relu3"
  top: "scale6"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise3"
  type: "Eltwise"
  bottom: "scale6"
  bottom: "relu3"
  top: "eltwise3"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool4"
  type: "Pooling"
  bottom: "eltwise3"
  top: "pool4"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "pool4"
  top: "conv5"
  convolution_param {
    num_output: 256
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn5"
  type: "BatchNorm"
  bottom: "conv5"
  top: "bn5"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale"
  type: "Scale"
  bottom: "bn5"
  top: "scale5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "bn5"
  top: "relu4"
}
layer {
  name: "scale7"
  type: "Power"
  bottom: "bn5"
  top: "scale7"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise4"
  type: "Eltwise"
  bottom: "relu4"
  bottom: "scale7"
  top: "eltwise4"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "eltwise4"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv6"
  type: "Convolution"
  bottom: "pool5"
  top: "conv6"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn6"
  type: "BatchNorm"
  bottom: "conv6"
  top: "bn6"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale8"
  type: "Scale"
  bottom: "bn6"
  top: "scale8"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "scale8"
  top: "relu5"
}
layer {
  name: "scale9"
  type: "Power"
  top: "scale8"
  bottom: "scale9"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise5"
  type: "Eltwise"
  top: "eltwise5"
  bottom: "relu5"
  bottom: "scale9"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool6"
  type: "Pooling"
  bottom: "eltwise5"
  top: "pool6"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 1
  }
}
layer {
  name: "conv7"
  type: "Convolution"
  bottom: "pool6"
  top: "conv7"
  convolution_param {
    num_output: 1024
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn7"
  type: "BatchNorm"
  bottom: "conv7"
  top: "bn7"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale10"
  type: "Scale"
  bottom: "bn7"
  top: "scale10"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "scale10"
  top: "relu6"
}
layer {
  name: "scale11"
  type: "Power"
  top: "scale10"
  bottom: "scale11"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise6"
  type: "Eltwise"
  top: "eltwise6"
  bottom: "relu6"
  bottom: "scale11"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "conv8"
  type: "Convolution"
  bottom: "eltwise6"
  top: "conv8"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn8"
  type: "BatchNorm"
  bottom: "conv8"
  top: "bn8"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale12"
  type: "Scale"
  bottom: "bn8"
  top: "scale12"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu7"
  type: "ReLU"
  bottom: "scale12"
  top: "relu7"
}
layer {
  name: "scale13"
  type: "Power"
  top: "scale12"
  bottom: "scale13"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise7"
  type: "Eltwise"
  top: "eltwise7"
  bottom: "relu7"
  bottom: "scale13"
  eltwise_param {
    operation: SUM
  }
}

layer {
  name: "conv9"
  type: "Convolution"
  bottom: "scale13"
  top: "conv9"
  convolution_param {
    num_output: 40
    kernel_size: 1
    stride: 1
    pad: 0
  }
}

Hi,

It looks like there is something incorrect in your prototxt.
Could you re-check whether this model is well-formed? Especially the Eltwise layers.
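
For illustration only, here is a minimal sketch of how such a block is usually wired (the blob names relu_n / slope_n / eltwise_n and the 0.1 slope are hypothetical). The key constraints are that every bottom must name a top produced by an earlier layer, and top/bottom must not be swapped:

layer {
  name: "relu_n"
  type: "ReLU"
  bottom: "scale_n"   # pre-activation blob from the preceding Scale layer
  top: "relu_n"
}
layer {
  name: "slope_n"
  type: "Power"
  bottom: "scale_n"   # the same pre-activation blob, multiplied by the slope
  top: "slope_n"
  power_param {
    scale: 0.1
  }
}
layer {
  name: "eltwise_n"
  type: "Eltwise"
  bottom: "relu_n"    # both bottoms are the tops of the two branches above
  bottom: "slope_n"
  top: "eltwise_n"
  eltwise_param {
    operation: SUM
  }
}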

Thanks.

This is my new prototxt; could you please verify it?

name: "tiny-yolo"
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 416
  dim: 416
}
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 16
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "bn1"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale1"
  type: "Scale"
  bottom: "bn1"
  top: "scale1"
  scale_param {
    bias_term: true
  }
layer {
  name: "relu1"	
  type: "Relu"
  bottom: "scale1" 
  top: "relu1"
}
layer {
 name: "scale2"
 type: "Power"
 bottom: "scale1"
 top: "scale2"
 power_param {
       scale: 0.08
       }
}
layer {
  name: "eltwise1"  
  type: "Eltwise" 
  bottom: "relu1"
  bottom: "scale2"
  top: "eltwise1"
  eltwise_param {
        operation: SUM
        }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "eltwise1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  convolution_param {
    num_output: 32
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn2"
  type: "BatchNorm"
  bottom: "conv2"
  top: "bn2"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale3"
  type: "Scale"
  bottom: "bn2"
  top: "scale3"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu2"	
  type: "Relu"
  bottom: "scale3" 
  top: "relu2"
}
layer {
 name: "scale4"
 type: "Power"
 bottom: "scale3"
 top: "scale4"
 power_param {
       scale: 0.08
       }
}
layer {
  name: "eltwise2"  
  type: "Eltwise" 
  bottom: "relu2"
  bottom: "scale4"
  top: "eltwise2"
  eltwise_param {
        operation: SUM
        }
}
layer {
  name: "pool3"
  type: "Pooling"
  bottom: "eltwise2"
  top: "pool3"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "eltwise2"
  top: "conv4"
  convolution_param {
    num_output: 128
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn4"
  type: "BatchNorm"
  bottom: "conv4"
  top: "bn4"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale5"
  type: "Scale"
  bottom: "bn4"
  top: "scale5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu3"
  type: "Relu"
  bottom: "scale5"
  top: "relu3"
}
layer {
  name: "scale6"
  type: "Power"
  bottom: "relu3"
  top: "scale6"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise3"
  type: "Eltwise"
  bottom: "scale6"
  bottom: "relu3"
  top: "eltwise3"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool4"
  type: "Pooling"
  bottom: "eltwise3"
  top: "pool4"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "pool4"
  top: "conv5"
  convolution_param {
    num_output: 256
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn5"
  type: "BatchNorm"
  bottom: "conv5"
  top: "bn5"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale"
  type: "Scale"
  bottom: "bn5"
  top: "scale5"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu4"
  type: "Relu"
  bottom: "bn5"
  top: "relu4"
}
layer {
  name: "scale7"
  type: "Power"
  bottom: "bn5"
  top: "scale7"
  scale_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise4"
  type: "Eltwise"
  bottom: "relu4"
  bottom: "scale7"
  top: "eltwise4"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "eltwise4"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 2
  }
}
layer {
  name: "conv6"
  type: "Convolution"
  bottom: "pool5"
  top: "conv6"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn6"
  type: "BatchNorm"
  bottom: "conv6"
  top: "bn6"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale8"
  type: "Scale"
  bottom: "bn6"
  top: "scale8"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu5"
  type: "Relu"
  bottom: "scale8"
  top: "relu5"
}
layer {
  name: "scale9"
  type: "Power"
  top: "scale8"
  bottom: "scale9"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise5"
  type: "Eltwise"
  top: "eltwise5"
  bottom: "relu5"
  bottom: "scale9"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "pool6"
  type: "Pooling"
  bottom: "eltwise5"
  top: "pool6"
  pooling_param {
    pool: MAX
    kernel_size: 2
    stride: 1
  }
}
layer {
  name: "conv7"
  type: "Convolution"
  bottom: "pool6"
  top: "conv7"
  convolution_param {
    num_output: 1024
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn7"
  type: "BatchNorm"
  bottom: "conv7"
  top: "bn7"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale10"
  type: "Scale"
  bottom: "bn7"
  top: "scale10"
  scale_param {
    bias_term: true
  }
layer {
  name: "relu6"
  type: "Relu"
  bottom: "scale10"
  top: "relu6"
}
layer {
  name: "scale11"
  type: "Power"
  top: "scale10"
  bottom: "scale11"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise6"
  type: "Eltwise"
  top: "eltwise6"
  bottom: "relu6"
  bottom: "scale11"
  eltwise_param {
    operation: SUM
  }
}
layer {
  name: "conv8"
  type: "Convolution"
  bottom: "eltwise6"
  top: "conv8"
  convolution_param {
    num_output: 512
    kernel_size: 3
    stride: 1
    pad: 1
    bias_term: false
  }
}
layer {
  name: "bn8"
  type: "BatchNorm"
  bottom: "conv8"
  top: "bn8"
  batch_norm_param {
    use_global_stats: true
  }
}
layer {
  name: "scale12"
  type: "Scale"
  bottom: "bn8"
  top: "scale12"
  scale_param {
    bias_term: true
  }
}
layer {
  name: "relu7"
  type: "Relu"
  bottom: "scale12"
  top: "relu7"
}
layer {
  name: "scale13"
  type: "Power"
  top: "scale12"
  bottom: "scale13"
  power_param {
    scale: 0.08
  }
}
layer {
  name: "eltwise7"
  type: "Eltwise"
  top: "eltwise7"
  bottom: "relu7"
  bottom: "scale13"
  eltwise_param {
    operation: SUM
  }
}

layer {
  name: "conv9"
  type: "Convolution"
  bottom: "scale13"
  top: "conv9"
  convolution_param {
    num_output: 40
    kernel_size: 1
    stride: 1
    pad: 0
  }
}

Hi,

It's recommended to use DIGITS to visualize the model for debugging.
There are some missing '}' characters and an incorrect parameter name in your model.
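
For example (a sketch of the pattern only, not verified against your weights): the scale7 layer is a Power layer, so its scale must go in power_param; ScaleParameter has no scale field, which breaks parsing:

layer {
  name: "scale7"
  type: "Power"
  bottom: "bn5"
  top: "scale7"
  power_param {   # was scale_param, which does not accept a scale field
    scale: 0.08
  }
}

Also check that every layer block is closed: for instance, the scale1 and scale10 layers are missing their closing '}' before the next layer begins.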

You can try DIGITS to verify the model's correctness.

Thanks.