Hello — I’ve been trying to convert the code from sampleUffSSD to use newer TensorFlow models (>= 1.8) instead of the sample COCO file you reference.
The problem is that when I convert with the current sample config.py:
import graphsurgeon as gs
import tensorflow as tf
# --- Plugin stand-ins for the TF subgraphs that UFF/TensorRT cannot convert ---

# Placeholder node that replaces the image input / preprocessor.
Input = gs.create_node(
    "Input",
    op="Placeholder",
    dtype=tf.float32,
    shape=[1, 3, 300, 300],
)

# Prior-box (anchor) generator replacing MultipleGridAnchorGenerator.
PriorBox = gs.create_node(
    "PriorBox",
    numLayers=6,
    minScale=0.2,
    maxScale=0.95,
    aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
    layerVariances=[0.1, 0.1, 0.2, 0.2],
    featureMapShapes=[19, 10, 5, 3, 2, 1],
)

# Non-maximum-suppression / detection-output node replacing Postprocessor.
NMS = gs.create_node(
    "NMS",
    scoreThreshold=1e-8,
    iouThreshold=0.6,
    maxDetectionsPerClass=100,
    maxTotalDetections=100,
    numClasses=91,
    scoreConverter="SIGMOID",
)

# Concatenation nodes that gather the per-layer tensors fed into NMS.
concat_priorbox = gs.create_node("concat_priorbox", dtype=tf.float32, axis=2)
concat_box_loc = gs.create_node("concat_box_loc")
concat_box_conf = gs.create_node("concat_box_conf")

# Map each TF namespace / op name onto the plugin node that replaces it
# when the graph is collapsed in preprocess().
namespace_plugin_map = {
    "MultipleGridAnchorGenerator": PriorBox,
    "Postprocessor": NMS,
    "Preprocessor": Input,
    "ToFloat": Input,
    "image_tensor": Input,
    "MultipleGridAnchorGenerator/Concatenate": concat_priorbox,
    "concat": concat_box_loc,
    "concat_1": concat_box_conf,
}
def preprocess(dynamic_graph):
    """Mutate *dynamic_graph* in place for UFF conversion.

    Collapses the TF namespaces listed in ``namespace_plugin_map`` into
    their TensorRT plugin stand-ins, then strips the original graph
    outputs so the NMS plugin is left as the single output node.
    """
    # Now create a new graph by collapsing namespaces
    dynamic_graph.collapse_namespaces(namespace_plugin_map)
    # Remove the outputs, so we just have a single output node (NMS).
    dynamic_graph.remove(dynamic_graph.graph_outputs, remove_exclusive_dependencies=False)
I get an NMS node with a different number of inputs.
Old tf model
Loading ssd_inception_v2_coco_2017_frozen_inference_graph.pb
Using output node NMS
Converting to UFF graph
Warning: No conversion function registered for layer: NMS yet.
Converting as custom op NMS NMS
name: "NMS"
op: "NMS"
input: "concat_box_loc"
input: "concat_priorbox"
input: "concat_box_conf"
New tf model
Loading ssd_inception_v2_coco_2018_01_28_frozen_inference_graph.pb
Using output node NMS
Converting to UFF graph
Warning: No conversion function registered for layer: NMS yet.
Converting as custom op NMS NMS
name: "NMS"
op: "NMS"
input: "Input"
input: "Squeeze"
input: "concat_priorbox"
input: "concat_box_conf"
So when I try to load the converted UFF file, I get an assertion failure complaining about the number of inputs:
cp conversion/ssd_inception_v2_coco_2018_01_28_frozen_inference_graph.pb.uff data/ssd/sample_ssd.uff && ./bin/sample_uff_ssd
data/ssd/sample_ssd.uff
Begin parsing model...
End parsing model...
Begin building engine...
sample_uff_ssd: NvPluginSSD.cu:713: virtual nvinfer1::Dims nvinfer1::plugin::DetectionOutput::getOutputDimensions(int, const nvinfer1::Dims*, int): Assertion `nbInputDims == 3' failed.
Aborted (core dumped)
This is a link of the nodes of the newer model https://gist.github.com/NikolasMarkou/48553938699c8e9b8d903cf0e46870ac
List of nodes is generated with the following snippet
import argparse
import tensorflow as tf
def load_graph(frozen_graph_filename):
    """Load a frozen inference graph (.pb) from disk and return a tf.Graph."""
    # Read the serialized protobuf and parse it into a GraphDef message.
    with tf.gfile.GFile(frozen_graph_filename, "rb") as fh:
        gdef = tf.GraphDef()
        gdef.ParseFromString(fh.read())
    # Import the GraphDef into a brand-new Graph. No explicit name prefix
    # is passed here, so TF's default import prefix applies.
    with tf.Graph().as_default() as imported:
        tf.import_graph_def(gdef)
        return imported
if __name__ == '__main__':
    # Command-line entry point: print the index and name of every node
    # in a frozen TF inference graph.
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", default="frozen_inference_graph.pb", type=str,
                        help="Frozen model file to import")
    args = parser.parse_args()

    # We use our "load_graph" function
    graph = load_graph(args.model)
    # enumerate() replaces the original hand-rolled counter
    # (`counter = 0;` with a stray semicolon and manual increment).
    for index, node in enumerate(graph.as_graph_def().node):
        print('%d | %s' % (index, node.name))