Hello AastaLL,
So I had to change the config file because it didn't include the `preprocess` function that is needed by convert_to_uff.py.
So the current files are
convert_to_uff.py:
#!/usr/bin/python
"""
convert_to_uff.py
Main script for doing uff conversions from
different frameworks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
import uff
import os
def _replace_ext(path, ext):
return os.path.splitext(path)[0] + ext
def process_cmdline_args():
    """Parse the command line for the TensorFlow -> UFF conversion script.

    Returns a ``(namespace, extras)`` pair: the parsed arguments (with
    ``output`` rewritten to end in ".uff") and any unrecognized arguments
    left over by ``parse_known_args``.
    """
    cli = argparse.ArgumentParser(description="""Converts TensorFlow models to Unified Framework Format (UFF).""")
    cli.add_argument(
        "input_file",
        help="""path to input model (protobuf file of frozen GraphDef)""")
    cli.add_argument(
        '-l', '--list-nodes', action='store_true',
        help="""show list of nodes contained in input file""")
    cli.add_argument(
        '-t', '--text', action='store_true',
        help="""write a text version of the output in addition to the
binary""")
    cli.add_argument(
        '--write_preprocessed', action='store_true',
        help="""write the preprocessed protobuf in addition to the
binary""")
    cli.add_argument(
        '-q', '--quiet', action='store_true',
        help="""disable log messages""")
    cli.add_argument(
        '-d', '--debug', action='store_true',
        help="""Enables debug mode to provide helpful debugging output""")
    cli.add_argument(
        "-o", "--output",
        help="""name of output uff file""")
    cli.add_argument(
        "-O", "--output-node", default=[], action='append',
        help="""name of output nodes of the model""")
    cli.add_argument(
        '-I', '--input-node', default=[], action='append',
        help="""name of a node to replace with an input to the model.
Must be specified as: "name,new_name,dtype,dim1,dim2,..."
""")
    cli.add_argument(
        "-p", "--preprocessor",
        help="""the preprocessing file to run before handling the graph. This file must define a `preprocess` function that accepts a GraphSurgeon DynamicGraph as it's input. All transformations should happen in place on the graph, as return values are discarded""")
    # parse_known_args so unrecognized flags are returned rather than fatal.
    args, extras = cli.parse_known_args()
    # The output path defaults to the input file's name with a .uff extension;
    # an explicit -o value also gets its extension normalized to .uff.
    base_path = args.output if args.output else args.input_file
    args.output = _replace_ext(base_path, ".uff")
    return args, extras
def main():
    """Entry point: parse CLI options and run the TF-frozen-graph -> UFF conversion."""
    args, _extras = process_cmdline_args()
    if not args.quiet:
        print("Loading", args.input_file)
    # Collect all converter options in one place, then hand off to uff.
    conversion_kwargs = dict(
        output_nodes=args.output_node,
        preprocessor=args.preprocessor,
        input_node=args.input_node,
        quiet=args.quiet,
        text=args.text,
        list_nodes=args.list_nodes,
        output_filename=args.output,
        write_preprocessed=args.write_preprocessed,
        debug_mode=args.debug,
    )
    uff.from_tensorflow_frozen_model(args.input_file, **conversion_kwargs)


if __name__ == '__main__':
    main()
The modified config file :
import graphsurgeon as gs
import tensorflow as tf
# Replacement for the model's image input: a 1x3x300x300 (NCHW) float
# placeholder.  dtype is given explicitly because, per the traceback quoted
# later in this post, omitting it makes the UFF converter fail with
# TypeError: data type "invalid" not understood.
Input = gs.create_plugin_node(
name="Input",
op="Placeholder",
dtype=tf.float32,
shape=[1, 3, 300, 300]
)
# Anchor-box generator plugin replacing the MultipleGridAnchorGenerator
# namespace.  NOTE(review): these values look like the stock SSD Inception v2
# settings from NVIDIA's sampleUffSSD config -- confirm they match the
# retrained model's anchor configuration.
PriorBox = gs.create_plugin_node(
name="GridAnchor",
op="GridAnchor_TRT",
minSize=0.2,
maxSize=0.95,
aspectRatios=[1.0, 2.0, 0.5, 3.0, 0.33],
variance=[0.1,0.1,0.2,0.2],
featureMapShapes=[19, 10, 5, 3, 2, 1],
numLayers=6
)
# Non-maximum-suppression plugin replacing the TF "Postprocessor" namespace.
# numClasses=91 presumably corresponds to the COCO label set -- verify
# against the retrained model.  inputOrder describes the ordering of the
# plugin's (loc, conf, priorbox) inputs; a mismatch here is a common cause
# of UFF parse failures, so confirm it against the converted graph.
NMS = gs.create_plugin_node(
name="NMS",
op="NMS_TRT",
shareLocation=1,
varianceEncodedInTarget=0,
backgroundLabelId=0,
confidenceThreshold=1e-8,
nmsThreshold=0.6,
topK=100,
keepTopK=100,
numClasses=91,
inputOrder=[0, 2, 1],
confSigmoid=1,
isNormalized=1,
scoreConverter="SIGMOID"
)
# Concatenation of the per-layer prior-box tensors along axis 2, kept as a
# plain TF ConcatV2 op rather than a TensorRT plugin.
concat_priorbox = gs.create_plugin_node(
"concat_priorbox",
op="ConcatV2",
dtype=tf.float32,
axis=2
)
# Flatten-and-concatenate plugins for the box-location and class-confidence
# tensors.  FIX: the original nodes passed no `axis`, which is exactly what
# the runtime assertion quoted later in this post complains about
# (`mConcatAxisID == 1 || mConcatAxisID == 2 || mConcatAxisID == 3` in the
# FlattenConcat plugin).  NVIDIA's reference sampleUffSSD config passes
# axis=1 (the channel axis of the NCHW tensors) and ignoreBatch=0 to
# FlattenConcat_TRT, so we do the same here.
concat_box_loc = gs.create_plugin_node(
    "concat_box_loc",
    op="FlattenConcat_TRT",
    dtype=tf.float32,
    axis=1,
    ignoreBatch=0
)
concat_box_conf = gs.create_plugin_node(
    "concat_box_conf",
    op="FlattenConcat_TRT",
    dtype=tf.float32,
    axis=1,
    ignoreBatch=0
)
# Mapping from TensorFlow namespaces / node names to the plugin nodes defined
# above; consumed by dynamic_graph.collapse_namespaces() in preprocess().
# Several TF input-side namespaces ("Preprocessor", "ToFloat", "image_tensor")
# all collapse into the single Input placeholder.
namespace_plugin_map = {
"MultipleGridAnchorGenerator": PriorBox,
"Postprocessor": NMS,
"Preprocessor": Input,
"ToFloat": Input,
"image_tensor": Input,
"MultipleGridAnchorGenerator/Concatenate": concat_priorbox,
"concat": concat_box_loc,
"concat_1": concat_box_conf
}
def preprocess(dynamic_graph):
    """Graph-surgery hook run by the UFF converter before conversion.

    Mutates *dynamic_graph* in place; the converter discards any return value.
    """
    # Strip Assert ops, together with subgraphs that feed only them.
    assert_nodes = dynamic_graph.find_nodes_by_op("Assert")
    dynamic_graph.remove(assert_nodes, remove_exclusive_dependencies=True)
    # Bypass Identity ops by wiring their inputs straight to their consumers.
    identity_nodes = dynamic_graph.find_nodes_by_op("Identity")
    dynamic_graph.forward_inputs(identity_nodes)
    print(" Operation done ")
    # Collapse TF namespaces onto the TensorRT plugin nodes defined above.
    dynamic_graph.collapse_namespaces(namespace_plugin_map)
    # Drop the original graph outputs (NMS becomes the sole output), but keep
    # their upstream dependencies.
    dynamic_graph.remove(dynamic_graph.graph_outputs, remove_exclusive_dependencies=False)
Then i run the command to get the UFF file :
python3 convert_to_uff.py --input-file frozen_inference_graph.pb -O NMS -p config.py
After that I use the UFF file to create the engine, which now gives a different error:
nvidia@nvidia:~/Desktop/test/build$ ./testssd
Using pipeline:
nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)60/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)1280, height=(int)720, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink
GST_ARGUS: Creating output stream
CONSUMER: Waiting until producer is connected…
GST_ARGUS: Available Sensor modes :
GST_ARGUS: 2592 x 1944 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 16.000000; Exposure Range min 34000, max 550385000;
GST_ARGUS: 2592 x 1458 FR = 29.999999 fps Duration = 33333334 ; Analog Gain range min 1.000000, max 16.000000; Exposure Range min 34000, max 550385000;
GST_ARGUS: 1280 x 720 FR = 120.000005 fps Duration = 8333333 ; Analog Gain range min 1.000000, max 16.000000; Exposure Range min 22000, max 358733000;
GST_ARGUS: Running with following settings:
Camera index = 0
Camera mode = 2
Output Stream W = 1280 H = 720
seconds to Run = 0
Frame Rate = 120.000005
GST_ARGUS: PowerService: requested_clock_Hz=24192000
GST_ARGUS: Setup Complete, Starting captures for 0 seconds
GST_ARGUS: Starting repeat capture requests.
CONSUMER: Producer has connected; continuing.
Hit ESC to exit
Hit ESC to exit
No saved model found , making a fresh engine
…/…/…/Desktop/test/inception.uff
Begin parsing model…
testssd: /home/nvidia/Desktop/test/testssd.cpp:254: FlattenConcat::FlattenConcat(int, bool): Assertion `mConcatAxisID == 1 || mConcatAxisID == 2 || mConcatAxisID == 3’ failed.
Aborted (core dumped)
Also, if you look at the config file, I have added "dtype=tf.float32" for the Input and concat fields; this is because if I don't add them, I get the following error:
nvidia@nvidia:~/Desktop/ssd_inception_output$ python3 convert_to_uff.py --input-file frozen_inference_graph.pb -O NMS -p config_ori.py
Loading frozen_inference_graph.pb
WARNING:tensorflow:From /usr/lib/python3.6/dist-packages/uff/converters/tensorflow/conversion_helpers.py:185: FastGFile.init (from tensorflow.python.platform.gfile) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.gfile.GFile.
Operation done
UFF Version 0.5.5
=== Automatically deduced input nodes ===
[name: “Input”
op: “Placeholder”
attr {
key: “shape”
value {
shape {
dim {
size: 1
}
dim {
size: 3
}
dim {
size: 300
}
dim {
size: 300
}
}
}
}
]
Using output node NMS
Converting to UFF graph
Warning: No conversion function registered for layer: NMS_TRT yet.
Converting NMS as custom op: NMS_TRT
Traceback (most recent call last):
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py”, line 102, in convert_tf2numpy_dtype
return dtype.as_numpy_dtype
AttributeError: ‘int’ object has no attribute ‘as_numpy_dtype’
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File “convert_to_uff.py”, line 93, in
main()
File “convert_to_uff.py”, line 89, in main
debug_mode=args.debug
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/conversion_helpers.py”, line 187, in from_tensorflow_frozen_model
return from_tensorflow(graphdef, output_nodes, preprocessor, **kwargs)
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/conversion_helpers.py”, line 157, in from_tensorflow
debug_mode=debug_mode)
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py”, line 94, in convert_tf2uff_graph
uff_graph, input_replacements, debug_mode=debug_mode)
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py”, line 79, in convert_tf2uff_node
op, name, tf_node, inputs, uff_graph, tf_nodes=tf_nodes, debug_mode=debug_mode)
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py”, line 47, in convert_layer
return cls.registry_[op](name, tf_node, inputs, uff_graph, **kwargs)
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter_functions.py”, line 19, in convert_placeholder
dtype = tf2uff.convert_tf2numpy_dtype(tf_node.attr[‘dtype’].type)
File “/usr/lib/python3.6/dist-packages/uff/converters/tensorflow/converter.py”, line 113, in convert_tf2numpy_dtype
return np.dtype(dt[dtype])
TypeError: data type “invalid” not understood
I think my retrained model is exactly the same as the pre-trained model except the fact that after I convert the pre-trained model, it shows " No. nodes: 563".
And when I convert my re-trained model it shows " No. nodes: 781"
I don't know whether the failure to parse the UFF engine has to do with the number of nodes, because the error it gives is about the axis.
Another side issue: whenever I run my program with the GStreamer pipeline, it runs only once, and to run the program again I need to restart my Jetson, because after the first run it gives the following error:
nvidia@nvidia:~/Desktop/test/build$ ./testssd
Using pipeline:
nvarguscamerasrc ! video/x-raw(memory:NVMM), width=(int)1280, height=(int)720, format=(string)NV12, framerate=(fraction)60/1 ! nvvidconv flip-method=0 ! video/x-raw, width=(int)1280, height=(int)720, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink
Error generated. /dvs/git/dirty/git-master_linux/multimedia/nvgstreamer/gst-nvarguscamera/gstnvarguscamerasrc.cpp, execute:532 Failed to create CaptureSession