diff --git a/converters/pb2nnet.py b/converters/pb2nnet.py
index 47510ee..92b780b 100644
--- a/converters/pb2nnet.py
+++ b/converters/pb2nnet.py
@@ -21,7 +21,7 @@ def processGraph(op,input_op, foundInputFlag, weights, biases):
 
         (bool): Updated foundInputFlag
     '''
-    if op.node_def.op=='Const':
+    if op.node_def.op=='Const' and op.outputs[0].consumers()[0].type == 'Identity':
         # If constant, extract values and add to weight or bias list depending on shape
         param = tensor_util.MakeNdarray(op.node_def.attr['value'].tensor)
         if len(param.shape)>1:
@@ -40,9 +40,8 @@ def processGraph(op,input_op, foundInputFlag, weights, biases):
     else:
         foundInputFlag = True
     return foundInputFlag
 
-
-def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nnetFile="", inputName="", outputName="", savedModel=False, savedModelTags=[]):
+def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nnetFile="", inputName="", outputName="", savedModel=False, savedModelTags=[], order="xW"):
     '''
     Write a .nnet file from a frozen Tensorflow protobuf or SavedModel
 
@@ -63,6 +62,11 @@ def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nn
     if nnetFile=="":
         nnetFile = pbFile[:-2] + 'nnet'
 
+    sess = pb2sess(pbFile, inputName, outputName, savedModel, savedModelTags)
+
+    FFTF2nnet(sess, inputMins, inputMaxes, means, ranges, order, nnetFile, inputName, outputName)
+
+def pb2sess(pbFile, inputName="", outputName="", savedModel=False, savedModelTags=[]):
     if savedModel:
         ### Read SavedModel ###
         sess = tf.Session()
@@ -84,7 +88,18 @@ def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nn
             tf.import_graph_def(graph_def, name="")
         sess = tf.Session(graph=graph)
         ### END reading protobuf ###
+    return sess
+def FFTF2nnet(sess, inputMins, inputMaxes, means, ranges, order, nnetFile="", inputName="", outputName=""):
+    weights, biases, inputSize = FFTF2W(sess, inputName, outputName)
+    # Default values for input bounds and normalization constants
+    if inputMins is None: inputMins = inputSize*[np.finfo(np.float32).min]
+    if inputMaxes is None: inputMaxes = inputSize*[np.finfo(np.float32).max]
+    if means is None: means = (inputSize+1)*[0.0]
+    if ranges is None: ranges = (inputSize+1)*[1.0]
+    writeNNet(weights, biases, inputMins, inputMaxes, means, ranges, order, nnetFile)
+
+def FFTF2W(sess, inputName="", outputName=""):
 
     ### Find operations corresponding to input and output ###
     if inputName:
         inputOp = sess.graph.get_operation_by_name(inputName)
@@ -111,18 +126,32 @@ def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nn
     assert(len(inputShape)==2)
     inputSize = inputShape[1]
     if foundInputFlag:
-
-        # Default values for input bounds and normalization constants
-        if inputMins is None: inputMins = inputSize*[np.finfo(np.float32).min]
-        if inputMaxes is None: inputMaxes = inputSize*[np.finfo(np.float32).max]
-        if means is None: means = (inputSize+1)*[0.0]
-        if ranges is None: ranges = (inputSize+1)*[1.0]
-
-        # Write NNet file
-        writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,nnetFile)
+        return weights, biases, inputSize
     else:
         print("Could not find the given input in graph: %s"%inputOp.name)
+
+def pb2W(pbFile, inputName="", outputName="", savedModel=False, savedModelTags=[]):
+    sess = pb2sess(pbFile, inputName, outputName, savedModel, savedModelTags)
+    weights, biases, inputSize = FFTF2W(sess, inputName, outputName)
+    return weights, biases
+
+def test():
+    ## Script showing how to run pb2nnet
+    # Min and max values used to bound the inputs
+    inputMins = [0.0,-3.141593,-3.141593,100.0,0.0]
+    inputMaxes = [60760.0,3.141593,3.141593,1200.0,1200.0]
+
+    # Mean and range values for normalizing the inputs and outputs. All outputs are normalized with the same value
+    means = [1.9791091e+04,0.0,0.0,650.0,600.0,7.5188840201005975]
+    ranges = [60261.0,6.28318530718,6.28318530718,1100.0,1200.0,373.94992]
+
+    # Tensorflow pb file to convert to .nnet file
+    pbFile = 'NNet/nnet/TestNetwork2.pb'
+
+    # Convert the file
+    pb2nnet(pbFile, inputMins, inputMaxes, means, ranges, order="Wx", nnetFile="NNet/nnet/TestNetwork2_converted.nnet")
+
 
 if __name__ == '__main__':
     # Read user inputs and run pb2nnet function
     # If non-default values of input bounds and normalization constants are needed, this function should be run from a script
diff --git a/utils/writeNNet.py b/utils/writeNNet.py
index fbf1412..069bf1a 100644
--- a/utils/writeNNet.py
+++ b/utils/writeNNet.py
@@ -1,6 +1,6 @@
 import numpy as np
 
-def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):
+def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,order,fileName):
     '''
     Write network data to the .nnet file format
 
@@ -40,7 +40,12 @@ def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):
 
         #Extract the necessary information and write the header information
         numLayers = len(weights)
-        inputSize = weights[0].shape[1]
+        if order == 'xW':
+            inputSize = weights[0].shape[0]
+        elif order == 'Wx':
+            inputSize = weights[0].shape[1]
+        else:
+            raise NotImplementedError
         outputSize = len(biases[-1])
         maxLayerSize = inputSize
 
@@ -70,11 +75,24 @@ def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):
         # The pattern is repeated by next writing the weights from the first hidden layer to the second hidden layer,
         # followed by the biases of the second hidden layer.
         ##################
-        for w,b in zip(weights,biases):
-            for i in range(w.shape[0]):
+        if order == 'xW':
+            for w,b in zip(weights,biases):
                 for j in range(w.shape[1]):
-                    f2.write("%.5e," % w[i][j]) #Five digits written. More can be used, but that requires more more space.
-                f2.write("\n")
-
-            for i in range(len(b)):
-                f2.write("%.5e,\n" % b[i]) #Five digits written. More can be used, but that requires more more space.
+                    for i in range(w.shape[0]):
+                        f2.write("%.5e," % w[i][j]) #Five digits written. More can be used, but that requires more space.
+                    f2.write("\n")
+
+                for i in range(len(b)):
+                    f2.write("%.5e,\n" % b[i]) #Five digits written. More can be used, but that requires more space.
+        elif order == 'Wx':
+            for w,b in zip(weights,biases):
+                for i in range(w.shape[0]):
+                    for j in range(w.shape[1]):
+                        f2.write("%.5e," % w[i][j])
+                    f2.write("\n")
+                for i in range(len(b)):
+                    f2.write("%.5e,\n" % b[i])
+        else:
+            raise NotImplementedError
+
+
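Usage note (not part of the patch): the new order argument tells writeNNet how each weight matrix is laid out. With order='xW' the matrices follow TensorFlow's y = x*W + b convention, shape (inputs, outputs); writeNNet then reads inputSize from shape[0] and writes each matrix transposed, as the .nnet format expects. With order='Wx' the matrices are already stored (outputs, inputs) and are written as-is. The sketch below calls the updated writeNNet directly on a toy network; the import path assumes the NNet repository layout, and the shapes and output file names are illustrative only.

# Sketch only (not part of the patch): illustrates the new order argument.
# The import path assumes the NNet repository layout; adjust it to wherever
# writeNNet.py lives. Toy shapes and file names are made up for illustration.
import numpy as np
from NNet.utils.writeNNet import writeNNet

# Toy 2-input -> 3-hidden -> 1-output network.
# TensorFlow dense layers compute y = x*W + b, so each W is stored (inputs, outputs).
weights_xW = [np.random.rand(2, 3), np.random.rand(3, 1)]
biases     = [np.random.rand(3), np.random.rand(1)]

inputMins  = [0.0, 0.0]
inputMaxes = [1.0, 1.0]
means      = [0.0, 0.0, 0.0]   # one entry per input plus one shared entry for the outputs
ranges     = [1.0, 1.0, 1.0]

# order='xW': matrices are (inputs, outputs); inputSize comes from shape[0]
# and each matrix is written transposed into the .nnet file.
writeNNet(weights_xW, biases, inputMins, inputMaxes, means, ranges, 'xW', 'toy_xW.nnet')

# order='Wx': matrices are already (outputs, inputs); inputSize comes from
# shape[1] and the matrices are written as-is.
weights_Wx = [w.T for w in weights_xW]
writeNNet(weights_Wx, biases, inputMins, inputMaxes, means, ranges, 'Wx', 'toy_Wx.nnet')

Since the two orders describe the same network, the two calls should produce identical weight and bias blocks in the output files; only the layout of the in-memory matrices differs.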