Some changes from 100 years ago #14

Open · wants to merge 3 commits into base: master
53 changes: 41 additions & 12 deletions converters/pb2nnet.py
@@ -21,7 +21,7 @@ def processGraph(op,input_op, foundInputFlag, weights, biases):
         (bool): Updated foundInputFlag
     '''

-    if op.node_def.op=='Const':
+    if op.node_def.op=='Const' and op.outputs[0].consumers()[0].type == 'Identity' :
         # If constant, extract values and add to weight or bias list depending on shape
         param = tensor_util.MakeNdarray(op.node_def.attr['value'].tensor)
         if len(param.shape)>1:
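For context on the new guard: in a frozen TF1 graph, the constants that hold layer parameters are typically read through an Identity op, so checking the first consumer's type filters out unrelated constants. A minimal sketch of that inspection, assuming a TF1-style session such as the one pb2sess returns (the helper name and graph contents are illustrative, not part of the PR):

    def list_parameter_constants(sess):
        # Report each Const op and whether its first consumer is an Identity op,
        # which is the pattern the modified processGraph now requires.
        for op in sess.graph.get_operations():
            if op.node_def.op == 'Const':
                consumers = op.outputs[0].consumers()
                is_param = bool(consumers) and consumers[0].type == 'Identity'
                print("%-40s -> %s" % (op.name, 'Identity (kept)' if is_param else 'other (skipped)'))

Note the sketch checks that the constant has at least one consumer before indexing, since a constant with no consumers would otherwise raise an IndexError.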
@@ -40,9 +40,8 @@ def processGraph(op,input_op, foundInputFlag, weights, biases):
     else:
         foundInputFlag = True
     return foundInputFlag

-
-def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nnetFile="", inputName="", outputName="", savedModel=False, savedModelTags=[]):
+def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nnetFile="", inputName="", outputName="", savedModel=False, savedModelTags=[], order="xW"):
     '''
     Write a .nnet file from a frozen Tensorflow protobuf or SavedModel

@@ -63,6 +62,11 @@ def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nn
     if nnetFile=="":
         nnetFile = pbFile[:-2] + 'nnet'

+    sess = pb2sess(pbFile, inputName=inputName, outputName=outputName, savedModel=savedModel, savedModelTags=savedModelTags)
+
+    FFTF2nnet(sess, inputMins, inputMaxes, means, ranges, order, nnetFile, inputName, outputName)
+
+def pb2sess(pbFile,inputName="", outputName="", savedModel=False, savedModelTags=[]):
     if savedModel:
         ### Read SavedModel ###
         sess = tf.Session()
@@ -84,7 +88,18 @@ def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nn
             tf.import_graph_def(graph_def, name="")
         sess = tf.Session(graph=graph)
         ### END reading protobuf ###
+    return sess
+
+def FFTF2nnet(sess, inputMins, inputMaxes, means, ranges, order, nnetFile="", inputName="", outputName=""):
+    weights, biases, inputSize = FFTF2W(sess, inputName, outputName)
+    # Default values for input bounds and normalization constants
+    if inputMins is None: inputMins = inputSize*[np.finfo(np.float32).min]
+    if inputMaxes is None: inputMaxes = inputSize*[np.finfo(np.float32).max]
+    if means is None: means = (inputSize+1)*[0.0]
+    if ranges is None: ranges = (inputSize+1)*[1.0]
+    writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,order, nnetFile)
+
+def FFTF2W(sess, inputName="", outputName=""):
     ### Find operations corresponding to input and output ###
     if inputName:
         inputOp = sess.graph.get_operation_by_name(inputName)
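Taken together, these hunks split the old monolithic pb2nnet into smaller steps: pb2sess loads a session, FFTF2W walks the graph for weights and biases, and FFTF2nnet fills default bounds and calls writeNNet. A rough sketch of using the pieces directly, assuming they are importable as converters.pb2nnet and with 'my_network.pb' as a placeholder frozen graph:

    from converters.pb2nnet import pb2sess, FFTF2W, FFTF2nnet

    pbFile = 'my_network.pb'                     # placeholder path
    sess = pb2sess(pbFile)                       # frozen protobuf -> tf.Session
    weights, biases, inputSize = FFTF2W(sess)    # session -> weight/bias arrays (FFTF2nnet also does this internally)

    # Passing None for the bounds, means, and ranges lets FFTF2nnet substitute its defaults;
    # order="Wx" matches the convention used by the PR's own test() below.
    FFTF2nnet(sess, None, None, None, None, order="Wx", nnetFile='my_network.nnet')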
@@ -111,18 +126,32 @@ def pb2nnet(pbFile, inputMins=None, inputMaxes=None, means=None, ranges=None, nn
     assert(len(inputShape)==2)
     inputSize = inputShape[1]
     if foundInputFlag:
-
-        # Default values for input bounds and normalization constants
-        if inputMins is None: inputMins = inputSize*[np.finfo(np.float32).min]
-        if inputMaxes is None: inputMaxes = inputSize*[np.finfo(np.float32).max]
-        if means is None: means = (inputSize+1)*[0.0]
-        if ranges is None: ranges = (inputSize+1)*[1.0]
-
-        # Write NNet file
-        writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,nnetFile)
+        return weights, biases, inputSize
     else:
         print("Could not find the given input in graph: %s"%inputOp.name)


+def pb2W(pbFile, inputName="", outputName="", savedModel=False, savedModelTags=[]):
+    sess = pb2sess(pbFile,inputName, outputName, savedModel, savedModelTags)
+    weights, biases, inputSize = FFTF2W(sess, inputName, outputName)
+    return weights, biases
+
+def test():
+    ## Script showing how to run pb2nnet
+    # Min and max values used to bound the inputs
+    inputMins = [0.0,-3.141593,-3.141593,100.0,0.0]
+    inputMaxes = [60760.0,3.141593,3.141593,1200.0,1200.0]
+
+    # Mean and range values for normalizing the inputs and outputs. All outputs are normalized with the same value
+    means = [1.9791091e+04,0.0,0.0,650.0,600.0,7.5188840201005975]
+    ranges = [60261.0,6.28318530718,6.28318530718,1100.0,1200.0,373.94992]
+
+    # Tensorflow pb file to convert to .nnet file
+    pbFile = 'NNet/nnet/TestNetwork2.pb'
+
+    # Convert the file
+    pb2nnet(pbFile, inputMins, inputMaxes, means, ranges, order="Wx", nnetFile="NNet/nnet/TestNetwork2_converted.nnet")
+
 if __name__ == '__main__':
     # Read user inputs and run pb2nnet function
     # If non-default values of input bounds and normalization constants are needed, this function should be run from a script
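The added pb2W helper gives a second entry point that stops at the weight/bias arrays instead of writing a file. A usage sketch along the lines of test(), with the file name as a placeholder:

    from converters.pb2nnet import pb2W

    # Extract the parameters only; no .nnet file is written
    weights, biases = pb2W('my_network.pb')
    print(len(weights), "weight matrices,", len(biases), "bias vectors")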
37 changes: 28 additions & 9 deletions utils/writeNNet.py
@@ -1,6 +1,6 @@
 import numpy as np

-def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):
+def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,order,fileName):
     '''
     Write network data to the .nnet file format

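Note that the new order parameter is inserted before fileName, so an existing caller that passed the file name positionally as the seventh argument would now be handing it to order. Calling with keywords sidesteps the ambiguity; the argument values below are placeholders:

    writeNNet(weights, biases, inputMins, inputMaxes, means, ranges,
              order="Wx", fileName="converted.nnet")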
@@ -40,7 +40,12 @@ def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):

     #Extract the necessary information and write the header information
     numLayers = len(weights)
-    inputSize = weights[0].shape[1]
+    if order == 'xW':
+        inputSize = weights[0].shape[0]
+    elif order == 'Wx':
+        inputSize = weights[0].shape[1]
+    else:
+        raise NotImplementedError
     outputSize = len(biases[-1])
     maxLayerSize = inputSize

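The two branches correspond to the two common conventions for a fully connected layer: under 'xW' the layer is read as y = x W + b, so the weight matrix is (inputSize, outputSize) and inputSize sits in shape[0]; under 'Wx' it is read as y = W x + b, so the matrix is (outputSize, inputSize) and inputSize sits in shape[1]. A small check of that reading with made-up shapes (3 inputs, 2 outputs):

    import numpy as np

    W_xW = np.zeros((3, 2))   # order='xW': rows indexed by input, columns by output
    W_Wx = np.zeros((2, 3))   # order='Wx': rows indexed by output, columns by input

    assert W_xW.shape[0] == 3   # inputSize recovered as in the 'xW' branch
    assert W_Wx.shape[1] == 3   # inputSize recovered as in the 'Wx' branch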
@@ -58,6 +63,7 @@ def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):
         f2.write("0,\n") #Unused Flag

         # Write Min, Max, Mean, and Range of each of the inputs and outputs for normalization
+        # import pdb; pdb.set_trace()  # leftover debugging breakpoint (disabled)
         f2.write(','.join(str(inputMins[i]) for i in range(inputSize)) + ',\n') #Minimum Input Values
         f2.write(','.join(str(inputMaxes[i]) for i in range(inputSize)) + ',\n') #Maximum Input Values
         f2.write(','.join(str(means[i]) for i in range(inputSize+1)) + ',\n') #Means for normalizations
@@ -70,11 +76,24 @@ def writeNNet(weights,biases,inputMins,inputMaxes,means,ranges,fileName):
         # The pattern is repeated by next writing the weights from the first hidden layer to the second hidden layer,
         # followed by the biases of the second hidden layer.
         ##################
-        for w,b in zip(weights,biases):
-            for i in range(w.shape[0]):
-                for j in range(w.shape[1]):
-                    f2.write("%.5e," % w[i][j]) #Five digits written. More can be used, but that requires more more space.
-                f2.write("\n")
-
-            for i in range(len(b)):
-                f2.write("%.5e,\n" % b[i]) #Five digits written. More can be used, but that requires more more space.
+        if order == 'xW':
+            for w,b in zip(weights,biases):
+                for j in range(w.shape[1]):
+                    for i in range(w.shape[0]):
+                        f2.write("%.5e," % w[i][j]) #Five digits written. More can be used, but that requires more space.
+                    f2.write("\n")
+
+                for i in range(len(b)):
+                    f2.write("%.5e,\n" % b[i]) #Five digits written. More can be used, but that requires more space.
+        elif order == 'Wx':
+            for w,b in zip(weights,biases):
+                for i in range(w.shape[0]):
+                    for j in range(w.shape[1]):
+                        f2.write("%.5e," % w[i][j])
+                    f2.write("\n")
+                for i in range(len(b)):
+                    f2.write("%.5e,\n" % b[i])
+        else:
+            raise NotImplementedError
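The 'xW' branch walks each weight matrix column-by-column while the 'Wx' branch walks row-by-row, so the two branches should emit the same lines whenever the 'xW' matrices are the transposes of the 'Wx' ones. A quick sanity sketch of that claim, not part of the PR and with arbitrary matrix values:

    import numpy as np

    W = np.arange(6.0).reshape(3, 2)   # (inputSize, outputSize), i.e. the 'xW' layout

    rows_xW = [[W[i][j] for i in range(W.shape[0])] for j in range(W.shape[1])]        # inner loop over i, as in the 'xW' branch
    rows_Wx = [[W.T[i][j] for j in range(W.T.shape[1])] for i in range(W.T.shape[0])]  # inner loop over j, as in the 'Wx' branch

    assert np.array_equal(np.array(rows_xW), np.array(rows_Wx))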