Skip to content

Commit

Permalink
Update all neuron cores to save space
Browse files Browse the repository at this point in the history
  • Loading branch information
Jinqi Huang committed Feb 22, 2022
1 parent 81375ca commit aa2fdac
Show file tree
Hide file tree
Showing 5 changed files with 322 additions and 368 deletions.
307 changes: 141 additions & 166 deletions NeuroCores/core_Izhikevich.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,13 @@ def normalise_weight(net, w):
else:
return val

def de_normalise_resistance(net, w):
    """Map a normalised network weight back to a device resistance.

    Inverse of the weight normalisation: the weight is rescaled into the
    device conductance window and the result is converted to resistance.
    """
    # The conductance window is the reciprocal of the resistance window,
    # so the resistance floor sets the conductance ceiling and vice versa.
    g_ceil = 1.0 / net.params['PFLOOR']
    g_floor = 1.0 / net.params['PCEIL']
    conductance = g_floor + (g_ceil - g_floor) * w / net.params['WEIGHTSCALE']
    return 1.0 / conductance

def init(net):
# make sure all counters are reset
Expand All @@ -29,29 +36,36 @@ def init(net):
for postidx in range(len(net.ConnMat)):
# For every presynaptic input the neuron receives.
for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
old_weight = net.state.weights[preidx, postidx - net.inputNum, 0]
new_weight = normalise_weight(net, old_weight)
net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight
w, b=net.ConnMat[preidx, postidx, 0:2]
net.state.weights[preidx, postidx - net.inputNum, 0] = 1.0/net.read(w, b)
# f = open("C:/Users/jh1d18/debug_log.txt", "a")
# f.write('device intial state: %f, w: %d, b: %d\n' % (net.read(w, b), w, b))
# f.close()
if net.params.get('NORMALISE', False):
old_weight = net.state.weights[preidx, postidx - net.inputNum, 0]
new_weight = normalise_weight(net, old_weight)
net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight

c = net.params.get('C', -0.065) # after-spike reset value of v
b = net.params.get('B', 0.2) # sensitivity of u to v
for postidx in range(len(net.ConnMat)):
net.state.NeurAccum[0][postidx] = c
net.state.NeurRecov[0][postidx] = b * net.state.NeurAccum[0][postidx]

def neurons(net, time):
def neurons(net, time, phase = 'training'):

rawin = net.rawin # Raw input
stimin = net.stimin[:, time] # Stimulus input for current timestep
rawinPseudo = net.rawinPseudo # latest fire history without wta
if phase == 'test':
stimin = net.stiminForTesting[:, time] # Stimulus input for current timestep
else:
stimin = net.stimin[:, time] # input stimuli at this time step

inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.NETSIZE - net.inputNum))) # mask matrix to extract input spikes. Size: NETSIZE
outputLabelMask = np.hstack((np.zeros(net.NETSIZE - net.outputNum), np.ones(net.outputNum))) # mask matrix to extract output labels. Size: NETSIZE

inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # split input stimulus and output labels
outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])

full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
net.log("**** FULL_STIM = ", full_stim)
outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin]

# Izhikevich neuron model: v' = 0.04v^2 + 5v + 140 - u + I
# u' = a(bv - u)
Expand All @@ -64,75 +78,125 @@ def neurons(net, time):
dt = net.params.get('TIMESTEP', 1e-3)
v_peak_boundary = net.params.get('VPEAK', 0.03)

if time > 0:
# if this isn't the first step copy the accumulators
# from the previous step onto the new one
net.state.NeurAccum[time] = net.state.NeurAccum[time-1]
net.state.NeurRecov[time] = net.state.NeurRecov[time-1]

for (idx, v) in enumerate(rawin):
if v != c:
net.state.NeurAccum[time][idx] = c
net.state.NeurRecov[time][idx] += d
rawinArray = np.array(rawin) # size: NETSIZE - inputNum
wantToFire = len(net.ConnMat)*[0]
full_stim = np.bitwise_or([int(x) for x in wantToFire], [int(x) for x in inputStim])
# Gather/define other pertinent data to function of neuron.
leakage = net.params.get('LEAKAGE', 1.0)

# For this example we'll make I&F neurons - if changing this file a back-up
if time > 0:
if phase == 'test':
# if this isn't the first step copy the accumulators
# from the previous step onto the new one
net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time-1] # size: NETSIZE - inputNum
net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time-1]
# reset the accumulators of neurons that have already fired
net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time] * np.where(rawinArray[net.inputNum : ] == 0, 1, 0) + np.where(rawinArray[net.inputNum : ] == 0, 0, c)
net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time] + d * np.where(rawinArray[net.inputNum : ] == 0, 1, 0)
else:
# if this isn't the first step copy the accumulators
# from the previous step onto the new one
net.state.NeurAccum[time] = net.state.NeurAccum[time-1] # size: NETSIZE - inputNum
net.state.NeurRecov[time] = net.state.NeurRecov[time-1]
net.log('membrane from last time step:', net.state.NeurAccum[time])
net.log('membrane recoery from last time step:', net.state.NeurRecov[time])
# reset the accumulators of neurons that have already fired
net.state.NeurAccum[time] = net.state.NeurAccum[time] * np.where(rawinArray[net.inputNum : ] == 0, 1, 0) + np.where(rawinArray[net.inputNum : ] == 0, 0, c)
net.state.NeurRecov[time] = net.state.NeurRecov[time] + d * np.where(rawinArray[net.inputNum : ] == 0, 1, 0)
net.log('membrane after reset:', net.state.NeurAccum[time])
net.log('membrane recoery after reset:', net.state.NeurRecov[time])


# For this example we'll make Izhkevich neurons - if changing this file a back-up
# is strongly recommended before proceeding.

# -FIX- implementing 'memory' between calls to this function.
# NeurAccum = len(net.ConnMat)*[0] #Define neuron accumulators.
# Neurons that unless otherwise dictated to by net or ext input will
# fire.
wantToFire = len(net.ConnMat)*[0]

# STAGE I: See what neurons do 'freely', i.e. without the constraints of
# WTA or generally other neurons' activities.
for postidx in range(len(net.state.NeurAccum[time])):

v = net.state.NeurAccum[time][postidx]
u = net.state.NeurRecov[time][postidx]#!!!!!
dv = v * v * 0.04 + v * 5 + 140 - u
du = a * (b * v - u)
for postidx in range(net.inputNum, net.NETSIZE):
if phase == 'test':
v = net.state.NeurAccumForTest[time][postidx]
u = net.state.NeurRecovForTest[time][postidx]
dv = v * v * 0.04 + v * 5 + 140 - u
du = a * (b * v - u)
#For every presynaptic input the neuron receives.
for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:

# Excitatory case
if net.ConnMat[preidx, postidx, 2] > 0:
# net.log("Excitatory at %d %d" % (preidx, postidx))
# Accumulator increases as per standard formula.
dv += full_stim[preidx] * net.state.weights[preidx, postidx - net.inputNum, time]

net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
(postidx, preidx, net.state.NeurAccum[time][postidx], \
full_stim[preidx], net.state.weights[preidx, postidx - net.inputNum, time]))

# Inhibitory case
elif net.ConnMat[preidx, postidx, 2] < 0:
# Accumulator decreases as per standard formula.
dv -= full_stim[preidx]*net.state.weights[preidx, postidx - net.inputNum, time]

net.state.NeurAccum[time][postidx] += dv * dt
net.state.NeurRecov[time][postidx] += du * dt #!!!!!!!!

# Have neurons declare 'interest to fire'.
for neuron in range(len(net.state.NeurAccum[time])):
if net.state.NeurAccum[time][neuron] > v_peak_boundary:
# Register 'interest to fire'.
wantToFire[neuron] = 1

for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
# if it's in the test phase
# Excitatory case
if net.ConnMat[preidx, postidx, 2] > 0:
# net.log("Excitatory at %d %d" % (preidx, postidx))
# Accumulator increases as per standard formula.
dv += full_stim[preidx] * net.weightsForTest[preidx, postidx - net.inputNum, time]

net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
(postidx, preidx, net.state.NeurAccumForTest[time][postidx], \
full_stim[preidx], net.weightsForTest[preidx, postidx - net.inputNum, time]))

# Inhibitory case
elif net.ConnMat[preidx, postidx, 2] < 0:
# Accumulator decreases as per standard formula.
dv -= full_stim[preidx]*net.weightsForTest[preidx, postidx - net.inputNum, time]

net.state.NeurAccumForTest[time][postidx] += dv * dt
net.state.NeurRecovForTest[time][postidx] += du * dt
if net.state.NeurAccumForTest[time][postidx - net.inputNum] > v_peak_boundary:
wantToFire[postidx] = 1

else:
v = net.state.NeurAccum[time][postidx]
u = net.state.NeurRecov[time][postidx]
dv = v * v * 0.04 + v * 5 + 140 - u
du = a * (b * v - u)
for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
# Excitatory case
if net.ConnMat[preidx, postidx, 2] > 0:
# net.log("Excitatory at %d %d" % (preidx, postidx))
# Accumulator increases as per standard formula.
dv += full_stim[preidx] * net.state.weights[preidx, postidx - net.inputNum, time]

net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
(postidx, preidx, net.state.NeurAccum[time][postidx], \
full_stim[preidx], net.state.weights[preidx, postidx - net.inputNum, time]))

# Inhibitory case
elif net.ConnMat[preidx, postidx, 2] < 0:
# Accumulator decreases as per standard formula.
dv -= full_stim[preidx]*net.state.weights[preidx, postidx - net.inputNum, time]

net.state.NeurAccum[time][postidx] += dv * dt
net.state.NeurRecov[time][postidx] += du * dt
if net.state.NeurAccum[time][postidx] > v_peak_boundary:
wantToFire[postidx] = 1

full_stim = np.bitwise_or([int(x) for x in wantToFire], [int(x) for x in inputStim])
net.state.firingCellsPseudo = wantToFire
# STAGE II: Implement constraints from net-level considerations.
# Example: WTA. No resitrictions from net level yet. All neurons that
# want to fire will fire.
net.state.firingCells = wantToFire

# Barrel shift history
net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
# Save last firing time for all cells that fired in this time step.
net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
time
if phase == 'test':
net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
# Save last firing time for all cells that fired in this time step.
net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
time
net.state.fireCellsForTest[time] = wantToFire
else:
net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
# Save last firing time for all cells that fired in this time step.
net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
time
net.state.fireCells[time] = wantToFire

# Load 'NN'.
net.state.fireCells[time] = full_stim
net.state.fireCells[time] = wantToFire
net.state.errorList = wantToFire - outputLabel

#############!!!!!!!!!!No valid learning rule yet!!!!!!!!!!!!!!#######################
Expand All @@ -144,6 +208,7 @@ def plast(net, time):

rawin = net.rawin # Raw input
stimin = net.stimin[:, time] # Stimulus input
rawinPseudo = net.rawinPseudo

full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in stimin])

Expand All @@ -159,10 +224,16 @@ def plast(net, time):
# delta = (y - y_hat)*noise
# gradient = delta * input
delta = (rawin[neuron] - outputLabel[neuron])
error[neuron] = delta * np.random.rand() * noiseScale
if abs(net.state.NeurAccum[time][neuron - net.inputNum] - net.params.get('FIRETH', 0.001)) > net.params.get('FIRETH', 0.001):
error[neuron] = 0
else:
error[neuron] = delta * 0.5 * net.state.NeurAccum[time][neuron - net.inputNum] / net.params.get('FIRETH', 0.001)
print("neuron %d has delta %f and error %f" % (neuron, delta, error[neuron]))
elif neuron < fullNum - net.outputNum and neuron >= net.inputNum: # hidden neurons
error[neuron] = directRandomTargetProj[neuron] * np.random.rand() * noiseScale
if abs(net.state.NeurAccum[time][neuron - net.inputNum] - net.params.get('FIRETH', 0.001)) > net.params.get('FIRETH', 0.001):
error[neuron] = 0
else:
error[neuron] = directRandomTargetProj[neuron] * 0.5 * net.state.NeurAccum[time][neuron - net.inputNum] / net.params.get('FIRETH', 0.001)
else: # input neuron
continue

Expand Down Expand Up @@ -221,112 +292,16 @@ def plast(net, time):
# For every valid connection between neurons, find out which the
# corresponding memristor is. Then, if the weight is still uninitialised
# take a reading and ensure that the weight has a proper value.
for preidx in range(len(rawin)):
for postidx in range(len(rawin)):
if net.ConnMat[preidx, postidx, 0] != 0:
w, b = net.ConnMat[preidx, postidx, 0:2]
if net.state.weights[preidx, postid - net.inputNumx, time] == 0.0:
net.state.weights[preidx, postidx - net.inputNum, time] = \
1.0/net.read(w, b, "NN")
# for preidx in range(len(rawin)):
# for postidx in range(len(rawin)):
# if net.ConnMat[preidx, postidx, 0] != 0:
# w, b = net.ConnMat[preidx, postidx, 0:2]
# if net.state.weights[preidx, postid - net.inputNumx, time] == 0.0:
# net.state.weights[preidx, postidx - net.inputNum, time] = \
# 1.0/net.read(w, b, "NN")

net.state.errorSteps_cnt = time

def neuronsForTest(net, time):
    """Run one timestep of Izhikevich neuron dynamics in the testing phase.

    Mirrors ``neurons`` but reads the test stimulus and updates the
    ``*ForTest`` copies of the network state, so testing never disturbs
    the training-phase accumulators.

    Parameters:
        net: network object carrying ConnMat, params, weightsForTest and
             the mutable ``state`` (NeurAccumForTest, NeurRecovForTest,
             fireHistForTest, fireCellsForTest, ...).
        time: integer index of the current timestep.
    """
    rawin = net.rawin  # fire flags (0/1 per neuron) from the previous step
    stimin = net.stiminForTesting[:, time]  # stimulus input for current timestep

    # Masks to split the stimulus vector into input spikes and output labels.
    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))

    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin])
    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])

    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
    net.log("**** FULL_STIM = ", full_stim)

    # Izhikevich neuron model: v' = 0.04v^2 + 5v + 140 - u + I
    #                          u' = a(bv - u)
    # if v > v_peak_boundary: v <- c, u <- u + d
    # v: membrane voltage; u: recovery variable
    a = net.params.get('A', 0.02)   # time constant of u
    b = net.params.get('B', 0.2)    # sensitivity of u to v
    c = net.params.get('C', -0.065) # after-spike reset value of v
    d = net.params.get('D', 0.2)    # increment of u after spike
    dt = net.params.get('TIMESTEP', 1e-3)
    v_peak_boundary = net.params.get('VPEAK', 0.03)

    if time > 0:
        # Not the first step: carry the accumulators over from the
        # previous step before integrating.
        net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time - 1]
        net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time - 1]

        # BUG FIX: apply the after-spike reset only to neurons that fired
        # on the previous step (rawin flag set). The original compared the
        # 0/1 fire flag against the reset potential c, which can never be
        # equal, so every neuron was reset on every step.
        for (idx, fired) in enumerate(rawin):
            if fired != 0:
                net.state.NeurAccumForTest[time][idx] = c
                net.state.NeurRecovForTest[time][idx] += d

    # Neurons that, unless otherwise dictated by net or external input,
    # will fire.
    wantToFire = len(net.ConnMat) * [0]

    # STAGE I: see what neurons do 'freely', i.e. without the constraints
    # of WTA or generally other neurons' activities.
    for postidx in range(len(net.state.NeurAccumForTest[time])):

        v = net.state.NeurAccumForTest[time][postidx]
        u = net.state.NeurRecovForTest[time][postidx]
        dv = v * v * 0.04 + v * 5 + 140 - u
        du = a * (b * v - u)
        # For every presynaptic input the neuron receives.
        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:

            # Excitatory case: accumulator increases as per standard formula.
            if net.ConnMat[preidx, postidx, 2] > 0:
                dv += full_stim[preidx] * net.weightsForTest[preidx, postidx - net.inputNum, time]

                net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
                    (postidx, preidx, net.state.NeurAccumForTest[time][postidx], \
                     full_stim[preidx], net.weightsForTest[preidx, postidx - net.inputNum, time]))

            # Inhibitory case: accumulator decreases as per standard formula.
            elif net.ConnMat[preidx, postidx, 2] < 0:
                dv -= full_stim[preidx] * net.weightsForTest[preidx, postidx - net.inputNum, time]

        net.state.NeurAccumForTest[time][postidx] += dv * dt
        net.state.NeurRecovForTest[time][postidx] += du * dt

    # Have neurons declare 'interest to fire'.
    # BUG FIX: read the test-phase accumulator here; the original iterated
    # net.state.NeurAccum (the training accumulator), mixing phases.
    for neuron in range(len(net.state.NeurAccumForTest[time])):
        if net.state.NeurAccumForTest[time][neuron] > v_peak_boundary:
            # Register 'interest to fire'.
            wantToFire[neuron] = 1

    # STAGE II: implement constraints from net-level considerations.
    # Example: WTA. No restrictions from net level yet; all neurons that
    # want to fire will fire.
    net.state.firingCellsForTest = wantToFire

    # Barrel-shift history for cells stimulated this step.
    net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
        net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
    # Save last firing time for all cells that fired in this time step.
    net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
        time

    # Load 'NN'.
    net.state.fireCellsForTest[time] = full_stim
    net.state.errorListForTest = wantToFire - outputLabel

def additional_data(net):
# This function should return any additional data that might be produced
# by this core. In this particular case there are None.
Expand Down
Loading

0 comments on commit aa2fdac

Please sign in to comment.