Add more neuron models (LIF with rate coding and temporal coding, Izhikevich) and
learning rules (backpropagation, STDP for unsupervised learning, tempotron, and direct random target projection) in the NeuroCores folder ('core_LIF_supervisedlearning', 'core_LIF_supervisedlearning_wta', 'core_temporalcodingLIF_tempotron', 'core_wta_example', 'core_Izhikevich'). (The file 'core_supevisedlearning_wat_debugger' is the same core without the memristor model, used to debug 'core_supervisedlearning_wta'.) Update the analysis-tool GUI to display the weight set for each layer at each epoch, and the fire history and membrane voltage of each neuron across all epochs (in 'NeuroPack.py', 'uis/nnanalysis.ui' and 'uis/nnvarsnaprow.ui'). Split the training and test phases and make memristor parameters customizable (in 'NeuroPack.py'). Change the virtual memristor array size; initial values can also be parameterized with user-defined values (in 'NeuroPack.py').
Jinqi Huang committed Nov 29, 2021
1 parent 74814be · commit 025752b
Showing 20 changed files with 33,065 additions and 759 deletions.
@@ -0,0 +1,324 @@
import numpy as np
from .memristorPulses import memristorPulses as memristorPulses
# This core implements the Izhikevich neuron model.

def normalise_weight(net, w):
    # Weights are conductances (w = 1/R), so the conductance window is the
    # inverse of the configured resistance window [PFLOOR, PCEIL].
    PCEIL = 1.0/net.params['PFLOOR']
    PFLOOR = 1.0/net.params['PCEIL']

    val = net.params['WEIGHTSCALE']*(float(w) - PFLOOR)/(PCEIL - PFLOOR)

    # Clamp weights between 0.0 and 1.0
    if val < 0.0:
        return 0.0
    elif val > 1.0:
        return 1.0
    else:
        return val

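# Worked example (hypothetical bounds, not shipped defaults): with
# params PFLOOR = 1e4 ohm and PCEIL = 1e6 ohm, the conductance window is
# [1/1e6, 1/1e4] = [1e-6, 1e-4] S; a weight w = 1e-5 S then maps to
# WEIGHTSCALE*(1e-5 - 1e-6)/(1e-4 - 1e-6) ≈ 0.09 before clamping.
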
def init(net):
    # Make sure all counters are reset.
    net.spikeTrain_cnt = 0
    net.errorSteps_cnt = 0
    net.errorStepsForTest_cnt = 0

    # Initialise the Izhikevich state: v starts at the reset value c and
    # u at b*v.
    c = net.params.get('C', -0.065)  # after-spike reset value of v
    b = net.params.get('B', 0.2)     # sensitivity of u to v
    for postidx in range(len(net.ConnMat)):
        net.state.NeurAccum[0][postidx] = c
        net.state.NeurRecov[0][postidx] = b * net.state.NeurAccum[0][postidx]

    # Renormalise weights if needed; this stays last because of the early
    # return below.
    if not net.params.get('NORMALISE', False):
        return

    for postidx in range(len(net.ConnMat)):
        # For every presynaptic input the neuron receives.
        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
            old_weight = net.state.weights[preidx, postidx, 0]
            new_weight = normalise_weight(net, old_weight)
            net.state.weights[preidx, postidx, 0] = new_weight

def neurons(net, time):

    rawin = net.rawin  # Raw input
    stimin = net.stimin[:, time]  # Stimulus input for current timestep

    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))

    # Split input stimulus and output labels.
    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin])
    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])

    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
    net.log("**** FULL_STIM = ", full_stim)

    # Izhikevich neuron model: v' = 0.04v^2 + 5v + 140 - u + I
    #                          u' = a(bv - u)
    #                          if v > v_peak_boundary: v <- c, u <- u + d
    # v: membrane voltage; u: recovery variable
    a = net.params.get('A', 0.02)    # time constant of u
    b = net.params.get('B', 0.2)     # sensitivity of u to v
    c = net.params.get('C', -0.065)  # after-spike reset value of v
    d = net.params.get('D', 0.2)     # increment of u after a spike
    dt = net.params.get('TIMESTEP', 1e-3)
    v_peak_boundary = net.params.get('VPEAK', 0.03)

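    # The STAGE I loop below integrates these ODEs with forward Euler:
    #   v[t+1] = v[t] + dt*(0.04*v[t]^2 + 5*v[t] + 140 - u[t] + I)
    #   u[t+1] = u[t] + dt*a*(b*v[t] - u[t])
    # where I is the weighted sum of presynaptic spikes accumulated into dv.
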
    if time > 0:
        # If this isn't the first step, copy the accumulators
        # from the previous step onto the new one.
        net.state.NeurAccum[time] = net.state.NeurAccum[time-1]
        net.state.NeurRecov[time] = net.state.NeurRecov[time-1]

        # Neurons that fired on the previous step are reset: v <- c, u <- u + d.
        for (idx, fired) in enumerate(rawin):
            if fired == 1:
                net.state.NeurAccum[time][idx] = c
                net.state.NeurRecov[time][idx] += d

    # If changing this file, a back-up is strongly recommended before
    # proceeding.

    # Neurons that, unless otherwise dictated by the net or external input,
    # will fire.
    wantToFire = len(net.ConnMat)*[0]

    # STAGE I: See what neurons do 'freely', i.e. without the constraints of
    # WTA or generally other neurons' activities.
    for postidx in range(len(net.state.NeurAccum[time])):

        v = net.state.NeurAccum[time][postidx]
        u = net.state.NeurRecov[time][postidx]
        dv = v * v * 0.04 + v * 5 + 140 - u
        du = a * (b * v - u)
        # For every presynaptic input the neuron receives.
        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:

            # Excitatory case
            if net.ConnMat[preidx, postidx, 2] > 0:
                # Accumulator increases as per standard formula.
                dv += full_stim[preidx] * net.state.weights[preidx, postidx, time]

                net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
                    (postidx, preidx, net.state.NeurAccum[time][postidx], \
                     full_stim[preidx], net.state.weights[preidx, postidx, time]))

            # Inhibitory case
            elif net.ConnMat[preidx, postidx, 2] < 0:
                # Accumulator decreases as per standard formula.
                dv -= full_stim[preidx]*net.state.weights[preidx, postidx, time]

        net.state.NeurAccum[time][postidx] += dv * dt
        net.state.NeurRecov[time][postidx] += du * dt

    # Have neurons declare 'interest to fire'.
    for neuron in range(len(net.state.NeurAccum[time])):
        if net.state.NeurAccum[time][neuron] > v_peak_boundary:
            # Register 'interest to fire'.
            wantToFire[neuron] = 1

    # STAGE II: Implement constraints from net-level considerations.
    # Example: WTA. No restrictions from the net level yet; all neurons that
    # want to fire will fire.
    net.state.firingCells = wantToFire

    # Barrel shift history.
    net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
        net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
    # Save last firing time for all cells that fired in this time step.
    net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
        time

    # Load 'NN'.
    net.state.fireCells[time] = full_stim
    net.state.errorList = np.array(wantToFire) - outputLabel

    # NOTE: no validated learning rule for this core yet.

def plast(net, time):

    if time+2 > net.epochs:
        return

    rawin = net.rawin  # Raw input
    stimin = net.stimin[:, time]  # Stimulus input

    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in stimin])

    # Recover the label part of the stimulus, as in neurons().
    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])

    # Carry the weights forward; they are modified in place below.
    net.state.weights[:, :, time+1] = net.state.weights[:, :, time]

    noiseScale = net.params.get('NOISESCALE', 0.01)
    learningRate = net.params.get('LEARNINGRATE', 0.001)

    fullNum = len(rawin)  # total number of neurons
    error = np.zeros(fullNum)  # per-neuron teaching signal
    # Presynaptic firing pattern used for the gradient (raw activity OR stimulus).
    allNeuronsThatFire = full_stim

    directRandomTargetProj = np.matmul(net.state.fixedRandomWeights[:, net.outputNum:].T, outputLabel[net.outputNum:])

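    # Direct Random Target Projection (DRTP): the hidden-layer teaching signal
    # comes from projecting the target labels through fixed random weights
    # (directRandomTargetProj above), rather than from backpropagated output
    # error, so no symmetric weight transport is required.
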
    # Walk neurons in reverse order (output layer first, then hidden).
    for neuron in range(len(rawin)-1, -1, -1):
        if neuron >= fullNum - net.outputNum:  # output neurons
            # delta = (actual firing - target label)
            # gradient = delta * input
            delta = (rawin[neuron] - outputLabel[neuron])
            error[neuron] = delta * np.random.rand() * noiseScale
            print("neuron %d has delta %f and error %f" % (neuron, delta, error[neuron]))
        elif neuron < fullNum - net.outputNum and neuron >= net.inputNum:  # hidden neurons
            error[neuron] = directRandomTargetProj[neuron] * np.random.rand() * noiseScale
        else:  # input neurons are not updated
            continue

        if error[neuron] == 0.0:
            continue

        # For every presynaptic input the neuron receives, back-propagate
        # the error.
        for preidx in np.where(net.ConnMat[:, neuron, 0] != 0)[0]:
            w, b = net.ConnMat[preidx, neuron, 0:2]
            if allNeuronsThatFire[preidx] == 0:
                continue
            grad = error[neuron] * allNeuronsThatFire[preidx]
            print(" gradient: %f" % grad)
            dW = (-1) * learningRate * grad
            if dW > 0:  # conductance needs to be larger, so a negative pulse is supplied
                pulseList = net.neg_pulseList
            else:
                pulseList = net.pos_pulseList
            print(" weight change: %f" % dW)
            p = dW + net.state.weights[preidx, neuron, time+1]  # new weight
            # Look-up-table mapping: weights are conductances, so the target
            # resistance is the reciprocal of the target weight.
            R_expect = 1 / p  # expected R
            R = net.read(w, b)  # current R
            virtualMemristor = memristorPulses(net.dt, net.Ap, net.An, net.a0p, net.a1p, net.a0n, net.a1n, net.tp, net.tn, R)
            pulseParams = virtualMemristor.BestPulseChoice(R_expect, pulseList)  # takes the best pulse choice
            net.pulse(w, b, pulseParams[0], pulseParams[1])
            R_real = net.read(w, b)  # read back the achieved resistance
            p_real = 1 / R_real  # realised weight
            p_error = p_real - p  # residual weight error
            if net.params.get('NORMALISE', False):
                net.state.weights[preidx, neuron, time+1] = normalise_weight(net, p_real)
                net.state.weightsExpected[preidx, neuron, time+1] = normalise_weight(net, p)
                net.state.weightsError[preidx, neuron, time+1] = normalise_weight(net, p_error)
            else:
                net.state.weights[preidx, neuron, time+1] = p_real
                net.state.weightsExpected[preidx, neuron, time+1] = p
                net.state.weightsError[preidx, neuron, time+1] = p_error
            net.log(" weight change for synapse %d -- %d from %f to %f" % (preidx, neuron, net.state.weights[preidx, neuron, time], net.state.weights[preidx, neuron, time+1]))

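    # Note on the update above: weight writes are closed-loop
    # (program-and-verify). The target resistance R_expect = 1/p is computed,
    # the pulse the memristor model predicts will land closest to it is
    # applied, and the achieved resistance is read back so both the realised
    # weight and the residual error are recorded.
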
    # For every valid connection between neurons, find out which memristor
    # corresponds to it. Then, if the weight is still uninitialised,
    # take a reading and ensure that the weight has a proper value.
    for preidx in range(len(rawin)):
        for postidx in range(len(rawin)):
            if net.ConnMat[preidx, postidx, 0] != 0:
                w, b = net.ConnMat[preidx, postidx, 0:2]
                if net.state.weights[preidx, postidx, time] == 0.0:
                    net.state.weights[preidx, postidx, time] = \
                        1.0/net.read(w, b, "NN")

    net.state.errorSteps_cnt = time

def neuronsForTest(net, time):

    rawin = net.rawin  # Raw input
    stimin = net.stiminForTesting[:, time]  # Stimulus input for current timestep

    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))

    # Split input stimulus and output labels.
    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin])
    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])

    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
    net.log("**** FULL_STIM = ", full_stim)

    # Izhikevich neuron model, as in neurons() above:
    #   v' = 0.04v^2 + 5v + 140 - u + I
    #   u' = a(bv - u)
    #   if v > v_peak_boundary: v <- c, u <- u + d
    # v: membrane voltage; u: recovery variable
    a = net.params.get('A', 0.02)    # time constant of u
    b = net.params.get('B', 0.2)     # sensitivity of u to v
    c = net.params.get('C', -0.065)  # after-spike reset value of v
    d = net.params.get('D', 0.2)     # increment of u after a spike
    dt = net.params.get('TIMESTEP', 1e-3)
    v_peak_boundary = net.params.get('VPEAK', 0.03)

    if time > 0:
        # If this isn't the first step, copy the accumulators
        # from the previous step onto the new one.
        net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time-1]
        net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time-1]

        # Neurons that fired on the previous step are reset: v <- c, u <- u + d.
        for (idx, fired) in enumerate(rawin):
            if fired == 1:
                net.state.NeurAccumForTest[time][idx] = c
                net.state.NeurRecovForTest[time][idx] += d

    # If changing this file, a back-up is strongly recommended before
    # proceeding.

    # Neurons that, unless otherwise dictated by the net or external input,
    # will fire.
    wantToFire = len(net.ConnMat)*[0]

    # STAGE I: See what neurons do 'freely', i.e. without the constraints of
    # WTA or generally other neurons' activities.
    for postidx in range(len(net.state.NeurAccumForTest[time])):

        v = net.state.NeurAccumForTest[time][postidx]
        u = net.state.NeurRecovForTest[time][postidx]
        dv = v * v * 0.04 + v * 5 + 140 - u
        du = a * (b * v - u)
        # For every presynaptic input the neuron receives.
        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:

            # Excitatory case
            if net.ConnMat[preidx, postidx, 2] > 0:
                # Accumulator increases as per standard formula.
                dv += full_stim[preidx] * net.weightsForTest[preidx, postidx, time]

                net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
                    (postidx, preidx, net.state.NeurAccumForTest[time][postidx], \
                     full_stim[preidx], net.weightsForTest[preidx, postidx, time]))

            # Inhibitory case
            elif net.ConnMat[preidx, postidx, 2] < 0:
                # Accumulator decreases as per standard formula.
                dv -= full_stim[preidx]*net.weightsForTest[preidx, postidx, time]

        net.state.NeurAccumForTest[time][postidx] += dv * dt
        net.state.NeurRecovForTest[time][postidx] += du * dt

    # Have neurons declare 'interest to fire'.
    for neuron in range(len(net.state.NeurAccumForTest[time])):
        if net.state.NeurAccumForTest[time][neuron] > v_peak_boundary:
            # Register 'interest to fire'.
            wantToFire[neuron] = 1

    # STAGE II: Implement constraints from net-level considerations.
    # Example: WTA. No restrictions from the net level yet; all neurons that
    # want to fire will fire.
    net.state.firingCellsForTest = wantToFire

    # Barrel shift history.
    net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
        net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
    # Save last firing time for all cells that fired in this time step.
    net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
        time

    # Load 'NN'.
    net.state.fireCellsForTest[time] = full_stim
    net.state.errorListForTest = np.array(wantToFire) - outputLabel

def additional_data(net):
    # This function should return any additional data that might be produced
    # by this core. In this particular case there is none.
    return None
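As a reference for the model the core discretises, here is a minimal self-contained sketch of the same forward-Euler update in plain Python, outside NeuroPack. It uses the textbook millivolt-scale Izhikevich constants rather than the core's defaults so the spiking is easy to see; the constant input current I is an arbitrary illustrative choice.

# Standalone Izhikevich neuron, forward-Euler integration (illustrative only;
# not part of the commit). Textbook millivolt-scale regular-spiking constants
# are used here; the core's own defaults express C and VPEAK in volts instead.
a, b, c, d = 0.02, 0.2, -65.0, 8.0   # regular-spiking parameters
dt, v_peak, steps = 1.0, 30.0, 1000  # dt in ms, v in mV
I = 10.0                             # constant drive, standing in for synaptic input

v, u = c, b * c                      # initial state, mirroring init()
spike_times = []
for t in range(steps):
    dv = 0.04 * v * v + 5 * v + 140 - u + I
    du = a * (b * v - u)
    v += dv * dt
    u += du * dt
    if v > v_peak:                   # spike: reset v, bump the recovery variable
        spike_times.append(t)
        v, u = c, u + d
print("spike times (ms):", spike_times[:10])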