diff --git a/NeuroCores/core_Izhikevich.py b/NeuroCores/core_Izhikevich.py
index d01ee46..1d84bde 100644
--- a/NeuroCores/core_Izhikevich.py
+++ b/NeuroCores/core_Izhikevich.py
@@ -16,6 +16,13 @@ def normalise_weight(net, w):
     else:
         return val
 
+def de_normalise_resistance(net, w):
+    PCEIL = 1.0/net.params['PFLOOR'] # conductance ceiling
+    PFLOOR = 1.0/net.params['PCEIL'] # conductance floor
+
+    C = w * (PCEIL - PFLOOR) / net.params['WEIGHTSCALE'] + PFLOOR
+    R = 1 / C
+    return R
 
 def init(net):
     # make sure all counters are reset
@@ -29,9 +36,15 @@ def init(net):
     for postidx in range(len(net.ConnMat)):
         # For every presynaptic input the neuron receives.
         for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
-            old_weight = net.state.weights[preidx, postidx - net.inputNum, 0]
-            new_weight = normalise_weight(net, old_weight)
-            net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight
+            w, b = net.ConnMat[preidx, postidx, 0:2]
+            net.state.weights[preidx, postidx - net.inputNum, 0] = 1.0/net.read(w, b)
+#            f = open("C:/Users/jh1d18/debug_log.txt", "a")
+#            f.write('device initial state: %f, w: %d, b: %d\n' % (net.read(w, b), w, b))
+#            f.close()
+            if net.params.get('NORMALISE', False):
+                old_weight = net.state.weights[preidx, postidx - net.inputNum, 0]
+                new_weight = normalise_weight(net, old_weight)
+                net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight
 
     c = net.params.get('C', -0.065) # after-spike reset value of v
     b = net.params.get('B', 0.2) # sensitivity of u to v
@@ -39,19 +52,20 @@ def init(net):
         net.state.NeurAccum[0][postidx] = c
         net.state.NeurRecov[0][postidx] = b * net.state.NeurAccum[0][postidx]
 
-def neurons(net, time):
+def neurons(net, time, phase = 'training'):
 
     rawin = net.rawin # Raw input
-    stimin = net.stimin[:, time] # Stimulus input for current timestep
+    rawinPseudo = net.rawinPseudo # latest fire history without wta
+    if phase == 'test':
+        stimin = net.stiminForTesting[:, time] # Stimulus input for current timestep
+    else:
+        stimin = net.stimin[:, time] # input stimuli at this time step
 
-    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
-    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
+    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.NETSIZE - net.inputNum))) # mask matrix to extract input spikes. Size: NETSIZE
+    outputLabelMask = np.hstack((np.zeros(net.NETSIZE - net.outputNum), np.ones(net.outputNum))) # mask matrix to extract output labels. Size: NETSIZE
 
     inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # split input stimulus and output labels
-    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])
-
-    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
-    net.log("**** FULL_STIM = ", full_stim)
+    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])
 
     # Izhikevich neuron model: v' = 0.04v^2 + 5v + 140 - u + I
     # u' = a(bv - u)
     # if v > v_peak_boundary: v <- c, u <- u + d
     # v: membrane voltage; u: recovery variable
     a = net.params.get('A', 0.02) # time constant of u
     b = net.params.get('B', 0.2) # sensitivity of u to v
     c = net.params.get('C', -0.065) # after-spike reset value of v
     d = net.params.get('D', 0.2) # increment of u after spike
@@ -64,75 +78,125 @@ def neurons(net, time):
     dt = net.params.get('TIMESTEP', 1e-3)
     v_peak_boundary = net.params.get('VPEAK', 0.03)
 
-    if time > 0:
-        # if this isn't the first step copy the accumulators
-        # from the previous step onto the new one
-        net.state.NeurAccum[time] = net.state.NeurAccum[time-1]
-        net.state.NeurRecov[time] = net.state.NeurRecov[time-1]
-
-        for (idx, v) in enumerate(rawin):
-            if v != c:
-                net.state.NeurAccum[time][idx] = c
-                net.state.NeurRecov[time][idx] += d
+    rawinArray = np.array(rawin) # size: NETSIZE
+    wantToFire = len(net.ConnMat)*[0]
+    full_stim = np.bitwise_or([int(x) for x in wantToFire], [int(x) for x in inputStim])
+    # Gather/define other pertinent data to function of neuron.
+    leakage = net.params.get('LEAKAGE', 1.0)
 
-    # For this example we'll make I&F neurons - if changing this file a back-up
+    if time > 0:
+        if phase == 'test':
+            # if this isn't the first step copy the accumulators
+            # from the previous step onto the new one
+            net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time-1] # size: NETSIZE - inputNum
+            net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time-1]
+            # reset the accumulators of neurons that have already fired
+            net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time] * np.where(rawinArray[net.inputNum : ] == 0, 1, 0) + np.where(rawinArray[net.inputNum : ] == 0, 0, c)
+            net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time] + d * np.where(rawinArray[net.inputNum : ] == 0, 0, 1)
+        else:
+            # if this isn't the first step copy the accumulators
+            # from the previous step onto the new one
+            net.state.NeurAccum[time] = net.state.NeurAccum[time-1] # size: NETSIZE - inputNum
+            net.state.NeurRecov[time] = net.state.NeurRecov[time-1]
+            net.log('membrane from last time step:', net.state.NeurAccum[time])
+            net.log('membrane recovery from last time step:', net.state.NeurRecov[time])
+            # reset the accumulators of neurons that have already fired
+            net.state.NeurAccum[time] = net.state.NeurAccum[time] * np.where(rawinArray[net.inputNum : ] == 0, 1, 0) + np.where(rawinArray[net.inputNum : ] == 0, 0, c)
+            net.state.NeurRecov[time] = net.state.NeurRecov[time] + d * np.where(rawinArray[net.inputNum : ] == 0, 0, 1)
+            net.log('membrane after reset:', net.state.NeurAccum[time])
+            net.log('membrane recovery after reset:', net.state.NeurRecov[time])
+
+
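# [editor's note] A minimal sketch of the masked reset added above, assuming the
# names used in this hunk (rawinArray, c, d). The two np.where terms keep v and
# u unchanged for silent neurons and apply the after-spike reset (v <- c,
# u <- u + d) to neurons whose entry in the fire history is non-zero:
#
#     fired = rawinArray[net.inputNum:] != 0
#     net.state.NeurAccum[time] = np.where(fired, c, net.state.NeurAccum[time])
#     net.state.NeurRecov[time] = np.where(fired, net.state.NeurRecov[time] + d, net.state.NeurRecov[time])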
+    # For this example we'll make Izhikevich neurons - if changing this file a back-up
     # is strongly recommended before proceeding.
 
     # -FIX- implementing 'memory' between calls to this function.
     # NeurAccum = len(net.ConnMat)*[0] #Define neuron accumulators.
     # Neurons that unless otherwise dictated to by net or ext input will
     # fire.
-    wantToFire = len(net.ConnMat)*[0]
 
     # STAGE I: See what neurons do 'freely', i.e. without the constraints of
     # WTA or generally other neurons' activities.
-    for postidx in range(len(net.state.NeurAccum[time])):
-
-        v = net.state.NeurAccum[time][postidx]
-        u = net.state.NeurRecov[time][postidx]#!!!!!
-        dv = v * v * 0.04 + v * 5 + 140 - u
-        du = a * (b * v - u)
+    for postidx in range(net.inputNum, net.NETSIZE):
+        if phase == 'test':
+            v = net.state.NeurAccumForTest[time][postidx]
+            u = net.state.NeurRecovForTest[time][postidx]
+            dv = v * v * 0.04 + v * 5 + 140 - u
+            du = a * (b * v - u)
         #For every presynaptic input the neuron receives.
-        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
-
-            # Excitatory case
-            if net.ConnMat[preidx, postidx, 2] > 0:
-                # net.log("Excitatory at %d %d" % (preidx, postidx))
-                # Accumulator increases as per standard formula.
-                dv += full_stim[preidx] * net.state.weights[preidx, postidx - net.inputNum, time]
-
-                net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
-                    (postidx, preidx, net.state.NeurAccum[time][postidx], \
-                     full_stim[preidx], net.state.weights[preidx, postidx - net.inputNum, time]))
-
-            # Inhibitory case
-            elif net.ConnMat[preidx, postidx, 2] < 0:
-                # Accumulator decreases as per standard formula.
-                dv -= full_stim[preidx]*net.state.weights[preidx, postidx - net.inputNum, time]
-
-        net.state.NeurAccum[time][postidx] += dv * dt
-        net.state.NeurRecov[time][postidx] += du * dt #!!!!!!!!
-
-    # Have neurons declare 'interest to fire'.
-    for neuron in range(len(net.state.NeurAccum[time])):
-        if net.state.NeurAccum[time][neuron] > v_peak_boundary:
-            # Register 'interest to fire'.
-            wantToFire[neuron] = 1
-
+            for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
+                # if it's in the test phase
+                # Excitatory case
+                if net.ConnMat[preidx, postidx, 2] > 0:
+                    # net.log("Excitatory at %d %d" % (preidx, postidx))
+                    # Accumulator increases as per standard formula.
+                    dv += full_stim[preidx] * net.weightsForTest[preidx, postidx - net.inputNum, time]
+
+                    net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
+                        (postidx, preidx, net.state.NeurAccumForTest[time][postidx], \
+                         full_stim[preidx], net.weightsForTest[preidx, postidx - net.inputNum, time]))
+
+                # Inhibitory case
+                elif net.ConnMat[preidx, postidx, 2] < 0:
+                    # Accumulator decreases as per standard formula.
+                    dv -= full_stim[preidx]*net.weightsForTest[preidx, postidx - net.inputNum, time]
+
+            net.state.NeurAccumForTest[time][postidx] += dv * dt
+            net.state.NeurRecovForTest[time][postidx] += du * dt
+            if net.state.NeurAccumForTest[time][postidx] > v_peak_boundary:
+                wantToFire[postidx] = 1
+
+        else:
+            v = net.state.NeurAccum[time][postidx]
+            u = net.state.NeurRecov[time][postidx]
+            dv = v * v * 0.04 + v * 5 + 140 - u
+            du = a * (b * v - u)
+            for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
+                # Excitatory case
+                if net.ConnMat[preidx, postidx, 2] > 0:
+                    # net.log("Excitatory at %d %d" % (preidx, postidx))
+                    # Accumulator increases as per standard formula.
+                    dv += full_stim[preidx] * net.state.weights[preidx, postidx - net.inputNum, time]
+
+                    net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
+                        (postidx, preidx, net.state.NeurAccum[time][postidx], \
+                         full_stim[preidx], net.state.weights[preidx, postidx - net.inputNum, time]))
+
+                # Inhibitory case
+                elif net.ConnMat[preidx, postidx, 2] < 0:
+                    # Accumulator decreases as per standard formula.
+                    dv -= full_stim[preidx]*net.state.weights[preidx, postidx - net.inputNum, time]
+
+            net.state.NeurAccum[time][postidx] += dv * dt
+            net.state.NeurRecov[time][postidx] += du * dt
+            if net.state.NeurAccum[time][postidx] > v_peak_boundary:
+                wantToFire[postidx] = 1
+
+    full_stim = np.bitwise_or([int(x) for x in wantToFire], [int(x) for x in inputStim])
+    net.state.firingCellsPseudo = wantToFire
     # STAGE II: Implement constraints from net-level considerations.
     # Example: WTA. No resitrictions from net level yet. All neurons that
     # want to fire will fire.
     net.state.firingCells = wantToFire
 
     # Barrel shift history
-    net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
-        net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
-    # Save last firing time for all cells that fired in this time step.
-    net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
-        time
+    if phase == 'test':
+        net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
+            net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
+        # Save last firing time for all cells that fired in this time step.
+        net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
+            time
+        net.state.fireCellsForTest[time] = wantToFire
+    else:
+        net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
+            net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
+        # Save last firing time for all cells that fired in this time step.
+        net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
+            time
+        net.state.fireCells[time] = wantToFire
 
     # Load 'NN'.
-    net.state.fireCells[time] = full_stim
+    net.state.fireCells[time] = wantToFire
     net.state.errorList = wantToFire - outputLabel
 
     #############!!!!!!!!!!No valid learning rule yet!!!!!!!!!!!!!!#######################
@@ -144,6 +208,7 @@ def plast(net, time):
 
     rawin = net.rawin # Raw input
     stimin = net.stimin[:, time] # Stimulus input
+    rawinPseudo = net.rawinPseudo
 
     full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in stimin])
 
@@ -159,10 +224,16 @@
             # delta = (y - y_hat)*noise
             # gradient = delta * input
             delta = (rawin[neuron] - outputLabel[neuron])
-            error[neuron] = delta * np.random.rand() * noiseScale
+            if abs(net.state.NeurAccum[time][neuron - net.inputNum] - net.params.get('FIRETH', 0.001)) > net.params.get('FIRETH', 0.001):
+                error[neuron] = 0
+            else:
+                error[neuron] = delta * 0.5 * net.state.NeurAccum[time][neuron - net.inputNum] / net.params.get('FIRETH', 0.001)
             print("neuron %d has delta %f and error %f" % (neuron, delta, error[neuron]))
         elif neuron < fullNum - net.outputNum and neuron >= net.inputNum: # hidden neurons
-            error[neuron] = directRandomTargetProj[neuron] * np.random.rand() * noiseScale
+            if abs(net.state.NeurAccum[time][neuron - net.inputNum] - net.params.get('FIRETH', 0.001)) > net.params.get('FIRETH', 0.001):
+                error[neuron] = 0
+            else:
+                error[neuron] = directRandomTargetProj[neuron] * 0.5 * net.state.NeurAccum[time][neuron - net.inputNum] / net.params.get('FIRETH', 0.001)
         else: # input neuron
             continue
 
@@ -221,112 +292,16 @@ def plast(net, time):
     # For every valid connection between neurons, find out which the
     # corresponding memristor is. Then, if the weight is still uninitialised
     # take a reading and ensure that the weight has a proper value.
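# [editor's note] The lazy re-read below is deliberately left commented out in
# this patch: init() now seeds every weight with a device read
# (net.state.weights[...] = 1.0/net.read(w, b)), so weights are initialised
# before plast() ever runs and the per-step zero-weight check became redundant.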
-    for preidx in range(len(rawin)):
-        for postidx in range(len(rawin)):
-            if net.ConnMat[preidx, postidx, 0] != 0:
-                w, b = net.ConnMat[preidx, postidx, 0:2]
-                if net.state.weights[preidx, postid - net.inputNumx, time] == 0.0:
-                    net.state.weights[preidx, postidx - net.inputNum, time] = \
-                        1.0/net.read(w, b, "NN")
+#    for preidx in range(len(rawin)):
+#        for postidx in range(len(rawin)):
+#            if net.ConnMat[preidx, postidx, 0] != 0:
+#                w, b = net.ConnMat[preidx, postidx, 0:2]
+#                if net.state.weights[preidx, postidx - net.inputNum, time] == 0.0:
+#                    net.state.weights[preidx, postidx - net.inputNum, time] = \
+#                        1.0/net.read(w, b, "NN")
 
     net.state.errorSteps_cnt = time
 
-def neuronsForTest(net, time):
-
-    rawin = net.rawin # Raw input
-    stimin = net.stiminForTesting[:, time] # Stimulus input for current timestep
-
-    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
-    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
-
-    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # split input stimulus and output labels
-    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])
-
-    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
-    net.log("**** FULL_STIM = ", full_stim)
-
-    # Izhikevich neuron model: v' = 0.04v^2 + 5v + 140 - u + I
-    # u' = a(bv - u)
-    # if v > v_peak_boundary: v <- c, u <- u + d
-    # v: membrane voltage; u: recovery variable
-    a = net.params.get('A', 0.02) # time constant of u
-    b = net.params.get('B', 0.2) # sensitivity of u to v
-    c = net.params.get('C', -0.065) # after-spike reset value of v
-    d = net.params.get('D', 0.2) # increment of u after spike
-    dt = net.params.get('TIMESTEP', 1e-3)
-    v_peak_boundary = net.params.get('VPEAK', 0.03)
-
-    if time > 0:
-        # if this isn't the first step copy the accumulators
-        # from the previous step onto the new one
-        net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time-1]
-        net.state.NeurRecovForTest[time] = net.state.NeurRecovForTest[time-1]
-
-        for (idx, v) in enumerate(rawin):
-            if v != c:
-                net.state.NeurAccumForTest[time][idx] = c
-                net.state.NeurRecovForTest[time][idx] += d
-
-    # For this example we'll make I&F neurons - if changing this file a back-up
-    # is strongly recommended before proceeding.
-
-    # -FIX- implementing 'memory' between calls to this function.
-    # NeurAccum = len(net.ConnMat)*[0] #Define neuron accumulators.
-    # Neurons that unless otherwise dictated to by net or ext input will
-    # fire.
-    wantToFire = len(net.ConnMat)*[0]
-
-    # STAGE I: See what neurons do 'freely', i.e. without the constraints of
-    # WTA or generally other neurons' activities.
-    for postidx in range(len(net.state.NeurAccumForTest[time])):
-
-        v = net.state.NeurAccumForTest[time][postidx]
-        u = net.state.NeurRecovForTest[time][postidx]#!!!!!
-        dv = v * v * 0.04 + v * 5 + 140 - u
-        du = a * (b * v - u)
-        #For every presynaptic input the neuron receives.
-        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
-
-            # Excitatory case
-            if net.ConnMat[preidx, postidx, 2] > 0:
-                # net.log("Excitatory at %d %d" % (preidx, postidx))
-                # Accumulator increases as per standard formula.
-                dv += full_stim[preidx] * net.weightsForTest[preidx, postidx - net.inputNum, time]
-
-                net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \
-                    (postidx, preidx, net.state.NeurAccumForTest[time][postidx], \
-                     full_stim[preidx], net.weightsForTest[preidx, postidx - net.inputNum, time]))
-
-            # Inhibitory case
-            elif net.ConnMat[preidx, postidx, 2] < 0:
-                # Accumulator decreases as per standard formula.
-                dv -= full_stim[preidx]*net.weightsForTest[preidx, postidx - net.inputNum, time]
-
-        net.state.NeurAccumForTest[time][postidx] += dv * dt
-        net.state.NeurRecovForTest[time][postidx] += du * dt #!!!!!!!!
-
-    # Have neurons declare 'interest to fire'.
-    for neuron in range(len(net.state.NeurAccum[time])):
-        if net.state.NeurAccumForTest[time][neuron] > v_peak_boundary:
-            # Register 'interest to fire'.
-            wantToFire[neuron] = 1
-
-    # STAGE II: Implement constraints from net-level considerations.
-    # Example: WTA. No resitrictions from net level yet. All neurons that
-    # want to fire will fire.
-    net.state.firingCellsForTest = wantToFire
-
-    # Barrel shift history
-    net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
-        net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
-    # Save last firing time for all cells that fired in this time step.
-    net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
-        time
-
-    # Load 'NN'.
-    net.state.fireCellsForTest[time] = full_stim
-    net.state.errorListForTest = wantToFire - outputLabel
-
 def additional_data(net):
     # This function should return any additional data that might be produced
     # by this core. In this particular case there are None.
diff --git a/NeuroCores/core_LIF_supervisedlearning.py b/NeuroCores/core_LIF_supervisedlearning.py
index 6d7f652..cd8eb69 100644
--- a/NeuroCores/core_LIF_supervisedlearning.py
+++ b/NeuroCores/core_LIF_supervisedlearning.py
@@ -48,7 +48,7 @@ def init(net):
             new_weight = normalise_weight(net, old_weight)
             net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight
 
-def neurons(net, time):
+def neurons(net, time, phase = 'training'):
 
     rawin = net.rawin # Raw input
     rawinPseudo = net.rawinPseudo # latest fire history without wta
@@ -227,10 +227,10 @@ def plast(net, time):
             #print(" gradient: %f" % grad)
             dW = (-1) * learningRate * grad
             #print(" weight change: %f" % dW)
-            if dW > 0: # conductance needs to be larger, so a negative pulse is suplied
-                pulseList = net.neg_pulseList
-            else:
-                pulseList = net.pos_pulseList
+#            if dW > 0: # conductance needs to be larger, so a negative pulse is supplied
+#                pulseList = net.neg_pulseList
+#            else:
+#                pulseList = net.pos_pulseList
             p = 1 / R # new weights
             if net.params.get('NORMALISE', False):
                 p_norm = normalise_weight(net, p)
diff --git a/NeuroCores/core_LIF_supervisedlearning_wta.py b/NeuroCores/core_LIF_supervisedlearning_wta.py
index ca1ae79..def8b13 100644
--- a/NeuroCores/core_LIF_supervisedlearning_wta.py
+++ b/NeuroCores/core_LIF_supervisedlearning_wta.py
@@ -267,10 +267,10 @@ def plast(net, time):
             net.log('dW:', dW)
             #f = open("C:/Users/jh1d18/debug_log.txt", "a")
             #f.write('dW:%f\n' % dW)
-            if dW > 0: # conductance needs to be larger, so a negative pulse is suplied
-                pulseList = net.neg_pulseList
-            else:
-                pulseList = net.pos_pulseList
+#            if dW > 0: # conductance needs to be larger, so a negative pulse is supplied
+#                pulseList = net.neg_pulseList
+#            else:
+#                pulseList = net.pos_pulseList
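# [editor's note] In both LIF cores the hardware pulse-polarity selection is
# commented out rather than deleted. Per the original comment, the intent if it
# is re-enabled: dW > 0 means the device conductance must rise, which takes a
# pulse from net.neg_pulseList; otherwise net.pos_pulseList is used.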
             #f.write('current R :%f\n' % R)
             p = 1 / R
             #f.write('current weight stored:%f, %f\n'% (net.state.weights[preidx, neuron - net.inputNum, time], net.state.weights[preidx, neuron - net.inputNum, time+1]))
diff --git a/NeuroCores/core_temporalcodingLIF_tempotron.py b/NeuroCores/core_temporalcodingLIF_tempotron.py
index 2c95513..ac7ed4e 100644
--- a/NeuroCores/core_temporalcodingLIF_tempotron.py
+++ b/NeuroCores/core_temporalcodingLIF_tempotron.py
@@ -18,6 +18,13 @@ def normalise_weight(net, w):
     else:
         return val
 
+def de_normalise_resistance(net, w):
+    PCEIL = 1.0/net.params['PFLOOR'] # conductance ceiling
+    PFLOOR = 1.0/net.params['PCEIL'] # conductance floor
+
+    C = w * (PCEIL - PFLOOR) / net.params['WEIGHTSCALE'] + PFLOOR
+    R = 1 / C
+    return R
 
 def init(net):
     # make sure all counters are reset
@@ -25,17 +32,19 @@ def init(net):
     net.errorSteps_cnt = 0
    net.errorStepsForTest_cnt = 0
     # Renormalise weights if needed
-    tau = net.params.get('TAU', 20e-3)
-    tau_s = net.params.get('TAUS', 5e-3)
-    if not net.params.get('NORMALISE', False):
-        return
     for postidx in range(len(net.ConnMat)):
         # For every presynaptic input the neuron receives.
         for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
-            old_weight = net.state.weights[preidx, postidx - net.inputNum, 0]
-            new_weight = normalise_weight(net, old_weight)
-            net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight
+            w, b = net.ConnMat[preidx, postidx, 0:2]
+            net.state.weights[preidx, postidx - net.inputNum, 0] = 1.0/net.read(w, b)
+#            f = open("C:/Users/jh1d18/debug_log.txt", "a")
+#            f.write('device initial state: %f, w: %d, b: %d\n' % (net.read(w, b), w, b))
+#            f.close()
+            if net.params.get('NORMALISE', False):
+                old_weight = net.state.weights[preidx, postidx - net.inputNum, 0]
+                new_weight = normalise_weight(net, old_weight)
+                net.state.weights[preidx, postidx - net.inputNum, 0] = new_weight
 
 def k(net, v0, t_diff):
     tau = net.params.get('TAU', 20e-3)
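# [editor's note] k() itself is not shown in this diff. The normalisation used
# in neurons() below (t_max = tau*tau_s*ln(tau/tau_s)/(tau - tau_s), then
# v_0 = 1/k(net, 1, t_max)) matches the standard tempotron PSP kernel of
# Gutig & Sompolinsky (2006), so a sketch consistent with those call sites
# (an assumption, not the repository's definition) would be:
#
#     def k(net, v0, t_diff):
#         tau = net.params.get('TAU', 20e-3)
#         tau_s = net.params.get('TAUS', 5e-3)
#         return v0 * (np.exp(-t_diff / tau) - np.exp(-t_diff / tau_s))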
@@ -55,14 +64,18 @@ def t_i_hist(net, preidx, time_start, time): # return all firing time before current time step
 
     return np.array(t_i)
 
-def neurons(net, time):
+def neurons(net, time, phase = 'training'):
 
     rawin = net.rawin # Raw input
-    stimin = net.stimin[:, time] # Stimulus input for current timestep
+    rawinPseudo = net.rawinPseudo # latest fire history without wta
+    if phase == 'test':
+        stimin = net.stiminForTesting[:, time] # Stimulus input for current timestep
+    else:
+        stimin = net.stimin[:, time] # input stimuli at this time step
     outputSpike = net.outputSpike # signal to indicate if the output spike is generated
 
-    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
-    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
+    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.NETSIZE - net.inputNum))) # mask matrix to extract input spikes. Size: NETSIZE
+    outputLabelMask = np.hstack((np.zeros(net.NETSIZE - net.outputNum), np.ones(net.outputNum))) # mask matrix to extract output labels. Size: NETSIZE
 
     inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # split input stimulus and output labels
     outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])
 
@@ -76,7 +89,10 @@ def neurons(net, time):
     if time > 0:
         # if this isn't the first step copy the accumulators
         # from the previous step onto the new one
-        net.state.NeurAccum[time] = net.state.NeurAccum[time-1]
+        if phase == 'test':
+            net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time-1]
+        else:
+            net.state.NeurAccum[time] = net.state.NeurAccum[time-1]
 
     wantToFire = len(net.ConnMat)*[0]
 
@@ -88,72 +104,138 @@ def neurons(net, time):
     v_max = k(net, 1, t_max)
     v_0 = 1 / v_max
     v_rest = net.params.get('VREST', 0)
-    for postidx in range(len(net.state.NeurAccum[time])):
-
+    for postidx in range(net.inputNum, net.NETSIZE):
+        if phase == 'test':
+            for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
+
+                t_i = t_i_hist(net, preidx, net.state.lastSpikeTrain + 1, time)
+
+                if t_i.size != 0:
+                    t_diff = (time - t_i) * dt
+                    K = k(net, v_0, t_diff)
+                    input_contrib = sum(K)
+                else:
+                    input_contrib = 0
+
+                # Excitatory case
+                if net.ConnMat[preidx, postidx, 2] > 0:
+                    # net.log("Excitatory at %d %d" % (preidx, postidx))
+                    # Accumulator increases as per standard formula.
+                    net.state.NeurAccumForTest[time][postidx - net.inputNum] += \
+                        input_contrib * net.weightsForTest[preidx, postidx - net.inputNum, time]
+
+                    net.log("POST=%d PRE=%d NeurAccum=%g input contribution=%g weight=%g" % \
+                        (postidx, preidx, net.state.NeurAccumForTest[time][postidx - net.inputNum], \
+                         input_contrib, net.weightsForTest[preidx, postidx - net.inputNum, time]))
+
+                # Inhibitory case
+                elif net.ConnMat[preidx, postidx, 2] < 0:
+                    # Accumulator decreases as per standard formula.
+                    net.state.NeurAccumForTest[time][postidx - net.inputNum] -= \
+                        input_contrib * net.weightsForTest[preidx, postidx - net.inputNum, time]
+
+            net.state.NeurAccumForTest[time][postidx - net.inputNum] += v_rest
+        else:
         #For every presynaptic input the neuron receives.
-        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
-
-            t_i = t_i_hist(net, preidx, net.state.lastSpikeTrain + 1, time)
-
-            if t_i.size != 0:
-                t_diff = (time - t_i) * dt
-                K = k(net, v_0, t_diff)
-                input_contrib = sum(K)
-            else:
-                input_contrib = 0
-
-            # Excitatory case
-            if net.ConnMat[preidx, postidx, 2] > 0:
-                # net.log("Excitatory at %d %d" % (preidx, postidx))
-                # Accumulator increases as per standard formula.
-                net.state.NeurAccum[time][postidx] += \
-                    input_contrib * net.state.weights[preidx, postidx - net.inputNum, time]
-
-                net.log("POST=%d PRE=%d NeurAccum=%g input contribution=%g weight=%g" % \
-                    (postidx, preidx, net.state.NeurAccum[time][postidx], \
-                     input_contrib, net.state.weights[preidx, postidx - net.inputNum, time]))
-
-            # Inhibitory case
-            elif net.ConnMat[preidx, postidx, 2] < 0:
-                # Accumulator decreases as per standard formula.
-                net.state.NeurAccum[time][postidx] -= \
-                    input_contrib*net.state.weights[preidx, postidx - net.inputNum, time]
-
-        net.state.NeurAccum[time][postidx] += v_rest
-    # Have neurons declare 'interest to fire'.
-    for neuron in range(len(net.state.NeurAccum[time])):
-        if net.state.NeurAccum[time][neuron] > net.params.get('FIRETH', 0.8):
-            # Register 'interest to fire'.
-            wantToFire[neuron] = 1
-            if net.state.NeurAccum[time][neuron] > net.state.voltMax[neuron]:
-                net.state.voltMax[neuron] = net.state.NeurAccum[time][neuron]
-                net.state.tMax = time
-
-    if sum(wantToFire) > 0:
-        outputSpike = 1
+            for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
+
+                t_i = t_i_hist(net, preidx, net.state.lastSpikeTrain + 1, time)
+
+                if t_i.size != 0:
+                    t_diff = (time - t_i) * dt
+                    K = k(net, v_0, t_diff)
+                    input_contrib = sum(K)
+                else:
+                    input_contrib = 0
+
+                # Excitatory case
+                if net.ConnMat[preidx, postidx, 2] > 0:
+                    # net.log("Excitatory at %d %d" % (preidx, postidx))
+                    # Accumulator increases as per standard formula.
+                    net.state.NeurAccum[time][postidx - net.inputNum] += \
+                        input_contrib * net.state.weights[preidx, postidx - net.inputNum, time]
+
+                    net.log("POST=%d PRE=%d NeurAccum=%g input contribution=%g weight=%g" % \
+                        (postidx, preidx, net.state.NeurAccum[time][postidx - net.inputNum], \
+                         input_contrib, net.state.weights[preidx, postidx - net.inputNum, time]))
+
+                # Inhibitory case
+                elif net.ConnMat[preidx, postidx, 2] < 0:
+                    # Accumulator decreases as per standard formula.
+                    net.state.NeurAccum[time][postidx - net.inputNum] -= \
+                        input_contrib*net.state.weights[preidx, postidx - net.inputNum, time]
+
+            net.state.NeurAccum[time][postidx - net.inputNum] += v_rest
+
+    if phase == 'test':
+        for neuron in range(len(net.state.NeurAccumForTest[time])):
+            if net.state.NeurAccumForTest[time][neuron] > net.params.get('FIRETH', 0.8):
+                # Register 'interest to fire'.
+                wantToFire[neuron + net.inputNum] = 1
+            if net.state.NeurAccumForTest[time][neuron] > net.state.voltMax[neuron]:
+                net.state.voltMax[neuron] = net.state.NeurAccumForTest[time][neuron]
+                net.state.tMax = time
+
+        if sum(wantToFire) > 0:
+            outputSpike = 1
+        else:
+            outputSpike = 0
+
+        # STAGE II: Implement constraints from net-level considerations.
+        # Example: WTA. No restrictions from net level yet. All neurons that
+        # want to fire will fire.
+        net.state.firingCellsForTest = wantToFire
+        # Barrel shift history
+        net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
+            net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
+        # Save last firing time for all cells that fired in this time step.
+        net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
+            time
+        net.state.outputFlag = outputSpike
+        # Load 'NN'.
+        net.state.fireCellsForTest[time] = full_stim
+        net.state.spikeTrain_cnt += 1
+        net.spikeTrainStep = net.state.spikeTrain_cnt
+
+        if net.state.spikeTrain_cnt == net.state.spikeTrain:
+            SumOfFireHistInOneTrain = np.sum(net.state.fireCellsForTest[time+1-net.state.spikeTrain : time+2], axis = 1)
+            FireHistInOneTrain = np.where(SumOfFireHistInOneTrain > 0, 1, 0)
+            net.state.errorListForTest[(time+1) // net.state.spikeTrain] = FireHistInOneTrain - outputLabel
     else:
-        outputSpike = 0
-
-    # STAGE II: Implement constraints from net-level considerations.
-    # Example: WTA. No resitrictions from net level yet. All neurons that
-    # want to fire will fire.
-    net.state.firingCells = wantToFire
-    # Barrel shift history
-    net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
-        net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
-    # Save last firing time for all cells that fired in this time step.
-    net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
-        time
-    net.state.outputFlag = outputSpike
-    # Load 'NN'.
-    net.state.fireCells[time] = full_stim
-    net.state.spikeTrain_cnt += 1
-    net.spikeTrainStep = net.state.spikeTrain_cnt
-
-    if net.state.spikeTrain_cnt == net.state.spikeTrain:
-        SumOfFireHistInOneTrain = np.sum(net.state.fireCells[time+1-net.state.spikeTrain : time+2], axis = 1)
-        FireHistInOneTrain = np.where(SumFireHistInOneTrain > 0, 1, 0)
-        net.state.errorList[(time+1) // net.state.spikeTrain] = FireHistInOneTrain - outputLable
+        # Have neurons declare 'interest to fire'.
+        for neuron in range(len(net.state.NeurAccum[time])):
+            if net.state.NeurAccum[time][neuron] > net.params.get('FIRETH', 0.8):
+                # Register 'interest to fire'.
+                wantToFire[neuron] = 1
+            if net.state.NeurAccum[time][neuron] > net.state.voltMax[neuron]:
+                net.state.voltMax[neuron] = net.state.NeurAccum[time][neuron] # size: netsize - inputNum
+                net.state.tMax = time
+
+        if sum(wantToFire) > 0:
+            outputSpike = 1
+        else:
+            outputSpike = 0
+
+        # STAGE II: Implement constraints from net-level considerations.
+        # Example: WTA. No restrictions from net level yet. All neurons that
+        # want to fire will fire.
+        net.state.firingCells = wantToFire
+        # Barrel shift history
+        net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \
+            net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]]
+        # Save last firing time for all cells that fired in this time step.
+        net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
+            time
+        net.state.outputFlag = outputSpike
+        # Load 'NN'.
+        net.state.fireCells[time] = full_stim
+        net.state.spikeTrain_cnt += 1
+        net.spikeTrainStep = net.state.spikeTrain_cnt
+
+        if net.state.spikeTrain_cnt == net.state.spikeTrain:
+            SumOfFireHistInOneTrain = np.sum(net.state.fireCells[time+1-net.state.spikeTrain : time+2], axis = 1)
+            FireHistInOneTrain = np.where(SumOfFireHistInOneTrain > 0, 1, 0)
+            net.state.errorList[(time+1) // net.state.spikeTrain] = FireHistInOneTrain - outputLabel
 
 
 def plast(net, time):
 
@@ -167,11 +249,11 @@ def plast(net, time):
     stimin = net.stimin[:, time] # Stimulus input
     outputSpike = net.outputSpike # signal to indicate if the output spike is generated
 
-    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
-    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
+    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.NETSIZE - net.inputNum))) # mask matrix to extract input spikes. Size: NETSIZE
+    outputLabelMask = np.hstack((np.zeros(net.NETSIZE - net.outputNum), np.ones(net.outputNum))) # mask matrix to extract output labels. Size: NETSIZE
 
-    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # split input stimulus and output labels
-    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])
+    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # input spike matrix. Size: NETSIZE
+    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin]) # output label matrix. Size: NETSIZE
 
     full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
 
@@ -258,8 +340,8 @@ def plast(net, time):
             net.state.weightsError[preidx, neuron - net.inputNum, time+1] = p_error
             net.log(" weight change for synapse %d -- %d from %f to %f in step %d" % (preidx, neuron, net.state.weights[preidx, neuron - net.inputNum, time], net.state.weights[preidx, neuron - net.inputNum, time+1], time))
     net.log('---------------')
-
-    net.state.voltMax = np.array(net.NETSIZE*[0.0])
+
+    net.state.voltMax = np.array((net.NETSIZE - net.inputNum)*[0.0])
     net.state.tMax = 0
     net.state.spikeTrain_cnt = 0
     net.state.errorSteps_cnt += 1
@@ -268,113 +350,13 @@ def plast(net, time):
     # For every valid connection between neurons, find out which the
     # corresponding memristor is. Then, if the weight is still uninitialised
     # take a reading and ensure that the weight has a proper value.
-    for preidx in range(len(rawin)):
-        for postidx in range(len(rawin)):
-            if net.ConnMat[preidx, postidx, 0] != 0:
-                w, b = net.ConnMat[preidx, postidx, 0:2]
-                if net.state.weights[preidx, postidx - net.inputNum, time] == 0.0:
-                    net.state.weights[preidx, postidx - net.inputNum, time] = \
-                        1.0/net.read(w, b, "NN")
-
-def neuronsForTest(net, time):
-
-    rawin = net.rawin # Raw input
-    stimin = net.stiminForTesting[:, time] # Stimulus input for current timestep
-    outputSpike = net.outputSpike # signal to indicate if the output spike is generated
-
-    inputStimMask = np.hstack((np.ones(net.inputNum), np.zeros(net.outputNum)))
-    outputLabelMask = np.hstack((np.zeros(net.inputNum), np.ones(net.outputNum)))
-
-    inputStim = np.bitwise_and([int(x) for x in inputStimMask], [int(x) for x in stimin]) # split input stimulus and output labels
-    outputLabel = np.bitwise_and([int(x) for x in outputLabelMask], [int(x) for x in stimin])
-
-    full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in inputStim])
-    net.log("**** FULL_STIM = ", full_stim)
-
-    tau = net.params.get('TAU', 20e-3)
-    tau_s = net.params.get('TAUS', 5e-3)
-
-    if time > 0:
-        # if this isn't the first step copy the accumulators
-        # from the previous step onto the new one
-        net.state.NeurAccumForTest[time] = net.state.NeurAccumForTest[time-1]
-
-    wantToFire = len(net.ConnMat)*[0]
-
-    # Gather/define other pertinent data to function of neuron.
-    dt = net.params.get('TIMESTEP', 1e-3)
-    # STAGE I: See what neurons do 'freely', i.e. without the constraints of
-    # WTA or generally other neurons' activities.
-    t_max = tau * tau_s * np.log(tau / tau_s) / (tau - tau_s)
-    v_max = k(net, 1, t_max)
-    v_0 = 1 / v_max
-    v_rest = net.params.get('VREST', 0)
-    for postidx in range(len(net.state.NeurAccumForTest[time])):
-
-        #For every presynaptic input the neuron receives.
-        for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]:
-
-            t_i = t_i_hist(net, preidx, net.state.lastSpikeTrain + 1, time)
-
-            if t_i.size != 0:
-                t_diff = (time - t_i) * dt
-                K = k(net, v_0, t_diff)
-                input_contrib = sum(K)
-            else:
-                input_contrib = 0
-
-            # Excitatory case
-            if net.ConnMat[preidx, postidx, 2] > 0:
-                # net.log("Excitatory at %d %d" % (preidx, postidx))
-                # Accumulator increases as per standard formula.
-                net.state.NeurAccumForTest[time][postidx] += \
-                    input_contrib * net.weightsForTest[preidx, postidx - net.inputNum, time]
-
-                net.log("POST=%d PRE=%d NeurAccum=%g input contribution=%g weight=%g" % \
-                    (postidx, preidx, net.state.NeurAccumForTest[time][postidx], \
-                     input_contrib, net.weightsForTest[preidx, postidx - net.inputNum, time]))
-
-            # Inhibitory case
-            elif net.ConnMat[preidx, postidx, 2] < 0:
-                # Accumulator decreases as per standard formula.
-                net.state.NeurAccumForTest[time][postidx] -= \
-                    input_contrib * net.weightsForTest[preidx, postidx - net.inputNum, time]
-
-        net.state.NeurAccum[time][postidx] += v_rest
-    # Have neurons declare 'interest to fire'.
-    for neuron in range(len(net.state.NeurAccum[time])):
-        if net.state.NeurAccumForTest[time][neuron] > net.params.get('FIRETH', 0.8):
-            # Register 'interest to fire'.
-            wantToFire[neuron] = 1
-        if net.state.NeurAccumForTest[time][neuron] > net.state.voltMax[neuron]:
-            net.state.voltMaxForTest[neuron] = net.state.NeurAccumForTest[time][neuron]
-            net.state.tMax = time
-
-    if sum(wantToFire) > 0:
-        outputSpike = 1
-    else:
-        outputSpike = 0
-
-    # STAGE II: Implement constraints from net-level considerations.
-    # Example: WTA. No resitrictions from net level yet. All neurons that
-    # want to fire will fire.
-    net.state.firingCellsForTest = wantToFire
-    # Barrel shift history
-    net.state.fireHistForTest[:-1, np.where(np.array(full_stim) != 0)[0]] = \
-        net.state.fireHistForTest[1:, np.where(np.array(full_stim) != 0)[0]]
-    # Save last firing time for all cells that fired in this time step.
-    net.state.fireHistForTest[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \
-        time
-    net.state.outputFlag = outputSpike
-    # Load 'NN'.
-    net.state.fireCellsForTest[time] = full_stim
-    net.state.spikeTrain_cnt += 1
-    net.spikeTrainStep = net.state.spikeTrain_cnt
-    if net.state.spikeTrain_cnt == net.spikeTrain:
-        net.lastSpikeTrain = time
-        SumOfFireHistInOneTrain = np.sum(net.state.fireCellsForTest[time+1-net.state.spikeTrain : time+2], axis = 1)
-        FireHistInOneTrain = np.where(SumFireHistInOneTrain > 0, 1, 0)
-        net.state.errorListForTest[(time+1) // net.state.spikeTrain] = FireHistInOneTrain - outputLable
+#    for preidx in range(len(rawin)):
+#        for postidx in range(len(rawin)):
+#            if net.ConnMat[preidx, postidx, 0] != 0:
+#                w, b = net.ConnMat[preidx, postidx, 0:2]
+#                if net.state.weights[preidx, postidx - net.inputNum, time] == 0.0:
+#                    net.state.weights[preidx, postidx - net.inputNum, time] = \
+#                        1.0/net.read(w, b, "NN")
 
 def additional_data(net):
     # This function should return any additional data that might be produced
diff --git a/NeuroPack.py b/NeuroPack.py
index 84fa9e7..3f4cf3e 100644
--- a/NeuroPack.py
+++ b/NeuroPack.py
@@ -67,7 +67,7 @@ def __init__(self, NETSIZE, DEPTH, inputNum, outputNum, epochs, epochsForTesting
         self.outputFlag = 0
         self.neuronFixed = 0
         self.fixedNeuronID = -1
-        self.voltMax = np.array(NETSIZE*[0.0])
+        self.voltMax = np.array((NETSIZE - inputNum)*[0.0])
         self.voltMaxForTest = np.array(NETSIZE*[0.0])
         self.tMax = 0
         self.NeurRecov = np.zeros(shape=(epochs, NETSIZE))
@@ -211,7 +211,6 @@ def __init__(self, conn_mat, stimin, stiminForTesting, test_enable, data, params
         self.testSteps = testSteps
         self.rawin = np.array(self.NETSIZE*[0])
         self.rawin_pseudo = np.array(self.NETSIZE*[0])
-        self.outputSpike = 0
         self.neuronLocked = 0
         self.lockedNeuronID = -1
         self.Vread = HW.conf.Vread
@@ -288,7 +287,6 @@ def run(self):
         for t in range(self.tsteps):
            self.rawin = self.state.firingCells
             self.rawinPseudo = self.state.firingCellsPseudo
-            self.outputSpike = self.state.outputFlag
             if pattern_epoch_cnt == self.Pattern_epoch and self.neuronLock_enable == 1:
                 self.neuronLocked = 0
                 self.lockedNeuronID = -1
@@ -302,7 +300,6 @@ def run(self):
                 pattern_epoch_cnt += 1
             self.rawin = self.state.firingCells
             self.rawinPseudo = self.state.firingCellsPseudo
-            self.outputSpike = self.state.outputFlag
             self.log("---> Time step synapses update in trianing: %d RAWIN: %s STIMIN: %s RAWINPSEUDO: %s" % (t, self.rawin, self.stimin[:, t], self.rawinPseudo))
             self.core.plast(self, t)
             self.displayData.emit()
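# [editor's note] de_normalise_resistance (added to both cores above) is the
# inverse of normalise_weight: it maps a normalised weight back to a target
# device resistance. normalise_weight's body sits outside these hunks, so the
# version below is an assumption (a linear conductance map clipped to
# [0, WEIGHTSCALE]); PFLOOR and PCEIL are resistance bounds, and the params
# values are purely illustrative.

params = {'PFLOOR': 1e3, 'PCEIL': 1e5, 'WEIGHTSCALE': 1.0}  # illustrative only

def normalise_weight(params, w):
    # conductance w -> normalised weight in [0, WEIGHTSCALE] (assumed form)
    PCEIL = 1.0 / params['PFLOOR']   # conductance ceiling
    PFLOOR = 1.0 / params['PCEIL']   # conductance floor
    val = params['WEIGHTSCALE'] * (w - PFLOOR) / (PCEIL - PFLOOR)
    return max(0.0, min(params['WEIGHTSCALE'], val))

def de_normalise_resistance(params, w):
    # normalised weight -> conductance, then R = 1/C (mirrors the diff)
    PCEIL = 1.0 / params['PFLOOR']
    PFLOOR = 1.0 / params['PCEIL']
    C = w * (PCEIL - PFLOOR) / params['WEIGHTSCALE'] + PFLOOR
    return 1.0 / C

# Round trip: a 2 kOhm device normalises and de-normalises back to ~2 kOhm.
w = normalise_weight(params, 1.0 / 2e3)
assert abs(de_normalise_resistance(params, w) - 2e3) < 1e-6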