From 74814bede25143e312f1b602aa5bff9d33079691 Mon Sep 17 00:00:00 2001 From: Spyros Stathopoulos Date: Mon, 25 Jan 2021 18:08:29 +0000 Subject: [PATCH] Initial commit of the NeuroPack module --- .gitignore | 26 + NeuroCores/README.txt | 63 ++ NeuroCores/__init__.py | 1 + NeuroCores/core_default.py | 190 ++++ NeuroData/NeuroBase.json | 6 + NeuroData/SevenMotif.json | 13 + NeuroData/motif_connmat.txt | 8 + NeuroData/motif_stim.txt | 14 + NeuroData/seven_motif_connmat.txt | 52 ++ NeuroData/seven_motif_stim.txt | 803 ++++++++++++++++ NeuroPack.py | 1407 +++++++++++++++++++++++++++++ __init__.py | 1 + uis/nnanalysis.ui | 205 +++++ uis/nnvaravg.ui | 148 +++ uis/nnvaravgrow.ui | 135 +++ uis/nnvardiff.ui | 77 ++ uis/nnvardiffrow.ui | 131 +++ uis/nnvarsnap.ui | 118 +++ uis/nnvarsnaprow.ui | 107 +++ 19 files changed, 3505 insertions(+) create mode 100644 .gitignore create mode 100644 NeuroCores/README.txt create mode 100644 NeuroCores/__init__.py create mode 100644 NeuroCores/core_default.py create mode 100644 NeuroData/NeuroBase.json create mode 100644 NeuroData/SevenMotif.json create mode 100644 NeuroData/motif_connmat.txt create mode 100644 NeuroData/motif_stim.txt create mode 100644 NeuroData/seven_motif_connmat.txt create mode 100644 NeuroData/seven_motif_stim.txt create mode 100644 NeuroPack.py create mode 100644 __init__.py create mode 100644 uis/nnanalysis.ui create mode 100644 uis/nnvaravg.ui create mode 100644 uis/nnvaravgrow.ui create mode 100644 uis/nnvardiff.ui create mode 100644 uis/nnvardiffrow.ui create mode 100644 uis/nnvarsnap.ui create mode 100644 uis/nnvarsnaprow.ui diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..4e31c1b --- /dev/null +++ b/.gitignore @@ -0,0 +1,26 @@ +.*.swp +*.pyc +*.tmp +build/* +dist/* +dist.win32/* +*.1.gz +*.8.gz +*.1 +*.8 +man/*.1.gz +man/*.8.gz +man/*.1 +man/*.8 +*.mo +*~ +*.rej +*.orig +#*# +MANIFEST +tags +*.egg-info +deps/* +nn*.py +__pycache__/* + diff --git a/NeuroCores/README.txt 
b/NeuroCores/README.txt new file mode 100644 index 0000000..b5f1452 --- /dev/null +++ b/NeuroCores/README.txt @@ -0,0 +1,63 @@ +NeuroPack core library +====================== + +Place your NeuroPack cores in this folder. All python modules that have their +filename starting with "core_" will be regarded as a distinct core from ArC +ONE. You can also have additional modules if you so require (for example common +code shared by different cores) and as long as they are not prefixed with +"core_" they will be ignored by ArC ONE. + +A core file consists of the three functions + +* The `init` function is initial setup of the network before any operation is + done if required. For example you may want to introduce new fields on the + network object or its state for subsequent use. +* The `neurons` function implements the "evolution" of the network over each + time step. No training is done in this step. +* The `plast` (plasticity) function implements the "learning" capacity of the + network for each time step. + +If the core produces additional data that need to be saved at the end of the +execution an additional function must be implemented `additional_data` which +returns a dictionary with the parameters that need to be saved in the output +file. + +The network +----------- + +Common argument for all functions is the network itself (`net`). The network +should have the following fields defined. 
+ +* `LTP_V` and `LTP_pw`: Voltage and pulse width for potentiation +* `LTD_V` and `LTD_pw`: Voltage and pulse width for depression +* `epochs`: The total number of timesteps +* `NETSIZE`: The total number of neurons +* `LTPWIN` and `LTDWIN`: Window for LTP and LTD +* `DEPTH`: Depth of the network +* `rawin`: The raw state of all neurons +* `stimin`: The stimulus input (see NeuroData/motif_stim.txt for an example) +* `ConnMat`: The connectivity matrix (see NeuroData/motif_connmat.txt for an + example) +* `params`: A dict containing any user defined parameters defined in the base + configuration excluding `NETSIZE`, `LTPWIN`, `LTDWIN` and `DEPTH` that must + be *always* defined. By using an alternate base configuration file (see + NeuroData/Neurobase.json for the base configuration) additional parameters can + be introduced and will be available under the `params` dict. +* `state`: The internal state of the network (see below). Usually the state of + the network is what is altered during the `neurons` and `plast` steps. + +Network state +------------- +The network object has a `state` field defined. This variable described the +current status of the network and has the following fields defined. + +* `weights`: The weights of the neurons for all epochs. This should be altered + during the plasticity step as it is inherent to training the network. +* `NeurAccum`: Membrane capacitance of the network. Should be updated during + the `neurons` step. +* `fireCells`: The neuros that should fire during the plasticity step. This is + introduced from the stimulus file. `fireCells` should be updated during the + `neurons` step. +* `fireHist`: History of firing neurons. It should be updated during the + `neurons` step. 
+ diff --git a/NeuroCores/__init__.py b/NeuroCores/__init__.py new file mode 100644 index 0000000..bc751f6 --- /dev/null +++ b/NeuroCores/__init__.py @@ -0,0 +1 @@ +# NeuroPack core entry point diff --git a/NeuroCores/core_default.py b/NeuroCores/core_default.py new file mode 100644 index 0000000..1e75fb4 --- /dev/null +++ b/NeuroCores/core_default.py @@ -0,0 +1,190 @@ +import numpy as np + +# A NeuroPack core implements plasticity events and updates +# Essentialy NeuroPack will call the following functions +# +# init(network) +# for trial in trials: +# neurons(network, step) +# plast(network, step) + +# This particular core implements a LIF network, the neurons function +# calculates which neurons are set to fire and the plast function +# implements plasticity event propagation. + +# neurons and plast both take two arguments. The first one is the +# network itself (see `NeuroPack.Network`) and the second is the +# timestep. + +# This core requires `NeuroData/SevenMotif.json`. + +def normalise_weight(net, w): + PCEIL = 1.0/net.params['PFLOOR'] + PFLOOR = 1.0/net.params['PCEIL'] + + val = net.params['WEIGHTSCALE']*(float(w) - PFLOOR)/(PCEIL - PFLOOR) + + # Clamp weights in-between 0.0 and 1.0 + if val < 0.0: + return 0.0 + elif val > 1.0: + return 1.0 + else: + return val + + +def init(net): + # Renormalise weights if needed + if not net.params.get('NORMALISE', False): + return + + for postidx in range(len(net.ConnMat)): + # For every presynaptic input the neuron receives. 
+ for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]: + old_weight = net.state.weights[preidx, postidx, 0] + new_weight = normalise_weight(net, old_weight) + net.state.weights[preidx, postidx, 0] = new_weight + + +def neurons(net, time): + + rawin = net.rawin # Raw input + stimin = net.stimin[:, time] # Stimulus input for current timestep + + full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in stimin]) + net.log("**** FULL_STIM = ", full_stim) + + if time > 0: + # if this isn't the first step copy the accumulators + # from the previous step onto the new one + net.state.NeurAccum[time] = net.state.NeurAccum[time-1] + + # reset the accumulators of neurons that have already fired + for (idx, v) in enumerate(full_stim): + if v != 0: + net.state.NeurAccum[time][idx] = 0.0 + + # For this example we'll make I&F neurons - if changing this file a back-up + # is strongly recommended before proceeding. + + # -FIX- implementing 'memory' between calls to this function. + # NeurAccum = len(net.ConnMat)*[0] #Define neuron accumulators. + # Neurons that unless otherwise dictated to by net or ext input will + # fire. + wantToFire = len(net.ConnMat)*[0] + + # Gather/define other pertinent data to function of neuron. + leakage = net.params.get('LEAKAGE', 1.0) + bias = np.array(len(net.ConnMat)*[leakage]) #No active biases. + + # STAGE I: See what neurons do 'freely', i.e. without the constraints of + # WTA or generally other neurons' activities. + for postidx in range(len(net.state.NeurAccum[time])): + # Unconditionally add bias term + net.state.NeurAccum[time][postidx] += bias[postidx] + if net.state.NeurAccum[time][postidx] < 0.0: + net.state.NeurAccum[time][postidx] = 0.0 + + #For every presynaptic input the neuron receives. + for preidx in np.where(net.ConnMat[:, postidx, 0] != 0)[0]: + + # Excitatory case + if net.ConnMat[preidx, postidx, 2] > 0: + # net.log("Excitatory at %d %d" % (preidx, postidx)) + # Accumulator increases as per standard formula. 
+ net.state.NeurAccum[time][postidx] += \ + full_stim[preidx] * net.state.weights[preidx, postidx, time] + + net.log("POST=%d PRE=%d NeurAccum=%g full_stim=%g weight=%g" % \ + (postidx, preidx, net.state.NeurAccum[time][postidx], \ + full_stim[preidx], net.state.weights[preidx, postidx, time])) + + # Inhibitory case + elif net.ConnMat[preidx, postidx, 2] < 0: + # Accumulator decreases as per standard formula. + net.state.NeurAccum[time][postidx] -= \ + full_stim[preidx]*net.state.weights[preidx, postidx, time] + + # Have neurons declare 'interest to fire'. + for neuron in range(len(net.state.NeurAccum[time])): + if net.state.NeurAccum[time][neuron] > net.params.get('FIRETH', 0.8): + # Register 'interest to fire'. + wantToFire[neuron] = 1 + + # STAGE II: Implement constraints from net-level considerations. + # Example: WTA. No resitrictions from net level yet. All neurons that + # want to fire will fire. + net.state.firingCells = wantToFire + + # Barrel shift history + net.state.fireHist[:-1, np.where(np.array(full_stim) != 0)[0]] = \ + net.state.fireHist[1:, np.where(np.array(full_stim) != 0)[0]] + # Save last firing time for all cells that fired in this time step. + net.state.fireHist[net.DEPTH, np.where(np.array(full_stim) != 0)[0]] = \ + time + + # Load 'NN'. + net.state.fireCells[time] = full_stim + + +def plast(net, time): + + if time+2 > net.epochs: + return + + rawin = net.rawin # Raw input + stimin = net.stimin[:, time] # Stimulus input + + full_stim = np.bitwise_or([int(x) for x in rawin], [int(x) for x in stimin]) + + net.state.weights[:, :, time+1] = net.state.weights[:, :, time] + + # For every neuron in the raw input + for neuron in range(len(full_stim)): + + # If neuron is not set to fire (full_stim > 0) just skip the neuron + if full_stim[neuron] == 0: + continue + + # For every presynaptic input the neuron receives. 
+ for preidx in np.where(net.ConnMat[:, neuron, 0] != 0)[0]: + w,b = net.ConnMat[preidx, neuron, 0:2] + if (time - np.max(net.state.fireHist[:, preidx])) <= net.LTPWIN: + # -FIX- parametrise learning step. + # Actually, link it to bias for devices. + p = 1.0/net.pulse(w, b, net.LTP_V, net.LTP_pw) + if net.params.get('NORMALISE', False): + net.state.weights[preidx, neuron, time+1] = normalise_weight(net, p) + else: + net.state.weights[preidx, neuron, time+1] = p + net.log(" LTP --- spiking synapse %d -- %d" % (preidx, neuron)) + + # For every postsynaptic input the neuron receives. + for postidx in np.where(net.ConnMat[neuron, :, 0] != 0)[0]: + w,b=net.ConnMat[neuron,postidx,0:2] + if (time - np.max(net.state.fireHist[:, postidx])) <= net.LTDWIN: + # -FIX- parametrise learning step. + # Actually, link it to bias for devices. + p = 1.0/net.pulse(w, b, net.LTD_V, net.LTD_pw) + if net.params.get('NORMALISE', False): + net.state.weights[neuron, postidx, time+1] = normalise_weight(net, p) + else: + net.state.weights[neuron, postidx, time+1] = p + net.log(" LTD --- spiking synapse %d -- %d" % (neuron, postidx)) + + # For every valid connection between neurons, find out which the + # corresponding memristor is. Then, if the weight is still uninitialised + # take a reading and ensure that the weight has a proper value. + for preidx in range(len(rawin)): + for postidx in range(len(rawin)): + if net.ConnMat[preidx, postidx, 0] != 0: + w, b = net.ConnMat[preidx, postidx, 0:2] + if net.state.weights[preidx, postidx, time] == 0.0: + net.state.weights[preidx, postidx, time] = \ + 1.0/net.read(w, b, "NN") + + +def additional_data(net): + # This function should return any additional data that might be produced + # by this core. In this particular case there are None. 
+ return None diff --git a/NeuroData/NeuroBase.json b/NeuroData/NeuroBase.json new file mode 100644 index 0000000..2a339ea --- /dev/null +++ b/NeuroData/NeuroBase.json @@ -0,0 +1,6 @@ +{ + "LTDWIN": 1, + "LTPWIN": 1, + "NETSIZE": 5, + "DEPTH": 1 +} diff --git a/NeuroData/SevenMotif.json b/NeuroData/SevenMotif.json new file mode 100644 index 0000000..fbd5566 --- /dev/null +++ b/NeuroData/SevenMotif.json @@ -0,0 +1,13 @@ +{ + "LTDWIN": 1, + "EXTIN": 3, + "LTPWIN": 1, + "NETSIZE": 7, + "DEPTH": 1, + "PFLOOR": 4000, + "PCEIL": 9300, + "WEIGHTSCALE": 1.0, + "NORMALISE": true, + "LEAKAGE": -0.2, + "FIRETH": 0.8 +} diff --git a/NeuroData/motif_connmat.txt b/NeuroData/motif_connmat.txt new file mode 100644 index 0000000..35e106b --- /dev/null +++ b/NeuroData/motif_connmat.txt @@ -0,0 +1,8 @@ +# PREID, POSTID, W, B, TYPE +# TYPE can be either +1 (excitatory) or -1 (inhibitory) +1, 2, 1, 2, 1 +2, 3, 2, 3, 1 +3, 1, 3, 1, 1 +1, 3, 1, 3, 1 +3, 2, 3, 2, 1 +2, 1, 2, 1, 1 diff --git a/NeuroData/motif_stim.txt b/NeuroData/motif_stim.txt new file mode 100644 index 0000000..0ec062c --- /dev/null +++ b/NeuroData/motif_stim.txt @@ -0,0 +1,14 @@ +# timestamp - comma-separated list of neurons forced to spike at this timestep +11 - 1 +12 - 2 +13 - 3 +14 - 1 +15 - 2 +16 - 3 +17 - 1 +18 - 2 +19 - 3 +20 - 1 +21 - 2 +22 - 3 +23 - 1, 2 diff --git a/NeuroData/seven_motif_connmat.txt b/NeuroData/seven_motif_connmat.txt new file mode 100644 index 0000000..a010d50 --- /dev/null +++ b/NeuroData/seven_motif_connmat.txt @@ -0,0 +1,52 @@ +# PREID, POSTID, W, B, TYPE +# TYPE can be either +1 (excitatory) or -1 (inhibitory) +# this file has been autogenerated with +# +# for i in range(1,8): +# for j in range(1,8): +# if i == j: +# continue +# print("%d, %d, %d, %d, 1" % (i, j, i, j)) +# +1, 2, 1, 2, 1 +1, 3, 1, 3, 1 +1, 4, 1, 4, 1 +1, 5, 1, 5, 1 +1, 6, 1, 6, 1 +1, 7, 1, 7, 1 +2, 1, 2, 1, 1 +2, 3, 2, 3, 1 +2, 4, 2, 4, 1 +2, 5, 2, 5, 1 +2, 6, 2, 6, 1 +2, 7, 2, 7, 1 +3, 1, 3, 1, 1 +3, 2, 3, 2, 1 +3, 4, 3, 
4, 1 +3, 5, 3, 5, 1 +3, 6, 3, 6, 1 +3, 7, 3, 7, 1 +4, 1, 4, 1, 1 +4, 2, 4, 2, 1 +4, 3, 4, 3, 1 +4, 5, 4, 5, 1 +4, 6, 4, 6, 1 +4, 7, 4, 7, 1 +5, 1, 5, 1, 1 +5, 2, 5, 2, 1 +5, 3, 5, 3, 1 +5, 4, 5, 4, 1 +5, 6, 5, 6, 1 +5, 7, 5, 7, 1 +6, 1, 6, 1, 1 +6, 2, 6, 2, 1 +6, 3, 6, 3, 1 +6, 4, 6, 4, 1 +6, 5, 6, 5, 1 +6, 7, 6, 7, 1 +7, 1, 7, 1, 1 +7, 2, 7, 2, 1 +7, 3, 7, 3, 1 +7, 4, 7, 4, 1 +7, 5, 7, 5, 1 +7, 6, 7, 6, 1 diff --git a/NeuroData/seven_motif_stim.txt b/NeuroData/seven_motif_stim.txt new file mode 100644 index 0000000..6bc5d5a --- /dev/null +++ b/NeuroData/seven_motif_stim.txt @@ -0,0 +1,803 @@ +5 - 1 +10 - 1 +11 - 2 +12 - 3 +13 - 4 +14 - 5 +15 - 6 +16 - 7 +### BATCH ENDED ### +17 - 1 +18 - 2 +19 - 3 +20 - 4 +21 - 5 +22 - 6 +23 - 7 +### BATCH ENDED ### +24 - 1 +25 - 2 +26 - 3 +27 - 4 +28 - 5 +29 - 6 +30 - 7 +### BATCH ENDED ### +31 - 1 +32 - 2 +33 - 3 +34 - 4 +35 - 5 +36 - 6 +37 - 7 +### BATCH ENDED ### +38 - 1 +39 - 2 +40 - 3 +41 - 4 +42 - 5 +43 - 6 +44 - 7 +### BATCH ENDED ### +45 - 1 +46 - 2 +47 - 3 +48 - 4 +49 - 5 +50 - 6 +51 - 7 +### BATCH ENDED ### +52 - 1 +53 - 2 +54 - 3 +55 - 4 +56 - 5 +57 - 6 +58 - 7 +### BATCH ENDED ### +59 - 1 +60 - 2 +61 - 3 +62 - 4 +63 - 5 +64 - 6 +65 - 7 +### BATCH ENDED ### +66 - 1 +67 - 2 +68 - 3 +69 - 4 +70 - 5 +71 - 6 +72 - 7 +### BATCH ENDED ### +73 - 1 +74 - 2 +75 - 3 +76 - 4 +77 - 5 +78 - 6 +79 - 7 +### BATCH ENDED ### +80 - 1 +81 - 2 +82 - 3 +83 - 4 +84 - 5 +85 - 6 +86 - 7 +### BATCH ENDED ### +87 - 1 +88 - 2 +89 - 3 +90 - 4 +91 - 5 +92 - 6 +93 - 7 +### BATCH ENDED ### +94 - 1 +95 - 2 +96 - 3 +97 - 4 +98 - 5 +99 - 6 +100 - 7 +### BATCH ENDED ### +101 - 1 +102 - 2 +103 - 3 +104 - 4 +105 - 5 +106 - 6 +107 - 7 +### BATCH ENDED ### +108 - 1 +109 - 2 +110 - 3 +111 - 4 +112 - 5 +113 - 6 +114 - 7 +### BATCH ENDED ### +115 - 1 +116 - 2 +117 - 3 +118 - 4 +119 - 5 +120 - 6 +121 - 7 +### BATCH ENDED ### +122 - 1 +123 - 2 +124 - 3 +125 - 4 +126 - 5 +127 - 6 +128 - 7 +### BATCH ENDED ### +129 - 1 +130 - 2 +131 - 3 +132 - 4 +133 - 5 +134 - 6 
+135 - 7 +### BATCH ENDED ### +136 - 1 +137 - 2 +138 - 3 +139 - 4 +140 - 5 +141 - 6 +142 - 7 +### BATCH ENDED ### +143 - 1 +144 - 2 +145 - 3 +146 - 4 +147 - 5 +148 - 6 +149 - 7 +### BATCH ENDED ### +150 - 1 +151 - 2 +152 - 3 +153 - 4 +154 - 5 +155 - 6 +156 - 7 +### BATCH ENDED ### +157 - 1 +158 - 2 +159 - 3 +160 - 4 +161 - 5 +162 - 6 +163 - 7 +### BATCH ENDED ### +164 - 1 +165 - 2 +166 - 3 +167 - 4 +168 - 5 +169 - 6 +170 - 7 +### BATCH ENDED ### +171 - 1 +172 - 2 +173 - 3 +174 - 4 +175 - 5 +176 - 6 +177 - 7 +### BATCH ENDED ### +178 - 1 +179 - 2 +180 - 3 +181 - 4 +182 - 5 +183 - 6 +184 - 7 +### BATCH ENDED ### +185 - 1 +186 - 2 +187 - 3 +188 - 4 +189 - 5 +190 - 6 +191 - 7 +### BATCH ENDED ### +192 - 1 +193 - 2 +194 - 3 +195 - 4 +196 - 5 +197 - 6 +198 - 7 +### BATCH ENDED ### +199 - 1 +200 - 2 +201 - 3 +202 - 4 +203 - 5 +204 - 6 +205 - 7 +### BATCH ENDED ### +206 - 1 +207 - 2 +208 - 3 +209 - 4 +210 - 5 +211 - 6 +212 - 7 +### BATCH ENDED ### +213 - 1 +214 - 2 +215 - 3 +216 - 4 +217 - 5 +218 - 6 +219 - 7 +### BATCH ENDED ### +220 - 1 +221 - 2 +222 - 3 +223 - 4 +224 - 5 +225 - 6 +226 - 7 +### BATCH ENDED ### +227 - 1 +228 - 2 +229 - 3 +230 - 4 +231 - 5 +232 - 6 +233 - 7 +### BATCH ENDED ### +234 - 1 +235 - 2 +236 - 3 +237 - 4 +238 - 5 +239 - 6 +240 - 7 +### BATCH ENDED ### +241 - 1 +242 - 2 +243 - 3 +244 - 4 +245 - 5 +246 - 6 +247 - 7 +### BATCH ENDED ### +248 - 1 +249 - 2 +250 - 3 +251 - 4 +252 - 5 +253 - 6 +254 - 7 +### BATCH ENDED ### +255 - 1 +256 - 2 +257 - 3 +258 - 4 +259 - 5 +260 - 6 +261 - 7 +### BATCH ENDED ### +262 - 1 +263 - 2 +264 - 3 +265 - 4 +266 - 5 +267 - 6 +268 - 7 +### BATCH ENDED ### +269 - 1 +270 - 2 +271 - 3 +272 - 4 +273 - 5 +274 - 6 +275 - 7 +### BATCH ENDED ### +276 - 1 +277 - 2 +278 - 3 +279 - 4 +280 - 5 +281 - 6 +282 - 7 +### BATCH ENDED ### +283 - 1 +284 - 2 +285 - 3 +286 - 4 +287 - 5 +288 - 6 +289 - 7 +### BATCH ENDED ### +290 - 1 +291 - 2 +292 - 3 +293 - 4 +294 - 5 +295 - 6 +296 - 7 +### BATCH ENDED ### +297 - 1 +298 - 2 +299 - 3 +300 - 4 
+301 - 5 +302 - 6 +303 - 7 +### BATCH ENDED ### +304 - 1 +305 - 2 +306 - 3 +307 - 4 +308 - 5 +309 - 6 +310 - 7 +### BATCH ENDED ### +311 - 1 +312 - 2 +313 - 3 +314 - 4 +315 - 5 +316 - 6 +317 - 7 +### BATCH ENDED ### +318 - 1 +319 - 2 +320 - 3 +321 - 4 +322 - 5 +323 - 6 +324 - 7 +### BATCH ENDED ### +325 - 1 +326 - 2 +327 - 3 +328 - 4 +329 - 5 +330 - 6 +331 - 7 +### BATCH ENDED ### +332 - 1 +333 - 2 +334 - 3 +335 - 4 +336 - 5 +337 - 6 +338 - 7 +### BATCH ENDED ### +339 - 1 +340 - 2 +341 - 3 +342 - 4 +343 - 5 +344 - 6 +345 - 7 +### BATCH ENDED ### +346 - 1 +347 - 2 +348 - 3 +349 - 4 +350 - 5 +351 - 6 +352 - 7 +### BATCH ENDED ### +353 - 1 +354 - 2 +355 - 3 +356 - 4 +357 - 5 +358 - 6 +359 - 7 +### BATCH ENDED ### +360 - 1 +361 - 2 +362 - 3 +363 - 4 +364 - 5 +365 - 6 +366 - 7 +### BATCH ENDED ### +367 - 1 +368 - 2 +369 - 3 +370 - 4 +371 - 5 +372 - 6 +373 - 7 +### BATCH ENDED ### +374 - 1 +375 - 2 +376 - 3 +377 - 4 +378 - 5 +379 - 6 +380 - 7 +### BATCH ENDED ### +381 - 1 +382 - 2 +383 - 3 +384 - 4 +385 - 5 +386 - 6 +387 - 7 +### BATCH ENDED ### +388 - 1 +389 - 2 +390 - 3 +391 - 4 +392 - 5 +393 - 6 +394 - 7 +### BATCH ENDED ### +395 - 1 +396 - 2 +397 - 3 +398 - 4 +399 - 5 +400 - 6 +401 - 7 +### BATCH ENDED ### +402 - 1 +403 - 2 +404 - 3 +405 - 4 +406 - 5 +407 - 6 +408 - 7 +### BATCH ENDED ### +409 - 1 +410 - 2 +411 - 3 +412 - 4 +413 - 5 +414 - 6 +415 - 7 +### BATCH ENDED ### +416 - 1 +417 - 2 +418 - 3 +419 - 4 +420 - 5 +421 - 6 +422 - 7 +### BATCH ENDED ### +423 - 1 +424 - 2 +425 - 3 +426 - 4 +427 - 5 +428 - 6 +429 - 7 +### BATCH ENDED ### +430 - 1 +431 - 2 +432 - 3 +433 - 4 +434 - 5 +435 - 6 +436 - 7 +### BATCH ENDED ### +437 - 1 +438 - 2 +439 - 3 +440 - 4 +441 - 5 +442 - 6 +443 - 7 +### BATCH ENDED ### +444 - 1 +445 - 2 +446 - 3 +447 - 4 +448 - 5 +449 - 6 +450 - 7 +### BATCH ENDED ### +451 - 1 +452 - 2 +453 - 3 +454 - 4 +455 - 5 +456 - 6 +457 - 7 +### BATCH ENDED ### +458 - 1 +459 - 2 +460 - 3 +461 - 4 +462 - 5 +463 - 6 +464 - 7 +### BATCH ENDED ### +465 - 1 +466 - 2 
+467 - 3 +468 - 4 +469 - 5 +470 - 6 +471 - 7 +### BATCH ENDED ### +472 - 1 +473 - 2 +474 - 3 +475 - 4 +476 - 5 +477 - 6 +478 - 7 +### BATCH ENDED ### +479 - 1 +480 - 2 +481 - 3 +482 - 4 +483 - 5 +484 - 6 +485 - 7 +### BATCH ENDED ### +486 - 1 +487 - 2 +488 - 3 +489 - 4 +490 - 5 +491 - 6 +492 - 7 +### BATCH ENDED ### +493 - 1 +494 - 2 +495 - 3 +496 - 4 +497 - 5 +498 - 6 +499 - 7 +### BATCH ENDED ### +500 - 1 +501 - 2 +502 - 3 +503 - 4 +504 - 5 +505 - 6 +506 - 7 +### BATCH ENDED ### +507 - 1 +508 - 2 +509 - 3 +510 - 4 +511 - 5 +512 - 6 +513 - 7 +### BATCH ENDED ### +514 - 1 +515 - 2 +516 - 3 +517 - 4 +518 - 5 +519 - 6 +520 - 7 +### BATCH ENDED ### +521 - 1 +522 - 2 +523 - 3 +524 - 4 +525 - 5 +526 - 6 +527 - 7 +### BATCH ENDED ### +528 - 1 +529 - 2 +530 - 3 +531 - 4 +532 - 5 +533 - 6 +534 - 7 +### BATCH ENDED ### +535 - 1 +536 - 2 +537 - 3 +538 - 4 +539 - 5 +540 - 6 +541 - 7 +### BATCH ENDED ### +542 - 1 +543 - 2 +544 - 3 +545 - 4 +546 - 5 +547 - 6 +548 - 7 +### BATCH ENDED ### +549 - 1 +550 - 2 +551 - 3 +552 - 4 +553 - 5 +554 - 6 +555 - 7 +### BATCH ENDED ### +556 - 1 +557 - 2 +558 - 3 +559 - 4 +560 - 5 +561 - 6 +562 - 7 +### BATCH ENDED ### +563 - 1 +564 - 2 +565 - 3 +566 - 4 +567 - 5 +568 - 6 +569 - 7 +### BATCH ENDED ### +570 - 1 +571 - 2 +572 - 3 +573 - 4 +574 - 5 +575 - 6 +576 - 7 +### BATCH ENDED ### +577 - 1 +578 - 2 +579 - 3 +580 - 4 +581 - 5 +582 - 6 +583 - 7 +### BATCH ENDED ### +584 - 1 +585 - 2 +586 - 3 +587 - 4 +588 - 5 +589 - 6 +590 - 7 +### BATCH ENDED ### +591 - 1 +592 - 2 +593 - 3 +594 - 4 +595 - 5 +596 - 6 +597 - 7 +### BATCH ENDED ### +598 - 1 +599 - 2 +600 - 3 +601 - 4 +602 - 5 +603 - 6 +604 - 7 +### BATCH ENDED ### +605 - 1 +606 - 2 +607 - 3 +608 - 4 +609 - 5 +610 - 6 +611 - 7 +### BATCH ENDED ### +612 - 1 +613 - 2 +614 - 3 +615 - 4 +616 - 5 +617 - 6 +618 - 7 +### BATCH ENDED ### +619 - 1 +620 - 2 +621 - 3 +622 - 4 +623 - 5 +624 - 6 +625 - 7 +### BATCH ENDED ### +626 - 1 +627 - 2 +628 - 3 +629 - 4 +630 - 5 +631 - 6 +632 - 7 +### BATCH ENDED ### 
+633 - 1 +634 - 2 +635 - 3 +636 - 4 +637 - 5 +638 - 6 +639 - 7 +### BATCH ENDED ### +640 - 1 +641 - 2 +642 - 3 +643 - 4 +644 - 5 +645 - 6 +646 - 7 +### BATCH ENDED ### +647 - 1 +648 - 2 +649 - 3 +650 - 4 +651 - 5 +652 - 6 +653 - 7 +### BATCH ENDED ### +654 - 1 +655 - 2 +656 - 3 +657 - 4 +658 - 5 +659 - 6 +660 - 7 +### BATCH ENDED ### +661 - 1 +662 - 2 +663 - 3 +664 - 4 +665 - 5 +666 - 6 +667 - 7 +### BATCH ENDED ### +668 - 1 +669 - 2 +670 - 3 +671 - 4 +672 - 5 +673 - 6 +674 - 7 +### BATCH ENDED ### +675 - 1 +676 - 2 +677 - 3 +678 - 4 +679 - 5 +680 - 6 +681 - 7 +### BATCH ENDED ### +682 - 1 +683 - 2 +684 - 3 +685 - 4 +686 - 5 +687 - 6 +688 - 7 +### BATCH ENDED ### +689 - 1 +690 - 2 +691 - 3 +692 - 4 +693 - 5 +694 - 6 +695 - 7 +### BATCH ENDED ### +696 - 1 +697 - 2 +698 - 3 +699 - 4 +700 - 5 +701 - 6 +702 - 7 +### BATCH ENDED ### +703 - 1 +704 - 2 +705 - 3 +706 - 4 +707 - 5 +708 - 6 +709 - 7 +### BATCH ENDED ### +## BLOCK ENDED ## +#809 - 1 diff --git a/NeuroPack.py b/NeuroPack.py new file mode 100644 index 0000000..de290ac --- /dev/null +++ b/NeuroPack.py @@ -0,0 +1,1407 @@ +from PyQt5 import QtGui, QtCore, QtWidgets +import sys +import os +import numpy as np +import json +import re +import imp +import pkgutil +import time +import pyqtgraph as pg + +import arc1pyqt.Globals.fonts as fonts +import arc1pyqt.Globals.styles as styles +from arc1pyqt import state +HW = state.hardware +APP = state.app +CB = state.crossbar +from arc1pyqt import modutils +from arc1pyqt.modutils import BaseThreadWrapper, BaseProgPanel, \ + makeDeviceList, ModTag + +THIS_DIR = os.path.dirname(__file__) +for ui in ['nnanalysis', 'nnvaravg', 'nnvaravgrow', 'nnvardiff', + 'nnvardiffrow', 'nnvarsnap', 'nnvarsnaprow']: + + modutils.compile_ui(os.path.join(THIS_DIR, 'uis', '%s.ui' % ui), + os.path.join(THIS_DIR, '%s.py' % ui)) + +from .nnanalysis import Ui_NNAnalysis +from .nnvarsnap import Ui_NNVarSnap +from .nnvarsnaprow import Ui_NNVarSnapRow +from .nnvardiff import Ui_NNVarDiff +from 
.nnvardiffrow import Ui_NNVarDiffRow +from .nnvaravg import Ui_NNVarAvg +from .nnvaravgrow import Ui_NNVarAvgRow + +from . import NeuroCores + + +def _log(*args, **kwargs): + if bool(os.environ.get('NNDBG', False)): + print(*args, file=sys.stderr, **kwargs) + + +class NetworkState(object): + """ + NetworkState stores all the information for the state of the + training process. All history is available. + """ + + def __init__(self, NETSIZE, DEPTH, epochs, labelCounter=1): + super(NetworkState, self).__init__() + self.weight_addresses = [] + self.weights = \ + np.array(NETSIZE*[NETSIZE*[epochs*labelCounter*[0.0]]]) + self.NeurAccum = np.zeros(shape=(epochs, NETSIZE)) + self.fireCells = np.array(epochs*labelCounter*[NETSIZE*[0.0]]) + self.firingCells = np.array(NETSIZE*[0.0]) + self.fireHist = np.array((DEPTH+1)*[NETSIZE*[0.0]]) + + +class Network(BaseThreadWrapper): + + """ + This is the abstract represantation of the network. It includes all + information about the training process, such as potentiation and + depression potential, number of epochs, the connection matrix (mapping of + neurons to devices), the size of the network (`NETSIZE`), as well as + potentiation and depression windows. + + Network parameters are typically loaded from the network configuration file + which is a JSON file including all the necessary arguments to initialise the + network. The following arguments are always necessary, regardless whether they + are used by the core or not + + LTPWIN and LTDWIN: potentiation and depression potential windows + NETSIZE: network size (number of neurons) + DEPTH: network depth (layers of neurons) + + Check `NeuroData/NeuroBase.json` for the absolute minimal configuration of a + network. Arbitrary information can be included in the JSON file. For instance + see `NeuroData/SevenMotif.json` for a configuration file with additional data. + All JSON parameters are exposed to the core through the `Network.params` field. 
+ + The connection matrix (`conn_mat`) maps neuros to devices and marks them as + either excitatory or inhibitory. See `NeuroData/motif_connmat.txt` for an + example. + + Argument `stimin` denotes forced stimulus of neurons. See file + `NeuroData/motif_stim.txt` for an example of such file. Essentially it denotes + at which timestamp certain neurons are forced to fire, regardless of their + current state. Forced and induced stimulus is typically aggregated to find out + the full stimulus status, although this is a process done by the network core. + Evolution of the training process is guided almost fully by the network + cores (which are placed under `NeuroCores`). This class does not do much + apart from calling the core functions for each timestep and saving the data + at the end of the process. + + It also exposes some convenience functions such as `read` and `pulse` that + core implementers can call instead of fiddling with ArC1 internals. + + List of core-accessible values and functions + + * `Network.ConnMat`: The current connection matrix + * `Network.stimin`: Forced stimulus as loaded from the stimulus file + * `Network.epochs`: Total number of iterations + * `Network.LTP_V` and `Network.LTD_V`: Potentiation and depression + amplitude in Volts (these are set in the main neuropack UI) + * `Network.LTP_pw` and `Network.LTD_pw`: Potentiation and depression + pulse widths in seconds (again these are picked up from the main + neuropack UI) + * `Network.NETSIZE`: Size of the network + * `Network.LTPWIN` and `Network.LTDWIN`: Potentiation and depression + windows. + * `Network.DEPTH`: Depth of the network + * `Network.params`: This is a dict containing all JSON parameters as + picked up from the configuration file. + * `Network.state`: Complete history of weight and accumulators as well + as calculated neuron firings. History state population is responsibility + of the core. 
+ * `Network.pulse(w, b, A, pw)`: Function used to pulse device `(w,b)` with + a voltage pulse of amplitude `A` and pulse width `pw` (volts and seconds). + This will immediately return the new resistance of the device. + * `Network.read(w, b)`: Read-out device `(w,b)` and get its resistance. + + """ + + def __init__(self, conn_mat, stimin, data, params, tsteps, core, labelCounter=1): + super(Network, self).__init__() + + self.ConnMat = conn_mat + self.stimin = stimin + + self.LTP_V = data["LTP_V"] + self.LTP_pw = data["LTP_pw"] + self.LTD_V = data["LTD_V"] + self.LTD_pw = data["LTD_pw"] + self.epochs = data["epochs"] + self.filename = data["fname"] + + # pop the core parameters into distinct fields + self.NETSIZE = params.pop("NETSIZE") + self.LTPWIN = params.pop("LTPWIN") + self.LTDWIN = params.pop("LTDWIN") + self.DEPTH = params.pop("DEPTH") + + # and bundle the rest under self.params + self.params = params + + self.tsteps = tsteps + self.rawin = np.array(self.NETSIZE*[0]) + self.Vread = HW.conf.Vread + + self.state = NetworkState(self.NETSIZE, self.DEPTH, \ + self.epochs, labelCounter) + self.plot_counter_trigger = 100 + self.plot_counter = 0 + + self.core = self.load_core(core) + + def log(self, *args, **kwargs): + """ Write to stderr if CTSDBG is set""" + + _log(*args, **kwargs) + + def load_core(self, name): + results = imp.find_module(name, NeuroCores.__path__) + return imp.load_module(name, *results) + + @BaseThreadWrapper.runner + def run(self): + self.disableInterface.emit(True) + + self.log("Reading all devices and initialising weights") + + + # For every neuron in the system. + for postidx in range(len(self.ConnMat)): + # For every presynaptic input the neuron receives. 
+ for preidx in np.where(self.ConnMat[:, postidx, 0] != 0)[0]: + w, b=self.ConnMat[preidx, postidx, 0:2] + self.read(w,b) + self.state.weights[preidx, postidx, 0] = \ + 1.0/self.read(w, b) + # store device address and neuron ids for easy access in + # history_panel + self.state.weight_addresses.append([[w,b],[preidx,postidx]]) + self.log("Done.") + + print("Starting Neural Net simulator") + + # Start Neural Net training + + self.core.init(self) + + for t in range(self.tsteps): + self.rawin = self.state.firingCells + self.log("---> Time step: %d RAWIN: %s STIMIN: %s" % (t, self.rawin, self.stimin[:, t])) + self.core.neurons(self, t) + self.core.plast(self, t) + self.displayData.emit() + + self.log("Final reading of all devices") + # For every neuron in the system. + for postidx in range(len(self.ConnMat)): + # For every presynaptic input the neuron receives. + for preidx in np.where(self.ConnMat[:, postidx, 0] != 0)[0]: + w,b=self.ConnMat[preidx, postidx, 0:2] + self.read(w, b) + + # Save data if so requested + if self.filename is not None: + data = {} + + # metadata; this is a numpy structured array + meta = np.array([(self.epochs, self.LTP_V, self.LTP_pw, self.LTD_V, self.LTD_pw)], + dtype=[('trials', 'u8'), ('LTP_V', 'f4'), ('LTP_pw', 'f4'), + ('LTD_V', 'f4'), ('LTD_pw', 'f4')]) + data['meta'] = meta + + # standard data first + + # all weights + data['weights'] = self.state.weights + # calculated stimuli for each step + data['stimulus'] = self.stimin + # history of cells that have fired + data['fires'] = self.state.fireCells.T + # accumulator snapshots + data['accumulator'] = self.state.NeurAccum.T + + # and then any other arrays the core has produced + additional_data = self.core.additional_data(self) + + if additional_data is not None: + for (k, v) in self.core.additional_data(self): + data[k] = v + + np.savez_compressed(self.filename, **data) + + self.disableInterface.emit(False) + self.finished.emit() + + def read(self, w, b): + # read a device and return 
read value + # update interface + self.highlight.emit(w, b) + + Mnow = HW.ArC.read_one(w, b) + + self.sendData.emit(w, b, Mnow, self.Vread, 0, \ + 'S R%d V=%.1f' % (HW.conf.readmode, HW.conf.Vread)) + self.updateTree.emit(w, b) + + return Mnow + + def pulse(self, w, b, A, pw): + # apply a pulse and return + # can instead apply any voltage series + self.highlight.emit(w,b) + + Mnow = HW.ArC.pulseread_one(w, b, A, pw) + + self.sendData.emit(w, b, Mnow, A, pw, 'P') + self.updateTree.emit(w, b) + + return Mnow + + +class NeuroPack(BaseProgPanel): + + def __init__(self, short=False): + super().__init__(title="NeuroPack", + description="Flexible neural nets", short=short) + self.short = short + self.base_conf_fname = None + self.conn_matrix_fname = None + self.stim_file_fname = None + self.output_file_fname = None + self.initUI() + + fname = os.path.join(THIS_DIR, "NeuroData", "NeuroBase.json") + params = self.load_base_conf(os.path.join(THIS_DIR, "NeuroData",\ + "NeuroBase.json")) + self.apply_base_conf(params, os.path.basename(fname), fname) + + def initUI(self): + + vbox1=QtWidgets.QVBoxLayout() + + titleLabel = QtWidgets.QLabel('NeuroPack') + titleLabel.setFont(fonts.font1) + descriptionLabel = QtWidgets.QLabel('Flexible neural net application module.') + descriptionLabel.setFont(fonts.font3) + descriptionLabel.setWordWrap(True) + + isInt=QtGui.QIntValidator() + isFloat=QtGui.QDoubleValidator() + + gridLayout = QtWidgets.QGridLayout() + gridLayout.setColumnStretch(0, 3) + gridLayout.setColumnStretch(1, 1) + gridLayout.setColumnStretch(2, 1) + gridLayout.setColumnStretch(3, 1) + + #setup a line separator + lineLeft=QtWidgets.QFrame() + lineLeft.setFrameShape(QtWidgets.QFrame.VLine) + lineLeft.setFrameShadow(QtWidgets.QFrame.Raised) + lineLeft.setLineWidth(1) + gridLayout.addWidget(lineLeft, 0, 2, 2, 1) + + ################################################ LOAD ############## + self.push_load_base_conf = QtWidgets.QPushButton("Load Base conf.") + 
self.push_load_base_conf.clicked.connect(self.open_base_conf) + self.base_conf_filename = QtWidgets.QLabel("Filename") + gridLayout.addWidget(self.push_load_base_conf, 0, 1) + gridLayout.addWidget(self.base_conf_filename, 0, 0) + + self.push_load_conn_matrix = QtWidgets.QPushButton("Load Conn. Matrix") + self.push_load_conn_matrix.clicked.connect(self.open_conn_matrix) + self.matrix_filename = QtWidgets.QLabel("Filename") + gridLayout.addWidget(self.push_load_conn_matrix, 1, 1) + gridLayout.addWidget(self.matrix_filename, 1, 0) + + self.push_load_stim_file = QtWidgets.QPushButton("Load Stim. File") + self.push_load_stim_file.clicked.connect(self.open_stim_file) + self.stim_filename = QtWidgets.QLabel("Filename") + gridLayout.addWidget(self.push_load_stim_file, 2 ,1) + gridLayout.addWidget(self.stim_filename, 2, 0) + + self.check_save_data = QtWidgets.QCheckBox("Save to:") + self.check_save_data.clicked.connect(self.check_save_data_clicked) + self.push_save_filename = QtWidgets.QPushButton("No file selected") + self.push_save_filename.clicked.connect(self.load_output_file) + self.push_save_filename.setEnabled(False) + gridLayout.addWidget(self.check_save_data, 4, 0) + gridLayout.addWidget(self.push_save_filename, 4, 1) + + self.push_show_analysis_tool = QtWidgets.QPushButton("Start analysis tool") + self.push_show_analysis_tool.clicked.connect(self.startAnalysisTool) + gridLayout.addWidget(self.push_show_analysis_tool, 5, 0, 1, 2) + + #################################################################### + + ################################################## CORES ########### + + self.rulesCombo = QtWidgets.QComboBox() + for _, name, is_pkg in pkgutil.iter_modules(NeuroCores.__path__): + if not is_pkg and name.startswith("core_"): + self.rulesCombo.addItem(name.replace("core_", ""), name) + + gridLayout.addWidget(QtWidgets.QLabel("Network core:"), 3, 0) + gridLayout.addWidget(self.rulesCombo, 3, 1) + + 
#################################################################### + + leftLabels=[] + self.leftEdits=[] + + rightLabels=['LTP pulse voltage (V)', \ + 'LTP pulse width (us)',\ + 'LTD pulse voltage (V)',\ + 'LTD pulse width (us)', \ + 'Trials'] + + self.rightEdits=[] + + leftInit= [] + rightInit= ['1.1', \ + '10',\ + '-1.1',\ + '10',\ + '100',\ + '10'] + + #setup a line separator + lineLeft = QtWidgets.QFrame() + lineLeft.setFrameShape(QtWidgets.QFrame.VLine); + lineLeft.setFrameShadow(QtWidgets.QFrame.Raised); + lineLeft.setLineWidth(1) + + gridLayout.addWidget(lineLeft, 0, 2, 7, 1) + + for i in range(len(leftLabels)): + lineLabel=QtWidgets.QLabel() + lineLabel.setText(leftLabels[i]) + gridLayout.addWidget(lineLabel, i,0) + + lineEdit=QtWidgets.QLineEdit() + lineEdit.setText(leftInit[i]) + lineEdit.setValidator(isFloat) + self.leftEdits.append(lineEdit) + gridLayout.addWidget(lineEdit, i,1) + + for i in range(len(rightLabels)): + lineLabel=QtWidgets.QLabel() + lineLabel.setText(rightLabels[i]) + gridLayout.addWidget(lineLabel, i,4) + + lineEdit=QtWidgets.QLineEdit() + lineEdit.setText(rightInit[i]) + lineEdit.setValidator(isFloat) + self.rightEdits.append(lineEdit) + gridLayout.addWidget(lineEdit, i,5) + + self.LTP_V=float(self.rightEdits[0].text()) + self.LTP_pw=float(self.rightEdits[1].text())/1000000 + self.LTD_V=float(self.rightEdits[2].text()) + self.LTD_pw=float(self.rightEdits[3].text())/1000000 + + ################################################ LTD/LTP ########### + + vbox1.addWidget(titleLabel) + vbox1.addWidget(descriptionLabel) + + self.vW=QtWidgets.QWidget() + self.vW.setLayout(gridLayout) + self.vW.setContentsMargins(0,0,0,0) + + scrlArea=QtWidgets.QScrollArea() + scrlArea.setWidget(self.vW) + scrlArea.setContentsMargins(0,0,0,0) + scrlArea.setWidgetResizable(False) + scrlArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff) + + scrlArea.installEventFilter(self) + + vbox1.addWidget(scrlArea) + vbox1.addStretch() + + if not self.short: + 
def gather_params(self):
    """Load the base network configuration from the selected JSON file.

    Returns:
        dict: Parsed base configuration (expected to contain at least
        NETSIZE, DEPTH, LTPWIN and LTDWIN).
    """
    # Use a context manager so the file handle is closed promptly;
    # the original `json.load(open(...))` leaked the handle until GC.
    with open(self.base_conf_fname) as f:
        return json.load(f)
def load_base_conf(self, fname):
    """Parse a base-configuration JSON file.

    Args:
        fname: Path to the JSON configuration file.

    Returns:
        dict: Decoded JSON document.
    """
    # Context manager ensures the file is closed even if decoding
    # raises; the original `json.load(open(fname))` leaked the handle.
    with open(fname) as f:
        return json.load(f)
def load_stim_file(self, fname, NETSIZE, epochs):
    """Parse a stimulation schedule file into a spike matrix.

    Each non-comment line has the form ``timestamp - id1, id2, ...``
    where ids are 1-based neuron indices scheduled to spike at that
    time step. Lines whose timestamp is >= `epochs` stop the parse.

    Args:
        fname: Path to the stimulation text file.
        NETSIZE: Number of neurons (rows of the result).
        epochs: Number of time steps (columns of the result).

    Returns:
        numpy.ndarray: (NETSIZE, epochs) int array with 1 marking a
        scheduled spike and 0 elsewhere.
    """
    _log("Allocating stimin")
    stimin = np.zeros((NETSIZE, epochs), dtype=int)

    with open(fname, 'r') as f:
        _log("File opened")
        for line in f:
            line = line.strip()
            # Skip blank lines and comments. The original indexed
            # line[0] after strip(), which raised IndexError on an
            # empty line (strip() removes the "\n" it tested for).
            if not line or line.startswith("#"):
                continue
            # split into timestamp - list of neuron IDs due to spike
            timestamp, neuronIDs = re.split(r"\s*-\s*", line)
            timestamp = int(timestamp)
            if timestamp >= epochs:
                break
            _log(timestamp, neuronIDs)
            # 1-based ids in the file -> 0-based row indices
            for token in re.split(r"\s*,\s*", neuronIDs.strip()):
                stimin[int(token) - 1, timestamp] = 1
    return stimin
def load_conn_matrix(self, fname, NETSIZE):
    """Parse a connectivity-matrix file.

    Each non-comment line is ``pre, post, w, b, type`` with 1-based
    pre/post neuron ids; (w, b) is the crossbar address of the device
    implementing the synapse and `type` its connection type.

    Args:
        fname: Path to the connectivity text file.
        NETSIZE: Number of neurons on each side of the matrix.

    Returns:
        numpy.ndarray: (NETSIZE, NETSIZE, 3) int array; entry
        [pre, post] holds [w, b, type] (zeros where unconnected).
    """
    ConnMat = np.zeros((NETSIZE, NETSIZE, 3), dtype=int)

    with open(fname, 'r') as f:
        for line in f:
            stripped = line.strip()
            # Skip blanks and comments. The original tested line[0],
            # which raised IndexError on a final line without "\n".
            if not stripped or stripped.startswith("#"):
                continue
            # Tolerate arbitrary whitespace around commas (the
            # original required exactly ", ").
            preid, postid, w, b, type_of = re.split(r"\s*,\s*", stripped)
            ConnMat[int(preid) - 1, int(postid) - 1] = \
                [int(w), int(b), int(type_of)]

    return ConnMat
def apply_conn_matrix(self, matrix, name=None, path=None):
    """Install a loaded connectivity matrix and update bookkeeping.

    Args:
        matrix: (NETSIZE, NETSIZE, 3) connectivity array.
        name: Display name shown in the filename label (optional).
        path: Full path stored for subsequent reloads (optional).
    """
    self.ConnMat = matrix
    # The original had the two guards swapped (`path` guarded the
    # label update and `name` guarded storing the path), unlike the
    # sibling apply_stim_file. Each value now guards its own use.
    if name is not None:
        self.matrix_filename.setText(name)
    if path is not None:
        self.conn_matrix_fname = str(path)
def eventFilter(self, watched, event):
    """Keep the scroll-area contents as wide as the viewport.

    On resize, fixes the inner widget's width to the scroll area's
    width minus the vertical scrollbar so no horizontal scrollbar
    appears. `watched` is the scroll area the filter was installed on.

    Returns:
        bool: Always False so the event keeps propagating.
    """
    # Parameter renamed from `object`, which shadowed the builtin.
    if event.type() == QtCore.QEvent.Resize:
        self.vW.setFixedWidth(event.size().width() -
                              watched.verticalScrollBar().width())
    return False
def setSelected(self, status):
    """Record the row's selection state and recolour its border.

    Selected rows get a red frame, unselected rows a black one.
    """
    self.selected = status
    colour = "#F00" if status else "#000"
    self.rowFrame.setStyleSheet("#rowFrame {border: 1px solid %s}" % colour)
def removeRow(self):
    """Remove the currently selected row, if any, and clear the
    selection highlight on the remaining rows."""
    idx = self.selectedRow
    if idx is None:
        return  # nothing selected; nothing to remove

    removed = self.rows.pop(idx)
    self.mainSnapLayout.removeWidget(removed)
    removed.setParent(None)

    # reset selection state for whatever is left
    self.selectedRow = None
    for remaining in self.rows:
        remaining.setSelected(False)
NeuroVarDiffRowWidget(Ui_NNVarDiffRow, QtWidgets.QWidget): + + def __init__(self, dataset, parent=None): + super(NeuroVarDiffRowWidget, self).__init__(parent=parent) + self.dataset = None + self.selected = False + self.setupUi(self) + self.plotWidget = None + self.updateDataset(dataset) + self.setSelected(self.selected) + + self.initialStepSpinBox.valueChanged.connect(\ + lambda val: self.stepSpinBoxChanged(self.initialStepSpinBox, self.initialStepSlider, val)) + self.initialStepSlider.valueChanged.connect(\ + lambda val: self.stepSliderChanged(self.initialStepSpinBox, self.initialStepSlider, val)) + self.finalStepSpinBox.valueChanged.connect(\ + lambda val: self.stepSpinBoxChanged(self.finalStepSpinBox, self.finalStepSlider, val)) + self.finalStepSlider.valueChanged.connect(\ + lambda val: self.stepSliderChanged(self.finalStepSpinBox, self.finalStepSlider, val)) + self.variableSelectionCombo.currentIndexChanged.connect(self.variableChanged) + + def _clearGraphs(self): + for idx in reversed(range(self.graphHolderLayout.count())): + wdg = self.graphHolderLayout.itemAt(idx).widget() + self.graphHolderLayout.removeWidget(wdg) + wdg.setParent(None) + + def updateDataset(self, dataset): + + if dataset is None: + return + + self.variableSelectionCombo.clear() + + for k in dataset.files: + if k == 'meta': + continue + self.variableSelectionCombo.addItem(k, dataset[k]) + + self.initialStepSlider.setMinimum(0) + self.finalStepSlider.setMinimum(0) + self.initialStepSlider.setMaximum(int(dataset['meta']['trials'][0])-1) + self.finalStepSlider.setMaximum(int(dataset['meta']['trials'][0])-1) + + self.initialStepSpinBox.setMinimum(0) + self.finalStepSpinBox.setMinimum(0) + self.initialStepSpinBox.setMaximum(int(dataset['meta']['trials'][0])-1) + self.finalStepSpinBox.setMaximum(int(dataset['meta']['trials'][0])-1) + + def setSelected(self, status): + self.selected = status + if self.selected: + colour = "#F00" + else: + colour = "#000" + + self.rowFrame.setStyleSheet("#rowFrame 
{border: 1px solid %s}" % colour) + + def variableChanged(self, idx): + initialStep = self.initialStepSpinBox.value() + finalStep = self.finalStepSpinBox.value() + self.updatePlotToStep(initialStep, finalStep) + + def stepSpinBoxChanged(self, box, slider, val): + slider.blockSignals(True) + slider.setValue(val) + slider.blockSignals(False) + + initialStep = self.initialStepSpinBox.value() + finalStep = self.finalStepSpinBox.value() + self.updatePlotToStep(initialStep, finalStep) + + def stepSliderChanged(self, box, slider, val): + box.blockSignals(True) + box.setValue(val) + box.blockSignals(False) + + initialStep = self.initialStepSpinBox.value() + finalStep = self.finalStepSpinBox.value() + self.updatePlotToStep(initialStep, finalStep) + + def updatePlotToStep(self, initialStep, finalStep): + idx = self.variableSelectionCombo.currentIndex() + data = self.variableSelectionCombo.itemData(idx) + + self._updateGraph(data, initialStep, finalStep) + + def _updateGraph(self, data, initialStep, finalStep): + + plotArgs = {'pen': None, 'symbolPen': None, 'symbolBrush': (255,0,0), \ + 'symbol':'+'} + + # determine the plot type for the existing graph, if any + if self.plotWidget is None: + wdgDim = -1 # no widget yet + else: + if isinstance(self.plotWidget, pg.PlotWidget): + wdgDim = 2 + else: + wdgDim = 3 + + # and either generate a new plot or update the existing one + # (if dimensions match) + if wdgDim != len(data.shape): + # changed from 2 to 3D or vice-versa; need to update widget + self._clearGraphs() + if len(data.shape) == 2: + wdg = pg.PlotWidget() + diff = data.T[finalStep] - data.T[initialStep] + # regardless of where you are in time the length of + # the data will always be the same; so either finalStep + # or initialStep is good enough for the X-axis + wdg.plot(np.arange(len(data.T[finalStep]))+1, diff, **plotArgs) + elif len(data.shape) == 3: + wdg = pg.ImageView() + wdg.ui.menuBtn.hide() + wdg.ui.roiBtn.hide() + diff = data.T[finalStep] - 
data.T[initialStep] + wdg.setImage(diff) + else: + # more dimensions unable to visualise + wdg = QtWidgets.QLabel("Cannot visualise > 3 dimensions") + self.graphHolderLayout.addWidget(wdg) + self.plotWidget = wdg + else: + if wdgDim == 2: + diff = data.T[finalStep] - data.T[initialStep] + # regardless of where you are in time the length of + # the data will always be the same; so either finalStep + # or initialStep is good enough for the X-axis + self.plotWidget.plot(np.arange(len(data.T[finalStep]))+1, + diff, clear=True, **plotArgs) + elif wdgDim == 3: + diff = data.T[finalStep] - data.T[initialStep] + self.plotWidget.setImage(diff) + + +class NeuroVarDiffWidget(Ui_NNVarDiff, QtWidgets.QWidget): + + def __init__(self, dataset, parent=None): + super(NeuroVarDiffWidget, self).__init__(parent=parent) + self.dataset = dataset + + self.rows = [] + self.selectedRow = None + + self.setupUi(self) + + self.addRowButton.clicked.connect(self.addRow) + self.deleteRowButton.clicked.connect(self.removeRow) + + def mousePressEvent(self, evt): + for (idx, row) in enumerate(self.rows): + if row.underMouse(): + self.selectedRow = idx + row.setSelected(row.underMouse()) + + def updateDataset(self, dataset): + if dataset is None: + return + + for row in self.rows: + row.updateDataset(dataset) + + self.dataset = dataset + + def addRow(self): + self.rows.append(NeuroVarDiffRowWidget(self.dataset)) + self.mainSnapLayout.addWidget(self.rows[-1]) + self.rows[-1].setMinimumHeight(350) + + def removeRow(self): + if self.selectedRow is None: + return + + wdg = self.rows.pop(self.selectedRow) + self.mainSnapLayout.removeWidget(wdg) + wdg.setParent(None) + self.selectedRow = None + for row in self.rows: + row.setSelected(False) + + +class NeuroVarAvgRowWidget(Ui_NNVarAvgRow, QtWidgets.QWidget): + + def __init__(self, dataset, parent=None): + super(NeuroVarAvgRowWidget, self).__init__(parent=parent) + self.dataset = None + self.selected = False + self.setupUi(self) + self.plotWidget = None + 
self.updateDataset(dataset) + self.setSelected(self.selected) + + self.fromStepSpinBox.valueChanged.connect(\ + lambda val: self.stepSpinBoxChanged(self.fromStepSpinBox, self.fromStepSlider, val)) + self.fromStepSlider.valueChanged.connect(\ + lambda val: self.stepSliderChanged(self.fromStepSpinBox, self.fromStepSlider, val)) + self.toStepSpinBox.valueChanged.connect(\ + lambda val: self.stepSpinBoxChanged(self.toStepSpinBox, self.toStepSlider, val)) + self.toStepSlider.valueChanged.connect(\ + lambda val: self.stepSliderChanged(self.toStepSpinBox, self.toStepSlider, val)) + self.variableSelectionCombo.currentIndexChanged.connect(self.variableChanged) + + def _clearGraphs(self): + for idx in reversed(range(self.graphHolderLayout.count())): + wdg = self.graphHolderLayout.itemAt(idx).widget() + self.graphHolderLayout.removeWidget(wdg) + wdg.setParent(None) + + def updateDataset(self, dataset): + + if dataset is None: + return + + self.variableSelectionCombo.clear() + + for k in dataset.files: + if k == 'meta': + continue + self.variableSelectionCombo.addItem(k, dataset[k]) + + self.fromStepSlider.setMinimum(0) + self.toStepSlider.setMinimum(0) + self.fromStepSlider.setMaximum(int(dataset['meta']['trials'][0])-1) + self.toStepSlider.setMaximum(int(dataset['meta']['trials'][0])-1) + + self.fromStepSpinBox.setMinimum(0) + self.toStepSpinBox.setMinimum(0) + self.fromStepSpinBox.setMaximum(int(dataset['meta']['trials'][0])-1) + self.toStepSpinBox.setMaximum(int(dataset['meta']['trials'][0])-1) + + def setSelected(self, status): + self.selected = status + if self.selected: + colour = "#F00" + else: + colour = "#000" + + self.rowFrame.setStyleSheet("#rowFrame {border: 1px solid %s}" % colour) + + def variableChanged(self, idx): + fromStep = self.fromStepSpinBox.value() + toStep = self.toStepSpinBox.value() + self.updatePlotToStep(fromStep, toStep) + + def stepSpinBoxChanged(self, box, slider, val): + slider.blockSignals(True) + slider.setValue(val) + 
slider.blockSignals(False) + + fromStep = self.fromStepSpinBox.value() + toStep = self.toStepSpinBox.value() + self.updatePlotToStep(fromStep, toStep) + + def stepSliderChanged(self, box, slider, val): + box.blockSignals(True) + box.setValue(val) + box.blockSignals(False) + + fromStep = self.fromStepSpinBox.value() + toStep = self.toStepSpinBox.value() + self.updatePlotToStep(fromStep, toStep) + + def setSteps(self, fromStep, toStep): + self.fromStepSlider.blockSignals(True) + self.toStepSlider.blockSignals(True) + self.fromStepSpinBox.blockSignals(True) + self.toStepSpinBox.blockSignals(True) + + self.fromStepSlider.setValue(fromStep) + self.fromStepSpinBox.setValue(fromStep) + self.toStepSlider.setValue(toStep) + self.toStepSpinBox.setValue(toStep) + self.updatePlotToStep(fromStep, toStep) + + self.fromStepSlider.blockSignals(False) + self.toStepSlider.blockSignals(False) + self.fromStepSpinBox.blockSignals(False) + self.toStepSpinBox.blockSignals(False) + + def updatePlotToStep(self, fromStep, toStep): + idx = self.variableSelectionCombo.currentIndex() + data = self.variableSelectionCombo.itemData(idx) + + self._updateGraph(data, fromStep, toStep) + + def _updateGraph(self, data, fromStep, toStep): + + if toStep < fromStep: + # swap variables if from > to + toStep, fromStep = fromStep, toStep + + plotArgs = {'pen': None, 'symbolPen': None, 'symbolBrush': (255,0,0), \ + 'symbol':'+'} + + # determine the plot type for the existing graph, if any + if self.plotWidget is None: + wdgDim = -1 # no widget yet + else: + if isinstance(self.plotWidget, pg.PlotWidget): + wdgDim = 2 + else: + wdgDim = 3 + + # and either generate a new plot or update the existing one + # (if dimensions match) + if wdgDim != len(data.shape): + # changed from 2 to 3D or vice-versa; need to update widget + self._clearGraphs() + if len(data.shape) == 2: + wdg = pg.PlotWidget() + # nothing to average between identical timesteps + if fromStep == toStep: + avg = data[:,fromStep] + else: + avg = 
np.average(data[:, fromStep:toStep], axis=1) + # regardless of where you are in time the length of + # the data will always be the same; so either finalStep + # or initialStep is good enough for the X-axis + wdg.plot(np.arange(len(data.T[fromStep]))+1, avg, **plotArgs) + elif len(data.shape) == 3: + wdg = pg.ImageView() + wdg.ui.menuBtn.hide() + wdg.ui.roiBtn.hide() + if fromStep == toStep: + avg = data[:,:,fromStep] + else: + avg = np.average(data[:,:,fromStep:toStep], axis=2) + wdg.setImage(avg) + else: + # more dimensions unable to visualise + wdg = QtWidgets.QLabel("Cannot visualise > 3 dimensions") + self.graphHolderLayout.addWidget(wdg) + self.plotWidget = wdg + else: + if wdgDim == 2: + if fromStep == toStep: + avg = data[:,fromStep] + else: + avg = np.average(data[:, fromStep:toStep], axis=1) + # regardless of where you are in time the length of + # the data will always be the same; so either finalStep + # or initialStep is good enough for the X-axis + self.plotWidget.plot(np.arange(len(data.T[fromStep]))+1, + avg, clear=True, **plotArgs) + elif wdgDim == 3: + if fromStep == toStep: + avg = data[:,:,fromStep] + else: + avg = np.average(data[:,:,fromStep:toStep], axis=2) + self.plotWidget.setImage(avg) + + +class NeuroVarAvgWidget(Ui_NNVarAvg, QtWidgets.QWidget): + + def __init__(self, dataset, parent=None): + super(NeuroVarAvgWidget, self).__init__(parent=parent) + self.dataset = dataset + + self.rows = [] + self.selectedRow = None + + self.setupUi(self) + + self.lockStepsCheckBox.stateChanged.connect(self.lockStepsChecked) + self.globalFromSpinBox.valueChanged.connect(self.globalFromSpinBoxChanged) + self.globalToSpinBox.valueChanged.connect(self.globalToSpinBoxChanged) + self.globalFromSlider.valueChanged.connect(self.globalFromSliderChanged) + self.globalToSlider.valueChanged.connect(self.globalToSliderChanged) + self.addRowButton.clicked.connect(self.addRow) + self.deleteRowButton.clicked.connect(self.removeRow) + + def mousePressEvent(self, evt): + for 
(idx, row) in enumerate(self.rows): + if row.underMouse(): + self.selectedRow = idx + row.setSelected(row.underMouse()) + + def lockStepsChecked(self): + checked = self.lockStepsCheckBox.isChecked() + self.globalFromSlider.setEnabled(checked) + self.globalToSlider.setEnabled(checked) + self.globalFromSpinBox.setEnabled(checked) + self.globalToSpinBox.setEnabled(checked) + + def globalFromSliderChanged(self, val): + toStep = self.globalToSpinBox.value() + + self.globalFromSpinBox.blockSignals(True) + self.globalFromSpinBox.setValue(val) + self.globalFromSpinBox.blockSignals(False) + self.updateGlobalSteps(val, toStep) + + def globalToSliderChanged(self, val): + fromStep = self.globalFromSpinBox.value() + + self.globalToSpinBox.blockSignals(True) + self.globalToSpinBox.setValue(val) + self.globalToSpinBox.blockSignals(False) + self.updateGlobalSteps(fromStep, val) + + def globalFromSpinBoxChanged(self, val): + toStep = self.globalToSpinBox.value() + + self.globalFromSlider.blockSignals(True) + self.globalFromSlider.setValue(val) + self.globalFromSlider.blockSignals(False) + self.updateGlobalSteps(val, toStep) + + def globalToSpinBoxChanged(self, val): + fromStep = self.globalFromSpinBox.value() + + self.globalToSlider.blockSignals(True) + self.globalToSlider.setValue(val) + self.globalToSlider.blockSignals(False) + self.updateGlobalSteps(fromStep, val) + + def updateGlobalSteps(self, fromStep, toStep): + for row in self.rows: + row.setSteps(fromStep, toStep) + + def updateDataset(self, dataset): + if dataset is None: + return + + for row in self.rows: + row.updateDataset(dataset) + + self.dataset = dataset + + def addRow(self): + self.rows.append(NeuroVarAvgRowWidget(self.dataset)) + self.mainSnapLayout.addWidget(self.rows[-1]) + self.rows[-1].setMinimumHeight(350) + + def removeRow(self): + if self.selectedRow is None: + return + + wdg = self.rows.pop(self.selectedRow) + self.mainSnapLayout.removeWidget(wdg) + wdg.setParent(None) + self.selectedRow = None + for row 
def _openDataset(self):
    """Prompt for an .npz dataset and load it into the analysis views.

    Shows a critical message box if the selected file cannot be
    loaded or parsed; does nothing if the dialog is cancelled.
    """
    path = QtWidgets.QFileDialog().getOpenFileName(self, \
        'Open dataset', filter="*.npz")[0]

    # dialog cancelled or nothing selected
    if path is None or len(path) == 0:
        return

    try:
        self._updateFromDataset(path)
    except Exception as exc:
        # The original caught (Exception, ValueError); ValueError is
        # already an Exception subclass, so one handler suffices.
        msgbox = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, \
            "Error loading file", str(exc), parent=self)
        msgbox.exec_()
point diff --git a/uis/nnanalysis.ui b/uis/nnanalysis.ui new file mode 100644 index 0000000..8d35c12 --- /dev/null +++ b/uis/nnanalysis.ui @@ -0,0 +1,205 @@ + + + NNAnalysis + + + + 0 + 0 + 676 + 540 + + + + Form + + + + + + + + Trials + + + + + + + LTD Voltage + + + + + + + LTP Pulse Width + + + + + + + LTD Pulse Width + + + + + + + false + + + false + + + true + + + + + + + false + + + false + + + true + + + + + + + false + + + false + + + true + + + + + + + false + + + false + + + true + + + + + + + LTP Voltage + + + + + + + false + + + false + + + true + + + + + + + Dataset + + + + + + + + + false + + + false + + + true + + + + + + + Open + + + + + + + + + + + + 0 + 0 + + + + Qt::Horizontal + + + + + 175 + 16777215 + + + + #toolsListWidget::item { + padding: 5px; +} + + + + Variable snapshots + + + + + Difference snapshots + + + + + Variable averages + + + + + + + 0 + 0 + + + + + + + + + + diff --git a/uis/nnvaravg.ui b/uis/nnvaravg.ui new file mode 100644 index 0000000..4cf73ad --- /dev/null +++ b/uis/nnvaravg.ui @@ -0,0 +1,148 @@ + + + NNVarAvg + + + + 0 + 0 + 490 + 401 + + + + Form + + + + + + + + From + + + + + + + false + + + Qt::Horizontal + + + + + + + false + + + + 75 + 0 + + + + + + + + To + + + + + + + false + + + Qt::Horizontal + + + + + + + false + + + + 75 + 0 + + + + + + + + Lock steps + + + + + + + + + 0 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Add + + + + + + + Remove + + + + + + + + + true + + + + + 0 + 0 + 470 + 314 + + + + + + + + + + + + + + + diff --git a/uis/nnvaravgrow.ui b/uis/nnvaravgrow.ui new file mode 100644 index 0000000..f071ef9 --- /dev/null +++ b/uis/nnvaravgrow.ui @@ -0,0 +1,135 @@ + + + NNVarAvgRow + + + + 0 + 0 + 448 + 351 + + + + Form + + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + QFrame::Box + + + QFrame::Plain + + + + + + + + + 0 + 0 + + + + Variable + + + + + + + + + + + + From + + + + + + + Qt::Horizontal + + + + + + + + 75 + 0 + + + + + + + + To + + + + + + + Qt::Horizontal + + + + + + + + 75 + 0 + + 
+ + + + + + + + + + + 0 + 0 + + + + + + + + + + + + + + + + + diff --git a/uis/nnvardiff.ui b/uis/nnvardiff.ui new file mode 100644 index 0000000..2632b0e --- /dev/null +++ b/uis/nnvardiff.ui @@ -0,0 +1,77 @@ + + + NNVarDiff + + + + 0 + 0 + 490 + 401 + + + + Form + + + + + + 0 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Add + + + + + + + Remove + + + + + + + + + true + + + + + 0 + 0 + 470 + 346 + + + + + + + + + + + + + + + diff --git a/uis/nnvardiffrow.ui b/uis/nnvardiffrow.ui new file mode 100644 index 0000000..a580023 --- /dev/null +++ b/uis/nnvardiffrow.ui @@ -0,0 +1,131 @@ + + + NNVarDiffRow + + + + 0 + 0 + 581 + 407 + + + + Form + + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + QFrame::Box + + + QFrame::Plain + + + + + + + + Qt::Horizontal + + + + + + + Step #2 + + + + + + + Qt::Horizontal + + + + + + + Variable + + + + + + + + 0 + 0 + + + + Step #1 + + + + + + + + 75 + 0 + + + + + + + + + 75 + 0 + + + + + + + + + + + + + + 0 + 0 + + + + + + + + + + + + + + + + + diff --git a/uis/nnvarsnap.ui b/uis/nnvarsnap.ui new file mode 100644 index 0000000..5d84e06 --- /dev/null +++ b/uis/nnvarsnap.ui @@ -0,0 +1,118 @@ + + + NNVarSnap + + + + 0 + 0 + 490 + 401 + + + + Form + + + + + + + + Step + + + + + + + false + + + Qt::Horizontal + + + + + + + false + + + + 75 + 0 + + + + + + + + Lock steps + + + + + + + + + 0 + + + + + Qt::Horizontal + + + + 40 + 20 + + + + + + + + Add + + + + + + + Remove + + + + + + + + + true + + + + + 0 + 0 + 470 + 314 + + + + + + + + + + + + + + + diff --git a/uis/nnvarsnaprow.ui b/uis/nnvarsnaprow.ui new file mode 100644 index 0000000..125c2be --- /dev/null +++ b/uis/nnvarsnaprow.ui @@ -0,0 +1,107 @@ + + + NNVarSnapRow + + + + 0 + 0 + 581 + 426 + + + + Form + + + + 0 + + + 0 + + + 0 + + + 0 + + + 0 + + + + + QFrame::Box + + + QFrame::Plain + + + + + + + + Variable + + + + + + + + 0 + 0 + + + + Step + + + + + + + + + + Qt::Horizontal + + + + + + + + 75 + 0 + + + + + + + + + + + 0 + 0 + + + + + + + + + + + + + + + + +