diff --git a/extras/q15.16_to_string.c b/extras/q15.16_to_string.c
new file mode 100644
index 000000000..e847eb706
--- /dev/null
+++ b/extras/q15.16_to_string.c
@@ -0,0 +1,104 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/**
+ * @brief Converts a 32-bit Q15.16 fixed-point number to a decimal string.
+ *
+ * This function extracts the sign, integer, and fractional parts of the Q15.16
+ * number without using floating-point arithmetic. It then constructs a string
+ * representation.
+ *
+ * @param q_num The 32-bit Q15.16 fixed-point number.
+ * @param buffer A character buffer to store the resulting string.
+ * @param buffer_size The size of the buffer.
+ */
+void q15_16_to_string(int32_t q_num, char *buffer, size_t buffer_size) {
+    if (buffer == NULL || buffer_size == 0) {
+        return;
+    }
+
+    // Determine the sign and work with the absolute value for calculations.
+    // The sign bit is the MSB (bit 31). Note: q_num == INT32_MIN cannot be
+    // negated in 32 bits; that single edge case is not handled here.
+    int is_negative = (q_num < 0);
+    if (is_negative) {
+        q_num = -q_num;
+    }
+
+    // Extract the integer part by right-shifting by 16 bits.
+    // The integer part occupies bits 16 to 30 (15 bits).
+    int32_t integer_part = q_num >> 16;
+
+    // Extract the fractional part by masking the lower 16 bits.
+    // The fractional part occupies bits 0 to 15 (16 bits).
+    int32_t fractional_part = q_num & 0xFFFF;
+
+    // To convert the fractional part to a decimal string without floats, we
+    // treat it as a fraction of 2^16 (65536).
+    // For example, if the fractional part is 32768, the value is 32768/65536 = 0.5.
+    // To get a decimal string with, say, 5 digits, we multiply by 10^5 and
+    // then divide by 65536, using a 64-bit integer to avoid overflow.
+    // The decimal fraction is thus (fractional_part * 100000) / 65536.
+    int64_t decimal_fraction = ((int64_t)fractional_part * 100000 + 32768) >> 16;
+    // We add 32768 (0.5 * 65536) for rounding before the right shift by 16,
+    // which is equivalent to dividing by 65536. This is a common fixed-point
+    // rounding technique.
+
+    // Start printing the string.
+    char *ptr = buffer;
+    if (is_negative) {
+        *ptr++ = '-';
+    }
+
+    // Print the integer part. NOTE: this assumes the buffer is large enough;
+    // snprintf returns the number of characters that *would* have been
+    // written, so ptr can overshoot the buffer end on truncation.
+    ptr += snprintf(ptr, buffer_size - (ptr - buffer), "%d.", integer_part);
+
+    // Print the fractional part with leading zeros to maintain precision.
+    snprintf(ptr, buffer_size - (ptr - buffer), "%05lld", (long long)decimal_fraction);
+}
+
+/**
+ * @brief Main function to demonstrate the conversion.
+ */
+int main() {
+    // Test cases for various Q15.16 numbers
+    // 1. A positive number: 10.5
+    //    10 << 16 = 655360
+    //    0.5 * 2^16 = 0.5 * 65536 = 32768 = 0x8000
+    //    10.5 = (10 << 16) + 32768 = 688128 = 0xA8000
+    int32_t q_positive = 688128;
+
+    // 2. A negative number: -2.75
+    //    2 << 16 = 131072
+    //    0.75 * 2^16 = 0.75 * 65536 = 49152 = 0xC000
+    //    -2.75 = - (131072 + 49152) = -180224 = -0x2C000
+    int32_t q_negative = -180224;
+
+    // 3. A small number: 0.001
+    //    0.001 * 2^16 = 0.001 * 65536 = 65.536 -> round to 66
+    int32_t q_small = 66;
+
+    // 4.
A large number: 30000.12345 + // 30000 << 16 = 1966080000 + // 0.12345 * 2^16 = 8090.88 -> round to 8091 = 0x1F9B + // 1966080000 + 8091 = 1966088091 = 0x75231F9B + int32_t q_large = 1966088091; + + // Buffer to hold the resulting strings + char result_buffer[50]; + + // Convert and print each number + q15_16_to_string(q_positive, result_buffer, sizeof(result_buffer)); + printf("Q15.16 value 0x%X -> %s\n", q_positive, result_buffer); + + q15_16_to_string(q_negative, result_buffer, sizeof(result_buffer)); + printf("Q15.16 value 0x%X -> %s\n", q_negative, result_buffer); + + q15_16_to_string(q_small, result_buffer, sizeof(result_buffer)); + printf("Q15.16 value 0x%X -> %s\n", q_small, result_buffer); + + q15_16_to_string(q_large, result_buffer, sizeof(result_buffer)); + printf("Q15.16 value 0x%X -> %s\n", q_large, result_buffer); + + return 0; +} diff --git a/models/neurons/iaf_psc_delta_neuron.nestml b/models/neurons/iaf_psc_delta_neuron.nestml deleted file mode 100644 index 392d76f4a..000000000 --- a/models/neurons/iaf_psc_delta_neuron.nestml +++ /dev/null @@ -1,103 +0,0 @@ -# iaf_psc_delta - Current-based leaky integrate-and-fire neuron model with delta-kernel post-synaptic currents -# ############################################################################################################ -# -# Description -# +++++++++++ -# -# iaf_psc_delta is an implementation of a leaky integrate-and-fire model -# where the potential jumps on each spike arrival. -# -# The threshold crossing is followed by an absolute refractory period -# during which the membrane potential is clamped to the resting potential. -# -# Spikes arriving while the neuron is refractory, are discarded by -# default. If the property ``with_refr_input`` is set to true, such -# spikes are added to the membrane potential at the end of the -# refractory period, dampened according to the interval between -# arrival and end of refractoriness. -# -# The general framework for the consistent formulation of systems with -# neuron like dynamics interacting by point events is described in -# [1]_. A flow chart can be found in [2]_. -# -# -# References -# ++++++++++ -# -# .. [1] Rotter S, Diesmann M (1999). Exact simulation of -# time-invariant linear systems with applications to neuronal -# modeling. Biologial Cybernetics 81:381-402. -# DOI: https://doi.org/10.1007/s004220050570 -# .. [2] Diesmann M, Gewaltig M-O, Rotter S, & Aertsen A (2001). State -# space analysis of synchronous spiking in cortical neural -# networks. Neurocomputing 38-40:565-571. -# DOI: https://doi.org/10.1016/S0925-2312(01)00409-X -# -# -# See also -# ++++++++ -# -# iaf_psc_alpha, iaf_psc_exp -# -# -# Copyright statement -# +++++++++++++++++++ -# -# This file is part of NEST. -# -# Copyright (C) 2004 The NEST Initiative -# -# NEST is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 2 of the License, or -# (at your option) any later version. -# -# NEST is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with NEST. If not, see . 
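As a companion to the `q15.16_to_string.c` demo above, the packing direction can be sketched as well. This helper is not part of the patch; its name and the n/100000 fraction convention (chosen to match the five digits printed by `%05lld`) are assumptions for illustration:

```c
#include <stdint.h>

/* Hypothetical inverse of q15_16_to_string(): pack an integer part and a
 * decimal fraction given as n/100000 into Q15.16. Rounding adds half the
 * divisor before dividing, mirroring the +32768 trick in the converter. */
static int32_t q15_16_from_parts(int32_t int_part, uint32_t frac_100k, int negative) {
    int32_t frac_bits = (int32_t)(((uint64_t)frac_100k * 65536u + 50000u) / 100000u);
    int32_t q = (int_part << 16) + frac_bits;
    return negative ? -q : q;
}
```

For instance, `q15_16_from_parts(10, 50000, 0)` yields 688128, the `q_positive` test value above, and `q15_16_from_parts(2, 75000, 1)` yields -180224.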
-# -model iaf_psc_delta_neuron: - state: - V_m mV = E_L # Membrane potential - refr_t ms = 0 ms # Refractory period timer - - equations: - kernel K_delta = delta(t) - V_m' = -(V_m - E_L) / tau_m + convolve(K_delta, spikes) * (mV / ms) + (I_e + I_stim) / C_m - refr_t' = -1e3 * ms/s # refractoriness is implemented as an ODE, representing a timer counting back down to zero. XXX: TODO: This should simply read ``refr_t' = -1 / s`` (see https://github.com/nest/nestml/issues/984) - - parameters: - tau_m ms = 10 ms # Membrane time constant - C_m pF = 250 pF # Capacity of the membrane - refr_T ms = 2 ms # Duration of refractory period - E_L mV = -70 mV # Resting membrane potential - V_reset mV = -70 mV # Reset potential of the membrane - V_th mV = -55 mV # Spike threshold - - # constant external input current - I_e pA = 0 pA - - input: - spikes <- spike - I_stim pA <- continuous - - output: - spike - - update: - if refr_t > 0 ms: - # neuron is absolute refractory, do not evolve V_m - integrate_odes(refr_t) - else: - # neuron not refractory - integrate_odes(V_m) - - onCondition(refr_t <= 0 ms and V_m >= V_th): - # threshold crossing - V_m = V_reset - refr_t = refr_T # start of the refractory period - emit_spike() diff --git a/models/synapses/stdp_additive_synapse.nestml b/models/synapses/stdp_additive_synapse.nestml new file mode 100644 index 000000000..948cc46a6 --- /dev/null +++ b/models/synapses/stdp_additive_synapse.nestml @@ -0,0 +1,111 @@ +# stdp_synapse - Synapse model for spike-timing dependent plasticity +# ################################################################## +# +# XXX: changed this into hard coded multiplicative STDP because SpiNNaker is having difficulty with logk and expk +# +# Description +# +++++++++++ +# +# stdp_synapse is a synapse with spike-timing dependent plasticity (as defined in [1]_). Here the weight dependence exponent can be set separately for potentiation and depression. Examples: +# +# =================== ==== ============================= +# Multiplicative STDP [2]_ mu_plus = mu_minus = 1 +# Additive STDP [3]_ mu_plus = mu_minus = 0 +# Guetig STDP [1]_ mu_plus, mu_minus in [0, 1] +# Van Rossum STDP [4]_ mu_plus = 0 mu_minus = 1 +# =================== ==== ============================= +# +# +# References +# ++++++++++ +# +# .. [1] Guetig et al. (2003) Learning Input Correlations through Nonlinear +# Temporally Asymmetric Hebbian Plasticity. Journal of Neuroscience +# +# .. [2] Rubin, J., Lee, D. and Sompolinsky, H. (2001). Equilibrium +# properties of temporally asymmetric Hebbian plasticity, PRL +# 86,364-367 +# +# .. [3] Song, S., Miller, K. D. and Abbott, L. F. (2000). Competitive +# Hebbian learning through spike-timing-dependent synaptic +# plasticity,Nature Neuroscience 3:9,919--926 +# +# .. [4] van Rossum, M. C. W., Bi, G-Q and Turrigiano, G. G. (2000). +# Stable Hebbian learning from spike timing-dependent +# plasticity, Journal of Neuroscience, 20:23,8812--8821 +# +# +# Copyright statement +# +++++++++++++++++++ +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . +# +model stdp_synapse: + state: + w real = 1 # Synaptic weight + pre_trace real = 0. + post_trace real = 0. + + parameters: + d ms = 1 ms # Synaptic transmission delay + tau_tr_pre ms = 20 ms + tau_tr_post ms = 20 ms + A_POT real = 0.01 # Potentiation factor (LTP) + A_DEP real = 0.02 # Depression factor (LTD) + eta real = .01 # learning rate + Wmax real = 50. + Wmin real = 0. + + equations: + pre_trace' = -pre_trace / tau_tr_pre + post_trace' = -post_trace / tau_tr_post + + input: + pre_spikes <- spike + post_spikes <- spike + + output: + spike(weight real, delay ms) + + onReceive(post_spikes): + post_trace += 1 + + # potentiate synapse + #w_ real = Wmax * ( w / Wmax + (lambda * ( 1. - ( w / Wmax ) )**mu_plus * pre_trace )) + #w_ real = Wmax * ( w + (lambda * ( 1. - w ) * pre_trace )) + w += eta * A_POT * pre_trace + w = min(Wmax, w) + + onReceive(pre_spikes): + pre_trace += 1 + + # depress synapse + #w_ real = Wmax * ( w / Wmax - ( alpha * lambda * ( w / Wmax )**mu_minus * post_trace )) + #w_ real = Wmax * ( w - ( alpha * lambda * w * post_trace )) + if eta * A_DEP * post_trace >= w: # prevent w from ever becoming less than 0 + w = 0. + else: + w -= eta * A_DEP * post_trace + + # w = max(Wmin, w) + + # deliver spike to postsynaptic partner + emit_spike(w, d) + + update: + integrate_odes() diff --git a/models/synapses/stdp_synapse.nestml b/models/synapses/stdp_synapse.nestml index 142abf354..323d362a5 100644 --- a/models/synapses/stdp_synapse.nestml +++ b/models/synapses/stdp_synapse.nestml @@ -1,6 +1,7 @@ # stdp_synapse - Synapse model for spike-timing dependent plasticity # ################################################################## # +# XXX: changed this into hard coded multiplicative STDP because SpiNNaker is having difficulty with logk and expk # # Description # +++++++++++ @@ -86,15 +87,19 @@ model stdp_synapse: post_trace += 1 # potentiate synapse - w_ real = Wmax * ( w / Wmax + (lambda * ( 1. - ( w / Wmax ) )**mu_plus * pre_trace )) - w = min(Wmax, w_) + #w_ real = Wmax * ( w / Wmax + (lambda * ( 1. - ( w / Wmax ) )**mu_plus * pre_trace )) + #w_ real = Wmax * ( w + (lambda * ( 1. 
- w ) * pre_trace ))
+    w += lambda * pre_trace
+    w = min(Wmax, w)
 
   onReceive(pre_spikes):
     pre_trace += 1
 
     # depress synapse
-    w_ real = Wmax * ( w / Wmax - ( alpha * lambda * ( w / Wmax )**mu_minus * post_trace ))
-    w = max(Wmin, w_)
+    #w_ real = Wmax * ( w / Wmax - ( alpha * lambda * ( w / Wmax )**mu_minus * post_trace ))
+    #w_ real = Wmax * ( w - ( alpha * lambda * w * post_trace ))
+    w -= lambda * post_trace
+    w = max(Wmin, w)
 
     # deliver spike to postsynaptic partner
     emit_spike(w, d)
diff --git a/pynestml/codegeneration/code_generator.py b/pynestml/codegeneration/code_generator.py
index d69784542..8e7c2d90a 100644
--- a/pynestml/codegeneration/code_generator.py
+++ b/pynestml/codegeneration/code_generator.py
@@ -168,6 +168,7 @@ def generate_synapses(self, synapses: Sequence[ASTModel]) -> None:
         """
         from pynestml.frontend.frontend_configuration import FrontendConfiguration
 
+
         for synapse in synapses:
             self.generate_synapse_code(synapse)
             code, message = Messages.get_code_generated(synapse.get_name(), FrontendConfiguration.get_target_path())
diff --git a/pynestml/codegeneration/nest_code_generator.py b/pynestml/codegeneration/nest_code_generator.py
index 066109e3d..351abf84d 100644
--- a/pynestml/codegeneration/nest_code_generator.py
+++ b/pynestml/codegeneration/nest_code_generator.py
@@ -120,6 +120,7 @@ class NESTCodeGenerator(CodeGenerator):
     """
 
+
     _default_options = {
         "neuron_parent_class": "ArchivingNode",
         "neuron_parent_class_include": "archiving_node.h",
@@ -253,12 +254,16 @@ def set_options(self, options: Mapping[str, Any]) -> Mapping[str, Any]:
         return ret
 
     def generate_synapse_code(self, synapse: ASTModel) -> None:
+        # special case for the delay variable
         synapse_name_stripped = removesuffix(removesuffix(synapse.name.split("_with_")[0], "_"), FrontendConfiguration.suffix)
 
         self._check_delay_variable_codegen_opt(synapse)
 
+#!! the line below causes an error: the set is not subscriptable
+#        variables_special_cases = {self.get_option("delay_variable")[synapse_name_stripped]: "get_delay()"}
+
+        variables_special_cases = {"delay_variable": self.get_option("delay_variable")}
-        variables_special_cases = {self.get_option("delay_variable")[synapse_name_stripped]: "get_delay()"}
 
         self._nest_variable_printer.variables_special_cases = variables_special_cases
         self._nest_variable_printer_no_origin.variables_special_cases = variables_special_cases
@@ -484,11 +489,18 @@ def _check_delay_variable_codegen_opt(self, synapse: ASTModel) -> None:
             return
 
-        if not (synapse_name_stripped in self.get_option("delay_variable").keys() and ASTUtils.get_variable_by_name(synapse, self.get_option("delay_variable")[synapse_name_stripped])):
-            code, message = Messages.get_delay_variable_not_found(variable_name=self.get_option("delay_variable")[synapse_name_stripped])
-            Logger.log_message(synapse, code, message, None, LoggingLevel.ERROR)
+#!!
+# not sure whether this is even needed..
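The additive rule that replaces the Guetig weight dependence in the hunks above reduces to two event handlers. Below is a minimal plain-C model of the same semantics, using float arithmetic for readability (the SpiNNaker build uses the `accum` fixed-point type instead); all names here are illustrative, not generator output:

```c
/* Plain-C model of the additive STDP onReceive blocks above. */
typedef struct {
    float w;           /* synaptic weight */
    float pre_trace;   /* decays with tau_tr_pre between events */
    float post_trace;  /* decays with tau_tr_post between events */
} stdp_state_t;

/* post_spikes handler: potentiate by the presynaptic trace, clip at Wmax. */
static void on_post_spike(stdp_state_t *s, float eta, float A_POT, float Wmax) {
    s->post_trace += 1.0f;
    s->w += eta * A_POT * s->pre_trace;
    if (s->w > Wmax) s->w = Wmax;
}

/* pre_spikes handler: depress by the postsynaptic trace, clipping at zero
 * exactly like the "prevent w from ever becoming less than 0" branch. */
static void on_pre_spike(stdp_state_t *s, float eta, float A_DEP) {
    s->pre_trace += 1.0f;
    float d = eta * A_DEP * s->post_trace;
    s->w = (d >= s->w) ? 0.0f : s->w - d;
}
```

The exponential decay of the traces (the `equations` block) happens between events and is omitted here.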
+ +# +# if not (synapse_name_stripped in self.get_option("delay_variable").keys() and ASTUtils.get_variable_by_name(synapse, self.get_option("delay_variable")[synapse_name_stripped])): +# code, message = Messages.get_delay_variable_not_found(variable_name=self.get_option("delay_variable")[synapse_name_stripped]) +# Logger.log_message(synapse, code, message, None, LoggingLevel.ERROR) + +# return + + - return def _get_model_namespace(self, astnode: ASTModel) -> Dict: namespace = {} @@ -569,6 +581,12 @@ def _get_synapse_model_namespace(self, synapse: ASTModel) -> Dict: for input_block in synapse.get_input_blocks(): all_input_port_names.extend([p.name for p in input_block.get_input_ports()]) + + xfrm = SynapsePostNeuronTransformer() + xfrm.set_options({"neuron_synapse_pairs": self.get_option("neuron_synapse_pairs")}) + namespace["post_ports"] = xfrm.get_post_port_names(synapse, None, synapse.name.removesuffix("_nestml")) + namespace["spiking_post_ports"] = xfrm.get_spiking_post_port_names(synapse, None, synapse.name) + if "paired_neuron" in dir(synapse): # synapse is being co-generated with neuron namespace["paired_neuron"] = synapse.paired_neuron @@ -591,11 +609,16 @@ def _get_synapse_model_namespace(self, synapse: ASTModel) -> Dict: namespace["continuous_post_ports"] = [v for v in post_ports if isinstance(v, tuple) or isinstance(v, list)] namespace["vt_ports"] = synapse.vt_port_names - namespace["pre_ports"] = list(set(all_input_port_names) - - set(namespace["post_ports"]) - set(namespace["vt_ports"])) + namespace["pre_ports"] = list(set(all_input_port_names) - set(namespace["post_ports"]) - set(namespace["vt_ports"])) else: - # separate (not neuron+synapse co-generated) - namespace["pre_ports"] = all_input_port_names + opts = FrontendConfiguration.get_codegen_opts() + if "neuron_synapse_pairs" in opts: + assert len(opts["neuron_synapse_pairs"]) == 1, "Only one pair supported for now!" 
+ namespace["post_ports"] = opts["neuron_synapse_pairs"][0]["post_ports"] + namespace["pre_ports"] = list(set(all_input_port_names) - set(namespace["post_ports"])) + else: + # separate (not neuron+synapse co-generated) + namespace["pre_ports"] = all_input_port_names assert len(namespace["pre_ports"]) <= 1, "Synapses only support one spiking input port" diff --git a/pynestml/codegeneration/printers/cpp_expression_printer.py b/pynestml/codegeneration/printers/cpp_expression_printer.py index e392a35b6..847eaa989 100644 --- a/pynestml/codegeneration/printers/cpp_expression_printer.py +++ b/pynestml/codegeneration/printers/cpp_expression_printer.py @@ -41,7 +41,6 @@ def print(self, node: ASTNode) -> str: if isinstance(node, ASTExpression): if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1: return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self.print_expression(node) + "))" - return self.print_expression(node) return self._simple_expression_printer.print(node) @@ -189,14 +188,13 @@ def _print_arithmetic_operator_expression(self, node: ASTExpressionNode) -> str: """ op = node.get_binary_operator() - if op.is_pow_op: - # make a dummy ASTFunctionCall so we can delegate this to the FunctionCallPrinter - dummy_ast_function_call: ASTFunctionCall = ASTNodeFactory.create_ast_function_call(callee_name="pow", args=(node.get_lhs(), node.get_rhs()), source_position=ASTSourceLocation.get_added_source_position()) - return self._simple_expression_printer._function_call_printer.print(dummy_ast_function_call) - lhs = self.print(node.get_lhs()) rhs = self.print(node.get_rhs()) + if op.is_pow_op: + # TODO: make a dummy ASTFunctionCall so we can delegate this to the FunctionCallPrinter + return "(expk(" + rhs + " * logk(" + lhs + ")))" + if op.is_plus_op: return lhs + " + " + rhs diff --git a/pynestml/codegeneration/printers/cpp_printer.py b/pynestml/codegeneration/printers/cpp_printer.py index 341597f06..db000d04d 100644 --- a/pynestml/codegeneration/printers/cpp_printer.py +++ b/pynestml/codegeneration/printers/cpp_printer.py @@ -79,6 +79,9 @@ def print_assignment(self, node: ASTAssignment) -> str: ret += '=' ret += ' ' + self.print(node.rhs) + print("IN CppPrinter:: print_assignment()") + if self.print(node.rhs) == "1": + import pdb;pdb.set_trace() return ret diff --git a/pynestml/codegeneration/printers/spinnaker_c_function_call_printer.py b/pynestml/codegeneration/printers/spinnaker_c_function_call_printer.py index be84e5187..6342ebb31 100644 --- a/pynestml/codegeneration/printers/spinnaker_c_function_call_printer.py +++ b/pynestml/codegeneration/printers/spinnaker_c_function_call_printer.py @@ -30,7 +30,8 @@ class SpinnakerCFunctionCallPrinter(FunctionCallPrinter): Printer for ASTFunctionCall in C Spinnaker API syntax. """ - def print_function_call(self, node: ASTFunctionCall) -> str: +#!! function_call was called node before + def print_function_call(self, function_call: ASTFunctionCall) -> str: r""" Converts a single handed over function call to C Spinnaker API syntax. @@ -44,8 +45,65 @@ def print_function_call(self, node: ASTFunctionCall) -> str: s The function call string in C syntax. 
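The pow branch above (in `cpp_expression_printer.py`, and in the SpiNNaker variant later in this patch) relies on the identity a**b = exp(b · ln a), which holds only for a > 0; `expk` and `logk` are the fixed-point `accum` counterparts provided by the SpiNNaker math library. A double-precision sanity check of the rewrite:

```c
#include <math.h>
#include <stdio.h>

/* Sanity check of the printer's rewrite: a ** b is emitted as
 * expk(b * logk(a)); with doubles, the same identity reads
 * exp(b * log(a)), and it is valid only for a > 0. */
int main(void) {
    double a = 2.5, b = 1.5;
    printf("pow(a, b)      : %f\n", pow(a, b));
    printf("exp(b * log(a)): %f\n", exp(b * log(a)));  /* should match */
    return 0;
}
```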
""" + + + assert isinstance(function_call, ASTFunctionCall) + +#taken from previous version of print_function_call() + + if function_call.get_name() in [PredefinedFunctions.TIME_RESOLUTION, PredefinedFunctions.TIME_TIMESTEP]: + # context dependent; we assume the template contains the necessary definitions + return 'parameter->__h' + + if function_call.get_name() == PredefinedFunctions.TIME_STEPS: + raise Exception("time_steps() function not yet implemented") + + if function_call.get_name() == PredefinedFunctions.RANDOM_NORMAL: + raise Exception("rng functions not yet implemented") + + if function_call.get_name() == PredefinedFunctions.RANDOM_UNIFORM: + raise Exception("rng functions not yet implemented") + +#structure taken from python_function_call_printer.py + function_name = self._print_function_call_format_string(function_call) + + +# import pdb + # pdb.set_trace() + + if ASTUtils.needs_arguments(function_call): + if function_call.get_name() == PredefinedFunctions.PRINT or function_call.get_name() == PredefinedFunctions.PRINT: + return function_name.format(self._print_print_statement(function_call)) + + return function_name.format(*self._print_function_call_argument_list(function_call)) + + return function_name + + + + + """ original function function_name = node.get_name() +#!! + + import pdb + pdb.set_trace() + + +#!! TODO add cases for min and max + if function_name == PredefinedFunctions.MIN: + raise Exception("min() not implemented yet") + + + if function_name == PredefinedFunctions.MAX: + raise Exception("max() not implemented yet") + +#!! TODO add case for EXP + + if function_name == PredefinedFunctions.EXP: + raise Exception("exp() not implemented yet") + if function_name in [PredefinedFunctions.TIME_RESOLUTION, PredefinedFunctions.TIME_TIMESTEP]: # context dependent; we assume the template contains the necessary definitions return 'parameter->__h' @@ -60,6 +118,18 @@ def print_function_call(self, node: ASTFunctionCall) -> str: raise Exception("rng functions not yet implemented") return super().print_function_call(node) + """ + + +#!! + def _print_function_call_argument_list(self, function_call: ASTFunctionCall) -> tuple[str, ...]: + ret = [] + + for arg in function_call.get_args(): + ret.append(self._expression_printer.print(arg)) + + return tuple(ret) + def _print_function_call_format_string(self, function_call: ASTFunctionCall) -> str: r""" @@ -75,6 +145,13 @@ def _print_function_call_format_string(self, function_call: ASTFunctionCall) -> s The function call string in C syntax. """ + +#!! + + # import pdb + # pdb.set_trace() + + function_name = function_call.get_name() if function_name == PredefinedFunctions.CLIP: diff --git a/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py b/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py index b9ba5dfc1..b86539ce1 100644 --- a/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py +++ b/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py @@ -42,13 +42,17 @@ class SpinnakerCVariablePrinter(CppVariablePrinter): r""" Variable printer for C syntax and the Spinnaker API. 
""" - - def __init__(self, expression_printer: ExpressionPrinter, with_origin: bool = True, with_vector_parameter: bool = True) -> None: + def __init__(self, expression_printer: ExpressionPrinter, with_origin: bool = True, with_vector_parameter: bool = True, variables_special_cases: Optional[Dict[str, str]] = None) -> None: super().__init__(expression_printer) self.with_origin = with_origin self.with_vector_parameter = with_vector_parameter self._state_symbols = [] + self.variables_special_cases = variables_special_cases + + + + def print_variable(self, variable: ASTVariable) -> str: """ Converts a single variable to Spinnaker processable format. @@ -58,6 +62,13 @@ def print_variable(self, variable: ASTVariable) -> str: assert isinstance(variable, ASTVariable) if isinstance(variable, ASTExternalVariable): +#!! +# import pdb +# pdb.set_trace() + + + return "state->post_trace" +# return variable.name + "________EXT" # XXX REMOVE THIS LINE raise Exception("SpiNNaker does not suport external variables") if variable.get_name() == PredefinedVariables.E_CONSTANT: diff --git a/pynestml/codegeneration/printers/spinnaker_constant_printer.py b/pynestml/codegeneration/printers/spinnaker_constant_printer.py new file mode 100644 index 000000000..066b120ac --- /dev/null +++ b/pynestml/codegeneration/printers/spinnaker_constant_printer.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# +# constant_printer.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from typing import Union + + +class SpiNNakerConstantPrinter: + r""" + """ + + def print_constant(self, const: Union[str, float, int]) -> str: + """ + Converts a single handed over constant. + :param constant_name: a constant as string. + :type constant_name: str + :return: the corresponding nest representation + """ + print("XXXXXXXX printing constant " + str(const)) + if isinstance(const, float) or isinstance(const, int): + return "(" + str(const) + " << 16)" + + return const diff --git a/pynestml/codegeneration/printers/spinnaker_cpp_expression_printer.py b/pynestml/codegeneration/printers/spinnaker_cpp_expression_printer.py new file mode 100644 index 000000000..ccd591d92 --- /dev/null +++ b/pynestml/codegeneration/printers/spinnaker_cpp_expression_printer.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +# +# spinnaker_cpp_expression_printer.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter +from pynestml.codegeneration.printers.cpp_expression_printer import CppExpressionPrinter +from pynestml.meta_model.ast_arithmetic_operator import ASTArithmeticOperator +from pynestml.meta_model.ast_bit_operator import ASTBitOperator +from pynestml.meta_model.ast_expression import ASTExpression +from pynestml.meta_model.ast_expression_node import ASTExpressionNode +from pynestml.meta_model.ast_function_call import ASTFunctionCall +from pynestml.meta_model.ast_logical_operator import ASTLogicalOperator +from pynestml.meta_model.ast_comparison_operator import ASTComparisonOperator +from pynestml.meta_model.ast_node import ASTNode +from pynestml.meta_model.ast_node_factory import ASTNodeFactory +from pynestml.utils.ast_source_location import ASTSourceLocation + +class SpiNNakerCppExpressionPrinter(CppExpressionPrinter): + r""" + Printer for ``ASTExpression`` nodes in C++ syntax. + """ + + def _print_arithmetic_operator_expression(self, node: ASTExpressionNode) -> str: + """ + Prints an arithmetic operator. + :param node: an expression with arithmetic operator + :return: a string representation + """ + op = node.get_binary_operator() + + lhs = self.print(node.get_lhs()) + rhs = self.print(node.get_rhs()) + + if op.is_pow_op: + # TODO: make a dummy ASTFunctionCall so we can delegate this to the FunctionCallPrinter + return "(expk(" + rhs + " * logk(" + lhs + ")))" + + if op.is_plus_op: + return lhs + " + " + rhs + + if op.is_minus_op: + return lhs + " - " + rhs + + if op.is_times_op: +# return "(" + lhs + " * " + rhs + " >> 16)" + return lhs + " * " + rhs + + if op.is_div_op: + #raise Exception("SpiNNaker does not feature an FPU so division is not implemented on this platform") i + print("WARNING: SpiNNaker does not feature an FPU so division is not implemented on this platform") + return "_kdivk(" + lhs + ", " + rhs + ")" + + if op.is_modulo_op: + return lhs + " % " + rhs + + raise RuntimeError("Cannot determine arithmetic operator!") diff --git a/pynestml/codegeneration/printers/spinnaker_python_variable_printer.py b/pynestml/codegeneration/printers/spinnaker_python_variable_printer.py new file mode 100644 index 000000000..be5dfde1d --- /dev/null +++ b/pynestml/codegeneration/printers/spinnaker_python_variable_printer.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# +# python_variable_printer.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . 
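The commented-out `>> 16` in the times branch and the `_kdivk` call above follow the two standard Q-format rules: widen before multiplying, and pre-shift the dividend before dividing (`_udiv64` earlier in this patch supplies the shift-and-subtract division underneath). A hedged plain-C model using a Q15.16 layout follows; note that `_kdivk` shifts by 15 because the ISO `accum` type is s16.15, and the helper names here are illustrative. The same scaling explains the `(const << 16)` emitted by `SpiNNakerConstantPrinter`, which only compiles for integer constants; a fractional constant has to be pre-scaled to its bit pattern instead, as `q_const` shows:

```c
#include <stdint.h>
#include <math.h>

typedef int32_t q15_16_t;

/* Multiply: widen to 64 bits, then drop 16 fractional bits. */
static q15_16_t q_mul(q15_16_t a, q15_16_t b) {
    return (q15_16_t)(((int64_t)a * b) >> 16);
}

/* Divide: pre-shift the dividend so the quotient keeps 16 fractional bits. */
static q15_16_t q_div(q15_16_t a, q15_16_t b) {
    return (q15_16_t)(((int64_t)a << 16) / b);
}

/* Encode a constant: 2.5 becomes 163840 (= 2.5 * 65536), not "2.5 << 16". */
static q15_16_t q_const(double x) {
    return (q15_16_t)lrint(x * 65536.0);
}
```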
+ +from __future__ import annotations + +from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter +from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter +from pynestml.codegeneration.printers.variable_printer import VariablePrinter +from pynestml.codegeneration.python_code_generator_utils import PythonCodeGeneratorUtils +from pynestml.meta_model.ast_external_variable import ASTExternalVariable +from pynestml.meta_model.ast_variable import ASTVariable +from pynestml.symbols.predefined_units import PredefinedUnits +from pynestml.symbols.predefined_variables import PredefinedVariables +from pynestml.symbols.symbol import SymbolKind +from pynestml.symbols.unit_type_symbol import UnitTypeSymbol +from pynestml.symbols.variable_symbol import BlockType +from pynestml.utils.logger import Logger, LoggingLevel +from pynestml.utils.messages import Messages + + +class SpiNNakerPythonVariablePrinter(VariablePrinter): + r""" + Variable printer for Python syntax. + """ + + def __init__(self, expression_printer: ExpressionPrinter, with_origin: bool = True, with_vector_parameter: bool = True) -> None: + super().__init__(expression_printer) + self.with_origin = with_origin + self.with_vector_parameter = with_vector_parameter + + @classmethod + def _print_python_name(cls, variable_name: str) -> str: + """ + Converts a handed over name to the corresponding Python naming guideline. This is chosen to be compatible with the naming strategy for ode-toolbox, such that the variable name in a NESTML statement like "G_ahp' += 1" will be converted into "G_ahp__d". + + :param variable_name: a single name. + :return: a string representation + """ + differential_order = variable_name.count("\"") + if differential_order > 0: + return variable_name.replace("\"", "").replace("$", "__DOLLAR") + "__" + "d" * differential_order + + return variable_name.replace("$", "__DOLLAR") + + def print_variable(self, variable: ASTVariable) -> str: + """ + Converts a single variable to nest processable format. + :param variable: a single variable. 
+ :return: a string representation + """ + assert isinstance(variable, ASTVariable) + + # print external variables (such as a variable in the synapse that needs to call the getter method on the postsynaptic partner) + if isinstance(variable, ASTExternalVariable): + _name = str(variable) + if variable.get_alternate_name(): + if not variable._altscope: + # get the value from the postsynaptic partner continuous-time buffer (for post_connected_continuous_input_ports); this has been buffered in a local temp variable starting with "__" + return variable.get_alternate_name() + + # get the value from the postsynaptic partner (without time specified) + # the disadvantage of this approach is that the time the value is to be obtained is not explicitly specified, so we will actually get the value at the end of the min_delay timestep + return "__target.get_" + variable.get_alternate_name() + "()" + + # grab the value from the postsynaptic spiking history buffer + return "start.get_" + _name + "()" + + if variable.get_name() == PredefinedVariables.E_CONSTANT: + return "math.e" + + if variable.get_name() == PredefinedVariables.PI_CONSTANT: + return "math.pi" + + symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE) + if symbol is None: + # test if variable name can be resolved to a type + if PredefinedUnits.is_unit(variable.get_complete_name()): + return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit())) + + code, message = Messages.get_could_not_resolve(variable.get_name()) + Logger.log_message(log_level=LoggingLevel.ERROR, code=code, message=message, + error_position=variable.get_source_position()) + return "" + + vector_param = "" + if self.with_vector_parameter and symbol.has_vector_parameter(): + vector_param = "[" + self._expression_printer.print(variable.get_vector_parameter()) + "]" + + if symbol.is_buffer(): + if isinstance(symbol.get_type_symbol(), UnitTypeSymbol): + units_conversion_factor = NESTUnitConverter.get_factor(symbol.get_type_symbol().unit.unit) + else: + units_conversion_factor = 1 + s = "" + if not units_conversion_factor == 1: + s += "(" + str(units_conversion_factor) + " * " + s += self._print(variable, symbol, with_origin=self.with_origin) + vector_param + s += vector_param + if not units_conversion_factor == 1: + s += ")" + return s + + if symbol.is_inline_expression: + # there might not be a corresponding defined state variable; insist on calling the getter function + return "get_" + self._print(variable, symbol, with_origin=False) + vector_param + "()" + + assert not symbol.is_kernel(), "Cannot print kernel; kernel should have been converted during code generation" + + if symbol.is_state() or symbol.is_inline_expression: + return self._print(variable, symbol, with_origin=self.with_origin) + vector_param + + return self._print(variable, symbol, with_origin=self.with_origin) + vector_param + + def _print_delay_variable(self, variable: ASTVariable): + """ + Converts a delay variable to NEST processable format + :param variable: + :return: + """ + symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE) + if symbol: + if symbol.is_state() and symbol.has_delay_parameter(): + return "get_delayed_" + variable.get_name() + "()" + return "" + + def _print(self, variable, symbol, with_origin: bool = True) -> str: + variable_name = SpiNNakerPythonVariablePrinter._print_python_name(variable.get_complete_name()) + + if symbol.is_local(): + return 
variable_name + + if variable.is_delay_variable(): + return self._print_delay_variable(variable) + + if with_origin: + return PythonCodeGeneratorUtils.print_symbol_origin(symbol, variable) % variable_name + + return "self._nestml_model_variables[\"" + variable_name + "\"]" diff --git a/pynestml/codegeneration/printers/spinnaker_synapse_c_variable_printer.py b/pynestml/codegeneration/printers/spinnaker_synapse_c_variable_printer.py new file mode 100644 index 000000000..107b75efe --- /dev/null +++ b/pynestml/codegeneration/printers/spinnaker_synapse_c_variable_printer.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# +# spinnaker_c_variable_printer.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . + +from __future__ import annotations + +from pynestml.utils.ast_utils import ASTUtils + +from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter +from pynestml.codegeneration.printers.cpp_variable_printer import CppVariablePrinter +from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter +from pynestml.codegeneration.printers.spinnaker_c_variable_printer import SpinnakerCVariablePrinter +from pynestml.codegeneration.spinnaker_code_generator_utils import SPINNAKERCodeGeneratorUtils +from pynestml.meta_model.ast_external_variable import ASTExternalVariable +from pynestml.meta_model.ast_variable import ASTVariable +from pynestml.symbols.predefined_units import PredefinedUnits +from pynestml.symbols.predefined_variables import PredefinedVariables +from pynestml.symbols.symbol import SymbolKind +from pynestml.symbols.unit_type_symbol import UnitTypeSymbol +from pynestml.symbols.variable_symbol import BlockType +from pynestml.utils.logger import Logger, LoggingLevel +from pynestml.utils.messages import Messages + + +class SpinnakerSynapseCVariablePrinter(SpinnakerCVariablePrinter): + r""" + Variable printer for C syntax and the Spinnaker API -- for synapses + """ + + def _print(self, variable: ASTVariable, symbol, with_origin: bool = True) -> str: + assert all([isinstance(s, str) for s in self._state_symbols]) + + variable_name = CppVariablePrinter._print_cpp_name(variable.get_complete_name()) + + if symbol.is_local(): + return variable_name + + if variable.is_delay_variable(): + return self._print_delay_variable(variable) + + if with_origin: + return SPINNAKERCodeGeneratorUtils.print_symbol_origin(symbol, numerical_state_symbols=self._state_symbols, for_synapse=True) % variable_name + + return variable_name diff --git a/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.h.jinja2 index 268aa61ac..3e955ab56 100644 --- a/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.h.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.h.jinja2 @@ -31,7 +31,7 @@ {%- endif %} // uncomment the next line to enable printing of detailed debug 
information -// #define DEBUG +#define DEBUG {% set ns = namespace(count=0) %} @@ -162,6 +162,10 @@ static neuron_impl_t *neuron_array; __attribute__((unused)) // Marked unused as only used sometimes static bool neuron_impl_initialise(uint32_t n_neurons) { +#ifdef DEBUG + log_info("[NESTML neuron] neuron_impl_initialise()"); +#endif + // Allocate DTCM for neuron array if (sizeof(neuron_impl_t) != 0) { neuron_array = spin1_malloc(n_neurons * sizeof(neuron_impl_t)); @@ -209,7 +213,7 @@ static void neuron_impl_store_neuron_parameters( void on_receive_block_{{ blk.port_name }}(neuron_state_t *state, neuron_input_t *input, neuron_parameter_t *parameter) { #ifdef DEBUG - log_info("on_receive_block_{{ blk.port_name }}()\n"); + log_info("[NESTML neuron] on_receive_block_{{ blk.port_name }}()\n"); #endif {% filter indent(4, True) -%} @@ -233,8 +237,12 @@ static void neuron_impl_add_inputs( } __attribute__((unused)) // Marked unused as only used sometimes -static void neuron_impl_do_timestep_update( - uint32_t timer_count, uint32_t time, uint32_t n_neurons) { +static void neuron_impl_do_timestep_update(uint32_t timer_count, uint32_t time, uint32_t n_neurons) { + +#ifdef DEBUG + log_info("[NESTML neuron] neuron_impl_do_timestep_update()\n"); +#endif + for (uint32_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) { // Get the neuron itself neuron_impl_t *neuron = &neuron_array[neuron_index]; @@ -242,9 +250,6 @@ static void neuron_impl_do_timestep_update( neuron_input_t *input = &neuron->input; neuron_parameter_t *parameter = &neuron->parameter; -#ifdef DEBUG - log_info("neuron_impl_do_timestep_update()"); -#endif // Store the recorded membrane voltage {%- for variable in neuron.get_state_symbols() %} {%- if variable.is_recordable %} diff --git a/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.py.jinja2 b/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.py.jinja2 index 7e34aa110..02efab116 100644 --- a/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.py.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/@NEURON_NAME@_impl.py.jinja2 @@ -27,6 +27,12 @@ from spynnaker.pyNN.models.neuron.implementations import AbstractNeuronImpl from spinn_utilities.overrides import overrides +#!! +import os +from pynestml.frontend.frontend_configuration import FrontendConfiguration + + + class {{neuronName}}Impl(AbstractNeuronImpl): def __init__(self, @@ -110,6 +116,16 @@ class {{neuronName}}Impl(AbstractNeuronImpl): @property @overrides(AbstractNeuronImpl.binary_name) def binary_name(self): +#!! 
+# import pdb + # pdb.set_trace() + +# install_path = FrontendConfiguration.get_install_path() + # if os.path.isfile(os.path.join(install_path, "python_models8", "model_binaries", "iaf_psc_exp_neuron_nestml__with_stdp_synapse_nestml_impl_stdp_mad_my_timing_my_weight.aplx")): + # pdb.set_trace() + # return "{{neuronName}}_impl_stdp_mad_my_timing_my_weight.aplx" + + return "{{neuronName}}_impl.aplx" @overrides(AbstractNeuronImpl.get_global_weight_scale) diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@.py.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@.py.jinja2 deleted file mode 100644 index 516b26082..000000000 --- a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@.py.jinja2 +++ /dev/null @@ -1,13 +0,0 @@ -# TODO: This should be class of SynapseDynamicSTDP with default values set to generated timing and weight -from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_stdp import SynapseDynamicsSTDP - -from python_models8.neuron.builds.{{synapseName}}_timing import MyTimingDependence -from python_models8.neuron.builds.{{synapseName}}_weight import MyWeightDependence - -class {{synapseName}}_synapse_impl(SynapseDynamicsSTDP): - - def __init__(self): - super().__init__( - timing_dependence=MyTimingDependence(my_potentiation_parameter=2., my_depression_parameter=0.1), - weight_dependence=MyWeightDependence(w_min=0., w_max=10., my_weight_parameter=0.5), - ) diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.c.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.c.jinja2 index a36f0d12c..7f1047492 100644 --- a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.c.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.c.jinja2 @@ -1,91 +1,124 @@ -#include "{{synapseName}}_impl.h" +#include "{{ synapseName }}_impl.h" + +// uncomment the next line to enable printing of detailed debug information +#define DEBUG + + +//! 
Global plasticity parameter data (in DTCM) +plasticity_weight_region_data_t *plasticity_weight_region_data; + + + + + + +uint64_t _udiv64(uint64_t num, uint64_t den) { + + uint64_t quot = 0; + uint64_t qbit = 1; + + if (den == 0) { + /* Intentional divide by zero, without + triggering a compiler warning which + would abort the build */ + return 1/((unsigned)den); + } + + /* Left-justify denominator and count shift */ + while ((int64_t) den >= 0) { + den <<= 1; + qbit <<= 1; + } + + while (qbit) { + if (den <= num) { + num -= den; + quot += qbit; + } + den >>= 1; + qbit >>= 1; + } + return quot; +} + + + + +//static inline accum _kdivk(accum a, accum b) { +accum _kdivk(accum a, accum b) { + if (a < 0k && b < 0k) { + const accum ret = _kdivk(-1k * a, -1k * b); +// log_info("\t\tkdivk: (1) returning %k / %k = 0x%x = %k", a, b, ret, ret); + return ret; + } + + if (a < 0k) { + const accum ret = -1k * _kdivk(-1k * a, b); +// log_info("\t\tkdivk: (2) returning %k / %k = 0x%x = %k", a, b, ret, ret); + return ret; + } + + if (b < 0k) { + const accum ret = -1k * _kdivk(a, -1k * b); +// log_info("\t\tkdivk: (3) returning %k / %k = 0x%x = %k", a, b, ret, ret); + return ret; + } + + const accum ret = kbits((uint32_t)_udiv64(((uint64_t) bitsk(a) << 15), (uint64_t) bitsk(b))); +// log_info("\t\tkdivk: (4) returning %k / %k = 0x%x = %k", a, b, ret, ret); + + return ret; +} + + -#include -// Only adds prototypes for weight depression and potentiation -> maybe remove -// TODO: Ensure this includes and implements the correct interface -#include -// Should be good at the moment -// defines plastic_synapse_t -> plastic part of synapse -// similar to state for nestml, but that also contains pre and posttrace -// TODO: Choose the required synapse structure -#include -#include -#include -//layout of plastic area of row -// fixed layout + + + +//! The format of the plastic data region of a synaptic row struct synapse_row_plastic_data_t { //! The pre-event history pre_event_history_t history; //! The per-synapse information - plastic_synapse_t synapses[]; + synapse_word_t synapses[]; }; - -// Possibly irrelevant -//static stdp_params params; - extern uint32_t skipped_synapses; -/*** in post_events.h -typedef struct { - //! Number of events stored (minus one) - uint32_t count_minus_one; - //! Event times - uint32_t times[MAX_POST_SYNAPTIC_EVENTS]; - //! Event traces - post_trace_t traces[MAX_POST_SYNAPTIC_EVENTS]; -} post_event_history_t; -***/ -static post_event_history_t *post_event_history; - -//! Count of pre-synaptic events relevant to plastic processing -static uint32_t num_plastic_pre_synaptic_events = 0; - -//! 
Count of times that the plastic math became saturated -static uint32_t plastic_saturation_count = 0; - -// Global weight plasticity data for each synapse -plasticity_weight_region_data_t *plasticity_weight_region_data; - -uint32_t *weight_shift; -// TAG: INIT -bool synapse_dynamics_initialise ( +bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, uint32_t *ring_buffer_to_input_buffer_left_shifts) { - // Read parameters from pyNN - plasticity_weight_region_data_t *config = (plasticity_weight_region_data_t *) address; + char _string_buf[20]; - // Create space in dtcm - plasticity_weight_region_data_t *dtcm_copy = plasticity_weight_region_data = - spin1_malloc(sizeof(plasticity_weight_region_data_t) * n_synapse_types); - if (dtcm_copy == NULL) { - return false; - } + //log_info("[NESTML synapse] In synapse_dynamics_initialise(n_neurons = %d, n_synapse_types = %d)", n_neurons, n_synapse_types); + + // Load parameters + plasticity_weight_region_data_t *config = (plasticity_weight_region_data_t *) address; + plasticity_weight_region_data = spin1_malloc(sizeof(plasticity_weight_region_data_t) * n_synapse_types); - // Shift parameter to convert int32 to s1615 - weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); - if (weight_shift == NULL) { + if (plasticity_weight_region_data == NULL) { + log_error("Could not initialise weight region data"); return false; } - // Initialise created space with read parameters - for (uint32_t s = 0; s < n_synapse_types; s++, config++) { -{%- for sym in synapse.get_parameter_symbols() | sort(attribute="name") %} - dtcm_copy[s].{{ sym.get_symbol_name()}} = config->{{ sym.get_symbol_name()}}; + for (uint32_t s = 0; s < 1; s++, config++) { // XXX: only permit one synapse type for now +{%- for sym in synapse.get_parameter_symbols() + synapse.get_internal_symbols() %} +{%- if (not sym.name == "__h") and (not sym.name.startswith("__P")) %} + plasticity_weight_region_data[s].{{ sym.get_symbol_name() }} = config->{{ sym.get_symbol_name() }}; +#ifdef DEBUG +// log_info("\t[NESTML synapse] \tSynapse type %u: Parameter {{ sym.get_symbol_name() }}, Raw value: %x, Value: %k", s, plasticity_weight_region_data[s].{{ sym.get_symbol_name() }}, plasticity_weight_region_data[s].{{ sym.get_symbol_name() }}); +#endif +{%- endif %} {%- endfor %} - - // Copy weight shift - weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; } - // Initialise post event buffers post_event_history = post_events_init_buffers(n_neurons); if (post_event_history == NULL) { return false; @@ -94,612 +127,410 @@ bool synapse_dynamics_initialise ( return true; } -/***** -Methods from stdp common --> Could be we can reuse them, without suing their initialise method -*****/ +//--------------------------------------- +// Synaptic row plastic-region implementation +//--------------------------------------- +void synapse_dynamics_print_plastic_synapses( + synapse_row_plastic_data_t *plastic_region_data, + synapse_row_fixed_part_t *fixed_region, + uint32_t *ring_buffer_to_input_buffer_left_shifts) { + return; -input_t synapse_dynamics_get_intrinsic_bias( - UNUSED uint32_t time, UNUSED index_t neuron_index) { - return ZERO; + __use(plastic_region_data); + __use(fixed_region); + __use(ring_buffer_to_input_buffer_left_shifts); } -uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { - return num_plastic_pre_synaptic_events; +//--------------------------------------- +//! \brief Get the axonal delay +//! 
\param[in] x: The packed plastic synapse control word +//! \return the axonal delay +static inline index_t sparse_axonal_delay(uint32_t x) { +#if 1 + // No axonal delay, ever + __use(x); + return 0; +#else + return (x >> synapse_delay_index_type_bits) & SYNAPSE_AXONAL_DELAY_MASK; +#endif } -uint32_t synapse_dynamics_get_plastic_saturation_count(void) { - return plastic_saturation_count; -} -static inline fixed_stdp_synapse synapse_dynamics_stdp_get_fixed( - uint32_t control_word, uint32_t time, uint32_t colour_delay) { - // Extract control-word components - // **NOTE** cunningly, control word is just the same as lower - // 16-bits of 32-bit fixed synapse so same functions can be used - uint32_t delay_dendritic = synapse_row_sparse_delay(control_word, - synapse_type_index_bits, synapse_delay_mask); - uint32_t delay_axonal = 0; //sparse_axonal_delay(control_word); - uint32_t type_index = synapse_row_sparse_type_index(control_word, - synapse_type_index_mask); - return (fixed_stdp_synapse) { - .delay_dendritic = delay_dendritic, - .delay_axonal = delay_axonal, - .type = synapse_row_sparse_type( - control_word, synapse_index_bits, synapse_type_mask), - .index = synapse_row_sparse_index( - control_word, synapse_index_mask), - .type_index = type_index, - .ring_buffer_index = synapse_row_get_ring_buffer_index_combined( - (delay_axonal + delay_dendritic + time) - colour_delay, type_index, - synapse_type_index_bits, synapse_delay_mask) - }; -} -static inline void synapse_dynamics_stdp_update_ring_buffers( - weight_t *ring_buffers, fixed_stdp_synapse s, int32_t weight) { - uint32_t accumulation = ring_buffers[s.ring_buffer_index] + weight; +static void update_internal_state_(synapse_state_t *state, uint32_t t_start, uint32_t timestep) { + + /** + * Recompute propagator constants + **/ + + const accum __h = timestep; // XXX: time constants are in ms, so __h should be as well! convert uint32_t to accum type + +//log_info("[NESTML synapse] ************** tau_tr_pre = 0x%x = %k\n", plasticity_weight_region_data->tau_tr_pre, plasticity_weight_region_data->tau_tr_pre); +//log_info("[NESTML synapse] ************** __h = 0x%x = %k\n", __h, __h); // OK! +//log_info("[NESTML synapse] ************** -__h = 0x%x = %k\n", -__h, -__h); // OK! +//log_info("[NESTML synapse] ************** -(-__h) = 0x%x = %k\n", -(-__h), -(-__h)); // OK! 
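The commented-out `log_info` experiments above probe how the propagator P = exp(-h / tau_tr_pre) behaves under `accum` arithmetic. A host-side double-precision reference for the value the `expk(-_kdivk(__h, tau_tr_pre))` chain should produce; the helper name is an assumption, and `accum` is s16.15, hence the 2^15 scale:

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Reference bit pattern for the device-side propagator expk(-_kdivk(h, tau)). */
static int32_t accum_bits_from_double(double x) {
    return (int32_t)lrint(x * 32768.0);   /* accum is s16.15 */
}

int main(void) {
    double h = 1.0, tau_tr_pre = 20.0;    /* ms, matching the synapse model */
    int32_t P = accum_bits_from_double(exp(-h / tau_tr_pre));
    printf("P = 0x%x (~0.9512, expect ~0x79c2)\n", (unsigned)P);
    return 0;
}
```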
- uint32_t sat_test = accumulation & 0x10000; - if (sat_test) { - accumulation = sat_test - 1; - plastic_saturation_count++; +//log_info("[NESTML synapse] ************** __h / tau_tr_pre = 0x%x = %k\n", _kdivk(__h, plasticity_weight_region_data->tau_tr_pre), _kdivk(__h, plasticity_weight_region_data->tau_tr_pre)); +/* +const accum foobar0 = -_kdivk(__h, plasticity_weight_region_data->tau_tr_pre); +log_info("[NESTML synapse] ************** -(__h / tau_tr_pre) = 0x%x = %k\n", foobar0, foobar0); + + +const accum minus_h = -__h; +log_info("[NESTML synapse] ************** minus_h = 0x%x = %k\n", -minus_h, -minus_h); + + +const accum foobar1 = _kdivk(minus_h, plasticity_weight_region_data->tau_tr_pre); +log_info("[NESTML synapse] ************** (-__h) / tau_tr_pre = 0x%x = %k\n", foobar1, foobar1); + +const accum foobar2 = _kdivk(__h, -plasticity_weight_region_data->tau_tr_pre); +log_info("[NESTML synapse] ************** (-__h) / tau_tr_pre = 0x%x = %k\n", foobar2, foobar2); +*/ +//log_info("[NESTML synapse] ************** -(-__h) / tau_tr_pre = 0x%x = %k\n", _kdivk(-(__h, plasticity_weight_region_data->tau_tr_pre), _kdivk(-__h, plasticity_weight_region_data->tau_tr_pre)); +//log_info("[NESTML synapse] ************** exp(-__h / tau_tr_pre) = 0x%x = %k\n", expk(-_kdivk(__h, plasticity_weight_region_data->tau_tr_pre)), expk(-_kdivk(__h, plasticity_weight_region_data->tau_tr_pre))); +//log_info("expk(_kdivk((-__h), plasticity_weight_region_data->tau_tr_pre)); + + if (timestep == 0) { + return; } - ring_buffers[s.ring_buffer_index] = accumulation; -} +{%- for sym in synapse.get_internal_symbols() %} +{%- if sym.get_symbol_name().startswith("__P") %} + const accum {{ sym.get_symbol_name() }} = {{ printer.print(sym.get_declaring_expression()) }}; // type: {{ sym.get_type_symbol().print_symbol() }} +{%- endif %} +{%- endfor %} -//! 
packing all of the information into the required plastic control word -static inline control_t control_conversion( - uint32_t id, uint32_t delay, uint32_t type) { - control_t new_control = - (delay & ((1 << synapse_delay_bits) - 1)) << synapse_type_index_bits; - new_control |= (type & ((1 << synapse_type_index_bits) - 1)) << synapse_index_bits; - new_control |= id & ((1 << synapse_index_bits) - 1); - return new_control; -} -uint32_t synapse_dynamics_n_connections_in_row(synapse_row_fixed_part_t *fixed) { - return synapse_row_num_fixed_synapses(fixed); -} -/***** -*****/ + /** + * Begin NESTML generated code for the update block + **/ -// TAG: POTENTIATION -// TAG: GENERATE -static inline update_state_t timing_apply_post_spike( - uint32_t time, post_trace_t trace, uint32_t last_pre_time, - pre_trace_t last_pre_trace, uint32_t last_post_time, - post_trace_t last_post_trace, update_state_t previous_state) { +{%- if synapse.get_update_blocks() %} +{%- filter indent(2) %} +{%- for block in synapse.get_update_blocks() %} +{%- set ast = block.get_stmts_body() %} +{%- if ast.print_comment('*')|length > 1 %} +/* + {{ast.print_comment('*')}} + */ +{%- endif %} +{%- include "directives_cpp/StmtsBody.jinja2" %} +{%- endfor %} +{%- endfilter %} +{%- endif %} - // update_state_t == weight_state_t - update_state_t *state = &previous_state; - const plasticity_weight_region_data_t* parameter = state->parameter; + /** + * End NESTML generated code for the update block + **/ +} - uint32_t time_since_last_pre = time - last_pre_time; - if (time_since_last_pre > 0) { - // TODO: decay +static void process_post_spike(synapse_state_t *state, uint32_t time) { {%- filter indent(8, True) %} {%- if post_ports is defined %} -{%- for post_port in post_ports %} -// -// NESTML generated onReceive code block for postsynaptic port "{{post_port}}" begins here! -// - -{%- set dynamics = synapse.get_on_receive_block(post_port) %} -{%- with ast = dynamics.get_stmts_body() %} -{%- include "directives_cpp/StmtsBody.jinja2" %} -{%- endwith %} +{%- for post_port in spiking_post_ports %} +/** + * NESTML generated onReceive code block for postsynaptic port "{{ post_port }}" begins here! +**/ +{%- if synapse.get_on_receive_block(post_port) %} +{%- set dynamics = synapse.get_on_receive_block(post_port) %} +{%- with ast = dynamics.get_stmts_body() %} +{%- include "directives_cpp/StmtsBody.jinja2" %} +{%- endwith %} +{%- endif %} {%- endfor %} {%- endif %} {%- endfilter %} - - // Apply potentiation to state (which is a weight_state) - return previous_state; - } else { - return previous_state; - } } -// TAG: DEPRESSION -// TAG: GENERATE -static inline update_state_t timing_apply_pre_spike( - uint32_t time, pre_trace_t trace, uint32_t last_pre_time, - pre_trace_t last_pre_trace, uint32_t last_post_time, - post_trace_t last_post_trace, update_state_t previous_state) { - update_state_t *state = &previous_state; - const plasticity_weight_region_data_t* parameter = state->parameter; - // Get time of event relative to last post-synaptic event - uint32_t time_since_last_post = time - last_post_time; - // TODO: decay +static void process_pre_spike(synapse_state_t *state, uint32_t time) { + /** + * Process pre-synaptic spike + **/ -{%- filter indent(4, True) %} {%- for pre_port in pre_ports %} -// -// NESTML generated onReceive code block for presynaptic port "{{pre_port}}" begins here! 
-// - -{%- set dynamics = synapse.get_on_receive_block(pre_port) %} -{%- with ast = dynamics.get_stmts_body() %} -{%- include "directives_cpp/StmtsBody.jinja2" %} -{%- endwith %} + + /** + * NESTML generated onReceive code block for presynaptic port "{{ pre_port }}" begins here! + **/ +{% if synapse.get_on_receive_block(pre_port) %} +{%- set dynamics = synapse.get_on_receive_block(pre_port) %} +{%- with ast = dynamics.get_stmts_body() %} +{%- filter indent(4, True) %} +{%- include "directives_cpp/StmtsBody.jinja2" %} +{%- endfilter %} +{%- endwith %} +{%- endif %} {%- endfor %} -{%- endfilter %} +} - // Apply depression to state (which is a weight_state) - return previous_state; -} +#define PLOT_DETAILED_STATE -/*** synapse_dynamics_mad_impl.h ***/ +static void process_plastic_synapse( + uint32_t control_word, uint32_t last_pre_time, pre_trace_t last_pre_trace, + pre_trace_t new_pre_trace, weight_t *ring_buffers, uint32_t pre_spike_time, + uint32_t colour_delay, synapse_word_t *synapse_word) { -// TAG: PRE -//--------------------------------------- -//! \brief Synapse update loop core -//! \param[in] time: The current time -//! \param[in] last_pre_time: The time of the last previous pre-event -//! \param[in] last_pre_trace: The last previous pre-trace -//! \param[in] new_pre_trace: The new pre-trace -//! \param[in] delay_dendritic: The dendritic delay for the synapse -//! \param[in] delay_axonal: The axonal delay for the synapse -//! \param[in] current_state: The current state -//! \param[in] post_event_history: The history -//! \return The new basic state of the synapse -static inline final_state_t plasticity_update_synapse( - const uint32_t time, - const uint32_t last_pre_time, const pre_trace_t last_pre_trace, - const pre_trace_t new_pre_trace, const uint32_t delay_dendritic, - const uint32_t delay_axonal, update_state_t current_state, - const post_event_history_t *post_event_history) { - // Apply axonal delay to time of last presynaptic spike - const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; + fixed_{{ synapseName }} s = synapse_dynamics_stdp_get_fixed(control_word, pre_spike_time, colour_delay); - // Get the post-synaptic window of events to be processed - const uint32_t window_begin_time = - (delayed_last_pre_time >= delay_dendritic) - ? (delayed_last_pre_time - delay_dendritic) : 0; - const uint32_t delayed_pre_time = time + delay_axonal; - const uint32_t window_end_time = - (delayed_pre_time >= delay_dendritic) - ? 
(delayed_pre_time - delay_dendritic) : 0; - post_event_window_t post_window = post_events_get_window_delayed( - post_event_history, window_begin_time, window_end_time); + log_info("[NESTML synapse] In process_plastic_synapse(time = %d, last_pre_time = %d, dendritic_delay = %x)", pre_spike_time, last_pre_time, s.delay_dendritic); + uint32_t post_delay = s.delay_dendritic; + if (!params.backprop_delay) { + post_delay = 0; + } -#if LOG_LEVEL >= LOG_DEBUG - print_event_history(post_event_history); - print_delayed_window_events(post_event_history, window_begin_time, - window_end_time, delay_dendritic); + synapse_state_t state = synapse_word_to_state_t(synapse_word, plastic_synapse_word_stride); +#ifdef PLOT_DETAILED_STATE + log_info("[NESTML synapse] \tCurrent state:"); +{%- for sym in synapse.get_state_symbols() %} + log_info("[NESTML synapse] \t\t{{ sym.get_symbol_name() }} = 0x%x\n", state.{{ sym.get_symbol_name() }}); +{%- endfor %} #endif - // Process events in post-synaptic window - while (post_window.num_events > 0) { - const uint32_t delayed_post_time = *post_window.next_time + delay_dendritic; + //log_info("[NESTML synapse] \t__P__pre_trace__pre_trace: %x", plasticity_weight_region_data->__P__pre_trace__pre_trace); - // Apply spike to state - current_state = timing_apply_post_spike( - delayed_post_time, *post_window.next_trace, delayed_last_pre_time, - last_pre_trace, post_window.prev_time, post_window.prev_trace, - current_state); - // Go onto next event - post_window = post_events_next(post_window); - } - - // Apply spike to state only if there has been a post spike ever - if (post_window.prev_time_valid) { - const uint32_t delayed_last_post = post_window.prev_time + delay_dendritic; - current_state = timing_apply_pre_spike( - delayed_pre_time, new_pre_trace, delayed_last_pre_time, last_pre_trace, - delayed_last_post, post_window.prev_trace, current_state); - } - // Return final synaptic word and weight - return synapse_structure_get_final_state(current_state); -} + uint32_t current_time = last_pre_time; // at the start, the state is either the initial state, or however we left it when the previous pre-synaptic spike was processed + // Apply axonal delay to time of last presynaptic spike + const uint32_t delay_axonal = 0; + const uint32_t delayed_last_pre_time = last_pre_time + delay_axonal; -//--------------------------------------- -// Synaptic row plastic-region implementation -//--------------------------------------- + // Get the post-synaptic window of events to be processed + const uint32_t window_begin_time = + (delayed_last_pre_time >= s.delay_dendritic) + ? (delayed_last_pre_time - s.delay_dendritic) : 0; + const uint32_t delayed_pre_time = pre_spike_time + delay_axonal; + const uint32_t window_end_time = + (delayed_pre_time >= s.delay_dendritic) + ? 
(delayed_pre_time - s.delay_dendritic) : 0; + post_event_window_t post_window = post_events_get_window_delayed(post_event_history, window_begin_time, window_end_time); +// log_info("[NESTML synapse] \t Fetching post window from %u to %u\n", window_begin_time, window_end_time); -// TAG: POST + // Process events in post-synaptic window + while (post_window.num_events > 0) { + const uint32_t delayed_post_time = *post_window.next_time + s.delay_dendritic; -// Exponential decay of post spike +#ifdef PLOT_DETAILED_STATE + log_info("[NESTML synapse] \t\tApplying post-synaptic event at delayed time: %u\n", delayed_post_time); +#endif -// TAG: GENERATE -// Add -static inline post_trace_t timing_decay_post( - uint32_t time, uint32_t last_time, post_trace_t last_trace) { - extern int16_lut *tau_minus_lookup; - // Get time since last spike - uint32_t delta_time = time - last_time; + /** + * update synapse internal state from `current_time` to `delayed_post_time` + **/ - // Decay previous o1 and o2 traces - return (post_trace_t) STDP_FIXED_MUL_16X16(last_trace, - maths_lut_exponential_decay(delta_time, tau_minus_lookup)); -} + update_internal_state_(&state, current_time, delayed_post_time - current_time); + current_time = delayed_post_time; -// Line 500pp in cogenerated neuron -//--------------------------------------- -//! \brief Add a post spike to the post trace -//! \param[in] time: the time of the spike -//! \param[in] last_time: the time of the previous spike update -//! \param[in] last_trace: the post trace to update -//! \return the updated post trace -static inline post_trace_t timing_add_post_spike( - uint32_t time, uint32_t last_time, post_trace_t last_trace) { - plasticity_weight_region_data_t* parameter = &plasticity_weight_region_data[0]; - - struct tmp_struct { - post_trace_t post_trace; - } tmp = {.post_trace = last_trace}; - struct tmp_struct *state = &tmp; - - // Update propagator based on deltatime - int32_t temp__h = parameter->__h; - parameter->__h = time - last_time; - -// TODO: move to update internal variables -{% filter indent(2) %} -{%- for variable_symbol in synapse.get_internal_symbols() %} -{%- set variable = utils.get_internal_variable_by_name(astnode, variable_symbol.get_symbol_name()) %} -{%- if not variable_symbol.get_symbol_name() == "__h" %} -{%- include "directives_cpp/MemberInitialization.jinja2" %} -{%- endif %} +#ifdef PLOT_DETAILED_STATE + log_info("[NESTML synapse] \tNEW state after updating internal state to t = %u", current_time); +{%- for sym in synapse.get_state_symbols() %} + log_info("[NESTML synapse] \t\t{{ sym.get_symbol_name() }} = 0x%x\n", state.{{ sym.get_symbol_name() }}); {%- endfor %} -{%- endfilter %} - parameter->__h = temp__h; +#endif -{%- filter indent(4, True) %} -{%- set analytic_state_variables_ = [] %} -{%- for item in analytic_state_variables %} -{%- if item == "post_trace"%} -{%- set _ = analytic_state_variables_.append(item) %} -{%- endif %} + /** + * handle the postsynaptic spike + **/ + + process_post_spike(&state, current_time); + +#ifdef PLOT_DETAILED_STATE + log_info("[NESTML synapse] \tNEW state after applying post spike:"); +{%- for sym in synapse.get_state_symbols() %} + log_info("[NESTML synapse] \t\t{{ sym.get_symbol_name() }} = 0x%x\n", state.{{ sym.get_symbol_name() }}); {%- endfor %} -{%- include "directives_cpp/AnalyticIntegrationStep_begin.jinja2" %} -{%- include "directives_cpp/AnalyticIntegrationStep_end.jinja2" %} -{%- endfilter %} +#endif - state->post_trace += 1; + // Go onto next event + post_window = 
post_events_next(post_window); + } - return (post_trace_t) 0; -} -// init new post_trace_t and ad to post_events -//--------------------------------------- -void synapse_dynamics_process_post_synaptic_event( - uint32_t time, index_t neuron_index) { - // Add post-event - post_event_history_t *history = &post_event_history[neuron_index]; - const uint32_t last_post_time = history->times[history->count_minus_one]; - const post_trace_t last_post_trace = - history->traces[history->count_minus_one]; - post_events_add(time, history, - timing_add_post_spike(time, last_post_time, last_post_trace)); // Create post_trace_t and add to buffers -} + /** + * update synapse internal state from `current_time` to `pre_spike_time` + **/ -// TAG: PRE -//--------------------------------------- -static inline plastic_synapse_t process_plastic_synapse( - uint32_t control_word, uint32_t last_pre_time, pre_trace_t last_pre_trace, - pre_trace_t new_pre_trace, weight_t *ring_buffers, uint32_t time, - uint32_t colour_delay, plastic_synapse_t synapse) { - fixed_stdp_synapse s = synapse_dynamics_stdp_get_fixed(control_word, time, - colour_delay); - - // Create update state from the plastic synaptic word - // update_state_t = weight_state_t - update_state_t current_state = synapse_structure_get_update_state( - synapse, s.type); - - // Update the synapse state - // TODO: delay or no delay on backpropagation - uint32_t post_delay = s.delay_dendritic; - - // final_stat_t = weight_t - final_state_t final_state = plasticity_update_synapse( - time - colour_delay, last_pre_time, last_pre_trace, new_pre_trace, - post_delay, s.delay_axonal, current_state, - &post_event_history[s.index]); - - // Add weight to ring-buffer entry, but only if not too late + update_internal_state_(&state, current_time, pre_spike_time - current_time); + current_time = pre_spike_time; + +#ifdef PLOT_DETAILED_STATE + log_info("[NESTML synapse] \tNEW state after integration to t_pre:"); +{%- for sym in synapse.get_state_symbols() %} + log_info("[NESTML synapse] \t\t{{ sym.get_symbol_name() }} = 0x%x\n", state.{{ sym.get_symbol_name() }}); +{%- endfor %} +#endif + /** + * process pre spike + **/ + + process_pre_spike(&state, current_time); + +#ifdef PLOT_DETAILED_STATE + log_info("[NESTML synapse] \tNEW state after processing pre_spikes:"); +{%- for sym in synapse.get_state_symbols() %} + log_info("[NESTML synapse] \t\t{{ sym.get_symbol_name() }} = 0x%x\n", state.{{ sym.get_symbol_name() }}); +{%- endfor %} +#endif + /** + * Add weight to ring-buffer entry, but only if not too late + **/ + if (s.delay_axonal + s.delay_dendritic > colour_delay) { - int32_t weight = synapse_structure_get_final_weight(final_state); + int32_t weight = TYPECAST_TO_UINT32_T(state.w); // XXX: the variable name ``w`` is hard-coded here synapse_dynamics_stdp_update_ring_buffers(ring_buffers, s, weight); + log_info("[NESTML synapse] \thard-coding weight, weight = 0x%x", weight); } else { skipped_synapses++; + log_info("[NESTML synapse] \tskipping synapse"); } - return synapse_structure_get_final_synaptic_word(final_state); + /** + * Encode the new state into synaptic word + **/ + + state_t_to_synapse_word(&state, synapse_word, plastic_synapse_word_stride); +// log_info("[NESTML synapse] ---> the new synapse word is 0x%x, 0x%x, 0x%x", synapse_word->w, synapse_word->pre_trace, synapse_word->post_trace); } -// TAG: PRE +/** + * print low-level synaptic row data +**/ +void print_low_level_synaptic_row_data(synapse_row_plastic_data_t *plastic_region_address, + synapse_row_fixed_part_t 
*fixed_region) { -static inline pre_trace_t timing_add_pre_spike( - uint32_t time, uint32_t last_time, pre_trace_t last_trace) { +#ifdef FOOBAR + // data words: for each synapse, stores the complete state in a 32-bit word + synapse_word_t *plastic_words = plastic_region_address->synapses; - struct tmp_struct { - pre_trace_t pre_trace; - } tmp = {.pre_trace = last_trace}; - struct tmp_struct *state = &tmp; + // control words: for each synapse, stores the delay, synapse type, and postsynaptic neuron ID + control_t *control_words = synapse_row_plastic_controls(fixed_region); - plasticity_weight_region_data_t* parameter = &plasticity_weight_region_data[0]; - // update propagator based on deltatime - int32_t temp__h = parameter->__h; - parameter->__h = time - last_time; -{% filter indent(2) %} -{%- for variable_symbol in synapse.get_internal_symbols() %} -{%- set variable = utils.get_internal_variable_by_name(astnode, variable_symbol.get_symbol_name()) %} -{%- if not variable_symbol.get_symbol_name() == "__h" %} -{%- include "directives_cpp/MemberInitialization.jinja2" %} -{%- endif %} -{%- endfor %} -{%- endfilter %} - parameter->__h = temp__h; + // Print out parsed data for static synapses + uint32_t *synaptic_words = synapse_row_fixed_weight_controls(fixed_region); + const uint32_t n_fixed_synapses = synapse_row_num_fixed_synapses(fixed_region); + log_info("[NESTML synapse] \tFixed-Fixed Region (%u synapses):", n_fixed_synapses); + for (size_t i = 0; i < n_fixed_synapses; ++i) { + uint32_t synaptic_word = *synaptic_words++; + + uint32_t delay = synapse_row_sparse_delay(synaptic_word, synapse_type_index_bits, synapse_delay_mask); + uint32_t type = synapse_row_sparse_type(synaptic_word, synapse_index_bits, synapse_type_mask); + uint32_t neuron = synapse_row_sparse_index(synaptic_word, synapse_index_mask); + log_info("[NESTML synapse] \t\tDelay %u, Synapse Type %u, Neuron %u", delay, type, neuron); + } -{%- filter indent(4, True) %} -{%- set analytic_state_variables_ = [] %} -{%- for item in analytic_state_variables %} -{%- if item == "pre_trace"%} -{%- set _ = analytic_state_variables_.append(item) %} -{%- endif %} -{%- endfor %} -{%- include "directives_cpp/AnalyticIntegrationStep_begin.jinja2" %} -{%- include "directives_cpp/AnalyticIntegrationStep_end.jinja2" %} -{%- endfilter %} + const size_t n_plastic_synapses = synapse_row_num_plastic_controls(fixed_region); - state->pre_trace += 1; + log_info("[NESTML synapse] \tPlastic region %u synapses:", n_plastic_synapses); + for (uint32_t i = 0; i < n_plastic_synapses; ++i) { + // Get next control word (auto incrementing control word) + uint32_t control_word = *(control_words + i); + uint32_t synapse_type = synapse_row_sparse_type(control_word, synapse_index_bits, synapse_type_mask); - return 0; + // Get state + const synapse_word_t data_word = *(plastic_words + i * plastic_synapse_word_stride * 4); // 4 because of 32-bit word length + synapse_state_t update_state = synapse_structure_get_update_state(data_word, synapse_type); + + log_info("[NESTML synapse] \t\tidx = %d; control word = %08x; data word = %08x; w = %5u; pre_trace = %d; post_trace = %d; d = %2u, post neuron id = %3u", i, control_word, data_word, update_state.w, update_state.pre_trace, update_state.post_trace, synapse_row_sparse_delay(control_word, synapse_type_index_bits, synapse_delay_mask), synapse_row_sparse_index(control_word, synapse_index_mask)); +/* log_info("[NESTML synapse] \td: %2u, n = %3u)] - { %08x %08x }\n", + synapse_row_sparse_delay(control_word, 
synapse_type_index_bits, synapse_delay_mask),
+                synapse_row_sparse_index(control_word, synapse_index_mask),
+                synapse_delay_mask, synapse_type_index_bits);*/
+
+    }
+#endif
}

-// TAG: PRE
-// Can stay as is
-// Process spikes in window
-// Similar to send for nest
+
+
+//! \brief Process the dynamics of the synapses
//! \param[in,out] plastic_region_data: Where the plastic data is
//! \param[in] fixed_region: Where the fixed data is
//! \param[in,out] ring_buffers: The ring buffers
//! \param[in] time: The current simulation time
//! \param[out] write_back: Whether anything needs to be written back
//! \return Whether the processing was successful or not
bool synapse_dynamics_process_plastic_synapses(
        synapse_row_plastic_data_t *plastic_region_address,
        synapse_row_fixed_part_t *fixed_region,
        weight_t *ring_buffers, uint32_t time, uint32_t colour_delay,
        bool *write_back) {
-    // Array of weights
-    plastic_synapse_t *plastic_words = plastic_region_address->synapses;
+    log_info("[NESTML synapse] synapse_dynamics_process_plastic_synapses()");
+
+    // Extract separate arrays of the plastic synapses (from the plastic
+    // region), the control words (from the fixed region) and the number of
+    // plastic synapses
+    synapse_word_t *plastic_words = plastic_region_address->synapses;
+    control_t *control_words = synapse_row_plastic_controls(fixed_region);
+    const size_t n_plastic_synapses = synapse_row_num_plastic_controls(fixed_region);
+
+//    log_info("[NESTML synapse] ------------------ AAAAAAAAAAAAAAAAAAA");
+//    print_low_level_synaptic_row_data(plastic_region_address, fixed_region);
+
-    // control_t = uint16_t
-    const control_t *control_words = synapse_row_plastic_controls(fixed_region);
-    size_t n_plastic_synapses = synapse_row_num_plastic_controls(fixed_region);
-
    // This method is called on presynaptic event
    num_plastic_pre_synaptic_events += n_plastic_synapses;

-    // Backup last presynaptic spike
+    // Get last pre-synaptic event from event history
    const uint32_t last_pre_time = plastic_region_address->history.prev_time;
    const pre_trace_t last_pre_trace = plastic_region_address->history.prev_trace;

    // Update pre-synaptic trace
    plastic_region_address->history.prev_time = time - colour_delay;
-    plastic_region_address->history.prev_trace =
-            timing_add_pre_spike(time - colour_delay, last_pre_time, last_pre_trace);
-
-    // Loop through plastic synapses
-    for (; n_plastic_synapses > 0; n_plastic_synapses--) {
-        // Get control word, increment after
-        uint32_t control_word = *control_words++;
-
-        plastic_words[0] = process_plastic_synapse(
-                control_word, last_pre_time, last_pre_trace,
-                plastic_region_address->history.prev_trace, ring_buffers, time,
-                colour_delay, plastic_words[0]);
-        plastic_words++;
-    }
-    *write_back = true;
-    return true;
-}
-
+//    plastic_region_address->history.prev_trace = timing_add_pre_spike(time - colour_delay, last_pre_time, last_pre_trace);

-//---------------------------------------
-// STDP weight dependence functions
-//---------------------------------------
-static inline weight_state_t weight_get_initial(
-        weight_t weight, index_t synapse_type) {
-
-    s1615 s1615_weight = kbits(weight << weight_shift[synapse_type]);
-    return (weight_state_t) {
-{%- for variable_symbol in synapse.get_state_symbols() %}
-{%- if variable_symbol.variable_symbol.get_symbol_name() == synapse_weight_variable %}
-{%- set variable = utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()) %}
-        // special case for weight variable
-        .{{ printer_no_origin.print(variable) }} = s1615_weight,
-{%- endif %}
-{%- endfor %} - .weight_shift = weight_shift[synapse_type], - .parameter = &plasticity_weight_region_data[synapse_type] - }; -} - -// TAG: GENERATE -static inline weight_state_t weight_one_term_apply_depression( - weight_state_t state, int32_t depression) { - return state; -} - -// TAG: GENERATE -static inline weight_state_t weight_one_term_apply_potentiation( - weight_state_t state, int32_t potentiation) { - return state; -} - -//--------------------------------------- -static inline weight_t weight_get_final(weight_state_t state) { - - // TODO: Perform operations to get the final weight from the intermediate - // state, taking into account all potentiation and depression - // Note: it is recommended to do a single complex operation here rather - // than one for each potentiation or depression if possible -{%- for variable_symbol in synapse.get_state_symbols() %} -{%- if variable_symbol.variable_symbol.get_symbol_name() == synapse_weight_variable %} -{%- set variable = utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()) %} - // special case for weight variable - return (weight_t)(bitsk(state.{{ printer_no_origin.print(utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()))}}) >> state.weight_shift); -{%- endif %} -{%- endfor %} -} - -//--------------------------------------- -__attribute__((unused)) // Marked unused as only used sometimes -static void weight_decay(weight_state_t *state, int32_t decay) { - // state->weight = mul_accum_fixed(state->weight, decay); - - // TODO: Decay the weight inside the state -} - -//--------------------------------------- -__attribute__((unused)) // Marked unused as only used sometimes -static accum weight_get_update(weight_state_t state) { -{%- for variable_symbol in synapse.get_state_symbols() %} -{%- if variable_symbol.variable_symbol.get_symbol_name() == synapse_weight_variable %} -{%- set variable = utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()) %} - // special case for weight variable - return state.{{ printer_no_origin.print(utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()))}}; -{%- endif %} -{%- endfor %} -} - -static inline post_trace_t timing_get_initial_post_trace(void) { - return ZERO; -} - - - - - -/***** -UNRELEVANT AT THE MOMENT -*****/ - -bool synapse_dynamics_find_neuron( - uint32_t id, synaptic_row_t row, weight_t *weight, uint16_t *delay, - uint32_t *offset, uint32_t *synapse_type) { - synapse_row_fixed_part_t *fixed_region = synapse_row_fixed_region(row); - const synapse_row_plastic_data_t *plastic_data = (void *) - synapse_row_plastic_region(row); - const plastic_synapse_t *plastic_words = plastic_data->synapses; - const control_t *control_words = synapse_row_plastic_controls(fixed_region); - const size_t n_plastic_synapses = synapse_row_num_plastic_controls(fixed_region); + control_words = synapse_row_plastic_controls(fixed_region); + plastic_words = plastic_region_address->synapses; // Loop through plastic synapses - for (size_t plastic_synapse = n_plastic_synapses; plastic_synapse > 0; - plastic_synapse--) { - // Take the weight anyway as this updates the plastic words - *weight = synapse_structure_get_weight(*plastic_words++); - - // Check if index is the one I'm looking for - uint32_t control_word = *control_words++; - if (synapse_row_sparse_index(control_word, synapse_index_mask) == id) { - *offset = n_plastic_synapses - plastic_synapse; - *delay = synapse_row_sparse_delay(control_word, - synapse_type_index_bits, 
synapse_delay_mask); - *synapse_type = synapse_row_sparse_type( - control_word, synapse_index_bits, synapse_type_mask); - return true; - } - } - - return false; -} + for (size_t i = 0; i < n_plastic_synapses; ++i) { + // Get next control word (auto incrementing) + uint32_t control_word = *(control_words + i); +// log_info("[NESTML synapse] old plastic word[%d] = %x\n", i, plastic_words[i * plastic_synapse_word_stride]); + process_plastic_synapse(control_word, last_pre_time, last_pre_trace, plastic_region_address->history.prev_trace, ring_buffers, time, colour_delay, plastic_words); +// log_info("[NESTML synapse] new plastic word[%d] = %x\n", i, plastic_words[i * plastic_synapse_word_stride]); -bool synapse_dynamics_remove_neuron(uint32_t offset, synaptic_row_t row) { - synapse_row_fixed_part_t *fixed_region = synapse_row_fixed_region(row); - synapse_row_plastic_data_t *plastic_data = (void *) - synapse_row_plastic_region(row); - plastic_synapse_t *plastic_words = plastic_data->synapses; - - control_t *control_words = synapse_row_plastic_controls(fixed_region); - int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); + } - // Delete weight at offset - plastic_words[offset] = plastic_words[plastic_synapse - 1]; - // Delete control word at offset - control_words[offset] = control_words[plastic_synapse - 1]; - control_words[plastic_synapse - 1] = 0; + *write_back = true; - // Decrement FP - fixed_region->num_plastic--; return true; } -bool synapse_dynamics_add_neuron(uint32_t id, synaptic_row_t row, - weight_t weight, uint32_t delay, uint32_t type) { - synapse_row_fixed_part_t *fixed_region = synapse_row_fixed_region(row); - synapse_row_plastic_data_t *plastic_data = synapse_row_plastic_region(row); - plastic_synapse_t *plastic_words = plastic_data->synapses; - plastic_synapse_t new_weight = synapse_structure_create_synapse(weight); - control_t new_control = control_conversion(id, delay, type); - control_t *control_words = synapse_row_plastic_controls(fixed_region); - int32_t plastic_synapse = synapse_row_num_plastic_controls(fixed_region); - // Add weight at offset - plastic_words[plastic_synapse] = new_weight; - // Add control word at offset - control_words[plastic_synapse] = new_control; +//! \brief Inform the synapses that the neuron fired +//! \param[in] time: The current simulation time +//! \param[in] neuron_index: Which neuron are we processing +void synapse_dynamics_process_post_synaptic_event(uint32_t time, index_t neuron_index) { + log_info("[NESTML synapse] Adding post-synaptic event to buffer at time: %u", time); - // Increment FP - fixed_region->num_plastic++; - return true; -} + // Add post-event + post_event_history_t *history = &post_event_history[neuron_index]; + //const uint32_t last_post_time = history->times[history->count_minus_one]; + post_events_add(time, history); + print_delayed_window_events(history, 0., 9999, 0); +} -/********** -PRINTS -**********/ -void synapse_dynamics_print_plastic_synapses( - synapse_row_plastic_data_t *plastic_region_data, - synapse_row_fixed_part_t *fixed_region, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { - -} -//--------------------------------------- -//! \brief Get the axonal delay -//! \param[in] x: The packed plastic synapse control word -//! 
\return the axonal delay -static inline index_t sparse_axonal_delay(uint32_t x) { -#if 1 - // No axonal delay, ever - __use(x); - return 0; -#else - return (x >> synapse_delay_index_type_bits) & SYNAPSE_AXONAL_DELAY_MASK; -#endif -} \ No newline at end of file diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.h.jinja2 index cf25a8ca1..6ab0a8053 100644 --- a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.h.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.h.jinja2 @@ -1,116 +1,623 @@ -#include -#include -#include -#include -#include -#include +{#- +@SYNAPSE_NAME@_impl.h.jinja2 +---------------------------- + +This file is part of NEST. + +Copyright (C) 2004 The NEST Initiative + +NEST is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +NEST is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with NEST. If not, see . +#} +/** + * {{ synapseName }}.h + * + * This file is part of NEST. + * + * Copyright (C) 2004 The NEST Initiative + * + * NEST is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 2 of the License, or + * (at your option) any later version. + * + * NEST is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with NEST. If not, see . + * + * Generated from NESTML {{ nestml_version }} at time: {{ now }} +**/ + +#ifndef {{synapseName.upper()}}_IMPL_H +#define {{synapseName.upper()}}_IMPL_H + #include +// Include generic plasticity maths functions #include #include +#include + +// Standard includes +#include +#include +#include + +// Include debug header for log_info etc +#include + + +// Spinn_common includes +#include + +// sPyNNaker neural modelling includes #include -// Row describes the connection from presynaptic to all postsynaptic neurons -#include + +// Plasticity includes +#include #include -//--------------------------------------- -// Defines -//--------------------------------------- +// fixed-point math functions from spinn_common +#include "log.h" +#include "stdfix-exp.h" -// These need to be defined before including any synapse stuff -#define SYNAPSE_TYPE_BITS 1 -#define SYNAPSE_TYPE_COUNT 2 -#ifndef SYNAPSE_AXONAL_DELAY_BITS -#define SYNAPSE_AXONAL_DELAY_BITS 3 -#endif -#define SYNAPSE_AXONAL_DELAY_MASK \ - ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1) -#ifndef __use -#define __use(x) do { (void) (x); } while (0) -#endif -// post_trace / pre_trace from nestml -typedef REAL post_trace_t; -typedef REAL pre_trace_t; + +// all parameters are externally linked (XXX: into DTCM memory?) 
+ +typedef struct { + /** + * parameters from the NESTML model + **/ + +{%- for sym in synapse.get_parameter_symbols() %} + accum {{ sym.get_symbol_name() }}; +{%- endfor %} + + /** + * internals from the NESTML model + **/ + +{%- for sym in synapse.get_internal_symbols() %} +{%- if (not sym.name == "__h") and (not sym.name.startswith("__P")) %} + accum {{ sym.get_symbol_name() }}; +{%- endif %} +{%- endfor %} +} plasticity_weight_region_data_t; + + + +// stuff from timing.h + +typedef struct post_trace_t { + int16_t post_tr; + uint32_t last_spike_time; +} post_trace_t; + +//! The type of pre-spike traces +typedef struct pre_trace_t { + int16_t pre_tr; +} pre_trace_t; + + + +/** + * for a definition of the SpyNNaker "API", see: + * https://github.com/SpiNNakerManchester/sPyNNaker/blob/master/neural_modelling/src/neuron/plasticity/synapse_dynamics.h +**/ + + -/***** -FIXED STRUCTS -*****/ -// Fixed format from spinnaker + + + + + + + + + +// stuff from post_events.h + + +//--------------------------------------- +// Macros +//--------------------------------------- +//! Maximum number of post-synaptic events supported +#define MAX_POST_SYNAPTIC_EVENTS 16 + +//--------------------------------------- +// Structures +//--------------------------------------- +//! Trace history of post-synaptic events typedef struct { - //! The event time - uint32_t prev_time; - //! The event trace - pre_trace_t prev_trace; // 16Bit (sPyNNaker paper, Fig 8), shouldn't be hard specification -} pre_event_history_t; + //! Number of events stored (minus one) + uint32_t count_minus_one; + //! Event times + uint32_t times[MAX_POST_SYNAPTIC_EVENTS]; + //! Event traces + //post_trace_t traces[MAX_POST_SYNAPTIC_EVENTS]; +} post_event_history_t; -// Fixed format from spinnaker -// Fixed synapse parameters -// Describes layout of stdp fixed data -typedef struct fixed_stdp_synapse { - uint32_t delay_dendritic; // NESTML delay - uint32_t delay_axonal; // ?? - uint32_t type; // Synapse type - uint32_t index; // Index of synapse - uint32_t type_index; // type and index packed together - uint32_t ring_buffer_index; // neuron index -} fixed_stdp_synapse; - - -/***** -MODEL STRUCTS -*****/ - -// TODO: switch to fixed point -// Add synapse paramters here +//! Post event window description typedef struct { -{%- for sym in synapse.get_parameter_symbols() | sort(attribute="name") %} - {{type_symbol_printer.print(sym.type_symbol)}} {{ sym.get_symbol_name() }}; -{%- endfor %} + //! The previous post-synaptic event trace +// post_trace_t prev_trace; + //! The previous post-synaptic event time + uint32_t prev_time; + //! The next post-synaptic event trace + // const post_trace_t *next_trace; + //! The next post-synaptic event time + const uint32_t *next_time; + //! The number of events + uint32_t num_events; + //! Whether the previous post-synaptic event is valid (based on time) + uint32_t prev_time_valid; +} post_event_window_t; + + + + + + -{%- for sym in synapse.get_internal_symbols() | sort(attribute="name") %} - {{type_symbol_printer.print(sym.type_symbol)}} {{sym.get_symbol_name()}}; +//! \brief Plastic synapse word +// n.b.: this should be a multiple of 32 bits. Bit packing can be used, e.g. 
+//
+//     unsigned int w : 16;          //!< The weight
+//     unsigned int pre_trace : 8;   // pre trace
+//     unsigned int post_trace : 8;  // post trace
+//
+typedef struct synapse_word_t {
{%- for sym in synapse.get_state_symbols() %}
+    uint32_t {{ sym.get_symbol_name() }};    // 32 bits by default for every state variable; the value is encoded in S16.15 format
{%- endfor %}
+} synapse_word_t;

-    // TODO: Put in all required parameters
+const size_t plastic_synapse_word_stride = {{ synapse.get_state_symbols() | length }};

-} plasticity_weight_region_data_t;

-//TODO: switch to fixed point
+//! The current state data for the rule
typedef struct {
-{%- for variable_symbol in synapse.get_state_symbols() %}
-    {{type_symbol_printer.print(variable_symbol.type_symbol) }} {{ printer_no_origin.print(utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name()))}};
+{%- for sym in synapse.get_state_symbols() %}
+    accum {{ sym.get_symbol_name() }};
{%- endfor %}
-    uint32_t weight_shift;
-    plasticity_weight_region_data_t *parameter;
+    //! Reference to the configuration data
+    const plasticity_weight_region_data_t *weight_region;
+} synapse_state_t;

-    // TODO: Put in any variables required in the intermediate state
-} weight_state_t;

-/*****
-UNUSED STRUCTS
-*****/

-/*
-// layout of initializing datastructure, saved as global variables
-typedef struct my_timing_config {
-    accum my_potentiation_parameter;
-    accum my_depression_parameter;
-} my_timing_config_t;
-*/

-/*
+
+
+// stuff from synapse_structure_...._impl.h
+
+//! \brief Get the update state from the synapse structure
+//! \param[in] synaptic_word: The plastic synapse data
+//! \param[in] synapse_type: What (supported) type of synapse is this?
+//! \return The update state
+static inline synapse_state_t synapse_structure_get_update_state(
+        synapse_word_t synaptic_word, index_t synapse_type) {
+    // Create update state, using the weight dependence to initialise the
+    // weight state and copying other parameters from the synaptic word into
+    // 32-bit form
+    synapse_state_t update_state;
+    log_info("[NESTML synapse] synapse_structure_get_update_state()");
+
+    extern plasticity_weight_region_data_t *plasticity_weight_region_data;
+    update_state.weight_region = &plasticity_weight_region_data[synapse_type];
+
{%- for sym in synapse.get_state_symbols() %}
+    update_state.{{ sym.get_symbol_name() }} = synaptic_word.{{ sym.get_symbol_name() }};
+    log_info("[NESTML synapse] \t{{ sym.get_symbol_name() }} ====== %x ====== %x", synaptic_word.{{ sym.get_symbol_name() }}, update_state.{{ sym.get_symbol_name() }});
+//    log_info("[NESTML synapse] \t{{ sym.get_symbol_name() }} ====== %x", synaptic_word.{{ sym.get_symbol_name() }});
{%- endfor %}
+
+    return update_state;
+}
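+
+// As a concrete illustration (assuming a hypothetical NESTML model whose
+// state consists of the two variables `w` and `pre_trace`), the templates
+// above would expand to:
+//
+//     typedef struct synapse_word_t {
+//         uint32_t w;          // S16.15 bit pattern
+//         uint32_t pre_trace;  // S16.15 bit pattern
+//     } synapse_word_t;
+//
+//     typedef struct {
+//         accum w;
+//         accum pre_trace;
+//         const plasticity_weight_region_data_t *weight_region;
+//     } synapse_state_t;
+//
+// so each plastic synapse occupies `plastic_synapse_word_stride` (here, 2)
+// 32-bit words in the synaptic row.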
+
+
+// stuff from synapse_dynamics_stdp_common.h
+
+//---------------------------------------
+// Macros
+//---------------------------------------
+// The plastic control words used by Morrison synapses store an axonal delay
+// in the upper 3 bits.
+// Assuming a maximum of 16 delay slots, this is all that is required as:
+//
+// 1) Dendritic + Axonal <= 15
+// 2) Dendritic >= Axonal
+//
+// Therefore:
+//
+// * Maximum value of dendritic delay is 15 (with axonal delay of 0)
+//    - It requires 4 bits
+// * Maximum value of axonal delay is 7 (with dendritic delay of 8)
+//    - It requires 3 bits
+//
+// |        Axonal delay       |  Dendritic delay   |       Type        |       Index        |
+// |---------------------------|--------------------|-------------------|--------------------|
+// | SYNAPSE_AXONAL_DELAY_BITS | SYNAPSE_DELAY_BITS | SYNAPSE_TYPE_BITS | SYNAPSE_INDEX_BITS |
+// |                           |                    |        SYNAPSE_TYPE_INDEX_BITS         |
+// |---------------------------|--------------------|----------------------------------------|
+#ifndef SYNAPSE_AXONAL_DELAY_BITS
+#define SYNAPSE_AXONAL_DELAY_BITS 3
+#endif

+#define SYNAPSE_AXONAL_DELAY_MASK \
+    ((1 << SYNAPSE_AXONAL_DELAY_BITS) - 1)
+
+//---------------------------------------
+// Structures
+//---------------------------------------
+//! \brief The type of history data of pre-events
+//!
+//! This data is stored in SDRAM in the plastic part of the synaptic matrix
+typedef struct {
+    //! The event time
+    uint32_t prev_time;
+    //! The event trace
+    pre_trace_t prev_trace;
+} pre_event_history_t;
+
//! The type of configuration parameters in SDRAM (written by host)
typedef struct stdp_params {
    //! The back-propagation delay, in basic simulation timesteps
    uint32_t backprop_delay;
} stdp_params;
+
+typedef struct fixed_{{ synapseName }} {
+    uint32_t delay_dendritic;
+    uint32_t delay_axonal;
+    uint32_t type;
+    uint32_t index;
+    uint32_t type_index;
+    uint32_t ring_buffer_index;
+} fixed_{{ synapseName }};
+
+//! Configuration parameters
+static stdp_params params;
+
+//! \brief The history data of post-events
+static post_event_history_t *post_event_history;
+
+//! Count of pre-synaptic events relevant to plastic processing
+static uint32_t num_plastic_pre_synaptic_events = 0;
+
+//! Count of times that the plastic math became saturated
+static uint32_t plastic_saturation_count = 0;
+
+
+#define TYPECAST_TO_ACCUM(x) *((accum*)(&(x)))
+#define TYPECAST_TO_UINT32_T(x) *((uint32_t*)(&(x)))
+
+
+synapse_state_t synapse_word_to_state_t(const synapse_word_t *synapse_word, const size_t synapse_word_len) {
+    return (synapse_state_t) {
{%- for sym in synapse.get_state_symbols() %}
+        .{{ sym.get_symbol_name() }} = TYPECAST_TO_ACCUM(synapse_word->{{ sym.get_symbol_name() }}),    // use the typecast to copy bit-for-bit. If we don't do this, the compiler will do a data conversion, discarding and shifting the fractional bits.
{%- endfor %}
+        .weight_region = NULL
+    };
+}
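+
+// Worked example of the bit-for-bit copy (values are illustrative): the
+// accum 1.5 has the S16.15 bit pattern 0x0000C000.  TYPECAST_TO_UINT32_T()
+// reinterprets those bits as the integer 0x0000C000, and TYPECAST_TO_ACCUM()
+// restores exactly 1.5 from them.  A plain C cast would instead convert the
+// *value*, turning accum 1.5 into the integer 1 and losing the fractional
+// bits.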
+// log_info("[NESTML synapse] ~~~~~~~~~~~~~ assigning 0x%x to 0x%x", state->{{ sym.get_symbol_name() }}, synapse_word->{{ sym.get_symbol_name() }}); +{%- endfor %} +} + + + + +/* +synapse_state_t synapse_word_to_state_t(const size_t const *synapse_word, const size_t synapse_word_len) { + return (synapse_state_t) { +{%- for sym in synapse.get_state_symbols() %} + .{{ sym.get_symbol_name() }} = *(synapse_word + {{ loop.index0 }}), +{%- endfor %} + }; +} + + +void state_t_to_synapse_word(const synapse_state_t state, size_t *synapse_word, const size_t synapse_word_len) { +{%- for sym in synapse.get_state_symbols() %} + *(synapse_word + {{ loop.index0 }}) = state.{{ sym.get_symbol_name() }}; +{%- endfor %} +} + */ + + + + + + + + + + + + + + + + + + + + + +/* PRIVATE FUNCTIONS */ + +// Mark a value as possibly unused while not using any instructions, guaranteed +#ifndef __use +#define __use(x) do { (void) (x); } while (0) +#endif + +input_t synapse_dynamics_get_intrinsic_bias( + UNUSED uint32_t time, UNUSED index_t neuron_index) { + return ZERO; +} + +uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { + return num_plastic_pre_synaptic_events; +} + +uint32_t synapse_dynamics_get_plastic_saturation_count(void) { + return plastic_saturation_count; +} + +static inline fixed_{{ synapseName }} synapse_dynamics_stdp_get_fixed( + uint32_t control_word, uint32_t time, uint32_t colour_delay) { + // Extract control-word components + // **NOTE** cunningly, control word is just the same as lower + // 16-bits of 32-bit fixed synapse so same functions can be used +// log_info("[NESTML synapse] synapse_dynamics_stdp_get_fixed()"); + uint32_t delay_dendritic = synapse_row_sparse_delay(control_word, + synapse_type_index_bits, synapse_delay_mask); + uint32_t delay_axonal = 0; //sparse_axonal_delay(control_word); + uint32_t type_index = synapse_row_sparse_type_index(control_word, + synapse_type_index_mask); + + +// log_info("[NESTML synapse] \tdelay_dendritic = %d, delay_axonal = %d, type = %d, index = %d", delay_dendritic, delay_axonal, synapse_row_sparse_type(control_word, synapse_index_bits, synapse_type_mask), synapse_row_sparse_index(control_word, synapse_index_mask)); + + return (fixed_{{ synapseName }}) { + .delay_dendritic = delay_dendritic, + .delay_axonal = delay_axonal, + .type = synapse_row_sparse_type(control_word, synapse_index_bits, synapse_type_mask), + .index = synapse_row_sparse_index(control_word, synapse_index_mask), + .type_index = type_index, + .ring_buffer_index = synapse_row_get_ring_buffer_index_combined( + (delay_axonal + delay_dendritic + time) - colour_delay, type_index, + synapse_type_index_bits, synapse_delay_mask) + }; +} + +static inline void synapse_dynamics_stdp_update_ring_buffers( + weight_t *ring_buffers, fixed_{{ synapseName }} s, int32_t weight) { + uint32_t accumulation = ring_buffers[s.ring_buffer_index] + weight; + + uint32_t sat_test = accumulation & 0xFFFF0000; + if (sat_test) { + accumulation = 0xFFFF; + plastic_saturation_count++; + } + + ring_buffers[s.ring_buffer_index] = accumulation; +} + +//! 
+//! packing all of the information into the required plastic control word
+static inline control_t control_conversion(
+        uint32_t id, uint32_t delay, uint32_t type) {
+    control_t new_control =
+            (delay & ((1 << synapse_delay_bits) - 1)) << synapse_type_index_bits;
+    new_control |= (type & ((1 << synapse_type_index_bits) - 1)) << synapse_index_bits;
+    new_control |= id & ((1 << synapse_index_bits) - 1);
+    return new_control;
+}
+
+uint32_t synapse_dynamics_n_connections_in_row(synapse_row_fixed_part_t *fixed) {
+    return synapse_row_num_plastic_controls(fixed);
+}
+
+
+// stuff from post_events.h
+
+//---------------------------------------
+// Inline functions
+//---------------------------------------
+
+#if LOG_LEVEL >= LOG_DEBUG
+//! \brief Print a post-synaptic event history
+//! \param[in] events: The history
+static inline void print_event_history(const post_event_history_t *events) {
+    log_info("      ## printing entire post event history  ##");
+    for (uint32_t i = 0; i <= events->count_minus_one; i++) {
+        log_info("post event: %u, time: %u",
+                i, events->times[i]);
+    }
+}
+#endif
+
+//! \brief Initialise an array of post-synaptic event histories
+//! \param[in] n_neurons: Number of neurons
+//! \return The array
+static inline post_event_history_t *post_events_init_buffers(
+        uint32_t n_neurons) {
+    post_event_history_t *post_event_history =
+            spin1_malloc(n_neurons * sizeof(post_event_history_t));
+    // Check allocations succeeded
+    if (post_event_history == NULL) {
+        log_error("Unable to allocate global STDP structures - Out of DTCM: Try "
+                "reducing the number of neurons per core to fix this problem ");
+        return NULL;
+    }
+
+    // Loop through neurons
+    for (uint32_t n = 0; n < n_neurons; n++) {
+        // Add initial placeholder entry to buffer
+        post_event_history[n].times[0] = 0;
+        //post_event_history[n].traces[0] = (post_trace_t){.post_tr=0, .last_spike_time = 0};// timing_get_initial_post_trace();
+        post_event_history[n].count_minus_one = 0;
+    }
+
+    return post_event_history;
+}
+
+//---------------------------------------
+//! \brief Get the post-synaptic event window
+//! \param[in] events: The post-synaptic event history
+//! \param[in] begin_time: The start of the window
+//! \param[in] end_time: The end of the window
+//! \return The window
+static inline post_event_window_t post_events_get_window_delayed(
+        const post_event_history_t *events, uint32_t begin_time,
+        uint32_t end_time) {
+    // Start at end event - beyond end of post-event history
+    const uint32_t count = events->count_minus_one + 1;
+    const uint32_t *end_event_time = events->times + count;
+    const uint32_t *event_time = end_event_time;
+    //const post_trace_t *event_trace = events->traces + count;
+
+    post_event_window_t window;
+    do {
+        // If this event is still in the future, set it as the end
+        if (*event_time > end_time) {
+            end_event_time = event_time;
+        }
+
+        // Cache pointer to this event as potential next event and go back one
+        // event.
+        // **NOTE** next_time can be invalid
+        window.next_time = event_time--;
+        //window.next_trace = event_trace--;
+
+        // Keep looping while event occurred after start of window and we
+        // haven't hit beginning of array...
+    } while (*event_time > begin_time && event_time != events->times);
+
+    // Dereference event to use as previous
+    window.prev_time = *event_time;
+    //window.prev_trace = *event_trace;
+    window.prev_time_valid = event_time != events->times;
+
+    // Calculate number of events
+    window.num_events = (end_event_time - window.next_time);
+
+    // Return window
+    return window;
+}
+
+//---------------------------------------
+//! \brief Advance a post-synaptic event window to the next event
+//! \param[in] window: The window to advance
+//! \return the advanced window
+static inline post_event_window_t post_events_next(
+        post_event_window_t window) {
+    // Update previous time and increment next time
+    window.prev_time = *window.next_time++;
+    //window.prev_trace = *window.next_trace++;
+
+    // Time will now be valid for sure!
+    window.prev_time_valid = 1;
+
+    // Decrement remaining events
+    window.num_events--;
+    return window;
+}
+
+//---------------------------------------
+//! \brief Add a post-synaptic event to the history
+//! \param[in] time: the time of the event
+//! \param[in,out] events: the history to add to
+//! \param[in] trace: the trace of the event (currently disabled; traces are not stored)
+static inline void post_events_add(uint32_t time, post_event_history_t *events/*, post_trace_t trace*/) {
+    if (events->count_minus_one < MAX_POST_SYNAPTIC_EVENTS - 1) {
+        // If there's still space, store time at current end and increment count minus 1
+        const uint32_t new_index = ++events->count_minus_one;
+        events->times[new_index] = time;
+        //events->traces[new_index] = trace;
+    } else {
+        // Otherwise, shuffle down the elements
+        // **NOTE** the 1st element is always an entry at time 0
+        for (uint32_t e = 2; e < MAX_POST_SYNAPTIC_EVENTS; e++) {
+            events->times[e - 1] = events->times[e];
+            //events->traces[e - 1] = events->traces[e];
+        }
+
+        // Stick new time at end
+        events->times[MAX_POST_SYNAPTIC_EVENTS - 1] = time;
+        //events->traces[MAX_POST_SYNAPTIC_EVENTS - 1] = trace;
+    }
+}
+
+//! \brief Print the post-synaptic event history
+//! \param[in] post_event_history: the history
+//! \param[in] begin_time: The start time of the history
+//! \param[in] end_time: The end time of the history
+//! \param[in] delay_dendritic: The amount of dendritic delay
+static inline void print_delayed_window_events(
+        const post_event_history_t *post_event_history,
+        uint32_t begin_time, uint32_t end_time, uint32_t delay_dendritic) {
+/*    log_info("[NESTML synapse] ## printing post window ##");
+    post_event_window_t post_window = post_events_get_window_delayed(post_event_history, begin_time, end_time);
+
+    while (post_window.num_events > 0) {
+        const uint32_t delayed_post_time =
+                *post_window.next_time + delay_dendritic;
+        log_info("[NESTML synapse] \t\t-> post spike: %u, time: %u",
+                post_window.num_events, delayed_post_time);
+
+        post_window = post_events_next(post_window);
+    }*/
+}
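+
+// Worked example of the post-event window logic above (illustrative values):
+// given a history with times {0, 12, 17, 25} and a query window with
+// begin_time = 10 and end_time = 20, post_events_get_window_delayed() walks
+// backwards from the end of the buffer and returns a window with
+// num_events = 2 (the events at t = 12 and t = 17), next_time pointing at
+// t = 12, and prev_time = 0 with prev_time_valid == 0 (the walk reached the
+// placeholder entry at the start of the array).  Each subsequent call to
+// post_events_next() then consumes one event and promotes its time to
+// prev_time.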
+
+
+
+#endif  // {{synapseName.upper()}}_IMPL_H
diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.py.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.py.jinja2
index 2cb4a362d..3bef1453e 100644
--- a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.py.jinja2
+++ b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_impl.py.jinja2
@@ -1,3 +1,25 @@
+#
+# {{ synapseName }}_impl.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+#
+# Generated from NESTML {{ nestml_version }} at time: {{ now }}
+
# Copyright (c) 2015 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,22 +34,53 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+from __future__ import annotations
+
+from typing import Any, Iterable, List, Optional, Tuple, TYPE_CHECKING
+
import math
-import numpy
+
+from math import exp    # XXX how to support all NESTML functions that can be used in the initialisation of parameters and internals?
+
+import numpy
+import numpy as np
+from numpy import floating, integer, uint8, uint16, uint32
+from numpy.typing import NDArray
+
from pyNN.standardmodels.synapses import StaticSynapse
+
from spinn_utilities.overrides import overrides
+
+from spinn_front_end_common.interface.ds import DataType
+from spinn_front_end_common.interface.ds import DataSpecificationBase
from spinn_front_end_common.utilities.constants import (
    BYTES_PER_WORD, BYTES_PER_SHORT)
from spynnaker.pyNN.data import SpynnakerDataView
from spynnaker.pyNN.exceptions import (
    SynapticConfigurationException, InvalidParameterType)
+from spynnaker.pyNN.models.neural_projections.connectors import (
+    AbstractConnector)
+from spynnaker.pyNN.types import Weight_Types
+from spynnaker.pyNN.types import Weight_Delay_In_Types as _In_Types
from spynnaker.pyNN.utilities.utility_calls import get_n_bits
-from .abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics
-from .abstract_synapse_dynamics_structural import (
+from spynnaker.pyNN.models.neuron.synapse_dynamics.types import (
+    NUMPY_CONNECTORS_DTYPE)
+from spynnaker.pyNN.models.neuron.synapse_dynamics.abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics
+from spynnaker.pyNN.models.neuron.synapse_dynamics.abstract_synapse_dynamics_structural import (
    AbstractSynapseDynamicsStructural)
-from .abstract_generate_on_machine import (
+from spynnaker.pyNN.models.neuron.synapse_dynamics.abstract_generate_on_machine import (
    AbstractGenerateOnMachine, MatrixGeneratorID)
-from .synapse_dynamics_neuromodulation import SynapseDynamicsNeuromodulation
+from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_neuromodulation import SynapseDynamicsNeuromodulation
+from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_weight_changable import SynapseDynamicsWeightChangable
+from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_weight_changer import SynapseDynamicsWeightChanger
+
+if TYPE_CHECKING:
+    from spynnaker.pyNN.models.neural_projections import (
+        ProjectionApplicationEdge, SynapseInformation)
+    from spynnaker.pyNN.models.neuron.synapse_dynamics.types import (
+        ConnectionsArray)
+    from spynnaker.pyNN.models.neuron.synapse_io import MaxRowInfo
+    from .abstract_synapse_dynamics import AbstractSynapseDynamics
# How large are the time-stamps stored with each event
TIME_STAMP_BYTES = BYTES_PER_WORD

@@ -39,7 +92,32 @@
NEUROMODULATION_TARGETS = {
}


-class {{synapseName}}Dynamics(
+
+def float_to_s16_15(x: float) -> int:
+    """Converts a float to an S16.15 fixed-point number, returned as a
+    32-bit signed integer.
+
+    The S16.15 (``accum``) format uses 1 sign bit, 16 integer bits and 15
+    fractional bits; negative values are represented in two's complement
+    (matching the on-chip ``accum`` type), not sign-magnitude.
+
+    For example, float_to_s16_15(1.5) == 49152 (bit pattern 0x0000C000)
+    and float_to_s16_15(-2.75) == -90112 (bit pattern 0xFFFEA000).
+
+    Args:
+        x: The floating-point number to convert.
+
+    Returns:
+        The 32-bit signed integer whose two's-complement bit pattern
+        represents the fixed-point number.
+    """
+
+    # scale by 2**15 and round to the nearest representable value; the
+    # result is the two's-complement S16.15 encoding of x
+    fixp_x: int = int(round(x * (1 << 15)))
+
+    return fixp_x
+
+
+
+
+class {{ synapseName }}Dynamics(
        AbstractPlasticSynapseDynamics, AbstractGenerateOnMachine):
    """
@@ -47,32 +125,32 @@ class {{synapseName}}Dynamics(
    Spike Timing Dependent Plasticity (STDP) rule.
    """

-    __slots__ = [
+    """__slots__ = (
        # Fraction of delay that is dendritic (instead of axonal or synaptic)
        "__dendritic_delay_fraction",
-        # timing dependence to use for the STDP rule
-        "__timing_dependence",
-        # weight dependence to use for the STDP rule
-        "__weight_dependence",
        # The neuromodulation instance if enabled
        "__neuromodulation",
        # padding to add to a synaptic row for synaptic rewiring
        "__pad_to_length",
-        # Weight of connections formed by connector
-        "__weight",
-        # Delay of connections formed by connector
-        "__delay",
        # Whether to use back-propagation delay or not
-        "__backprop_delay"]
+        "__backprop_delay")"""
+
+
+    def _init_nestml_model_variables(self):
+        timestep = 1.    # XXX: hard-coded for now!
+        self._nestml_model_variables = {}
+{%- for sym in synapse.get_parameter_symbols() + synapse.get_internal_symbols() %}
+        self._nestml_model_variables["{{ sym.get_symbol_name() }}"] = {{ printer.print(sym.get_declaring_expression()) }}    # type: {{ sym.get_type_symbol().print_symbol() }}
+{%- endfor %}

    def __init__(
-            self, timing_dependence, weight_dependence,
-            voltage_dependence=None, dendritic_delay_fraction=1.0,
-            weight=StaticSynapse.default_parameters['weight'],
-            delay=None, pad_to_length=None, backprop_delay=True):
+            self,
+            voltage_dependence: None = None,
+            dendritic_delay_fraction: float = 1.0,
+            weight: _In_Types = StaticSynapse.default_parameters['weight'],
+            delay: _In_Types = None, pad_to_length: Optional[int] = None,
+            backprop_delay: bool = True):
        """
-        :param AbstractTimingDependence timing_dependence:
-        :param AbstractWeightDependence weight_dependence:
        :param None voltage_dependence: not supported
        :param float dendritic_delay_fraction: must be 1.0!
:param float weight: @@ -82,32 +160,23 @@ class {{synapseName}}Dynamics( :type pad_to_length: int or None :param bool backprop_delay: """ - if timing_dependence is None or weight_dependence is None: - raise NotImplementedError( - "Both timing_dependence and weight_dependence must be" - "specified") + print("Initialising the synapse with weight = " + str(weight)) if voltage_dependence is not None: raise NotImplementedError( "Voltage dependence has not been implemented") - - self.__timing_dependence = timing_dependence - self.__weight_dependence = weight_dependence - # move data from timing to weight dependence; that's where we need it - weight_dependence.set_a_plus_a_minus( - timing_dependence.A_plus, timing_dependence.A_minus) + super().__init__(delay=delay, weight=weight) self.__dendritic_delay_fraction = float(dendritic_delay_fraction) self.__pad_to_length = pad_to_length - self.__weight = weight - if delay is None: - delay = SpynnakerDataView.get_min_delay() - self.__delay = self._round_delay(delay) self.__backprop_delay = backprop_delay - self.__neuromodulation = None + self.__neuromodulation: Optional[SynapseDynamicsNeuromodulation] = None + + self._init_nestml_model_variables() if self.__dendritic_delay_fraction != 1.0: raise NotImplementedError("All delays must be dendritic!") - def merge_neuromodulation(self, neuromodulation): + def _merge_neuromodulation( + self, neuromodulation: SynapseDynamicsNeuromodulation) -> None: if self.__neuromodulation is None: self.__neuromodulation = neuromodulation elif not self.__neuromodulation.is_neuromodulation_same_as( @@ -117,15 +186,17 @@ class {{synapseName}}Dynamics( " edges to the same Population") @overrides(AbstractPlasticSynapseDynamics.merge) - def merge(self, synapse_dynamics): + def merge(self, synapse_dynamics: AbstractSynapseDynamics + ) -> AbstractSynapseDynamics: + # If dynamics is Neuromodulation, merge with other neuromodulation, # and then return ourselves, as neuromodulation can't be used by # itself if isinstance(synapse_dynamics, SynapseDynamicsNeuromodulation): - self.merge_neuromodulation(synapse_dynamics) + self._merge_neuromodulation(synapse_dynamics) return self - # If dynamics is STDP, test if same as + """# If dynamics is STDP, test if same as if isinstance(synapse_dynamics, SynapseDynamicsSTDP): if not self.is_same_as(synapse_dynamics): raise SynapticConfigurationException( @@ -133,7 +204,8 @@ class {{synapseName}}Dynamics( " edges to the same population") if self.__neuromodulation is not None: - synapse_dynamics.merge_neuromodulation(self.__neuromodulation) + # pylint: disable=protected-access + synapse_dynamics._merge_neuromodulation(self.__neuromodulation) # If STDP part matches, return the other, as it might also be # structural @@ -141,34 +213,27 @@ class {{synapseName}}Dynamics( # If dynamics is structural but not STDP (as here), merge # NOTE: Import here as otherwise we get a circular dependency + # pylint: disable=import-outside-toplevel from .synapse_dynamics_structural_stdp import ( SynapseDynamicsStructuralSTDP) if isinstance(synapse_dynamics, AbstractSynapseDynamicsStructural): - return SynapseDynamicsStructuralSTDP( - synapse_dynamics.partner_selection, synapse_dynamics.formation, - synapse_dynamics.elimination, - self.timing_dependence, self.weight_dependence, - # voltage dependence is not supported - None, self.dendritic_delay_fraction, - synapse_dynamics.f_rew, synapse_dynamics.initial_weight, - synapse_dynamics.initial_delay, synapse_dynamics.s_max, - synapse_dynamics.seed, - 
backprop_delay=self.backprop_delay) + assert False, "Not supported yet" + """ # Otherwise, it is static or neuromodulation, so return ourselves return self @overrides(AbstractPlasticSynapseDynamics.get_value) - def get_value(self, key): - for obj in [self.__timing_dependence, self.__weight_dependence, self]: + def get_value(self, key: str) -> Any: + for obj in [self]: if hasattr(obj, key): return getattr(obj, key) raise InvalidParameterType( f"Type {type(self)} does not have parameter {key}") @overrides(AbstractPlasticSynapseDynamics.set_value) - def set_value(self, key, value): - for obj in [self.__timing_dependence, self.__weight_dependence, self]: + def set_value(self, key: str, value: Any) -> None: + for obj in [self]: if hasattr(obj, key): setattr(obj, key, value) SpynnakerDataView.set_requires_mapping() @@ -177,21 +242,7 @@ class {{synapseName}}Dynamics( f"Type {type(self)} does not have parameter {key}") @property - def weight_dependence(self): - """ - :rtype: AbstractTimingDependence - """ - return self.__weight_dependence - - @property - def timing_dependence(self): - """ - :rtype: AbstractTimingDependence - """ - return self.__timing_dependence - - @property - def dendritic_delay_fraction(self): + def dendritic_delay_fraction(self) -> float: """ Settable. @@ -200,11 +251,11 @@ class {{synapseName}}Dynamics( return self.__dendritic_delay_fraction @dendritic_delay_fraction.setter - def dendritic_delay_fraction(self, new_value): + def dendritic_delay_fraction(self, new_value: float) -> None: self.__dendritic_delay_fraction = new_value @property - def backprop_delay(self): + def backprop_delay(self) -> bool: """ Settable. @@ -213,105 +264,97 @@ class {{synapseName}}Dynamics( return self.__backprop_delay @backprop_delay.setter - def backprop_delay(self, backprop_delay): + def backprop_delay(self, backprop_delay: bool) -> None: self.__backprop_delay = bool(backprop_delay) @property - def neuromodulation(self): + def neuromodulation(self) -> Optional[SynapseDynamicsNeuromodulation]: """ :rtype: SynapseDynamicsNeuromodulation """ return self.__neuromodulation @overrides(AbstractPlasticSynapseDynamics.is_same_as) - def is_same_as(self, synapse_dynamics): + def is_same_as(self, synapse_dynamics: AbstractSynapseDynamics) -> bool: + raise NotImplementedError() + if not isinstance(synapse_dynamics, SynapseDynamicsSTDP): return False - return ( - self.__timing_dependence.is_same_as( - synapse_dynamics.timing_dependence) and - self.__weight_dependence.is_same_as( - synapse_dynamics.weight_dependence) and - (self.__dendritic_delay_fraction == - synapse_dynamics.dendritic_delay_fraction)) - - def get_vertex_executable_suffix(self): + return self.__dendritic_delay_fraction == synapse_dynamics.dendritic_delay_fraction + + def get_vertex_executable_suffix(self) -> str: """ :rtype: str """ - # Get the suffix values for timing and weight dependence - timing_suffix = self.__timing_dependence.vertex_executable_suffix - weight_suffix = self.__weight_dependence.vertex_executable_suffix + name = "" - if self.__neuromodulation: - name = ( - "_stdp_" + - self.__neuromodulation.get_vertex_executable_suffix()) - else: - name = "_stdp_mad_" - name += timing_suffix + "_" + weight_suffix return name - def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types): + def get_parameters_sdram_usage_in_bytes( + self, n_neurons: int, n_synapse_types: int) -> int: """ :param int n_neurons: :param int n_synapse_types: :rtype: int """ - # 32-bits for back-prop delay - size = BYTES_PER_WORD - size += 
self.__timing_dependence.get_parameters_sdram_usage_in_bytes() - size += self.__weight_dependence.get_parameters_sdram_usage_in_bytes( - n_synapse_types, self.__timing_dependence.n_weight_terms) + # one word per parameter/internal value written by write_parameters(); + # kept per synapse type as a conservative upper bound + n_parameters = {{ (synapse.get_parameter_symbols() + synapse.get_internal_symbols()) | length }} + size = n_parameters * BYTES_PER_WORD * n_synapse_types + if self.__neuromodulation: size += self.__neuromodulation.get_parameters_sdram_usage_in_bytes( n_neurons, n_synapse_types) + + print("[NESTML synapse] get_parameters_sdram_usage_in_bytes(n_neurons = " + str(n_neurons) + ", n_synapse_types = " + str(n_synapse_types) + ") = " + str(size) + " bytes") + return size @overrides(AbstractPlasticSynapseDynamics.write_parameters) def write_parameters( - self, spec, region, global_weight_scale, synapse_weight_scales): - spec.comment("Writing Plastic Parameters") + self, spec: DataSpecificationBase, region: int, + global_weight_scale: float, + synapse_weight_scales: NDArray[floating]) -> None: - # Switch focus to the region: - spec.switch_write_focus(region) - # Whether to use back-prop delay - spec.write_value(int(self.__backprop_delay)) + i = 0 + - # Write timing dependence parameters to region - self.__timing_dependence.write_parameters( - spec, global_weight_scale, synapse_weight_scales) + spec.comment("Writing Plastic Parameters") - # Write weight dependence information to region - self.__weight_dependence.write_parameters( - spec, global_weight_scale, synapse_weight_scales, - self.__timing_dependence.n_weight_terms) + # Switch focus to the region: + spec.switch_write_focus(region) +{%- for sym in synapse.get_parameter_symbols() + synapse.get_internal_symbols() %} + # write value for parameter "{{ sym.get_symbol_name() }}" + print("XXXXXXXXX writing value for parameter {{ sym.get_symbol_name() }} ======= " + str(self._nestml_model_variables["{{ sym.get_symbol_name() }}"]) + " ==== encoded --> " + str(hex(float_to_s16_15(self._nestml_model_variables["{{ sym.get_symbol_name() }}"])))) + spec.write_value( + data=float_to_s16_15(self._nestml_model_variables["{{ sym.get_symbol_name() }}"]), # int(i), # XXX should be actual data; use int(i) for testing + data_type=DataType.INT32) + i += 1 +{%- endfor %} + if self.__neuromodulation: self.__neuromodulation.write_parameters( spec, region, global_weight_scale, synapse_weight_scales) @property - def _n_header_bytes(self): + def _n_header_bytes(self) -> int: """ :rtype: int """ - # The header contains a single timestamp and pre-trace - n_bytes = ( - TIME_STAMP_BYTES + self.__timing_dependence.pre_trace_n_bytes) + # The header contains a single timestamp plus one half-word + n_bytes = TIME_STAMP_BYTES + BYTES_PER_SHORT # The actual number of bytes is in a word-aligned struct, so work out # the number of bytes as a number of words return int(math.ceil(float(n_bytes) / BYTES_PER_WORD)) * BYTES_PER_WORD - def __get_n_connections(self, n_connections, check_length_padded=True): + def __get_n_connections( + self, n_connections: int, check_length_padded: bool = True) -> int: """ :param int n_connections: :param bool check_length_padded: :rtype: int """ - synapse_structure = self.__timing_dependence.synaptic_structure if self.__pad_to_length is not None and check_length_padded: n_connections = max(n_connections, self.__pad_to_length) if n_connections == 0: @@ -322,16 +365,19 @@ class {{synapseName}}Dynamics( else (n_connections + 1) // 2) pp_size_bytes = ( self._n_header_bytes + - (synapse_structure.get_n_half_words_per_connection() * - BYTES_PER_SHORT * n_connections)) - # Neuromodulated synapses have the actual weight
separately + 10 * BYTES_PER_SHORT * n_connections) # 10 half-words of plastic data per connection (was 1 when only the half-word weight was stored) + + # Neuromodulated synapses have the actual weight separately if self.__neuromodulation: pp_size_bytes += BYTES_PER_SHORT * n_connections + pp_size_words = int(math.ceil(float(pp_size_bytes) / BYTES_PER_WORD)) return fp_size_words + pp_size_words - def get_n_words_for_plastic_connections(self, n_connections): + @overrides(AbstractPlasticSynapseDynamics. + get_n_words_for_plastic_connections) + def get_n_words_for_plastic_connections(self, n_connections: int) -> int: """ :param int n_connections: :rtype: int @@ -340,25 +386,26 @@ @overrides(AbstractPlasticSynapseDynamics.get_plastic_synaptic_data) def get_plastic_synaptic_data( - self, connections, connection_row_indices, n_rows, - post_vertex_slice, n_synapse_types, max_n_synapses, - max_atoms_per_core): - # pylint: disable=too-many-arguments + self, connections: ConnectionsArray, + connection_row_indices: NDArray[integer], n_rows: int, + n_synapse_types: int, + max_n_synapses: int, max_atoms_per_core: int) -> Tuple[ + List[NDArray[uint32]], List[NDArray[uint32]], + NDArray[uint32], NDArray[uint32]]: n_synapse_type_bits = get_n_bits(n_synapse_types) n_neuron_id_bits = get_n_bits(max_atoms_per_core) neuron_id_mask = (1 << n_neuron_id_bits) - 1 # Get the fixed data fixed_plastic = ( - (connections["delay"].astype("uint16") << + (connections["delay"].astype(uint16) << (n_neuron_id_bits + n_synapse_type_bits)) | - (connections["synapse_type"].astype("uint16") + (connections["synapse_type"].astype(uint16) << n_neuron_id_bits) | - ((connections["target"].astype("uint16") - - post_vertex_slice.lo_atom) & neuron_id_mask)) + (connections["target"].astype(uint16) & neuron_id_mask)) fixed_plastic_rows = self.convert_per_connection_data_to_rows( connection_row_indices, n_rows, - fixed_plastic.view(dtype="uint8").reshape((-1, 2)), + fixed_plastic.view(dtype=uint8).reshape((-1, 2)), max_n_synapses) fp_size = self.get_n_items(fixed_plastic_rows, BYTES_PER_SHORT) if self.__pad_to_length is not None: @@ -369,24 +416,24 @@ # Get the plastic data by inserting the weight into the half-word # specified by the synapse structure - synapse_structure = self.__timing_dependence.synaptic_structure - n_half_words = synapse_structure.get_n_half_words_per_connection() - half_word = synapse_structure.get_weight_half_word() + n_half_words = 10 # 10 half-words of plastic data per connection; must match __get_n_connections above + half_word = 0 # If neuromodulation, the real weight comes first if self.__neuromodulation: n_half_words += 1 half_word = 0 plastic_plastic = numpy.zeros( - len(connections) * n_half_words, dtype="uint16") + len(connections) * n_half_words, dtype=uint16) plastic_plastic[half_word::n_half_words] = \ - numpy.rint(numpy.abs(connections["weight"])).astype("uint16") + numpy.rint(numpy.abs(connections["weight"])).astype(uint16) # Convert the plastic data into groups of bytes per connection and # then into rows - plastic_plastic = plastic_plastic.view(dtype="uint8").reshape( + plastic_plastic_bytes = plastic_plastic.view(dtype=uint8).reshape( (-1, n_half_words * BYTES_PER_SHORT)) plastic_plastic_row_data = self.convert_per_connection_data_to_rows( - connection_row_indices, n_rows, plastic_plastic, max_n_synapses) + connection_row_indices, n_rows, plastic_plastic_bytes, + max_n_synapses) # pp_size = fp_size in words => fp_size * no_bytes / 4 (bytes) if self.__pad_to_length is not None: @@ -394,7 +441,7 @@ class
{{synapseName}}Dynamics( plastic_plastic_row_data = self._pad_row( plastic_plastic_row_data, n_half_words * BYTES_PER_SHORT) plastic_headers = numpy.zeros( - (n_rows, self._n_header_bytes), dtype="uint8") + (n_rows, self._n_header_bytes), dtype=uint8) plastic_plastic_rows = [ numpy.concatenate(( plastic_headers[i], plastic_plastic_row_data[i])) @@ -404,45 +451,51 @@ class {{synapseName}}Dynamics( return fp_data, pp_data, fp_size, pp_size - def _pad_row(self, rows, no_bytes_per_connection): + def _pad_row(self, rows: List[NDArray], + no_bytes_per_connection: int) -> List[NDArray]: """ :param list(~numpy.ndarray) rows: :param int no_bytes_per_connection: :rtype: list(~numpy.ndarray) """ + pad_len = self.__pad_to_length or 1 # Row elements are (individual) bytes return [ numpy.concatenate(( row, numpy.zeros( numpy.clip( - (no_bytes_per_connection * self.__pad_to_length - - row.size), - 0, None)).astype(dtype="uint8")) - ).view(dtype="uint8") + no_bytes_per_connection * pad_len - row.size, + 0, None)).astype(dtype=uint8)) + ).view(dtype=uint8) for row in rows] @overrides( AbstractPlasticSynapseDynamics.get_n_plastic_plastic_words_per_row) - def get_n_plastic_plastic_words_per_row(self, pp_size): + def get_n_plastic_plastic_words_per_row( + self, pp_size: NDArray[uint32]) -> NDArray[integer]: # pp_size is in words, so return return pp_size @overrides( AbstractPlasticSynapseDynamics.get_n_fixed_plastic_words_per_row) - def get_n_fixed_plastic_words_per_row(self, fp_size): + def get_n_fixed_plastic_words_per_row( + self, fp_size: NDArray[uint32]) -> NDArray[integer]: # fp_size is in half-words - return numpy.ceil(fp_size / 2.0).astype(dtype="uint32") + return numpy.ceil(fp_size / 2.0).astype(dtype=uint32) @overrides(AbstractPlasticSynapseDynamics.get_n_synapses_in_rows) - def get_n_synapses_in_rows(self, pp_size, fp_size): + def get_n_synapses_in_rows(self, pp_size: NDArray[uint32], + fp_size: NDArray[uint32]) -> NDArray[integer]: # Each fixed-plastic synapse is a half-word and fp_size is in half # words so just return it return fp_size @overrides(AbstractPlasticSynapseDynamics.read_plastic_synaptic_data) def read_plastic_synaptic_data( - self, post_vertex_slice, n_synapse_types, pp_size, pp_data, - fp_size, fp_data, max_atoms_per_core): + self, n_synapse_types: int, pp_size: NDArray[uint32], + pp_data: List[NDArray[uint32]], fp_size: NDArray[uint32], + fp_data: List[NDArray[uint32]], + max_atoms_per_core: int) -> ConnectionsArray: # pylint: disable=too-many-arguments n_rows = len(fp_size) @@ -451,69 +504,80 @@ class {{synapseName}}Dynamics( neuron_id_mask = (1 << n_neuron_id_bits) - 1 data_fixed = numpy.concatenate([ - fp_data[i].view(dtype="uint16")[0:fp_size[i]] + fp_data[i].view(dtype=uint16)[0:fp_size[i]] for i in range(n_rows)]) pp_without_headers = [ - row.view(dtype="uint8")[self._n_header_bytes:] for row in pp_data] - synapse_structure = self.__timing_dependence.synaptic_structure - n_half_words = synapse_structure.get_n_half_words_per_connection() - half_word = synapse_structure.get_weight_half_word() + row.view(dtype=uint8)[self._n_header_bytes:] for row in pp_data] + n_half_words = 10 + half_word = 0 if self.__neuromodulation: n_half_words += 1 half_word = 0 pp_half_words = numpy.concatenate([ - pp[:size * n_half_words * BYTES_PER_SHORT].view("uint16")[ + pp[:size * n_half_words * BYTES_PER_SHORT].view(uint16)[ half_word::n_half_words] for pp, size in zip(pp_without_headers, fp_size)]) connections = numpy.zeros( - data_fixed.size, dtype=self.NUMPY_CONNECTORS_DTYPE) + data_fixed.size, 
dtype=NUMPY_CONNECTORS_DTYPE) connections["source"] = numpy.concatenate( [numpy.repeat(i, fp_size[i]) for i in range(len(fp_size))]) - connections["target"] = ( - (data_fixed & neuron_id_mask) + post_vertex_slice.lo_atom) + connections["target"] = data_fixed & neuron_id_mask connections["weight"] = pp_half_words connections["delay"] = data_fixed >> ( n_neuron_id_bits + n_synapse_type_bits) return connections @overrides(AbstractPlasticSynapseDynamics.get_weight_mean) - def get_weight_mean(self, connector, synapse_info): + def get_weight_mean(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: # Because the weights could all be changed to the maximum, the mean # has to be given as the maximum for scaling return self.get_weight_maximum(connector, synapse_info) @overrides(AbstractPlasticSynapseDynamics.get_weight_variance) - def get_weight_variance(self, connector, weights, synapse_info): + def get_weight_variance( + self, connector: AbstractConnector, weights: Weight_Types, + synapse_info: SynapseInformation) -> float: # Because the weights could all be changed to the maximum, the variance # has to be given as no variance return 0.0 @overrides(AbstractPlasticSynapseDynamics.get_weight_maximum) - def get_weight_maximum(self, connector, synapse_info): + def get_weight_maximum(self, connector: AbstractConnector, + synapse_info: SynapseInformation) -> float: w_max = super().get_weight_maximum(connector, synapse_info) # The maximum weight is the largest that it could be set to from # the weight dependence - return max(w_max, self.__weight_dependence.weight_maximum) + return max(w_max, 9999) # XXX ??? @overrides(AbstractPlasticSynapseDynamics.get_parameter_names) - def get_parameter_names(self): - names = ['weight', 'delay'] - names.extend(self.__timing_dependence.get_parameter_names()) - names.extend(self.__weight_dependence.get_parameter_names()) - return names + def get_parameter_names(self) -> Iterable[str]: +# yield 'weight' +# yield 'delay' +{%- for variable_symbol in synapse.get_parameter_symbols() %} +{%- set variable = utils.get_parameter_variable_by_name(astnode, variable_symbol.get_symbol_name()) %} +{%- set isHomogeneous = PyNestMLLexer["DECORATOR_HOMOGENEOUS"] in variable_symbol.get_decorators() %} +{# {%- if not isHomogeneous and variable.get_name() != synapse_weight_variable and variable.get_name() != synapse_delay_variable %} #} + yield '{{ variable.name }}' +{# {%- endif %} #} +{%- endfor %} @overrides(AbstractPlasticSynapseDynamics.get_max_synapses) - def get_max_synapses(self, n_words): + def get_max_synapses(self, n_words: int) -> int: + """ + Get the maximum number of synapses that can be held in the given + number of words. 
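        A rough worked check of the arithmetic described here, under the sizes
        used in this patch (4-byte words, 2-byte half-words, a 2-word header,
        and an assumed cost of 10 plastic half-words plus 1 fixed-plastic
        half-word per connection; padding and word alignment are ignored):

            >>> BYTES_PER_WORD, BYTES_PER_SHORT = 4, 2
            >>> n_words, n_header_words = 255, 2
            >>> bytes_per_connection = (10 + 1) * BYTES_PER_SHORT
            >>> ((n_words - n_header_words) * BYTES_PER_WORD) // bytes_per_connection
            46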
+ :param n_words: The number of words the synapses must fit in + """ # Subtract the header size that will always exist n_header_words = self._n_header_bytes // BYTES_PER_WORD n_words_space = n_words - n_header_words # Get plastic plastic size per connection - synapse_structure = self.__timing_dependence.synaptic_structure bytes_per_pp = ( - synapse_structure.get_n_half_words_per_connection() * + 10 * # 10 plastic half-words per connection; must match get_plastic_synaptic_data above BYTES_PER_SHORT) if self.__neuromodulation: bytes_per_pp += BYTES_PER_SHORT @@ -536,26 +600,29 @@ class {{synapseName}}Dynamics( @property @overrides(AbstractGenerateOnMachine.gen_matrix_id) - def gen_matrix_id(self): + def gen_matrix_id(self) -> int: return MatrixGeneratorID.STDP_MATRIX.value @overrides(AbstractGenerateOnMachine.gen_matrix_params) def gen_matrix_params( - self, synaptic_matrix_offset, delayed_matrix_offset, app_edge, - synapse_info, max_row_info, max_pre_atoms_per_core, - max_post_atoms_per_core): + self, synaptic_matrix_offset: int, delayed_matrix_offset: int, + app_edge: ProjectionApplicationEdge, + synapse_info: SynapseInformation, max_row_info: MaxRowInfo, + max_pre_atoms_per_core: int, max_post_atoms_per_core: int + ) -> NDArray[uint32]: vertex = app_edge.post_vertex n_synapse_type_bits = get_n_bits( vertex.neuron_impl.get_n_synapse_types()) n_synapse_index_bits = get_n_bits(max_post_atoms_per_core) max_delay = app_edge.post_vertex.splitter.max_support_delay() max_delay_bits = get_n_bits(max_delay) - synapse_struct = self.__timing_dependence.synaptic_structure - n_half_words = synapse_struct.get_n_half_words_per_connection() - half_word = synapse_struct.get_weight_half_word() + n_half_words = 10 + half_word = 0 if self.__neuromodulation: n_half_words += 1 half_word = 0 + write_row_number_to_header = 0 + row_offset = 0 return numpy.array([ synaptic_matrix_offset, delayed_matrix_offset, max_row_info.undelayed_max_n_synapses, @@ -565,36 +632,37 @@ class {{synapseName}}Dynamics( n_synapse_index_bits, app_edge.n_delay_stages + 1, max_delay, max_delay_bits, app_edge.pre_vertex.n_atoms, max_pre_atoms_per_core, self._n_header_bytes // BYTES_PER_SHORT, - n_half_words, half_word], - dtype=numpy.uint32) + n_half_words, half_word, write_row_number_to_header, row_offset], + dtype=uint32) @property - @overrides(AbstractGenerateOnMachine.
- gen_matrix_params_size_in_bytes) - def gen_matrix_params_size_in_bytes(self): - return 17 * BYTES_PER_WORD + @overrides(AbstractGenerateOnMachine.gen_matrix_params_size_in_bytes) + def gen_matrix_params_size_in_bytes(self) -> int: + return 19 * BYTES_PER_WORD @property @overrides(AbstractPlasticSynapseDynamics.changes_during_run) - def changes_during_run(self): + def changes_during_run(self) -> bool: return True - @property - @overrides(AbstractPlasticSynapseDynamics.weight) - def weight(self): - return self.__weight - - @property - @overrides(AbstractPlasticSynapseDynamics.delay) - def delay(self): - return self.__delay - @property @overrides(AbstractPlasticSynapseDynamics.is_combined_core_capable) - def is_combined_core_capable(self): + def is_combined_core_capable(self) -> bool: return self.__neuromodulation is None + @property + @overrides(AbstractPlasticSynapseDynamics.is_split_core_capable) + def is_split_core_capable(self) -> bool: + return False + @property @overrides(AbstractPlasticSynapseDynamics.pad_to_length) - def pad_to_length(self): + def pad_to_length(self) -> Optional[int]: return self.__pad_to_length + + @property + @overrides(AbstractPlasticSynapseDynamics.synapses_per_second) + def synapses_per_second(self) -> int: + # From Synapse-Centric Mapping of Cortical Models to the SpiNNaker + # Neuromorphic Architecture + return 1400000 diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing.py.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing.py.jinja2 deleted file mode 100644 index c4dab597d..000000000 --- a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing.py.jinja2 +++ /dev/null @@ -1,136 +0,0 @@ -from spinn_utilities.overrides import overrides -from spinn_front_end_common.interface.ds import DataType -from spinn_front_end_common.utilities.constants import BYTES_PER_WORD -from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence import ( - AbstractTimingDependence) -from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( - SynapseStructureWeightOnly) - - -class MyTimingDependence(AbstractTimingDependence): - __slots__ = [ - "_a_minus", - "_a_plus", - "_my_depression_parameter", - "_my_potentiation_parameter", - "_synapse_structure"] - - NUM_PARAMETERS = 2 - - # noinspection PyPep8Naming - def __init__( - self, - - # TODO: update parameters - my_potentiation_parameter, - my_depression_parameter, - - A_plus=0.01, A_minus=0.01): - - # TODO: Store any parameters - self._my_potentiation_parameter = my_potentiation_parameter - self._my_depression_parameter = my_depression_parameter - - # TODO: Update to match the synapse structure in the C code - self._synapse_structure = SynapseStructureWeightOnly() - - # Are these in the c code? 
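        # For context: A_plus and A_minus are the amplitudes of the pair-based
        # STDP rule that this scaffold forwarded to the weight dependence. A
        # minimal sketch of that rule (tau constants and additive bounds are
        # assumed here, not taken from this file):
        #
        #   import math
        #
        #   def stdp_dw(dt_ms, a_plus=0.01, a_minus=0.01,
        #               tau_plus=20.0, tau_minus=20.0):
        #       """Weight change for one spike pair; dt_ms = t_post - t_pre."""
        #       if dt_ms > 0:      # pre before post: potentiate
        #           return a_plus * math.exp(-dt_ms / tau_plus)
        #       if dt_ms < 0:      # post before pre: depress
        #           return -a_minus * math.exp(dt_ms / tau_minus)
        #       return 0.0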
- self._a_plus = A_plus - self._a_minus = A_minus - - # TODO: Add getters and setters for parameters - - @property - def my_potentiation_parameter(self): - return self._my_potentiation_parameter - - @my_potentiation_parameter.setter - def my_potentiation_parameter(self, my_potentiation_parameter): - self._my_potentiation_parameter = my_potentiation_parameter - - @property - def my_depression_parameter(self): - return self._my_depression_parameter - - @my_depression_parameter.setter - def my_depression_parameter(self, my_depression_parameter): - self._my_depression_parameter = my_depression_parameter - - @overrides(AbstractTimingDependence.is_same_as) - def is_same_as(self, timing_dependence): - # TODO: Update with the correct class name - if not isinstance(timing_dependence, MyTimingDependence): - return False - - # TODO: update to check parameters are equal - return ( - (self._my_potentiation_parameter == - timing_dependence.my_potentiation_parameter) and - (self._my_depression_parameter == - timing_dependence.my_depression_parameter)) - - @property - def vertex_executable_suffix(self): - """ The suffix to be appended to the vertex executable for this rule - """ - # TODO: Add the extension to be added to the binary executable name - # to indicate that it is compiled with this timing dependence - # Note: The expected format of the binary name is: - # _stdp[_mad|]__ - return "my_timing" - - @property - def pre_trace_n_bytes(self): - """ The number of bytes used by the pre-trace of the rule per neuron - """ - # TODO: update to match the number of bytes in the pre_trace_t data - # structure in the C code - return 0 - - @overrides(AbstractTimingDependence.get_parameters_sdram_usage_in_bytes) - def get_parameters_sdram_usage_in_bytes(self): - # TODO: update to match the number of bytes used by the parameters - return self.NUM_PARAMETERS * BYTES_PER_WORD - - @property - def n_weight_terms(self): - """ The number of weight terms expected by this timing rule - """ - # TODO: update to match the number of weight terms expected in the - # weight rule according to the C code - return 1 - - @overrides(AbstractTimingDependence.write_parameters) - def write_parameters( - self, spec, global_weight_scale, synapse_weight_scales): - # TODO: update to write the parameters - spec.write_value( - self._my_potentiation_parameter, data_type=DataType.S1615) - spec.write_value( - self._my_depression_parameter, data_type=DataType.S1615) - - @overrides(AbstractTimingDependence.get_parameter_names) - def get_parameter_names(self): - return ['my_potentiation_parameter', 'my_depression_parameter'] - - @property - def synaptic_structure(self): - """ Get the synaptic structure of the plastic part of the rows - """ - return self._synapse_structure - - @property - def A_plus(self): - return self._a_plus - - @A_plus.setter - def A_plus(self, new_value): - self._a_plus = new_value - - @property - def A_minus(self): - return self._a_minus - - @A_minus.setter - def A_minus(self, new_value): - self._a_minus = new_value \ No newline at end of file diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing_impl.c.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing_impl.c.jinja2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_timing_impl.h.jinja2 deleted file mode 100644 index e69de29bb..000000000 diff --git 
a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight.py.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight.py.jinja2 deleted file mode 100644 index 723075dec..000000000 --- a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight.py.jinja2 +++ /dev/null @@ -1,123 +0,0 @@ -from spinn_utilities.overrides import overrides -from spinn_front_end_common.interface.ds import DataType -from spinn_front_end_common.utilities.constants import BYTES_PER_WORD -from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence import ( - AbstractWeightDependence, AbstractHasAPlusAMinus) - - -class MyWeightDependence(AbstractHasAPlusAMinus, AbstractWeightDependence): - __slots__ = [ - "_my_weight_parameter", - "_w_max", - "_w_min"] - - # Must match number of words written by write_parameters() method - WORDS_PER_SYNAPSE_TYPE = 3 - - def __init__( - self, - - # TODO: update the parameters - w_min=0.0, w_max=1.0, my_weight_parameter=0.1): - super().__init__() - - # TODO: Store any parameters - self._w_min = w_min - self._w_max = w_max - self._my_weight_parameter = my_weight_parameter - - # TODO: Add getters and setters for the parameters - - @property - def w_min(self): - return self._w_min - - @w_min.setter - def w_min(self, w_min): - self._w_min = w_min - - @property - def w_max(self): - return self._w_max - - @w_max.setter - def w_max(self, w_max): - self._w_max = w_max - - @property - def my_weight_parameter(self): - return self._my_weight_parameter - - @my_weight_parameter.setter - def my_weight_parameter(self, my_weight_parameter): - self._my_weight_parameter = my_weight_parameter - - @overrides(AbstractWeightDependence.is_same_as) - def is_same_as(self, weight_dependence): - # TODO: Update with the correct class name - if not isinstance(weight_dependence, MyWeightDependence): - return False - - # TODO: update to check parameters are equal - # pylint: disable=protected-access - return ( - (self._w_min == weight_dependence._w_min) and - (self._w_max == weight_dependence._w_max) and - (self._my_weight_parameter == - weight_dependence._my_weight_parameter)) - - @property - def vertex_executable_suffix(self): - """ The suffix to be appended to the vertex executable for this rule - """ - # TODO: Add the extension to be added to the binary executable name - # to indicate that it is compiled with this weight dependence - # Note: The expected format of the binary name is: - # _stdp[_mad|]__ - return "my_weight" - - @overrides(AbstractWeightDependence.get_parameters_sdram_usage_in_bytes) - def get_parameters_sdram_usage_in_bytes( - self, n_synapse_types, n_weight_terms): - # TODO: update to match the number of bytes used by the parameters - if n_weight_terms != 1: - raise NotImplementedError( - "My weight dependence only supports one term") - - return self.WORDS_PER_SYNAPSE_TYPE * BYTES_PER_WORD * n_synapse_types - - @overrides(AbstractWeightDependence.write_parameters) - def write_parameters( - self, spec, global_weight_scale, synapse_weight_scales, - n_weight_terms): - # TODO: update to write the parameters - # Loop through each synapse type's weight scale - for w in synapse_weight_scales: - # Scale the maximum and minimum weights to fixed-point values - # based on the weight scaling that has been done externally - spec.write_value( - data=int(round(self._w_min * w)), data_type=DataType.INT32) - spec.write_value( - data=int(round(self._w_max * w)), data_type=DataType.INT32) - - # Write my parameter as an appropriately scaled fixed-point number - 
spec.write_value( - data=int(round(self._my_weight_parameter * w)), - data_type=DataType.INT32) - - if n_weight_terms != 1: - raise NotImplementedError( - "My weight dependence only supports one term") - - @property - def weight_maximum(self): - """ The maximum weight that will ever be set in a synapse as a result\ - of this rule - """ - # TODO: update to return the maximum weight that this rule will ever - # give to a synapse - return self._w_max - - @overrides(AbstractWeightDependence.get_parameter_names) - def get_parameter_names(self): - return ['w_min', 'w_max', 'my_weight_parameter'] diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight_impl.c.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight_impl.c.jinja2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker/@SYNAPSE_NAME@_weight_impl.h.jinja2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/pynestml/codegeneration/resources_spinnaker/Makefile_@NEURON_NAME@_impl.jinja2 b/pynestml/codegeneration/resources_spinnaker/Makefile_@NEURON_NAME@_impl.jinja2 index e9d17b4dd..66764a104 100644 --- a/pynestml/codegeneration/resources_spinnaker/Makefile_@NEURON_NAME@_impl.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/Makefile_@NEURON_NAME@_impl.jinja2 @@ -1,6 +1,13 @@ APP = $(notdir $(CURDIR)) -NEURON_IMPL_H = $(EXTRA_SRC_DIR)/my_models/implementations/{{neuronName}}_impl.h -SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c + +# neuron co-generated with plastic synapse + +NEURON_IMPL_H = $(EXTRA_SRC_DIR)/my_models/implementations/{{ neuronName }}_impl.h +{# XXX: TODO: replace synapse name with NESTML generated name #} +SYNAPSE_DYNAMICS = $(EXTRA_SRC_DIR)/my_models/implementations/stdp_synapse_nestml_impl.c +SYNAPSE_DYNAMICS_CUSTOM = 1 include ../extra.mk + +{# #} diff --git a/pynestml/codegeneration/resources_spinnaker/Makefile_@SYNAPSE_NAME@_impl.jinja2 b/pynestml/codegeneration/resources_spinnaker/Makefile_@SYNAPSE_NAME@_impl.jinja2 index 4dea5783e..e285f57d8 100644 --- a/pynestml/codegeneration/resources_spinnaker/Makefile_@SYNAPSE_NAME@_impl.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/Makefile_@SYNAPSE_NAME@_impl.jinja2 @@ -1,11 +1,8 @@ APP = $(notdir $(CURDIR)) -BUILD_DIR = build/ - -NEURON_IMPL_H = $(EXTRA_SRC_DIR)/my_models/implementations/iaf_psc_exp_nestml_impl.h -SYNAPSE_DYNAMICS = $(EXTRA_SRC_DIR)/my_models/implementations/{{synapseName}}_impl.c -TIMING_DEPENDENCE = $(EXTRA_SRC_DIR)/my_models/implementations/{{synapseName}}_timing_impl.c -TIMING_DEPENDENCE_H = $(EXTRA_SRC_DIR)/my_models/implementations/{{synapseName}}_timing_impl.h -WEIGHT_DEPENDENCE = $(EXTRA_SRC_DIR)/my_models/implementations/{{synapseName}}_weight_impl.c -WEIGHT_DEPENDENCE_H = $(EXTRA_SRC_DIR)/my_models/implementations/{{synapseName}}_weight_impl.h +BUILD_DIR = build/ -include ../extra.mk +{# NEURON_IMPL_H = $(EXTRA_SRC_DIR)/my_models/implementations/{{paired_neuron_name}}_impl.h #} +SYNAPSE_DYNAMICS = $(EXTRA_SRC_DIR)/my_models/implementations/{{ synapseName }}_impl.c +{# SYNAPSE_DYNAMICS_CUSTOM = 1 #} + +include ../extra_synapse.mk diff --git a/pynestml/codegeneration/resources_spinnaker/Makefile_models.jinja2 b/pynestml/codegeneration/resources_spinnaker/Makefile_models.jinja2 index 79dd6bacd..8d9f58c36 100644 --- a/pynestml/codegeneration/resources_spinnaker/Makefile_models.jinja2 +++ 
b/pynestml/codegeneration/resources_spinnaker/Makefile_models.jinja2 @@ -1,13 +1,15 @@ -MODELS = \ -{%- for neuron in neurons %} -{{neuron.get_name()}}_impl \ -{%- endfor %} -{%- for synapse in synapses %} -{{synapse.get_name()}}_impl \ -{%- endfor %} +#find all directories of synapse and neuron models +IMPL_DIRS := $(wildcard *_impl) all: - for d in $(MODELS); do $(MAKE) -C $$d || exit $$?; done + for dir in $(IMPL_DIRS); do \ + $(MAKE) -C $$dir; \ + done clean: - for d in $(MODELS); do $(MAKE) -C $$d clean || exit $$?; done + for dir in $(IMPL_DIRS); do \ + $(MAKE) -C $$dir clean; \ + done + +# declare all model directories as phony targets +.PHONY: all clean $(IMPL_DIRS) diff --git a/pynestml/codegeneration/resources_spinnaker/NewSynapseApi.h b/pynestml/codegeneration/resources_spinnaker/NewSynapseApi.h deleted file mode 100644 index 565f69636..000000000 --- a/pynestml/codegeneration/resources_spinnaker/NewSynapseApi.h +++ /dev/null @@ -1,328 +0,0 @@ -// Interesting template in NewModelTemplate/c_models/src/my_models/playsticity/stdp/timing_dependence/my_timing.h => _timing_impl.h -// maybe this s not necessaryif we just base the synapse on 'synapse_dynamics_stdp_mad_impl.c' which implements as a synapse base class -// alternatively we could copy that file here for changes - -// The current for the models makefile must be named the same as the put toghether name from python, where synaps dynamics timing and weight is added to the name. -// we also have to change the synapse_build.mk when we want to use a single module - - -/*** COMMON INCLUDES ***/ -#include -#include -#include -#include -#include -#include -#include - -/*** SYSTEM INCLUDES ***/ -#include // => Used in system -#include // => Used in system -/* Structure for one synapse, called row - * | Weight | Delay | Synapse Type | Neuron Index | - * |-------------------|------------------|-----------------|------------------| - * |SYNAPSE_WEIGHT_BITS|SYNAPSE_DELAY_BITS|SYNAPSE_TYPE_BITS|SYNAPSE_INDEX_BITS| - * | | | SYNAPSE_TYPE_INDEX_BITS | - */ - -/*** POST EVENTS ***/ -// API for pre and post spike processing and data structures, independent of system -#include - -/*** SYNAPSE STRUCTURE ***/ -// API for state management of synapse, mostly independet of system -#include - -/*** SYNAPSE DYNAMICS***/ -// For implementation of this API see synapse_dynamics_stdp_common and synapse_dynamics_stdp_mad_impl -#include - -/*** TIMING DEPENDENCE ***/ -// API for timing implementation, independent of system -#include - - -/*** WEIGHT DEPENDENCE ***/ -// API for weight implementation, independent of system -#include -// We can ommit this, adds API for depression and potentiation, independet of system -#include - - -/*** DATA STRUCTURES ***/ - -//Used only by us -typedef struct stdp_params { -} stdp_params; - -// Fixed parameters of synapse -//Used only by us -typedef struct fixed_stdp_synapse { -} fixed_stdp_synapse; - -//Used only by us -typedef struct { -} pre_event_history_t; - -//Used only by us -typedef struct { -} post_event_history_t; - -//Used only by us -typedef struct { -} post_event_window_t; - -// Parameters shared by all weights => modify for synapses -//Used only by us -typedef struct { -} plasticity_weight_region_data_t; - -//Intermediate datastructure for eg. 
updating = more bits => modify for synapse -//Used only by us -typedef struct { -} weight_state_t; - -//Used only by us -typedef struct post_trace_t { -} post_trace_t; - -//Used only by us -typedef struct pre_trace_t { -} pre_trace_t; - -//Used only by us -typedef struct my_timing_config { -} my_timing_config_t; - - -/*** TYPE DEFS ***/ -// weight_t from synapse_row.h (uint of size SYNAPSE_WEIGHT_BITS: DEFAULT 16) -// => First bits of synapse row - -// Below only when only using weight -//Used only by us -typedef weight_t plastic_synapse_t; -//Used only by us -typedef weight_state_t update_state_t; -//Used only by us -typedef weight_t final_state_t; - - -/*** POST EVENTS ***/ -// => All methods from the timing API are called only by us - -//Called only by us -static inline void print_event_history(const post_event_history_t *events) { -} - -//Called only by us -static inline post_event_history_t *post_events_init_buffers( - uint32_t n_neurons) { -} - -// Setup a window pointing to first event in window, linking to previous event and counting all events in window -//Called only by us -static inline post_event_window_t post_events_get_window_delayed( - const post_event_history_t *events, uint32_t begin_time, - uint32_t end_time) { -} - -// Return next event in window -//Called only by us -static inline post_event_window_t post_events_next( - post_event_window_t window) { -} - -// Add event and time to buffer, drop oldest event if buffer is full -//Called only by us -static inline void post_events_add( - uint32_t time, post_event_history_t *events, post_trace_t trace) { - -} - - -//Called only by us -static inline void print_delayed_window_events( - const post_event_history_t *post_event_history, - uint32_t begin_time, uint32_t end_time, uint32_t delay_dendritic) { - -} - -/*** SYNAPSE STRUCTURE ***/ -// => All methods from the timing API are called only by us - -// Translate from 16 bit to 32 bit for computing updates -//Called only by us -static update_state_t synapse_structure_get_update_state( - plastic_synapse_t synaptic_word, index_t synapse_type); - -// Translate updated state from 32 bit to 16 bit -//Called only by us -static final_state_t synapse_structure_get_final_state( - update_state_t state); - -// Extract weight from final state -//Called only by us -static weight_t synapse_structure_get_final_weight( - final_state_t final_state); - -// Translate from state to synapse word -> could be the same data structure -//Called only by us -static plastic_synapse_t synapse_structure_get_final_synaptic_word( - final_state_t final_state); - -// Initialise state -//Called only by us, weight_t not defined by us -static plastic_synapse_t synapse_structure_create_synapse(weight_t weight); - -// Extract weight -//Called only by us, weight_t not defined by us -static weight_t synapse_structure_get_weight(plastic_synapse_t synaptic_word); - -// Rely on implementation of weight dependece to decay -//Called only by us -static void synapse_structure_decay_weight(update_state_t *state, uint32_t decay); - -// Rely on implementation in weight dependece to update weight -//Called only by us -static accum synapse_structure_get_update_weight(update_state_t state); - - -/*** SYNAPTIC DYNAMICS ***/ -// => Entrypoint for system calls - -//System called (c_main_synapse_common.h) -bool synapse_dynamics_initialise( - address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts); - -//System called (synapses.c) -bool 
synapse_dynamics_process_plastic_synapses( - synapse_row_plastic_data_t *plastic_region_data, - synapse_row_fixed_part_t *fixed_region, - weight_t *ring_buffers, uint32_t time, uint32_t colour_delay, - bool *write_back); - -//System called (send_spike.h) -void synapse_dynamics_process_post_synaptic_event( - uint32_t time, index_t neuron_index); - - -//System called (synapses.c) -void synapse_dynamics_print_plastic_synapses( - synapse_row_plastic_data_t *plastic_region_data, - synapse_row_fixed_part_t *fixed_region, - uint32_t *ring_buffer_to_input_buffer_left_shifts); - -//System called (synapses.c) -uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void); - -//System called (c_main_synapse_common.h) -uint32_t synapse_dynamics_get_plastic_saturation_count(void); - -//Border zone (topographic_map_impl.c) -> neuromodulation only -bool synapse_dynamics_find_neuron( - uint32_t id, synaptic_row_t row, weight_t *weight, uint16_t *delay, - uint32_t *offset, uint32_t *synapse_type); - -//Border zone (sp_structs.h) -> neuromodulation only -bool synapse_dynamics_remove_neuron(uint32_t offset, synaptic_row_t row); - -//Border zone (sp_structs.h) -> neuromodulation only -bool synapse_dynamics_add_neuron( - uint32_t id, synaptic_row_t row, weight_t weight, - uint32_t delay, uint32_t type); - -//Border zone (topographic_map_impl.c) -> neuromodulation only -uint32_t synapse_dynamics_n_connections_in_row(synapse_row_fixed_part_t *fixed); - - -/*** WEIGHT DEPENDENCE ***/ -// => All methods from the timing API are called only by us - -//Called only by us -address_t weight_initialise( - address_t address, - uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_leftshift); - -//Called only by us, weight_t not defined by us -static inline weight_state_t weight_get_initial( - weight_t weight, index_t synapse_type) { - -} - -// Intermediate store depression -// Called only by us -static inline weight_state_t weight_one_term_apply_depression( - weight_state_t state, int32_t depression) { - -} - -// Intermediate store potentiation -// Called only by us -static inline weight_state_t weight_one_term_apply_potentiation( - weight_state_t state, int32_t potentiation) { - - -} - -// Apply potentiation and depression to weight and return -//Called only by us, weight_t not defined by us -static inline weight_t weight_get_final(weight_state_t new_state) { - -} - -//Called only by us -__attribute__((unused)) // Marked unused as only used sometimes -static void weight_decay(weight_state_t *state, int32_t decay) { -} - -//Called only by us -__attribute__((unused)) // Marked unused as only used sometimes -static accum weight_get_update(weight_state_t state) { - -} - - -/*** TIMING DEPENDENCE ***/ -// => All methods from the timing API are called only by us - -//Called only by us -address_t timing_initialise(address_t address); - -//Called only by us -static inline post_trace_t timing_get_initial_post_trace(void) { - -} - -//Called only by us -static inline post_trace_t timing_add_post_spike( - uint32_t time, uint32_t last_time, post_trace_t last_trace) { - -} - -//Called only by us -static inline pre_trace_t timing_add_pre_spike( - uint32_t time, uint32_t last_time, pre_trace_t last_trace) { - -} - -//Called only by us -static inline update_state_t timing_apply_pre_spike( - uint32_t time, pre_trace_t trace, uint32_t last_pre_time, - pre_trace_t last_pre_trace, uint32_t last_post_time, - post_trace_t last_post_trace, update_state_t previous_state) { -} - -//Called only by us -static inline update_state_t 
timing_apply_post_spike( - uint32_t time, post_trace_t trace, uint32_t last_pre_time, - pre_trace_t last_pre_trace, uint32_t last_post_time, - post_trace_t last_post_trace, update_state_t previous_state) { -} - -//Called only by us -static post_trace_t timing_decay_post( - uint32_t time, uint32_t last_time, post_trace_t last_trace) { -} \ No newline at end of file diff --git a/pynestml/codegeneration/resources_spinnaker/directives_cpp/PredefinedFunction_emit_spike.jinja2 b/pynestml/codegeneration/resources_spinnaker/directives_cpp/PredefinedFunction_emit_spike.jinja2 index fc87b70d2..9f6cd44e8 100644 --- a/pynestml/codegeneration/resources_spinnaker/directives_cpp/PredefinedFunction_emit_spike.jinja2 +++ b/pynestml/codegeneration/resources_spinnaker/directives_cpp/PredefinedFunction_emit_spike.jinja2 @@ -11,6 +11,6 @@ neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); send_spike(timer_count, time, neuron_index); {%- else %} {#- weight and delay parameters given -- emit_spike() called from within synapse #} -#error "Synapse code generation not supported yet for SpiNNaker" +//#error "Synapse code generation not supported yet for SpiNNaker" {%- endif %} // end generated code for emit_spike() function diff --git a/pynestml/codegeneration/resources_spinnaker/synapse_build.mk b/pynestml/codegeneration/resources_spinnaker/synapse_build.mk deleted file mode 100644 index b6bfaffa2..000000000 --- a/pynestml/codegeneration/resources_spinnaker/synapse_build.mk +++ /dev/null @@ -1,265 +0,0 @@ -# See Notes in sPyNNaker/neural_modelling/CHANGES_April_2018 - -# Copyright (c) 2017 The University of Manchester -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# If SPINN_DIRS is not defined, this is an error! -ifndef SPINN_DIRS - $(error SPINN_DIRS is not set. Please define SPINN_DIRS (possibly by running "source setup" in the spinnaker package folder)) -endif - -# If NEURAL_MODELLING_DIRS is not defined, this is an error! -ifndef NEURAL_MODELLING_DIRS - $(error NEURAL_MODELLING_DIRS is not set. 
Please define NEURAL_MODELLING_DIRS (possibly by running "source setup" in the sPyNNaker folder)) -endif -#Check NEURAL_MODELLING_DIRS -MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) -CHECK_PATH := $(NEURAL_MODELLING_DIRS)/makefiles/synapse_only/synapse_build.mk -ifneq ($(CHECK_PATH), $(MAKEFILE_PATH)) - $(error Please check NEURAL_MODELLING_DIRS as based on that this file is at $(CHECK_PATH) when it is actually at $(MAKEFILE_PATH)) -endif - -# Set logging levels -ifeq ($(SPYNNAKER_DEBUG), DEBUG) - SYNAPSE_DEBUG = LOG_DEBUG - PLASTIC_DEBUG = LOG_DEBUG -endif - -ifndef SYNAPSE_DEBUG - SYNAPSE_DEBUG = LOG_INFO -endif - -ifndef PLASTIC_DEBUG - PLASTIC_DEBUG = LOG_INFO -endif - -#POPULATION_TABLE_IMPL := fixed -POPULATION_TABLE_IMPL := binary_search - -# Add source directory - -# Define the directories -# Path flag to replace with the modified dir (abspath drops the final /) -NEURON_DIR := $(abspath $(NEURAL_MODELLING_DIRS)/src) -MODIFIED_DIR :=$(dir $(abspath $(NEURON_DIR)))modified_src/ -SOURCE_DIRS += $(NEURON_DIR) - -# Define a rule to find the source directory of the given file. -# This attempts to find each of SOURCE_DIRS within the given file name; the -# first one that matches is then returned. If none match, an empty string -# will be returned. -define get_source_dir#(file) -$(firstword $(strip $(foreach d, $(sort $(SOURCE_DIRS)), $(findstring $(d), $(1))))) -endef - -# Define rule to strip any SOURCE_DIRS from source_file to allow use via local.mk. -# If no match is found, the value is returned untouched -# (though this will probably fail later). -define strip_source_dirs#(source_file) -$(or $(patsubst $(call get_source_dir, $(1))/%,%,$(1)), $(1)) -endef - -# Define a rule to replace any SOURCE_DIRS from header_file with the modified_src folder. -define replace_source_dirs#(header_file) -$(patsubst $(call get_source_dir, $(1))%, $(dir $(call get_source_dir, $(1)))modified_src%, $(1)) -endef - -# Need to build each neuron seperately or complier gets confused -# BUILD_DIR and APP_OUTPUT_DIR end with a / for historictical/ shared reasons -ifndef BUILD_DIR - BUILD_DIR := $(NEURAL_MODELLING_DIRS)/builds/$(APP)/ -endif -ifndef APP_OUTPUT_DIR - APP_OUTPUT_DIR := $(NEURAL_MODELLING_DIRS)/../spynnaker/pyNN/model_binaries -endif - -ifndef SYNAPSE_DYNAMICS - $(error SYNAPSE_DYNAMICS is not set. 
Please select a synapse dynamics implementation) -else - SYNAPSE_DYNAMICS_C := $(call replace_source_dirs,$(SYNAPSE_DYNAMICS)) - SYNAPSE_DYNAMICS := $(call strip_source_dirs,$(SYNAPSE_DYNAMICS)) - SYNAPSE_DYNAMICS_O := $(BUILD_DIR)$(SYNAPSE_DYNAMICS:%.c=%.o) - - SYNAPSE_DYNAMICS_STATIC := neuron/plasticity/synapse_dynamics_static_impl.c - STDP_ENABLED = 0 - ifneq ($(SYNAPSE_DYNAMICS), $(SYNAPSE_DYNAMICS_STATIC)) - STDP_ENABLED = 1 - - ifndef TIMING_DEPENDENCE_H - $(error TIMING_DEPENDENCE_H is not set which is required when SYNAPSE_DYNAMICS ($(SYNAPSE_DYNAMICS_C)) != $(SYNAPSE_DYNAMICS_STATIC)) - endif - ifndef WEIGHT_DEPENDENCE_H - $(error WEIGHT_DEPENDENCE_H is not set which is required when SYNAPSE_DYNAMICS ($(SYNAPSE_DYNAMICS_C)) != $(SYNAPSE_DYNAMICS_STATIC)) - endif - endif -endif - -ifdef WEIGHT_DEPENDENCE - WEIGHT_DEPENDENCE_H := $(call replace_source_dirs,$(WEIGHT_DEPENDENCE_H)) - WEIGHT_DEPENDENCE_C := $(call replace_source_dirs,$(WEIGHT_DEPENDENCE)) - WEIGHT_DEPENDENCE := $(call strip_source_dirs,$(WEIGHT_DEPENDENCE)) - WEIGHT_DEPENDENCE_O := $(BUILD_DIR)$(WEIGHT_DEPENDENCE:%.c=%.o) -endif - -ifdef TIMING_DEPENDENCE - TIMING_DEPENDENCE_H := $(call replace_source_dirs,$(TIMING_DEPENDENCE_H)) - TIMING_DEPENDENCE_C := $(call replace_source_dirs,$(TIMING_DEPENDENCE)) - TIMING_DEPENDENCE := $(call strip_source_dirs,$(TIMING_DEPENDENCE)) - TIMING_DEPENDENCE_O := $(BUILD_DIR)$(TIMING_DEPENDENCE:%.c=%.o) -endif - -SYNGEN_ENABLED = 1 -ifndef SYNAPTOGENESIS_DYNAMICS - SYNAPTOGENESIS_DYNAMICS := neuron/structural_plasticity/synaptogenesis_dynamics_static_impl.c - SYNAPTOGENESIS_DYNAMICS_C := $(MODIFIED_DIR)$(SYNAPTOGENESIS_DYNAMICS) - SYNGEN_ENABLED = 0 -else - SYNAPTOGENESIS_DYNAMICS_C := $(call replace_source_dirs,$(SYNAPTOGENESIS_DYNAMICS)) - SYNAPTOGENESIS_DYNAMICS := $(call strip_source_dirs,$(SYNAPTOGENESIS_DYNAMICS)) - ifndef PARTNER_SELECTION - $(error PARTNER_SELECTION is not set which is required when SYNAPTOGENESIS_DYNAMICS is set) - endif - ifndef FORMATION - $(error FORMATION is not set which is required when SYNAPTOGENESIS_DYNAMICS is set) - endif - ifndef ELIMINATION - $(error ELIMINATION is not set which is required when SYNAPTOGENESIS_DYNAMICS is set) - endif -endif -SYNAPTOGENESIS_DYNAMICS_O := $(BUILD_DIR)$(SYNAPTOGENESIS_DYNAMICS:%.c=%.o) - -ifdef PARTNER_SELECTION - PARTNER_SELECTION_H := $(call replace_source_dirs,$(PARTNER_SELECTION_H)) - PARTNER_SELECTION_C := $(call replace_source_dirs,$(PARTNER_SELECTION)) - PARTNER_SELECTION := $(call strip_source_dirs,$(PARTNER_SELECTION)) - PARTNER_SELECTION_O := $(BUILD_DIR)$(PARTNER_SELECTION:%.c=%.o) -endif - -ifdef FORMATION - FORMATION_H := $(call replace_source_dirs,$(FORMATION_H)) - FORMATION_C := $(call replace_source_dirs,$(FORMATION)) - FORMATION := $(call strip_source_dirs,$(FORMATION)) - FORMATION_O := $(BUILD_DIR)$(FORMATION:%.c=%.o) -endif - -ifdef ELIMINATION - ELIMINATION_H := $(call replace_source_dirs,$(ELIMINATION_H)) - ELIMINATION_C := $(call replace_source_dirs,$(ELIMINATION)) - ELIMINATION := $(call strip_source_dirs,$(ELIMINATION)) - ELIMINATION_O := $(BUILD_DIR)$(ELIMINATION:%.c=%.o) -endif - -OTHER_SOURCES_CONVERTED := $(call strip_source_dirs,$(OTHER_SOURCES)) - -# List all the sources relative to one of SOURCE_DIRS -SOURCES = neuron/c_main_synapses.c \ - neuron/synapses.c \ - neuron/spike_processing_fast.c \ - neuron/population_table/population_table_$(POPULATION_TABLE_IMPL)_impl.c \ - $(SYNAPSE_DYNAMICS) $(WEIGHT_DEPENDENCE) \ - $(TIMING_DEPENDENCE) $(SYNAPTOGENESIS_DYNAMICS) \ - $(PARTNER_SELECTION) 
$(FORMATION) $(ELIMINATION) $(OTHER_SOURCES_CONVERTED) - -include $(SPINN_DIRS)/make/local.mk - -FEC_OPT = $(OTIME) - -# Extra compile options -DO_COMPILE = $(CC) -DLOG_LEVEL=$(SYNAPSE_DEBUG) $(CFLAGS) -DSTDP_ENABLED=$(STDP_ENABLED) - -$(BUILD_DIR)neuron/synapses.o: $(MODIFIED_DIR)neuron/synapses.c - #synapses.c - -@mkdir -p $(dir $@) - $(DO_COMPILE) -o $@ $< - -$(BUILD_DIR)neuron/direct_synapses.o: $(MODIFIED_DIR)neuron/direct_synapses.c - #direct_synapses.c - -mkdir -p $(dir $@) - $(DO_COMPILE) -o $@ $< - -$(BUILD_DIR)neuron/spike_processing_fast.o: $(MODIFIED_DIR)neuron/spike_processing_fast.c - #spike_processing_fast.c - -@mkdir -p $(dir $@) - $(DO_COMPILE) -o $@ $< - -$(BUILD_DIR)neuron/population_table/population_table_binary_search_impl.o: $(MODIFIED_DIR)neuron/population_table/population_table_binary_search_impl.c - #population_table/population_table_binary_search_impl.c - -@mkdir -p $(dir $@) - $(DO_COMPILE) -o $@ $< - -SYNGEN_INCLUDES:= -ifeq ($(SYNGEN_ENABLED), 1) - SYNGEN_INCLUDES:= -include $(PARTNER_SELECTION_H) -include $(FORMATION_H) -include $(ELIMINATION_H) -endif - -#STDP Build rules If and only if STDP used -ifeq ($(STDP_ENABLED), 1) - STDP_INCLUDES:= -include $(WEIGHT_DEPENDENCE_H) -include $(TIMING_DEPENDENCE_H) - STDP_COMPILE = $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -DSTDP_ENABLED=$(STDP_ENABLED) -DSYNGEN_ENABLED=$(SYNGEN_ENABLED) $(STDP_INCLUDES) - - $(SYNAPSE_DYNAMICS_O): $(SYNAPSE_DYNAMICS_C) - # SYNAPSE_DYNAMICS_O stdp - -@mkdir -p $(dir $@) - $(STDP_COMPILE) -o $@ $< - - $(SYNAPTOGENESIS_DYNAMICS_O): $(SYNAPTOGENESIS_DYNAMICS_C) - # SYNAPTOGENESIS_DYNAMICS_O stdp - -@mkdir -p $(dir $@) - $(STDP_COMPILE) $(SYNGEN_INCLUDES) -o $@ $< - - $(BUILD_DIR)neuron/plasticity/common/post_events.o: $(MODIFIED_DIR)neuron/plasticity/common/post_events.c - # plasticity/common/post_events.c - -@mkdir -p $(dir $@) - $(STDP_COMPILE) -o $@ $< - -else - $(SYNAPTOGENESIS_DYNAMICS_O): $(SYNAPTOGENESIS_DYNAMICS_C) - # SYNAPTOGENESIS_DYNAMICS_O without stdp - -@mkdir -p $(dir $@) - $(DO_COMPILE) $(SYNGEN_INCLUDES) -o $@ $< - - $(SYNAPSE_DYNAMICS_O): $(SYNAPSE_DYNAMICS_C) - # SYNAPSE_DYNAMICS_O without stdp - -@mkdir -p $(dir $@) - $(DO_COMPILE) -o $@ $< - -endif - -$(WEIGHT_DEPENDENCE_O): $(WEIGHT_DEPENDENCE_C) - # WEIGHT_DEPENDENCE_O - -@mkdir -p $(dir $@) - $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $< - -$(TIMING_DEPENDENCE_O): $(TIMING_DEPENDENCE_C) $(WEIGHT_DEPENDENCE_H) - # TIMING_DEPENDENCE_O - -@mkdir -p $(dir $@) - $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) \ - -include $(WEIGHT_DEPENDENCE_H) -o $@ $< - -$(PARTNER_SELECTION_O): $(PARTNER_SELECTION_C) - # PARTNER_SELECTION_O - -mkdir -p $(dir $@) - $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $< - -$(FORMATION_O): $(FORMATION_C) - # FORMATION_O - -mkdir -p $(dir $@) - $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $< - -$(ELIMINATION_O): $(ELIMINATION_C) - # ELIMINATION_O - -mkdir -p $(dir $@) - $(CC) -DLOG_LEVEL=$(PLASTIC_DEBUG) $(CFLAGS) -o $@ $< - -.PRECIOUS: $(MODIFIED_DIR)%.c $(MODIFIED_DIR)%.h $(LOG_DICT_FILE) $(EXTRA_PRECIOUS) diff --git a/pynestml/codegeneration/spinnaker_builder.py b/pynestml/codegeneration/spinnaker_builder.py index fe06552b2..dfd539b6d 100644 --- a/pynestml/codegeneration/spinnaker_builder.py +++ b/pynestml/codegeneration/spinnaker_builder.py @@ -250,6 +250,15 @@ def build(self) -> None: except subprocess.CalledProcessError: pass + # Copy the extra_synapse.mk file + try: + subprocess.check_call(["cp", "-v", "extra_synapse.mk", os.path.join(install_path, "c_models", 
"makefiles")], + stderr=subprocess.STDOUT, + shell=shell, + cwd=target_path) + except subprocess.CalledProcessError: + pass + # Copy the model Makefile for fn in generated_file_names_makefiles: neuron_subdir = fn[len("Makefile_"):] @@ -297,18 +306,31 @@ def build(self) -> None: raise GeneratedCodeBuildException( 'Error occurred during \'make\'! More detailed error messages can be found in stdout.') - # # rename the aplx file so sPyNNaker can find it - # generated_file_names_aplx = [fn for fn in os.listdir(os.path.join(install_path, "python_models8", "model_binaries")) if fnmatch.fnmatch(fn, "*.aplx")] - # assert len(generated_file_names_aplx) == 1 - # generated_file_name_aplx = generated_file_names_aplx[0] - # try: - # subprocess.check_call(["mv", "-v", generated_file_name_aplx, os.path.splitext(generated_file_name_aplx)[0] + "_neuron.aplx"], - # stderr=subprocess.STDOUT, - # shell=shell, - # cwd=os.path.join(install_path, "python_models8", "model_binaries")) - # except subprocess.CalledProcessError: - # raise GeneratedCodeBuildException( - # 'Error occurred during renaming aplx file! More detailed error messages can be found in stdout.') +#!! + + + #check if neuron is generated together with synapse + # if os.path.isfile(os.path.join(install_path, "python_models8", "model_binaries", "iaf_psc_exp_neuron_nestml__with_stdp_synapse_nestml_impl.aplx")): + + + #import pdb + #pdb.set_trace() + + # rename the aplx file so sPyNNaker can find it + #generated_file_names_aplx = [fn for fn in os.listdir(os.path.join(install_path, "python_models8", "model_binaries")) if fnmatch.fnmatch(fn, "*.aplx")] + # generated_file_names_aplx = [fn for fn in os.listdir(os.path.join(install_path, "python_models8", "model_binaries")) if fnmatch.fnmatch(fn, "iaf_psc_exp_neuron_nestml__with_stdp_synapse_nestml_impl.aplx")] + #assert len(generated_file_names_aplx) == 1 + # generated_file_name_aplx = generated_file_names_aplx[0] + #!! mv -v change to cp + # try: + # subprocess.check_call(["mv","-v", generated_file_name_aplx, os.path.splitext(generated_file_name_aplx)[0] + "_stdp_mad_my_timing_my_weight.aplx"], + # stderr=subprocess.STDOUT, + # shell=shell, + # cwd=os.path.join(install_path, "python_models8", "model_binaries")) + #except subprocess.CalledProcessError: + # raise GeneratedCodeBuildException( + # 'Error occurred during renaming aplx file! 
More detailed error messages can be found in stdout.') + finally: os.chdir(old_cwd) diff --git a/pynestml/codegeneration/spinnaker_code_generator.py b/pynestml/codegeneration/spinnaker_code_generator.py index ccf6e654f..da4881baf 100644 --- a/pynestml/codegeneration/spinnaker_code_generator.py +++ b/pynestml/codegeneration/spinnaker_code_generator.py @@ -25,11 +25,14 @@ import os from pynestml.codegeneration.code_generator import CodeGenerator +from pynestml.codegeneration.code_generator_utils import CodeGeneratorUtils from pynestml.codegeneration.nest_code_generator import NESTCodeGenerator +from pynestml.codegeneration.printers.spinnaker_cpp_expression_printer import SpiNNakerCppExpressionPrinter from pynestml.codegeneration.printers.cpp_expression_printer import CppExpressionPrinter from pynestml.codegeneration.printers.cpp_printer import CppPrinter from pynestml.codegeneration.printers.c_simple_expression_printer import CSimpleExpressionPrinter from pynestml.codegeneration.printers.constant_printer import ConstantPrinter +from pynestml.codegeneration.printers.spinnaker_constant_printer import SpiNNakerConstantPrinter from pynestml.codegeneration.printers.gsl_variable_printer import GSLVariablePrinter from pynestml.codegeneration.printers.ode_toolbox_expression_printer import ODEToolboxExpressionPrinter from pynestml.codegeneration.printers.ode_toolbox_function_call_printer import ODEToolboxFunctionCallPrinter @@ -39,9 +42,11 @@ from pynestml.codegeneration.printers.python_stepping_function_function_call_printer import PythonSteppingFunctionFunctionCallPrinter from pynestml.codegeneration.printers.python_stepping_function_variable_printer import PythonSteppingFunctionVariablePrinter from pynestml.codegeneration.printers.python_variable_printer import PythonVariablePrinter +from pynestml.codegeneration.printers.spinnaker_python_variable_printer import SpiNNakerPythonVariablePrinter from pynestml.codegeneration.printers.spinnaker_c_function_call_printer import SpinnakerCFunctionCallPrinter from pynestml.codegeneration.printers.spinnaker_c_type_symbol_printer import SpinnakerCTypeSymbolPrinter from pynestml.codegeneration.printers.spinnaker_c_variable_printer import SpinnakerCVariablePrinter +from pynestml.codegeneration.printers.spinnaker_synapse_c_variable_printer import SpinnakerSynapseCVariablePrinter from pynestml.codegeneration.printers.spinnaker_gsl_function_call_printer import SpinnakerGSLFunctionCallPrinter from pynestml.codegeneration.printers.spinnaker_python_function_call_printer import SpinnakerPythonFunctionCallPrinter from pynestml.codegeneration.printers.spinnaker_python_simple_expression_printer import SpinnakerPythonSimpleExpressionPrinter @@ -52,17 +57,28 @@ class CustomNESTCodeGenerator(NESTCodeGenerator): - def setup_printers(self): - self._constant_printer = ConstantPrinter() + def setup_printers(self, for_neuron: bool = True): + self._constant_printer = SpiNNakerConstantPrinter() # C/Spinnaker API printers self._type_symbol_printer = SpinnakerCTypeSymbolPrinter() - self._nest_variable_printer = SpinnakerCVariablePrinter(expression_printer=None, with_origin=True, - with_vector_parameter=True) + if for_neuron: + self._nest_variable_printer = SpinnakerCVariablePrinter(expression_printer=None, with_origin=True, + with_vector_parameter=True) + else: + # for synapse + self._nest_variable_printer = SpinnakerSynapseCVariablePrinter(expression_printer=None, with_origin=True, + with_vector_parameter=True) self._nest_function_call_printer = SpinnakerCFunctionCallPrinter(None) 
self._nest_function_call_printer_no_origin = SpinnakerCFunctionCallPrinter(None) - self._printer = CppExpressionPrinter( + if for_neuron: + self._printer = CppExpressionPrinter( + simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer, + constant_printer=self._constant_printer, + function_call_printer=self._nest_function_call_printer)) + else: + self._printer = SpiNNakerCppExpressionPrinter( simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer, constant_printer=self._constant_printer, function_call_printer=self._nest_function_call_printer)) @@ -72,10 +88,16 @@ def setup_printers(self): self._nest_variable_printer_no_origin = SpinnakerCVariablePrinter(None, with_origin=False, with_vector_parameter=False) - self._printer_no_origin = CppExpressionPrinter( - simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer_no_origin, - constant_printer=self._constant_printer, - function_call_printer=self._nest_function_call_printer_no_origin)) + if for_neuron: + self._printer_no_origin = CppExpressionPrinter( + simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer_no_origin, + constant_printer=self._constant_printer, + function_call_printer=self._nest_function_call_printer_no_origin)) + else: + self._printer_no_origin = SpiNNakerCppExpressionPrinter( + simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer_no_origin, + constant_printer=self._constant_printer, + function_call_printer=self._nest_function_call_printer_no_origin)) self._nest_variable_printer_no_origin._expression_printer = self._printer_no_origin self._nest_function_call_printer_no_origin._expression_printer = self._printer_no_origin @@ -83,7 +105,13 @@ def setup_printers(self): self._gsl_variable_printer = GSLVariablePrinter(None) self._gsl_function_call_printer = SpinnakerGSLFunctionCallPrinter(None) - self._gsl_printer = CppExpressionPrinter( + if for_neuron: + self._gsl_printer = CppExpressionPrinter( + simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._gsl_variable_printer, + constant_printer=self._constant_printer, + function_call_printer=self._gsl_function_call_printer)) + else: + self._gsl_printer = SpiNNakerCppExpressionPrinter( simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._gsl_variable_printer, constant_printer=self._constant_printer, function_call_printer=self._gsl_function_call_printer)) @@ -102,15 +130,20 @@ def setup_printers(self): class CustomPythonStandaloneCodeGenerator(PythonStandaloneCodeGenerator): - def setup_printers(self): + def setup_printers(self, for_neuron: bool = True): super().setup_printers() self._type_symbol_printer = SpinnakerPythonTypeSymbolPrinter() self._constant_printer = ConstantPrinter() # Python/mini simulation environment API printers - self._nest_variable_printer = PythonVariablePrinter(expression_printer=None, with_origin=False, + if for_neuron: + self._nest_variable_printer = PythonVariablePrinter(expression_printer=None, with_origin=False, + with_vector_parameter=True) + else: + self._nest_variable_printer = SpiNNakerPythonVariablePrinter(expression_printer=None, with_origin=False, with_vector_parameter=True) + self._nest_function_call_printer = SpinnakerPythonFunctionCallPrinter(None) self._nest_function_call_printer_no_origin = SpinnakerPythonFunctionCallPrinter(None) @@ -122,8 +155,13 @@ def setup_printers(self): 
self._nest_function_call_printer._expression_printer = self._printer self._nest_printer = PythonStandalonePrinter(expression_printer=self._printer) - self._nest_variable_printer_no_origin = PythonVariablePrinter(None, with_origin=False, + if for_neuron: + self._nest_variable_printer_no_origin = PythonVariablePrinter(None, with_origin=False, with_vector_parameter=False) + else: + self._nest_variable_printer_no_origin = SpiNNakerPythonVariablePrinter(None, with_origin=False, + with_vector_parameter=False) + self._printer_no_origin = PythonExpressionPrinter( simple_expression_printer=SpinnakerPythonSimpleExpressionPrinter( variable_printer=self._nest_variable_printer_no_origin, @@ -146,11 +184,21 @@ def setup_printers(self): class SpiNNakerCodeGenerator(CodeGenerator): r""" Code generator for SpiNNaker + + For descriptions of the code generator options, please see the NESTCodeGenerator documentation. + + The supported code generator options for SpiNNaker are: ``delay_variable``, ``weight_variable``, ``synapse_models``, ``neuron_synapse_pairs`` and ``templates``. """ codegen_cpp: Optional[NESTCodeGenerator] = None _default_options = { + + "delay_variable": {}, + "weight_variable": {}, + + "synapse_models": [], + "neuron_synapse_pairs": [], "templates": { "path": os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "resources_spinnaker"))), @@ -163,13 +211,6 @@ class SpiNNakerCodeGenerator(CodeGenerator): "Makefile_@NEURON_NAME@_impl.jinja2"], "synapse": ["@SYNAPSE_NAME@_impl.c.jinja2", "@SYNAPSE_NAME@_impl.h.jinja2", - "@SYNAPSE_NAME@_timing_impl.h.jinja2", - "@SYNAPSE_NAME@_timing_impl.c.jinja2", - "@SYNAPSE_NAME@_weight_impl.h.jinja2", - "@SYNAPSE_NAME@_weight_impl.c.jinja2", - "@SYNAPSE_NAME@.py.jinja2", - "@SYNAPSE_NAME@_timing.py.jinja2", - "@SYNAPSE_NAME@_weight.py.jinja2", "@SYNAPSE_NAME@_impl.py.jinja2", "Makefile_@SYNAPSE_NAME@_impl.jinja2"], }, @@ -182,6 +223,10 @@ def __init__(self, options: Optional[Mapping[str, Any]] = None): super().__init__(options) options_cpp = copy.deepcopy(NESTCodeGenerator._default_options) + options_cpp["delay_variable"] = self._options["delay_variable"] + options_cpp["weight_variable"] = self._options["weight_variable"] + + options_cpp["neuron_synapse_pairs"] = self._options["neuron_synapse_pairs"] options_cpp["templates"]["model_templates"]["neuron"] = [fname for fname in self._options["templates"]["model_templates"]["neuron"] @@ -205,14 +250,51 @@ def __init__(self, options: Optional[Mapping[str, Any]] = None): options_py["nest_version"] = "" options_py["templates"]["module_templates"] = [] options_py["templates"]["path"] = self._options["templates"]["path"] + + options_py["delay_variable"] = self._options["delay_variable"] + options_py["weight_variable"] = self._options["weight_variable"] + self.codegen_py = CustomPythonStandaloneCodeGenerator(options_py) + def set_options(self, options: Mapping[str, Any]) -> Mapping[str, Any]: + import copy + options_copy = copy.deepcopy(options) + options_copy2 = copy.deepcopy(options) + ret = super().set_options(options) + self.codegen_cpp.set_options(options_copy) + self.codegen_py.set_options(options_copy2) + + return ret + + def generate_code(self, models: Sequence[ASTModel]) -> None: for model in models: cloned_model = model.clone() cloned_model.accept(ASTSymbolTableVisitor()) + if "paired_neuron" in dir(model): + cloned_model.paired_neuron = model.paired_neuron + cloned_model.spiking_post_port_names = model.spiking_post_port_names + cloned_model.post_port_names = model.post_port_names + if 
"vt_port_names" in dir(model): + cloned_model.vt_port_names = model.vt_port_names + + neurons, synapses = CodeGeneratorUtils.get_model_types_from_names(models, synapse_models=self.get_option("synapse_models")) + if model in neurons: + self.codegen_cpp.setup_printers(for_neuron=True) + self.codegen_py.setup_printers(for_neuron=True) + else: + assert model in synapses + self.codegen_cpp.setup_printers(for_neuron=False) + self.codegen_py.setup_printers(for_neuron=False) + self.codegen_cpp.generate_code([cloned_model]) cloned_model = model.clone() cloned_model.accept(ASTSymbolTableVisitor()) + if "paired_neuron" in dir(model): + cloned_model.paired_neuron = model.paired_neuron + cloned_model.spiking_post_port_names = model.spiking_post_port_names + cloned_model.post_port_names = model.post_port_names + if "vt_port_names" in dir(model): + cloned_model.vt_port_names = model.vt_port_names self.codegen_py.generate_code([cloned_model]) diff --git a/pynestml/codegeneration/spinnaker_code_generator_utils.py b/pynestml/codegeneration/spinnaker_code_generator_utils.py index b344e4f9f..be31879b6 100644 --- a/pynestml/codegeneration/spinnaker_code_generator_utils.py +++ b/pynestml/codegeneration/spinnaker_code_generator_utils.py @@ -26,15 +26,22 @@ class SPINNAKERCodeGeneratorUtils: @classmethod - def print_symbol_origin(cls, variable_symbol: VariableSymbol, numerical_state_symbols=None) -> str: + def print_symbol_origin(cls, variable_symbol: VariableSymbol, numerical_state_symbols=None, for_synapse=False) -> str: """ Returns a prefix corresponding to the origin of the variable symbol. :param variable_symbol: a single variable symbol. :return: the corresponding prefix """ + if for_synapse: + return SPINNAKERCodeGeneratorUtils._print_symbol_origin_for_synapse(variable_symbol, numerical_state_symbols) + + return SPINNAKERCodeGeneratorUtils._print_symbol_origin_for_neuron(variable_symbol, numerical_state_symbols) + + @classmethod + def _print_symbol_origin_for_neuron(cls, variable_symbol: VariableSymbol, numerical_state_symbols=None) -> str: if variable_symbol.block_type in [BlockType.STATE, BlockType.EQUATION]: if numerical_state_symbols and variable_symbol.get_symbol_name() in numerical_state_symbols: - return 'S_.ode_state[State_::%s]' + return 'state.S_.ode_state[State_::%s]' return 'state->%s' @@ -51,3 +58,39 @@ def print_symbol_origin(cls, variable_symbol: VariableSymbol, numerical_state_sy return 'input->%s' return '' + + @classmethod + def _print_symbol_origin_for_synapse(cls, variable_symbol: VariableSymbol, numerical_state_symbols=None) -> str: + + if variable_symbol.name == "__h" or variable_symbol.name.startswith("__P"): + # these are just local temporaries + return '%s' + + if variable_symbol.block_type in [BlockType.STATE, BlockType.EQUATION]: + #if numerical_state_symbols and variable_symbol.get_symbol_name() in numerical_state_symbols: + # return 'state.S_.ode_state[State_::%s]' + + return 'state->%s' + + if variable_symbol.block_type == BlockType.PARAMETERS: + # parameters are global variables + #return '%s' + #return 'state.weight_region->%s' + return 'plasticity_weight_region_data->%s' + + if variable_symbol.block_type == BlockType.COMMON_PARAMETERS: + return 'plasticity_weight_region_data->%s' + # parameters are global variables + #return 'state.weight_region->%s' + #return 'parameter->%s' + + if variable_symbol.block_type == BlockType.INTERNALS: + return 'plasticity_weight_region_data->%s' + # parameters are global variables + #return 'state.weight_region->%s' + #return 'parameter->%s' 
+
+        if variable_symbol.block_type == BlockType.INPUT:
+            return 'input->%s'
+
+        return ''
diff --git a/pynestml/frontend/pynestml_frontend.py b/pynestml/frontend/pynestml_frontend.py
index e5497a008..8bc8cb61b 100644
--- a/pynestml/frontend/pynestml_frontend.py
+++ b/pynestml/frontend/pynestml_frontend.py
@@ -76,15 +76,7 @@ def transformers_from_target_name(target_name: str, options: Optional[Mapping[st
                                                             "goto", "if", "inline", "int", "long", "mutable", "namespace", "new", "noexcept", "not", "not_eq", "nullptr", "operator", "or", "or_eq", "private", "protected", "public", "register", "reinterpret_cast", "requires", "return", "short", "signed", "sizeof", "static", "static_assert", "static_cast", "struct", "switch", "template", "this", "thread_local", "throw", "true", "try", "typedef", "typeid", "typename", "union", "unsigned", "using", "virtual", "void", "volatile", "wchar_t", "while", "xor", "xor_eq"]})
         transformers.append(variable_name_rewriter)

-    if target_name.upper() in ["SPINNAKER"]:
-        from pynestml.transformers.synapse_remove_post_port import SynapseRemovePostPortTransformer
-
-        # co-generate neuron and synapse
-        synapse_post_neuron_co_generation = SynapseRemovePostPortTransformer()
-        options = synapse_post_neuron_co_generation.set_options(options)
-        transformers.append(synapse_post_neuron_co_generation)
-
-    if target_name.upper() == "NEST":
+    if target_name.upper() in ["NEST"]:  # NB: SpiNNaker no longer uses this transformer
         from pynestml.transformers.synapse_post_neuron_transformer import SynapsePostNeuronTransformer

         # co-generate neuron and synapse
@@ -115,6 +107,7 @@ def code_generator_from_target_name(target_name: str, options: Optional[Mapping[
     assert target_name.upper() in get_known_targets(
     ), "Unknown target platform requested: \"" + str(target_name) + "\""

+
     if target_name.upper() == "NEST":
         from pynestml.codegeneration.nest_code_generator import NESTCodeGenerator
         return NESTCodeGenerator(options)
@@ -470,6 +463,7 @@ def get_parsed_models() -> List[ASTModel]:
     if not type(nestml_files) is list:
         nestml_files = [nestml_files]

+
     for nestml_file in nestml_files:
         parsed_unit = ModelParser.parse_file(nestml_file)
         if parsed_unit:
@@ -516,6 +510,14 @@ def process() -> bool:
     code_generator = code_generator_from_target_name(FrontendConfiguration.get_target_platform())
     unused_opts_codegen = code_generator.set_options(FrontendConfiguration.get_codegen_opts())

+    # NB: for the SpiNNaker target the code generator itself forwards these
+    # options to its internal codegen_cpp and codegen_py generators (see
+    # SpiNNakerCodeGenerator.set_options), so no special-casing is needed here.
+
     # initialise builder
     _builder, unused_opts_builder = builder_from_target_name(FrontendConfiguration.get_target_platform(),
                                                              options=FrontendConfiguration.get_codegen_opts())
diff --git a/pynestml/transformers/synapse_post_neuron_transformer.py b/pynestml/transformers/synapse_post_neuron_transformer.py
index 95d7e087e..37b27a93d 100644
--- a/pynestml/transformers/synapse_post_neuron_transformer.py
+++ b/pynestml/transformers/synapse_post_neuron_transformer.py
@@ -80,7 +80,7 @@ def set_options(self, options: Mapping[str, Any]) -> Mapping[str, Any]:

         return unused_options

-    def is_special_port(self, special_type: str, port_name: str, neuron_name: str, synapse_name: str) -> bool:
+    def is_special_port(self, special_type: str, port_name: str, neuron_name: Optional[str], synapse_name: str) -> bool:
         """
         Check if a port by the given name is specified as connecting to the postsynaptic neuron. 
Only makes sense for synapses. @@ -90,8 +90,10 @@ def is_special_port(self, special_type: str, port_name: str, neuron_name: str, s return False for neuron_synapse_pair in self._options["neuron_synapse_pairs"]: - if not (neuron_name in [neuron_synapse_pair["neuron"], neuron_synapse_pair["neuron"] + FrontendConfiguration.suffix] - and synapse_name in [neuron_synapse_pair["synapse"], neuron_synapse_pair["synapse"] + FrontendConfiguration.suffix]): + if not synapse_name in [neuron_synapse_pair["synapse"], neuron_synapse_pair["synapse"] + FrontendConfiguration.suffix]: + continue + + if neuron_name is not None and not neuron_name in [neuron_synapse_pair["neuron"], neuron_synapse_pair["neuron"] + FrontendConfiguration.suffix]: continue if not special_type + "_ports" in neuron_synapse_pair.keys(): @@ -119,10 +121,10 @@ def is_continuous_port(self, port_name: str, parent_node: ASTModel): return True return False - def is_post_port(self, port_name: str, neuron_name: str, synapse_name: str) -> bool: + def is_post_port(self, port_name: str, neuron_name: Optional[str], synapse_name: str) -> bool: return self.is_special_port("post", port_name, neuron_name, synapse_name) - def is_vt_port(self, port_name: str, neuron_name: str, synapse_name: str) -> bool: + def is_vt_port(self, port_name: str, neuron_name: Optional[str], synapse_name: str) -> bool: return self.is_special_port("vt", port_name, neuron_name, synapse_name) def get_spiking_post_port_names(self, synapse, neuron_name: str, synapse_name: str): diff --git a/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py b/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py index 065d64420..430f32c24 100644 --- a/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py +++ b/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py @@ -31,13 +31,8 @@ class TestSpiNNakerIafPscExp: @pytest.fixture(autouse=True, scope="module") def generate_code(self): - # codegen_opts = {"neuron_synapse_pairs": [{"neuron": "iaf_psc_exp_neuron", - # "synapse": "stdp_synapse", - # "post_ports": ["post_spikes"]}]} - files = [ os.path.join("models", "neurons", "iaf_psc_exp_neuron.nestml"), - # os.path.join("models", "synapses", "stdp_synapse.nestml") ] input_path = [os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.join( os.pardir, os.pardir, s))) for s in files] @@ -52,7 +47,6 @@ def generate_code(self): logging_level=logging_level, module_name=module_name, suffix=suffix) - # codegen_opts=codegen_opts) def test_iaf_psc_exp(self): # import spynnaker and plotting stuff @@ -69,10 +63,10 @@ def test_iaf_psc_exp(self): synapticRsp = "I_syn_exc" # Set the run time of the execution - run_time = 150 + run_time = 200 # Set the time step of the simulation in milliseconds - time_step = 0.1 + time_step = 1 # Set the number of neurons to simulate n_neurons = 1 @@ -87,7 +81,7 @@ def test_iaf_psc_exp(self): spike_times = [1, 5, 100] p.setup(time_step) - p.set_number_of_synapse_cores(iaf_psc_exp_neuron_nestml, 0) # Fix an issue with new feature in the main code, where sPyNNaker is trying to determine whether to use a split core model where neurons and synapses are on separate cores, or a single core model where they are processed on the same core. In the older code, this was a more manual decision, but in the main code it is happening automatically unless overridden. This is particularly true when you use the 0.1ms timestep, where it will be attempting to keep to real-time execution by using split cores. 
+        #p.set_number_of_synapse_cores(iaf_psc_exp_neuron_nestml, 0)  # disabled: with the 1 ms timestep used above, sPyNNaker no longer needs a manual override of its automatic decision between split (separate neuron/synapse) cores and a single combined core

     spikeArray = {"spike_times": spike_times}
     excitation = p.Population(
@@ -131,7 +125,6 @@ def test_iaf_psc_exp(self):
             combined_spikes.append(spike)

         Figure(
-            # pylint: disable=no-member
             # membrane potentials for each example
             Panel(combined_spikes,
diff --git a/tests/spinnaker_tests/test_spinnaker_stdp.py b/tests/spinnaker_tests/test_spinnaker_stdp.py
new file mode 100644
index 000000000..bd3c836fe
--- /dev/null
+++ b/tests/spinnaker_tests/test_spinnaker_stdp.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+#
+# test_spinnaker_stdp.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import os
+import matplotlib.pyplot as plt
+import numpy as np
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_spinnaker_target
+
+
+class TestSpiNNakerSTDP:
+    """SpiNNaker code generation tests"""
+
+    @pytest.fixture(autouse=True,
+                    scope="module")
+    def generate_code(self):
+        codegen_opts = {"neuron_synapse_pairs": [{"neuron": "iaf_psc_exp_neuron",
+                                                  "synapse": "stdp_synapse",
+                                                  "post_ports": ["post_spikes"]}],
+                        "delay_variable": {"stdp_synapse": "d"},
+                        "weight_variable": {"stdp_synapse": "w"}}
+
+        files = [
+            os.path.join("models", "neurons", "iaf_psc_exp_neuron.nestml"),
+            os.path.join("models", "synapses", "stdp_synapse.nestml")
+        ]
+        input_path = [os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.join(
+            os.pardir, os.pardir, s))) for s in files]
+        target_path = "spinnaker-target"
+        install_path = "spinnaker-install"
+        logging_level = "DEBUG"
+        module_name = "nestmlmodule"
+        suffix = "_nestml"
+        generate_spinnaker_target(input_path,
+                                  target_path=target_path,
+                                  install_path=install_path,
+                                  logging_level=logging_level,
+                                  module_name=module_name,
+                                  suffix=suffix,
+                                  codegen_opts=codegen_opts)
+
+    def run_sim(self, pre_spike_times, post_spike_times, simtime=1100):
+        import pyNN.spiNNaker as p
+        from pyNN.utility.plotting import Figure, Panel
+
+        from python_models8.neuron.builds.iaf_psc_exp_neuron_nestml import iaf_psc_exp_neuron_nestml
+        from python_models8.neuron.implementations.stdp_synapse_nestml_impl import stdp_synapse_nestmlDynamics as stdp_synapse_nestml
+
+        p.setup(timestep=1.0)
+        exc_input = "exc_spikes"
+        inh_input = "inh_spikes"
+
+        # inputs for the pre- and postsynaptic neurons
+        pre_input = p.Population(1, p.SpikeSourceArray(spike_times=[0]), 
label="pre_input") + post_input = p.Population(1, p.SpikeSourceArray(spike_times=[0]), label="post_input") + + #pre and post synaptic spiking neuron populations + pre_spiking = p.Population(1, iaf_psc_exp_neuron_nestml(), label="pre_spiking") + post_spiking = p.Population(1, iaf_psc_exp_neuron_nestml(), label="post_spiking") + + weight_pre = 3000 + weight_post = 3000 + + p.Projection(pre_input, pre_spiking, p.OneToOneConnector(), receptor_type=exc_input, synapse_type=p.StaticSynapse(weight=weight_pre)) + p.Projection(post_input, post_spiking, p.OneToOneConnector(), receptor_type=exc_input, synapse_type=p.StaticSynapse(weight=weight_post)) + + stdp_model = stdp_synapse_nestml(weight=0) #0x8000) + stdp_projection = p.Projection(pre_spiking, post_spiking, p.AllToAllConnector(), synapse_type=stdp_model, receptor_type=exc_input) + #stdp_projection_inh = p.Projection(pre_spiking, post_spiking, p.AllToAllConnector(), synapse_type=stdp_model, receptor_type=inh_input) + + #record spikes + pre_spiking.record(["spikes"]) + post_spiking.record(["spikes"]) + + #pre_input.set(spike_times=[100, 110, 120, 1000]) + pre_input.set(spike_times=pre_spike_times) + post_input.set(spike_times=post_spike_times) + + p.run(simtime) + + pre_neo = pre_spiking.get_data("spikes") + post_neo = post_spiking.get_data("spikes") + + pre_spike_times = pre_neo.segments[0].spiketrains + post_spike_times = post_neo.segments[0].spiketrains + + w_curr = stdp_projection.get("weight", format="float") + + p.end() + + return w_curr[0][0], pre_spike_times, post_spike_times + + + def test_stdp(self): + res_weights = [] + spike_time_axis = [] + + pre_spike_times = [250, 1000] + + for t_post in np.linspace(200, 300, 19): + #for t_post in [450.]: + dw, actual_pre_spike_times, actual_post_spike_times = self.run_sim(pre_spike_times, [t_post]) + + spike_time_axis.append(float(actual_post_spike_times[0][0]) - float(actual_pre_spike_times[0][0])) + + if dw > 16000: # XXX TODO REMOVE THIS IF...THEN..ELSE + res_weights.append(dw - 32768) + else: + res_weights.append(dw) + + print("actual pre_spikes: " + str(actual_pre_spike_times)) + print("actual post_spikes: " + str(actual_post_spike_times)) + print("weights after simulation: " + str(dw)) + + print("Simulation results") + print("------------------") + print("timevec after sim = " + str(spike_time_axis)) + print("weights after sim = " + str(res_weights)) + + + + + fig, ax = plt.subplots() + ax.plot(spike_time_axis, res_weights, '.') + ax.set_xlabel(r"$t_{pre} - t_{post} [ms]$") + ax.set_ylabel(r"$\Delta w$") + ax.set_title("STDP-Window") + ax.grid(True) + +# ax.subplots_adjust(bottom=0.2) + + """ax.figtext(0.5, 0.05, + r"$\tau_+ = 20ms,\tau_- = 20ms, A_+ = 0.5, A_- = 0.5$", + ha='center', # horizontal alignment + va='bottom', # vertical alignment + fontsize=10, + color='gray')""" + + + + fig.savefig("plot.png") diff --git a/tests/spinnaker_tests/test_spinnaker_stdp_distribution.py b/tests/spinnaker_tests/test_spinnaker_stdp_distribution.py new file mode 100644 index 000000000..cf0b06922 --- /dev/null +++ b/tests/spinnaker_tests/test_spinnaker_stdp_distribution.py @@ -0,0 +1,215 @@ +# -*- coding: utf-8 -*- +# +# test_spinnaker_stdp_distribution.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. 
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import os
+import matplotlib.pyplot as plt
+import numpy as np
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_spinnaker_target
+
+
+def float_to_s15_16(x: float) -> int:
+    """Converts a float to an S15.16 fixed-point 32-bit integer.
+
+    The S15.16 format uses 1 sign bit, 15 integer bits and 16 fractional
+    bits; negative numbers are represented in two's complement.
+
+    Args:
+        x: The floating-point number to convert.
+
+    Returns:
+        The 32-bit signed integer representing the fixed-point number.
+    """
+    # scale by 2**16 and round to the nearest representable value
+    fixp_x: int = int(round(x * 2**16))
+
+    # wrap into the signed 32-bit range (two's complement)
+    fixp_x &= 0xFFFFFFFF
+    if fixp_x >= 0x80000000:
+        fixp_x -= 0x100000000
+
+    return fixp_x
+
+
+class TestSpiNNakerSTDPDistribution:
+    """SpiNNaker code generation tests"""
+
+    @pytest.fixture(autouse=True,
+                    scope="module")
+    def generate_code(self):
+        codegen_opts = {"neuron_synapse_pairs": [{"neuron": "iaf_delta_neuron",
+                                                  "synapse": "stdp_synapse",
+                                                  "post_ports": ["post_spikes"]}],
+                        "delay_variable": {"stdp_synapse": "d"},
+                        "weight_variable": {"stdp_synapse": "w"}}
+
+        files = [
+            os.path.join("models", "neurons", "iaf_delta_neuron.nestml"),
+            os.path.join("models", "synapses", "stdp_additive_synapse.nestml")
+        ]
+        input_path = [os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.join(
+            os.pardir, os.pardir, s))) for s in files]
+        target_path = "spinnaker-target"
+        install_path = "spinnaker-install"
+        logging_level = "DEBUG"
+        module_name = "nestmlmodule"
+        suffix = "_nestml"
+        generate_spinnaker_target(input_path,
+                                  target_path=target_path,
+                                  install_path=install_path,
+                                  logging_level=logging_level,
+                                  module_name=module_name,
+                                  suffix=suffix,
+                                  codegen_opts=codegen_opts)
+
+    def run_sim(self, n_inputs=1, input_rate=10., simtime=1000):
+        r"""
+        input_rate is in spikes/s
+        simtime is in ms and should be about 100 s for convergence
+        """
+        import pyNN.spiNNaker as p
+        from pyNN.utility.plotting import Figure, Panel
+
+        from python_models8.neuron.builds.iaf_delta_neuron_nestml import iaf_delta_neuron_nestml as iaf_neuron_nestml
+        from python_models8.neuron.implementations.stdp_synapse_nestml_impl import stdp_synapse_nestmlDynamics as stdp_synapse_nestml
+
+        p.setup(timestep=1.0)
+        exc_input = "exc_spikes"
+
+        neuron_parameters = {  # XXX UNUSED
+            "E_L": -70.,  # [mV]
+            "V_reset": -65.,  # [mV]
+            "V_th": -50.,  # [mV]
+            "tau_m": 20.,  # [ms]
+            "t_refr": 5.,  # [ms]
+        }
+
+        stdp_parameters = {  # XXX UNUSED
+            "W_min": 0.,
+            "W_max": 30.,
+            "tau_pre": 20.,  # [ms]
+            "tau_post": 20.,  # [ms]
+            "A_pot": 0.01,
+            "A_dep": 0.02
+        }
+
+        #initial_weight = float_to_s15_16(2.5)  # [mV]
+        initial_weight = 25  # [mV]
+
+        # inputs for the pre- and postsynaptic neurons
+        pre_input = p.Population(n_inputs, p.SpikeSourcePoisson(rate=input_rate), label="pre_input")
+        post_neuron = p.Population(1, iaf_neuron_nestml(), label="post_neuron")
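+
+        # NB (illustrative): if the initial weight were specified in S15.16
+        # units, 2.5 would be passed as float_to_s15_16(2.5) == 2.5 * 2**16
+        # == 163840; here the raw integer 25 is used directly instead.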
+
+        stdp_model = stdp_synapse_nestml(weight=initial_weight)  # should cause a 2.5 mV deflection in the postsynaptic potential
+        stdp_projection = p.Projection(pre_input, post_neuron, p.AllToAllConnector(), synapse_type=stdp_model, receptor_type=exc_input)
+
+        # XXX: TODO: initialize weights to a random value around the midpoint
+
+        # record spikes and the membrane potential
+        pre_input.record(["spikes"])
+        post_neuron.record(["spikes"])
+        post_neuron.record(["V_m"])
+
+        p.run(simtime)
+
+        pre_neo = pre_input.get_data("spikes")
+        post_neo = post_neuron.get_data("spikes")
+
+        pre_spike_times = pre_neo.segments[0].spiketrains
+        post_spike_times = post_neo.segments[0].spiketrains
+
+        w = stdp_projection.get("weight", format="float")
+
+        v_post_neuron = post_neuron.get_data("V_m")
+        times = v_post_neuron.segments[0].analogsignals[0].times
+        v_post_neuron = np.array(v_post_neuron.segments[0].filter(name="V_m")[0])
+
+        p.end()
+        print(w)
+
+        return times, v_post_neuron, w, pre_spike_times, post_spike_times
+
+    def test_stdp(self):
+        simtime = 10000.  # [ms]
+        times, v_post_neuron, dw, actual_pre_spike_times, actual_post_spike_times = self.run_sim(simtime=simtime)
+
+        print("actual pre_spikes: " + str(actual_pre_spike_times))
+        print("actual post_spikes: " + str(actual_post_spike_times))
+        print("weights after simulation: " + str(dw))
+
+        fig, ax = plt.subplots(nrows=2)
+        ax[0].plot(actual_pre_spike_times, np.zeros_like(actual_pre_spike_times), '.')
+        ax[0].plot(actual_post_spike_times, np.ones_like(actual_post_spike_times), '.')
+        ax[1].plot(times, v_post_neuron)
+        ax[1].set_ylabel("V_m")
+        ax[-1].set_xlabel(r"$t$ [ms]")
+        ax[0].set_ylabel("pre (0) and post (1) spikes")
+        for _ax in ax:
+            _ax.grid(True)
+            _ax.set_xlim(0, simtime)
+
+        fig.savefig("plot.png")
diff --git a/tests/spinnaker_tests/test_spinnaker_stdp_psp.py b/tests/spinnaker_tests/test_spinnaker_stdp_psp.py
new file mode 100644
index 000000000..abab287ae
--- /dev/null
+++ b/tests/spinnaker_tests/test_spinnaker_stdp_psp.py
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-
+#
+# test_spinnaker_stdp_psp.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import os
+import matplotlib.pyplot as plt
+import numpy as np
+import pytest
+import time
+
+from pynestml.frontend.pynestml_frontend import generate_spinnaker_target
+
+
+class TestSpiNNakerSTDPPSP:
+    """SpiNNaker code generation tests"""
+
+    @pytest.fixture(autouse=True,
+                    scope="module")
+    def generate_code(self):
+        codegen_opts = {"neuron_synapse_pairs": [{"neuron": "iaf_psc_exp_neuron",
+                                                  "synapse": "stdp_synapse",
+                                                  "post_ports": ["post_spikes"]}],
+                        "delay_variable": {"stdp_synapse": "d"},
+                        "weight_variable": {"stdp_synapse": "w"}}
+
+        files = [
+            os.path.join("models", "neurons", "iaf_psc_exp_neuron.nestml"),
+            os.path.join("models", "synapses", "stdp_synapse.nestml")
+        ]
+        input_path = [os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.join(
+            os.pardir, os.pardir, s))) for s in files]
+        target_path = "spinnaker-target"
+        install_path = "spinnaker-install"
+        logging_level = "DEBUG"
+        module_name = "nestmlmodule"
+        suffix = "_nestml"
+        generate_spinnaker_target(input_path,
+                                  target_path=target_path,
+                                  install_path=install_path,
+                                  logging_level=logging_level,
+                                  module_name=module_name,
+                                  suffix=suffix,
+                                  codegen_opts=codegen_opts)
+
+    def run_sim(self, pre_spike_times, weight=123, simtime=50):
+        import pyNN.spiNNaker as p
+        from pyNN.utility.plotting import Figure, Panel
+
+        from python_models8.neuron.builds.iaf_psc_exp_neuron_nestml import iaf_psc_exp_neuron_nestml
+        from python_models8.neuron.implementations.stdp_synapse_nestml_impl import stdp_synapse_nestmlDynamics as stdp_synapse_nestml
+
+        p.setup(timestep=1.0)
+        exc_input = "exc_spikes"
+        inh_input = "inh_spikes"
+
+        # input for the presynaptic side
+        pre_input = p.Population(1, p.SpikeSourceArray(spike_times=[0]), label="pre_input")
+        post_neuron = p.Population(1, iaf_psc_exp_neuron_nestml(), label="post_neuron")
+
+        stdp_model = stdp_synapse_nestml(weight=weight)
+        stdp_projection = p.Projection(pre_input, post_neuron, p.OneToOneConnector(), receptor_type=exc_input, synapse_type=stdp_model)
+
+        # record spikes, the membrane potential and the synaptic current
+        pre_input.record(["spikes"])
+        post_neuron.record(["spikes"])
+        post_neuron.record(["V_m"])
+        post_neuron.record(["I_syn_exc"])
+
+        pre_input.set(spike_times=pre_spike_times)
+
+        p.run(simtime)
+
+        v_post_neuron = post_neuron.get_data("V_m")
+        times = v_post_neuron.segments[0].analogsignals[0].times
+        v_post_neuron = np.array(v_post_neuron.segments[0].filter(name="V_m")[0])
+        i_syn_exc_post_neuron = post_neuron.get_data("I_syn_exc")
+        i_syn_exc_post_neuron = np.array(i_syn_exc_post_neuron.segments[0].filter(name="I_syn_exc")[0])
+
+        p.end()
+
+        return times, v_post_neuron, i_syn_exc_post_neuron
+
+    @pytest.mark.parametrize("weight", [123, 1234])
+    def test_stdp(self, weight):
+        pre_spike_times = [10.]
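+
+        # A single presynaptic spike at t = 10 ms; the PSP amplitude should
+        # scale linearly with the programmed weight, which is why two weights
+        # are parametrized and the peak of I_syn_exc is compared to the weight.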
+        times, v_post_neuron, i_syn_exc_post_neuron = self.run_sim(pre_spike_times, weight=weight)
+
+        fig, ax = plt.subplots(nrows=2)
+        ax[0].plot(times, v_post_neuron, label="V_m")
+        ax[1].plot(times, i_syn_exc_post_neuron, label="I_exc")
+        for _ax in ax:
+            _ax.grid(True)
+            _ax.legend()
+            _ax.set_xlim(np.amin(times), np.amax(times))
+
+        ax[0].set_xticklabels([])
+        ax[-1].set_xlabel("Time [ms]")
+
+        fig.savefig("test_spinnaker_stdp_psp_" + time.strftime("%Y-%m-%d_%H-%M-%S") + ".png")
+
+        assert len(np.unique(v_post_neuron)) > 1, "No PSPs detected in postsynaptic membrane potential"
+        np.testing.assert_allclose(np.amax(i_syn_exc_post_neuron), weight)
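
A note on the raw integer weights used throughout these tests (25, 123, 3000, and the `dw - 32768` unwrapping in test_spinnaker_stdp.py): they are fixed-point bit patterns rather than physical units. The following sketch shows how such patterns can be decoded; it is illustrative only, the helper names are invented, and the 16-bit two's-complement readback is an assumption that should be verified against the generated C code (the test's unwrap threshold of 16000 hints at a different scaling).

    # Illustrative helpers (not part of the patch); names are hypothetical.

    def s15_16_to_float(q: int) -> float:
        """Decode a 32-bit S15.16 two's-complement pattern into a float."""
        q &= 0xFFFFFFFF              # view as a raw 32-bit pattern
        if q & 0x80000000:           # sign bit set -> negative value
            q -= 0x100000000
        return q / 65536.0           # undo the 2**16 fractional scaling


    def u16_as_signed(w: int) -> int:
        """Reinterpret an unsigned 16-bit pattern as a signed value -- one
        possible cleanup for the 'dw > 16000' unwrapping hack in
        test_spinnaker_stdp.py, assuming the weight really is a 16-bit word."""
        return w - 0x10000 if w & 0x8000 else w


    if __name__ == "__main__":
        assert s15_16_to_float(163840) == 2.5     # 2.5 * 2**16
        assert s15_16_to_float(-180224) == -2.75  # matches the -0x2C000 example
        assert u16_as_signed(0xFFFF) == -1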