diff --git a/.gitignore b/.gitignore
index d80c8cffd..4db8d5ca5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,14 +7,20 @@ target/
build/
buildNest/
venv/
-spinnaker-install/
+tests/spinnaker_tests/bak/spinnaker-install/
spinnaker-target/
report/
reports/
nestml-*/
*.egg-info
stash/
-
+tests/spinnaker2_tests/spinnaker2-install
+tests/spinnaker2_tests/spinnaker2-target
+tests/spinnaker_tests/report
+tests/spinnaker_tests/spinnaker-install
+tests/spinnaker_tests/spinnaker-target
+tests/nest_tests/target_*/
+tests/spinnaker2_tests/nestml_*/
__*
@@ -35,3 +41,27 @@ venv
*.gdf
*~
*.iml
+tests/nest_tests/non_dimensionalisation_transformer/resources/non_dimensionalisation_transformer_test_neuron.nestml.bak
+tests/nest_tests/non_dimensionalisation_transformer/tests/reference_test_non_dim_transformer_function_call_in_equation_block.png
+tests/nest_tests/non_dimensionalisation_transformer/tests/transformed_model_test_exp_in_equation_block.txt
+tests/nest_tests/non_dimensionalisation_transformer/tests/transformed_model.txt
+pynestml/codegeneration/resources_spinnaker2/bak/@NEURON_NAME@.c.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/@NEURON_NAME@.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/decay.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/global_params.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/maths-util.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/neuron_model_@NEURON_NAME@_impl.c.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/neuron_model_@NEURON_NAME@_impl.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/neuron_model.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/neuron-typedefs.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/neuron.c.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/neuron.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/param_defs.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/population_table.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/regions.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/simulation.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/synapse_row.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/synapse_types_exponential_impl.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/synapse_types.h.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/synapses.c.jinja2
+pynestml/codegeneration/resources_spinnaker2/bak/synapses.h.jinja2
diff --git a/models/neurons/iaf_psc_exp_neuron_NO_ISTIM.nestml b/models/neurons/iaf_psc_exp_neuron_NO_ISTIM.nestml
new file mode 100644
index 000000000..f7d95d04a
--- /dev/null
+++ b/models/neurons/iaf_psc_exp_neuron_NO_ISTIM.nestml
@@ -0,0 +1,130 @@
+# iaf_psc_exp_NO_ISTIM - Leaky integrate-and-fire neuron model
+# ###################################################
+#
+# Description
+# +++++++++++
+#
+# iaf_psc_exp is an implementation of a leaky integrate-and-fire model
+# with exponentially decaying synaptic currents according to [1]_.
+# Thus, postsynaptic currents have an infinitely short rise time.
+# The input current I_stim is removed for code generation testing purposes.
+#
+# The threshold crossing is followed by an absolute refractory period
+# during which the membrane potential is clamped to the resting potential
+# and spiking is prohibited.
+#
+# The general framework for the consistent formulation of systems with
+# neuron like dynamics interacting by point events is described in
+# [1]_. A flow chart can be found in [2]_.
+#
+# Critical tests for the formulation of the neuron model are the
+# comparisons of simulation results for different computation step
+# sizes.
+#
+# .. note::
+#
+# If tau_m is very close to tau_syn_exc or tau_syn_inh, numerical problems
+# may arise due to singularities in the propagator matrices. If this is
+# the case, replace equal-valued parameters by a single parameter.
+#
+# For details, please see ``IAF_neurons_singularity.ipynb`` in
+# the NEST source code (``docs/model_details``).
+#
+#
+# References
+# ++++++++++
+#
+# .. [1] Rotter S, Diesmann M (1999). Exact simulation of
+# time-invariant linear systems with applications to neuronal
+# modeling. Biological Cybernetics 81:381-402.
+# DOI: https://doi.org/10.1007/s004220050570
+# .. [2] Diesmann M, Gewaltig M-O, Rotter S, & Aertsen A (2001). State
+# space analysis of synchronous spiking in cortical neural
+# networks. Neurocomputing 38-40:565-571.
+# DOI: https://doi.org/10.1016/S0925-2312(01)00409-X
+# .. [3] Morrison A, Straube S, Plesser H E, Diesmann M (2006). Exact
+# subthreshold integration with continuous spike times in discrete time
+# neural network simulations. Neural Computation, in press
+# DOI: https://doi.org/10.1162/neco.2007.19.1.47
+#
+#
+# See also
+# ++++++++
+#
+# iaf_psc_delta, iaf_psc_alpha, iaf_cond_exp
+#
+#
+# Copyright statement
+# +++++++++++++++++++
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+model iaf_psc_exp_neuron_NO_ISTIM:
+
+ state:
+ V_m mV = E_L # Membrane potential
+ refr_t ms = 0 ms # Refractory period timer
+ I_syn_exc pA = 0 pA
+ I_syn_inh pA = 0 pA
+
+ equations:
+ I_syn_exc' = -I_syn_exc / tau_syn_exc
+ I_syn_inh' = -I_syn_inh / tau_syn_inh
+ V_m' = -(V_m - E_L) / tau_m + (I_syn_exc - I_syn_inh + I_e) / C_m
+ refr_t' = -1e3 * ms/s # refractoriness is implemented as an ODE, representing a timer counting back down to zero. XXX: TODO: This should simply read ``refr_t' = -1 / s`` (see https://github.com/nest/nestml/issues/984)
+
+ parameters:
+ C_m pF = 250 pF # Capacitance of the membrane
+ tau_m ms = 10 ms # Membrane time constant
+ tau_syn_inh ms = 2 ms # Time constant of inhibitory synaptic current
+ tau_syn_exc ms = 2 ms # Time constant of excitatory synaptic current
+ refr_T ms = 2 ms # Duration of refractory period
+ E_L mV = -70 mV # Resting potential
+ V_reset mV = -70 mV # Reset value of the membrane potential
+ V_th mV = -55 mV # Spike threshold potential
+
+ # constant external input current
+ I_e pA = 0 pA
+
+ input:
+ exc_spikes <- excitatory spike
+ inh_spikes <- inhibitory spike
+
+ output:
+ spike
+
+ update:
+ if refr_t > 0 ms:
+ # neuron is absolute refractory, do not evolve V_m
+ integrate_odes(I_syn_exc, I_syn_inh, refr_t)
+ else:
+ # neuron not refractory
+ integrate_odes(I_syn_exc, I_syn_inh, V_m)
+
+ onReceive(exc_spikes):
+ I_syn_exc += exc_spikes * pA * s
+
+ onReceive(inh_spikes):
+ I_syn_inh += inh_spikes * pA * s
+
+ onCondition(refr_t <= 0 ms and V_m >= V_th):
+ # threshold crossing
+ refr_t = refr_T # start of the refractory period
+ V_m = V_reset
+ emit_spike()
diff --git a/pynestml/cocos/co_co_function_calls_consistent.py b/pynestml/cocos/co_co_function_calls_consistent.py
index 2b9baa544..e993240f3 100644
--- a/pynestml/cocos/co_co_function_calls_consistent.py
+++ b/pynestml/cocos/co_co_function_calls_consistent.py
@@ -28,7 +28,6 @@
from pynestml.utils.ast_utils import ASTUtils
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
-from pynestml.utils.type_caster import TypeCaster
from pynestml.visitors.ast_visitor import ASTVisitor
@@ -109,5 +108,5 @@ def visit_function_call(self, node):
# variadic type symbol accepts anything
return
- if not actual_type.equals(expected_type) and not isinstance(expected_type, TemplateTypeSymbol):
- TypeCaster.try_to_recover_or_error(expected_type, actual_type, actual_arg)
+ # if not actual_type.equals(expected_type) and not isinstance(expected_type, TemplateTypeSymbol):
+ # TypeCaster.try_to_recover_or_error(expected_type, actual_type, actual_arg)
diff --git a/pynestml/cocos/co_co_illegal_expression.py b/pynestml/cocos/co_co_illegal_expression.py
index c362d0dc5..41e431af4 100644
--- a/pynestml/cocos/co_co_illegal_expression.py
+++ b/pynestml/cocos/co_co_illegal_expression.py
@@ -28,7 +28,6 @@
from pynestml.utils.logger import LoggingLevel, Logger
from pynestml.utils.logging_helper import LoggingHelper
from pynestml.utils.messages import Messages
-from pynestml.utils.type_caster import TypeCaster
from pynestml.visitors.ast_visitor import ASTVisitor
@@ -68,9 +67,8 @@ def visit_declaration(self, node):
if isinstance(rhs_type, ErrorTypeSymbol):
LoggingHelper.drop_missing_type_error(node)
return
- if self.__types_do_not_match(lhs_type, rhs_type):
- TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression())
- return
+ # if self.__types_do_not_match(lhs_type, rhs_type):
+ # TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression())
def visit_inline_expression(self, node):
"""
@@ -82,8 +80,8 @@ def visit_inline_expression(self, node):
if isinstance(rhs_type, ErrorTypeSymbol):
LoggingHelper.drop_missing_type_error(node)
return
- if self.__types_do_not_match(lhs_type, rhs_type):
- TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression())
+ # if self.__types_do_not_match(lhs_type, rhs_type):
+ # TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression())
def visit_assignment(self, node):
"""
@@ -120,23 +118,23 @@ def handle_compound_assignment(self, node):
lhs_type_symbol = lhs_variable_symbol.get_type_symbol()
if node.is_compound_product:
- if self.__types_do_not_match(lhs_type_symbol, lhs_type_symbol * rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_type_symbol, lhs_type_symbol * rhs_type_symbol,
- node.get_expression())
- return
+ # if self.__types_do_not_match(lhs_type_symbol, lhs_type_symbol * rhs_type_symbol):
+ # TypeCaster.try_to_recover_or_error(lhs_type_symbol, lhs_type_symbol * rhs_type_symbol,
+ # node.get_expression())
+ # return
return
if node.is_compound_quotient:
- if self.__types_do_not_match(lhs_type_symbol, lhs_type_symbol / rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_type_symbol, lhs_type_symbol / rhs_type_symbol,
- node.get_expression())
- return
+ # if self.__types_do_not_match(lhs_type_symbol, lhs_type_symbol / rhs_type_symbol):
+ # TypeCaster.try_to_recover_or_error(lhs_type_symbol, lhs_type_symbol / rhs_type_symbol,
+ # node.get_expression())
+ # return
return
assert node.is_compound_sum or node.is_compound_minus
- if self.__types_do_not_match(lhs_type_symbol, rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_type_symbol, rhs_type_symbol,
- node.get_expression())
+ # if self.__types_do_not_match(lhs_type_symbol, rhs_type_symbol):
+ # TypeCaster.try_to_recover_or_error(lhs_type_symbol, rhs_type_symbol,
+ # node.get_expression())
@staticmethod
def __types_do_not_match(lhs_type_symbol, rhs_type_symbol):
@@ -154,11 +152,10 @@ def handle_simple_assignment(self, node):
LoggingHelper.drop_missing_type_error(node)
return
- if lhs_variable_symbol is not None and self.__types_do_not_match(lhs_variable_symbol.get_type_symbol(),
- rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_variable_symbol.get_type_symbol(), rhs_type_symbol,
- node.get_expression())
- return
+ # if lhs_variable_symbol is not None and self.__types_do_not_match(lhs_variable_symbol.get_type_symbol(),
+ # rhs_type_symbol):
+ # TypeCaster.try_to_recover_or_error(lhs_variable_symbol.get_type_symbol(), rhs_type_symbol,
+ # node.get_expression())
def visit_if_clause(self, node):
"""
diff --git a/pynestml/cocos/co_co_user_defined_function_correctly_defined.py b/pynestml/cocos/co_co_user_defined_function_correctly_defined.py
index ec62a9ac4..bfc61b843 100644
--- a/pynestml/cocos/co_co_user_defined_function_correctly_defined.py
+++ b/pynestml/cocos/co_co_user_defined_function_correctly_defined.py
@@ -28,7 +28,6 @@
from pynestml.symbols.symbol import SymbolKind
from pynestml.utils.logger import LoggingLevel, Logger
from pynestml.utils.messages import Messages
-from pynestml.utils.type_caster import TypeCaster
class CoCoUserDefinedFunctionCorrectlyDefined(CoCo):
@@ -128,9 +127,9 @@ def __check_return_recursively(cls, type_symbol=None, stmts=None, ret_defined=Fa
code, message = Messages.get_type_could_not_be_derived(cls.processed_function.get_name())
Logger.log_message(error_position=stmt.get_source_position(),
code=code, message=message, log_level=LoggingLevel.ERROR)
- elif not type_of_return.equals(type_symbol):
- TypeCaster.try_to_recover_or_error(type_symbol, type_of_return,
- stmt.get_return_stmt().get_expression())
+ # elif not type_of_return.equals(type_symbol):
+ # TypeCaster.try_to_recover_or_error(type_symbol, type_of_return,
+ # stmt.get_return_stmt().get_expression())
elif isinstance(stmt, ASTCompoundStmt):
# otherwise it is a compound stmt, thus check recursively
if stmt.is_if_stmt():
diff --git a/pynestml/codegeneration/nest_code_generator.py b/pynestml/codegeneration/nest_code_generator.py
index ccf2ad11c..0c6396268 100644
--- a/pynestml/codegeneration/nest_code_generator.py
+++ b/pynestml/codegeneration/nest_code_generator.py
@@ -18,7 +18,6 @@
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see .
-
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple
import datetime
@@ -312,11 +311,14 @@ def analyse_transform_neurons(self, neurons: List[ASTModel]) -> None:
for neuron in neurons:
code, message = Messages.get_analysing_transforming_model(neuron.get_name())
Logger.log_message(None, code, message, None, LoggingLevel.INFO)
- spike_updates, post_spike_updates, equations_with_delay_vars, equations_with_vector_vars = self.analyse_neuron(neuron)
+ spike_updates, post_spike_updates, equations_with_delay_vars, equations_with_vector_vars = self.analyse_neuron(neuron) # , parameter_value_dict, updated_state_dict = self.analyse_neuron(neuron)
neuron.spike_updates = spike_updates
neuron.post_spike_updates = post_spike_updates
neuron.equations_with_delay_vars = equations_with_delay_vars
neuron.equations_with_vector_vars = equations_with_vector_vars
+ # neuron.analytic_solver = analytic_solver
+ # neuron.parameter_value_dict = parameter_value_dict
+ # neuron.updated_state_dict = updated_state_dict
def analyse_transform_synapses(self, synapses: List[ASTModel]) -> None:
"""
@@ -362,6 +364,15 @@ def analyse_neuron(self, neuron: ASTModel) -> Tuple[Dict[str, ASTAssignment], Di
neuron.accept(equations_with_delay_vars_visitor)
equations_with_delay_vars = equations_with_delay_vars_visitor.equations
+ # Collect all parameters and their attached values
+ # parameter_block = neuron.get_parameters_blocks()[0]
+ # parameter_value_dict = ASTUtils.generate_parameter_value_dict(neuron, parameter_block)
+ # state_block = neuron.get_state_blocks()[0]
+ # updated_state_dict = ASTUtils.generate_updated_state_dict(neuron, state_block, parameter_value_dict)
+
+
+
+
# Collect all the equations with vector variables
eqns_with_vector_vars_visitor = ASTEquationsWithVectorVariablesVisitor()
neuron.accept(eqns_with_vector_vars_visitor)
@@ -413,7 +424,7 @@ def analyse_neuron(self, neuron: ASTModel) -> Tuple[Dict[str, ASTAssignment], Di
spike_updates, post_spike_updates = self.get_spike_update_expressions(neuron, kernel_buffers, [analytic_solver, numeric_solver], delta_factors)
- return spike_updates, post_spike_updates, equations_with_delay_vars, equations_with_vector_vars
+ return spike_updates, post_spike_updates, equations_with_delay_vars, equations_with_vector_vars# , analytic_solver , parameter_value_dict, updated_state_dict
def analyse_synapse(self, synapse: ASTModel) -> Dict[str, ASTAssignment]:
"""
@@ -915,6 +926,12 @@ def ode_toolbox_analysis(self, neuron: ASTModel, kernel_buffers: Mapping[ASTKern
odetoolbox_indict["options"]["output_timestep_symbol"] = "__h"
odetoolbox_indict["options"]["simplify_expression"] = self.get_option("simplify_expression")
disable_analytic_solver = self.get_option("solver") != "analytic"
+ # solver_result = odetoolbox.analysis(odetoolbox_indict,
+ # disable_stiffness_check=True,
+ # disable_analytic_solver=disable_analytic_solver,
+ # preserve_expressions=self.get_option("preserve_expressions"),
+ # simplify_expression=self.get_option("simplify_expression"),
+ # log_level=FrontendConfiguration.logging_level)
solver_result = odetoolbox.analysis(odetoolbox_indict,
disable_stiffness_check=True,
disable_analytic_solver=disable_analytic_solver,
diff --git a/pynestml/codegeneration/nest_unit_converter.py b/pynestml/codegeneration/nest_unit_converter.py
deleted file mode 100644
index d8d88e2c5..000000000
--- a/pynestml/codegeneration/nest_unit_converter.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# nest_unit_converter.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see .
-
-from astropy import units
-
-
-class NESTUnitConverter:
- r"""
- NEST Simulator uses a set of default physical units internally. This class calculates the factor needed to convert any given physical unit to its NEST counterpart.
- """
-
- @classmethod
- def get_factor(cls, unit: units.UnitBase) -> float:
- """
- Gives a factor for a given unit that transforms it to a "neuroscience" scale. If the given unit is not listed as a neuroscience unit, the factor is 1.
-
- :param unit: an astropy unit
- :type unit: IrreducibleUnit or Unit or CompositeUnit
- :return: a factor to that unit, converting it to "neuroscience" scales.
- """
- assert (isinstance(unit, units.IrreducibleUnit) or isinstance(unit, units.CompositeUnit)
- or isinstance(unit, units.Unit) or isinstance(unit, units.PrefixUnit)), \
- "UnitConverter: given parameter is not a unit (%s)!" % type(unit)
-
- # check if it is dimensionless, thus only a prefix
- if unit.physical_type == 'dimensionless':
- return unit.si
-
- # otherwise check if it is one of the base units
- target_unit = None
- if unit.physical_type == 'electrical conductance':
- target_unit = units.nS
-
- if unit.physical_type == 'electrical resistance':
- target_unit = units.Gohm
-
- if unit.physical_type == 'time':
- target_unit = units.ms
-
- if unit.physical_type == 'electrical capacitance':
- target_unit = units.pF
-
- if unit.physical_type == 'electrical potential':
- target_unit = units.mV
-
- if unit.physical_type == 'electrical current':
- target_unit = units.pA
-
- if target_unit is not None:
- return (unit / target_unit).si.scale
-
- if unit == unit.bases[0] and len(unit.bases) == 1:
- # this case means that we stuck in a recursive definition
- # just return the factor 1.0
- return 1.0
-
- # now if it is not a base unit, it has to be a combined one, e.g. s**2, decompose it
- factor = 1.0
- for i in range(0, len(unit.bases)):
- factor *= cls.get_factor(unit.bases[i]) ** unit.powers[i]
- return factor
diff --git a/pynestml/codegeneration/printers/c_simple_expression_printer.py b/pynestml/codegeneration/printers/c_simple_expression_printer.py
index 7b2ccf748..2221e67ab 100644
--- a/pynestml/codegeneration/printers/c_simple_expression_printer.py
+++ b/pynestml/codegeneration/printers/c_simple_expression_printer.py
@@ -71,7 +71,4 @@ def _print(self, node: ASTNode) -> str:
return self.print_simple_expression(node)
def print(self, node: ASTNode) -> str:
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self._print(node) + "))"
-
return self._print(node)
diff --git a/pynestml/codegeneration/printers/cpp_expression_printer.py b/pynestml/codegeneration/printers/cpp_expression_printer.py
index e392a35b6..44f54bcce 100644
--- a/pynestml/codegeneration/printers/cpp_expression_printer.py
+++ b/pynestml/codegeneration/printers/cpp_expression_printer.py
@@ -39,9 +39,6 @@ class CppExpressionPrinter(ExpressionPrinter):
def print(self, node: ASTNode) -> str:
if isinstance(node, ASTExpression):
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self.print_expression(node) + "))"
-
return self.print_expression(node)
return self._simple_expression_printer.print(node)
diff --git a/pynestml/codegeneration/printers/gsl_variable_printer.py b/pynestml/codegeneration/printers/gsl_variable_printer.py
index 463833a43..1903b850d 100644
--- a/pynestml/codegeneration/printers/gsl_variable_printer.py
+++ b/pynestml/codegeneration/printers/gsl_variable_printer.py
@@ -18,7 +18,7 @@
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see .
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
+
from pynestml.codegeneration.printers.cpp_variable_printer import CppVariablePrinter
from pynestml.meta_model.ast_variable import ASTVariable
from pynestml.symbols.predefined_units import PredefinedUnits
@@ -45,7 +45,7 @@ def print_variable(self, node: ASTVariable) -> str:
if symbol is None:
# test if variable name can be resolved to a type
if PredefinedUnits.is_unit(node.get_complete_name()):
- return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(node.get_complete_name()).get_unit()))
+ return str(PredefinedUnits.get_unit(node.get_complete_name()).get_unit())
code, message = Messages.get_could_not_resolve(node.get_name())
Logger.log_message(log_level=LoggingLevel.ERROR, code=code, message=message,
diff --git a/pynestml/codegeneration/printers/latex_expression_printer.py b/pynestml/codegeneration/printers/latex_expression_printer.py
index 79b04a0d0..14cd54a74 100644
--- a/pynestml/codegeneration/printers/latex_expression_printer.py
+++ b/pynestml/codegeneration/printers/latex_expression_printer.py
@@ -35,10 +35,6 @@ class LatexExpressionPrinter(ExpressionPrinter):
"""
def print(self, node: ASTExpressionNode) -> str:
- if node.get_implicit_conversion_factor() is not None \
- and str(node.get_implicit_conversion_factor()) not in ["1.", "1.0", "1"]:
- return str(node.get_implicit_conversion_factor()) + " * (" + self.print_expression(node) + ")"
-
return self.print_expression(node)
def print_expression(self, node: ASTExpressionNode) -> str:
diff --git a/pynestml/codegeneration/printers/nest_variable_printer.py b/pynestml/codegeneration/printers/nest_variable_printer.py
index 1516a984d..9a06c50dd 100644
--- a/pynestml/codegeneration/printers/nest_variable_printer.py
+++ b/pynestml/codegeneration/printers/nest_variable_printer.py
@@ -26,7 +26,6 @@
from pynestml.codegeneration.nest_code_generator_utils import NESTCodeGeneratorUtils
from pynestml.codegeneration.printers.cpp_variable_printer import CppVariablePrinter
from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
from pynestml.meta_model.ast_external_variable import ASTExternalVariable
from pynestml.meta_model.ast_variable import ASTVariable
from pynestml.symbols.predefined_units import PredefinedUnits
@@ -99,7 +98,7 @@ def print_variable(self, variable: ASTVariable) -> str:
if symbol is None:
# test if variable name can be resolved to a type
if PredefinedUnits.is_unit(variable.get_complete_name()):
- return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit()))
+ return str(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit())
code, message = Messages.get_could_not_resolve(variable.get_name())
Logger.log_message(log_level=LoggingLevel.ERROR, code=code, message=message,
@@ -111,18 +110,11 @@ def print_variable(self, variable: ASTVariable) -> str:
vector_param = "[" + self._expression_printer.print(variable.get_vector_parameter()) + "]"
if symbol.is_buffer():
- if isinstance(symbol.get_type_symbol(), UnitTypeSymbol):
- units_conversion_factor = NESTUnitConverter.get_factor(symbol.get_type_symbol().unit.unit)
- else:
- units_conversion_factor = 1
s = ""
- if not units_conversion_factor == 1:
- s += "(" + str(units_conversion_factor) + " * "
if self.cpp_variable_suffix == "":
s += "B_."
s += self._print_buffer_value(variable)
- if not units_conversion_factor == 1:
- s += ")"
+
return s
if symbol.is_inline_expression:
diff --git a/pynestml/codegeneration/printers/nestml_expression_printer.py b/pynestml/codegeneration/printers/nestml_expression_printer.py
index f5795cde1..8880be6a8 100644
--- a/pynestml/codegeneration/printers/nestml_expression_printer.py
+++ b/pynestml/codegeneration/printers/nestml_expression_printer.py
@@ -36,9 +36,6 @@ class NESTMLExpressionPrinter(ExpressionPrinter):
def print(self, node: ASTNode) -> str:
if isinstance(node, ASTExpression):
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self.print_expression(node) + "))"
-
return self.print_expression(node)
if isinstance(node, ASTArithmeticOperator):
diff --git a/pynestml/codegeneration/printers/nestml_simple_expression_printer.py b/pynestml/codegeneration/printers/nestml_simple_expression_printer.py
index 8198c239a..fef554d0a 100644
--- a/pynestml/codegeneration/printers/nestml_simple_expression_printer.py
+++ b/pynestml/codegeneration/printers/nestml_simple_expression_printer.py
@@ -41,9 +41,6 @@ def _print(self, node: ASTNode) -> str:
return self.print_simple_expression(node)
def print(self, node: ASTNode) -> str:
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self._print(node) + "))"
-
return self._print(node)
def print_simple_expression(self, node: ASTSimpleExpression) -> str:
diff --git a/pynestml/codegeneration/printers/python_expression_printer.py b/pynestml/codegeneration/printers/python_expression_printer.py
index c129db26b..6b040a0b6 100644
--- a/pynestml/codegeneration/printers/python_expression_printer.py
+++ b/pynestml/codegeneration/printers/python_expression_printer.py
@@ -36,9 +36,6 @@ class PythonExpressionPrinter(ExpressionPrinter):
def print(self, node: ASTNode) -> str:
if isinstance(node, ASTExpression):
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self.print_expression(node) + "))"
-
return self.print_expression(node)
return self._simple_expression_printer.print(node)
diff --git a/pynestml/codegeneration/printers/python_simple_expression_printer.py b/pynestml/codegeneration/printers/python_simple_expression_printer.py
index e9ecde84e..370b4ca3a 100644
--- a/pynestml/codegeneration/printers/python_simple_expression_printer.py
+++ b/pynestml/codegeneration/printers/python_simple_expression_printer.py
@@ -75,7 +75,4 @@ def _print(self, node: ASTNode) -> str:
return self.print_simple_expression(node)
def print(self, node: ASTNode) -> str:
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self._print(node) + "))"
-
return self._print(node)
diff --git a/pynestml/codegeneration/printers/python_variable_printer.py b/pynestml/codegeneration/printers/python_variable_printer.py
index d03bdadd0..01895c399 100644
--- a/pynestml/codegeneration/printers/python_variable_printer.py
+++ b/pynestml/codegeneration/printers/python_variable_printer.py
@@ -21,7 +21,6 @@
from __future__ import annotations
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter
from pynestml.codegeneration.printers.variable_printer import VariablePrinter
from pynestml.codegeneration.python_code_generator_utils import PythonCodeGeneratorUtils
@@ -93,7 +92,7 @@ def print_variable(self, variable: ASTVariable) -> str:
if symbol is None:
# test if variable name can be resolved to a type
if PredefinedUnits.is_unit(variable.get_complete_name()):
- return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit()))
+ return str(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit())
code, message = Messages.get_could_not_resolve(variable.get_name())
Logger.log_message(log_level=LoggingLevel.ERROR, code=code, message=message,
@@ -105,17 +104,9 @@ def print_variable(self, variable: ASTVariable) -> str:
vector_param = "[" + self._expression_printer.print(variable.get_vector_parameter()) + "]"
if symbol.is_buffer():
- if isinstance(symbol.get_type_symbol(), UnitTypeSymbol):
- units_conversion_factor = NESTUnitConverter.get_factor(symbol.get_type_symbol().unit.unit)
- else:
- units_conversion_factor = 1
s = ""
- if not units_conversion_factor == 1:
- s += "(" + str(units_conversion_factor) + " * "
s += self._print(variable, symbol, with_origin=self.with_origin) + vector_param
s += vector_param
- if not units_conversion_factor == 1:
- s += ")"
return s
if symbol.is_inline_expression:
diff --git a/pynestml/codegeneration/printers/spinnaker2_c_function_call_printer.py b/pynestml/codegeneration/printers/spinnaker2_c_function_call_printer.py
new file mode 100644
index 000000000..a450468a5
--- /dev/null
+++ b/pynestml/codegeneration/printers/spinnaker2_c_function_call_printer.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+#
+# spinnaker2_c_function_call_printer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+from pynestml.codegeneration.printers.function_call_printer import FunctionCallPrinter
+from pynestml.meta_model.ast_function_call import ASTFunctionCall
+from pynestml.symbols.predefined_functions import PredefinedFunctions
+from pynestml.utils.ast_utils import ASTUtils
+
+
+class Spinnaker2CFunctionCallPrinter(FunctionCallPrinter):
+ r"""
+ Printer for ASTFunctionCall in C SpiNNaker2 API syntax.
+ """
+
+ def print_function_call(self, node: ASTFunctionCall) -> str:
+ r"""
+ Converts a single handed over function call to C Spinnaker API syntax.
+
+ Parameters
+ ----------
+        node
+ The function call node to convert.
+
+ Returns
+ -------
+ s
+ The function call string in C syntax.
+ """
+ function_name = node.get_name()
+
+ if function_name in [PredefinedFunctions.TIME_RESOLUTION, PredefinedFunctions.TIME_TIMESTEP]:
+ # context dependent; we assume the template contains the necessary definitions
+ return 'global_params->calc_step_raw'
+
+ if function_name == PredefinedFunctions.TIME_STEPS:
+ raise Exception("time_steps() function not yet implemented")
+
+ if function_name == PredefinedFunctions.RANDOM_NORMAL:
+ raise Exception("rng functions not yet implemented")
+
+ if function_name == PredefinedFunctions.RANDOM_UNIFORM:
+ raise Exception("rng functions not yet implemented")
+
+ if function_name == PredefinedFunctions.EMIT_SPIKE:
+ return 'record_spike(neuron_index);\n' \
+ 'send_spikes_to_all_targets(routing_info_ptr->key_offset + neuron_index)'
+
+ return super().print_function_call(node)
+
+ def _print_function_call_format_string(self, function_call: ASTFunctionCall) -> str:
+ r"""
+ Converts a single handed over function call to C Spinnaker API syntax.
+
+ Parameters
+ ----------
+ function_call
+ The function call node to convert.
+
+ Returns
+ -------
+ s
+ The function call string in C syntax.
+ """
+ function_name = function_call.get_name()
+
+ if function_name == PredefinedFunctions.CLIP:
+ # the arguments of this function must be swapped and are therefore [v_max, v_min, v]
+ return 'MIN({2!s}, MAX({1!s}, {0!s}))'
+
+ if function_name == PredefinedFunctions.MAX:
+ return 'MAX({!s}, {!s})'
+
+ if function_name == PredefinedFunctions.MIN:
+ return 'MIN({!s}, {!s})'
+
+ if function_name == PredefinedFunctions.EXP:
+ return 'expk({!s})'
+
+ if function_name == PredefinedFunctions.LN:
+ return 'logk({!s})'
+
+ if function_name == PredefinedFunctions.POW:
+ return '(expk({1!s} * logk({0!s})))'
+
+ if function_name == PredefinedFunctions.LOG10:
+ return '(kdivk(logk({!s}), REAL_CONST(2.303)))'
+
+ if function_name == PredefinedFunctions.COS:
+ return 'cos({!s})'
+
+ if function_name == PredefinedFunctions.SIN:
+ return 'sin({!s})'
+
+ if function_name == PredefinedFunctions.TAN:
+ return 'tan({!s})'
+
+ if function_name == PredefinedFunctions.COSH:
+            return '(HALF * (expk({0!s}) + expk(-{0!s})))'
+
+ if function_name == PredefinedFunctions.SINH:
+            return '(HALF * (expk({0!s}) - expk(-{0!s})))'
+
+ if function_name == PredefinedFunctions.TANH:
+            return 'kdivk((expk({0!s}) - expk(-{0!s})), (expk({0!s}) + expk(-{0!s})))'
+
+ if function_name == PredefinedFunctions.ERF:
+ raise Exception("Erf not defined for spinnaker")
+
+ if function_name == PredefinedFunctions.ERFC:
+ raise Exception("Erfc not defined for spinnaker")
+
+ if function_name == PredefinedFunctions.EXPM1:
+ raise Exception("Expm1 not defined for spinnaker")
+
+ if function_name == PredefinedFunctions.PRINT:
+ return 'log_info("%s", {!s})'
+
+ if function_name == PredefinedFunctions.PRINTLN:
+ raise Exception("PRINTLN not defined for spinnaker2")
+
+ if ASTUtils.needs_arguments(function_call):
+ n_args = len(function_call.get_args())
+ return function_name + '(' + ', '.join(['{!s}' for _ in range(n_args)]) + ')'
+
+ return function_name + '()'
diff --git a/pynestml/codegeneration/printers/spinnaker2_c_variable_printer.py b/pynestml/codegeneration/printers/spinnaker2_c_variable_printer.py
new file mode 100644
index 000000000..1830ccc3e
--- /dev/null
+++ b/pynestml/codegeneration/printers/spinnaker2_c_variable_printer.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+#
+# spinnaker2_c_variable_printer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+from __future__ import annotations
+
+from pynestml.utils.ast_utils import ASTUtils
+
+from pynestml.codegeneration.spinnaker2_code_generator_utils import SPINNAKER2CodeGeneratorUtils
+from pynestml.codegeneration.printers.cpp_variable_printer import CppVariablePrinter
+from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter
+from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
+from pynestml.meta_model.ast_external_variable import ASTExternalVariable
+from pynestml.meta_model.ast_variable import ASTVariable
+from pynestml.symbols.predefined_units import PredefinedUnits
+from pynestml.symbols.predefined_variables import PredefinedVariables
+from pynestml.symbols.symbol import SymbolKind
+from pynestml.symbols.unit_type_symbol import UnitTypeSymbol
+from pynestml.symbols.variable_symbol import BlockType
+from pynestml.utils.logger import Logger, LoggingLevel
+from pynestml.utils.messages import Messages
+
+
+class Spinnaker2CVariablePrinter(CppVariablePrinter):
+ r"""
+ Variable printer for C syntax and the Spinnaker2 API.
+ """
+
+ def __init__(self, expression_printer: ExpressionPrinter, with_origin: bool = True, with_vector_parameter: bool = True) -> None:
+ super().__init__(expression_printer)
+ self.with_origin = with_origin
+ self.with_vector_parameter = with_vector_parameter
+ self._state_symbols = []
+
+ def print_variable(self, variable: ASTVariable) -> str:
+ """
+ Converts a single variable to Spinnaker processable format.
+ :param variable: a single variable.
+ :return: a Spinnaker processable format.
+ """
+ assert isinstance(variable, ASTVariable)
+
+ if isinstance(variable, ASTExternalVariable):
+ raise Exception("SpiNNaker2 does not support external variables")
+
+ if variable.get_name() == PredefinedVariables.E_CONSTANT:
+ return "REAL_CONST(2.718281828f)"
+
+ if variable.get_name() == PredefinedVariables.PI_CONSTANT:
+ return "REAL_CONST(3.14159265358979f)"
+
+ if variable.get_name() == '__h':
+ # context dependent; we assume the template contains the necessary definitions
+ return 'global_neuron_params.calc_step_raw'
+
+ symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE)
+ if symbol is None:
+ # test if variable name can be resolved to a type
+ if PredefinedUnits.is_unit(variable.get_complete_name()):
+ return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit()))
+
+ code, message = Messages.get_could_not_resolve(variable.get_name())
+ Logger.log_message(log_level=LoggingLevel.ERROR, code=code, message=message,
+ error_position=variable.get_source_position())
+ return ""
+
+ vector_param = ""
+ if self.with_vector_parameter and symbol.has_vector_parameter():
+ vector_param = "[" + self._expression_printer.print(variable.get_vector_parameter()) + "]"
+
+ if symbol.is_buffer():
+ if isinstance(symbol.get_type_symbol(), UnitTypeSymbol):
+ units_conversion_factor = NESTUnitConverter.get_factor(symbol.get_type_symbol().unit.unit)
+ else:
+ units_conversion_factor = 1
+ s = ""
+ if not units_conversion_factor == 1:
+ s += "(" + str(units_conversion_factor) + " * "
+ s += self._print_buffer_value(variable)
+ if not units_conversion_factor == 1:
+ s += ")"
+ return s
+
+ if symbol.is_inline_expression:
+ # there might not be a corresponding defined state variable; insist on calling the getter function
+ return "get_" + self._print(variable, symbol, with_origin=False) + vector_param + "()"
+
+ assert not symbol.is_kernel(), "Cannot print kernel; kernel should have been converted during code generation"
+
+ if symbol.is_state() or symbol.is_inline_expression:
+ return self._print(variable, symbol, with_origin=self.with_origin) + vector_param
+
+ return self._print(variable, symbol, with_origin=self.with_origin) + vector_param
+
+ def _print_delay_variable(self, variable: ASTVariable) -> str:
+ """
+ Converts a delay variable to SPINNAKER processable format
+ :param variable:
+ :return:
+ """
+ symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE)
+ if symbol and symbol.is_state() and symbol.has_delay_parameter():
+ return "get_delayed_" + variable.get_name() + "()"
+
+ return ""
+
+ def _print_buffer_value(self, variable: ASTVariable) -> str:
+ """
+ Converts for a handed over symbol the corresponding name of the buffer to a SPINNAKER processable format.
+ :param variable: a single variable symbol.
+ :return: the corresponding representation as a string
+ """
+ variable_symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE)
+ if variable_symbol.is_spike_input_port():
+ var_name = variable_symbol.get_symbol_name().upper()
+ if variable.get_vector_parameter() is not None:
+ vector_parameter = ASTUtils.get_numeric_vector_size(variable)
+ var_name = var_name + "_" + str(vector_parameter)
+
+ return "input->inputs[" + var_name + "]"
+
+ if variable_symbol.is_continuous_input_port():
+ var_name = variable_symbol.get_symbol_name().upper()
+ if variable.get_vector_parameter() is not None:
+ vector_parameter = ASTUtils.get_numeric_vector_size(variable)
+ var_name = var_name + "_" + str(vector_parameter)
+
+ return "input->inputs[" + var_name + "]"
+
+ return variable_symbol.get_symbol_name() + '_grid_sum_'
+
+ def _print(self, variable: ASTVariable, symbol, with_origin: bool = True) -> str:
+ assert all([isinstance(s, str) for s in self._state_symbols])
+
+ variable_name = CppVariablePrinter._print_cpp_name(variable.get_complete_name())
+
+ if symbol.is_local():
+ return variable_name
+
+ if variable.is_delay_variable():
+ return self._print_delay_variable(variable)
+
+ if with_origin:
+ return SPINNAKER2CodeGeneratorUtils.print_symbol_origin(symbol, numerical_state_symbols=self._state_symbols) % variable_name
+
+ return variable_name
diff --git a/pynestml/codegeneration/printers/spinnaker2_python_simple_expression_printer.py b/pynestml/codegeneration/printers/spinnaker2_python_simple_expression_printer.py
new file mode 100644
index 000000000..3289c2df5
--- /dev/null
+++ b/pynestml/codegeneration/printers/spinnaker2_python_simple_expression_printer.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+#
+# spinnaker2_python_simple_expression_printer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+from spinn_front_end_common.interface.ds import DataType
+
+from pynestml.codegeneration.printers.simple_expression_printer import SimpleExpressionPrinter
+from pynestml.meta_model.ast_function_call import ASTFunctionCall
+from pynestml.meta_model.ast_node import ASTNode
+from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression
+from pynestml.meta_model.ast_variable import ASTVariable
+
+
+class SpinnakerPythonSimpleExpressionPrinter(SimpleExpressionPrinter):
+ r"""
+ Printer for ASTSimpleExpressions in Python syntax.
+ """
+
+ def print_simple_expression(self, node: ASTSimpleExpression) -> str:
+ if node.has_unit():
+ if self._variable_printer.print(node.get_variable()) in ["1", "1.", "1.0"]:
+ return str(node.get_numeric_literal())
+
+ return str(node.get_numeric_literal()) + " * " + \
+ self._variable_printer.print(node.get_variable())
+
+ if isinstance(node, ASTVariable):
+ return self._variable_printer.print(node.get_variable())
+
+ if node.is_numeric_literal():
+ return str(node.get_numeric_literal())
+
+ if node.is_inf_literal:
+ return str('math.inf')
+
+ if node.is_string():
+ return str(node.get_string())
+
+ if node.is_boolean_true:
+ # Spinnaker supports no bool datatype
+ return '1'
+
+ if node.is_boolean_false:
+ # Spinnaker supports no bool datatype
+ return '0'
+
+ if node.is_variable() or node.is_delay_variable():
+ return self._variable_printer.print(node.get_variable())
+
+ if node.is_function_call():
+ return self._function_call_printer.print_function_call(node.get_function_call())
+
+ raise Exception("Unknown node type: " + str(node))
+
+ def _print(self, node: ASTNode) -> str:
+ if isinstance(node, ASTVariable):
+ return self._variable_printer.print(node)
+
+ if isinstance(node, ASTFunctionCall):
+ return self._function_call_printer.print(node)
+
+ return self.print_simple_expression(node)
+
+ def print(self, node: ASTNode) -> str:
+ if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
+ return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self._print(node) + "))"
+
+ return self._print(node)
diff --git a/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py b/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py
index b9ba5dfc1..87ed79529 100644
--- a/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py
+++ b/pynestml/codegeneration/printers/spinnaker_c_variable_printer.py
@@ -26,7 +26,6 @@
from pynestml.codegeneration.spinnaker_code_generator_utils import SPINNAKERCodeGeneratorUtils
from pynestml.codegeneration.printers.cpp_variable_printer import CppVariablePrinter
from pynestml.codegeneration.printers.expression_printer import ExpressionPrinter
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
from pynestml.meta_model.ast_external_variable import ASTExternalVariable
from pynestml.meta_model.ast_variable import ASTVariable
from pynestml.symbols.predefined_units import PredefinedUnits
@@ -70,7 +69,7 @@ def print_variable(self, variable: ASTVariable) -> str:
if symbol is None:
# test if variable name can be resolved to a type
if PredefinedUnits.is_unit(variable.get_complete_name()):
- return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit()))
+ return str(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit())
code, message = Messages.get_could_not_resolve(variable.get_name())
Logger.log_message(log_level=LoggingLevel.ERROR, code=code, message=message,
@@ -83,7 +82,7 @@ def print_variable(self, variable: ASTVariable) -> str:
if symbol.is_buffer():
if isinstance(symbol.get_type_symbol(), UnitTypeSymbol):
- units_conversion_factor = NESTUnitConverter.get_factor(symbol.get_type_symbol().unit.unit)
+ units_conversion_factor = symbol.get_type_symbol().unit.unit
else:
units_conversion_factor = 1
s = ""
diff --git a/pynestml/codegeneration/printers/spinnaker_python_simple_expression_printer.py b/pynestml/codegeneration/printers/spinnaker_python_simple_expression_printer.py
index 3d2c99244..1b56abea3 100644
--- a/pynestml/codegeneration/printers/spinnaker_python_simple_expression_printer.py
+++ b/pynestml/codegeneration/printers/spinnaker_python_simple_expression_printer.py
@@ -79,7 +79,4 @@ def _print(self, node: ASTNode) -> str:
return self.print_simple_expression(node)
def print(self, node: ASTNode) -> str:
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self._print(node) + "))"
-
return self._print(node)
diff --git a/pynestml/codegeneration/printers/sympy_simple_expression_printer.py b/pynestml/codegeneration/printers/sympy_simple_expression_printer.py
index bf9c64bea..adeeb7a7a 100644
--- a/pynestml/codegeneration/printers/sympy_simple_expression_printer.py
+++ b/pynestml/codegeneration/printers/sympy_simple_expression_printer.py
@@ -19,7 +19,6 @@
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see .
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
from pynestml.codegeneration.printers.cpp_simple_expression_printer import CppSimpleExpressionPrinter
from pynestml.codegeneration.printers.simple_expression_printer import SimpleExpressionPrinter
from pynestml.meta_model.ast_function_call import ASTFunctionCall
@@ -45,7 +44,7 @@ def print_simple_expression(self, node: ASTSimpleExpression) -> str:
node.variable.get_complete_name(), SymbolKind.VARIABLE) is not None
if not node_is_variable_symbol and PredefinedUnits.is_unit(node.variable.get_complete_name()):
# case for a literal unit, e.g. "ms"
- return str(NESTUnitConverter.get_factor(PredefinedUnits.get_unit(node.variable.get_complete_name()).get_unit()))
+ return str(PredefinedUnits.get_unit(node.variable.get_complete_name()).get_unit())
if node.has_unit():
if self._variable_printer.print(node.get_variable()) in ["1", "1.", "1.0"]:
@@ -87,7 +86,4 @@ def _print(self, node: ASTNode) -> str:
return self.print_simple_expression(node)
def print(self, node: ASTNode) -> str:
- if node.get_implicit_conversion_factor() and not node.get_implicit_conversion_factor() == 1:
- return "(" + str(node.get_implicit_conversion_factor()) + " * (" + self._print(node) + "))"
-
return self._print(node)
diff --git a/pynestml/codegeneration/python_standalone_target_tools.py b/pynestml/codegeneration/python_standalone_target_tools.py
index 32b5ad8a5..22c179025 100644
--- a/pynestml/codegeneration/python_standalone_target_tools.py
+++ b/pynestml/codegeneration/python_standalone_target_tools.py
@@ -24,6 +24,8 @@
import os
import sys
import tempfile
+from pathlib import Path
+
from pynestml.frontend.frontend_configuration import FrontendConfiguration
from pynestml.frontend.pynestml_frontend import generate_python_standalone_target
@@ -36,6 +38,29 @@ class PythonStandaloneTargetTools:
r"""
Helper functions for the Python standalone target.
"""
+
+ @classmethod
+ def _dynamic_import(cls, target_path: str, module_name: str):
+ r"""
+ Dynamically imports a module from a given directory.
+ """
+
+ # Convert the path to an absolute path
+ target_path = Path(target_path).resolve()
+
+ # Add the target directory to sys.path if not already present
+ if str(target_path) not in sys.path:
+ sys.path.append(str(target_path))
+
+ # Import the module dynamically
+ try:
+ module = importlib.import_module(module_name)
+ Logger.log_message(None, -1,f"Successfully imported {module_name}", None, LoggingLevel.INFO)
+ return module # Return the imported module for use
+ except ModuleNotFoundError as e:
+ Logger.log_message(None, -1,f"Module not found Error: {e}", None, LoggingLevel.ERROR)
+ return None
+
@classmethod
def _get_model_parameters_and_state(cls, nestml_file_name: str):
suffix = ""
@@ -58,7 +83,7 @@ def _get_model_parameters_and_state(cls, nestml_file_name: str):
model_name = model.get_name()
py_module_name = os.path.basename(target_path) + "." + model_name
- module = importlib.import_module(py_module_name)
+        module = cls._dynamic_import(target_path, py_module_name)
neuron_name = "Neuron_" + model_name + "(1.0)" # 1.0 is a dummy value for the timestep
neuron = eval("module." + neuron_name)
diff --git a/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.c.jinja2
new file mode 100644
index 000000000..a39c6741b
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.c.jinja2
@@ -0,0 +1,298 @@
+#define USE_RECORD 1
+
+#include {{ '<' }}s2app.h{{ '>' }}
+#include {{ '<' }}data_specification.h{{ '>' }}
+#include {{ '<' }}comms.h{{ '>' }}
+#include {{ '<' }}synch_helper.h{{ '>' }}
+#include "common/neuron-typedefs.h"
+#include "regions.h"
+#include "{{ neuronName }}.h"
+#include "neuron.h"
+#include "population_table.h"
+#include "synapses.h"
+#include "param_defs.h"
+#include "spike_processing.h"
+#include "simulation.h"
+#include "global_params.h"
+
+uint32_t pe_id;
+uint32_t qpe_x;
+uint32_t qpe_y;
+
+static volatile uint32_t *status;
+static volatile uint32_t *data = (uint32_t *) {{ neuronName|upper }}_DATA_BASE;
+
+// dummy array to reserve memory for data specification
+uint32_t data_specification[16] __attribute__((section(".myDataSpecSection")));
+
+// dummy array to reserve memory for log_info
+uint32_t log_data[{{ neuronName|upper }}_DEBUG_SIZE/4 + 4] __attribute__((section(".myLogInfoSection")));
+
+// data specification pointer
+data_specification_metadata_t *ds_regions;
+
+// spike records for all timesteps
+uint32_t* spike_records_all_timesteps;
+
+// voltage records for all timesteps
+uint32_t* voltage_records_all_timesteps;
+
+// time done records for all timesteps
+uint32_t* time_done_records_all_timesteps;
+
+volatile struct global_params* global_params_ptr;
+extern global_neuron_params_t global_neuron_params __attribute__((aligned(0x10)));
+
+volatile population_table_info * pop_table_info;
+volatile master_population_table_entry* master_population_table;
+
+
+volatile uint32_t packet_buffer[PACKET_BUFFER_LENGTH+1] __attribute__((aligned(0x10)));
+uint32_t read_pos = 0;
+static volatile uint32_t finished = 0;
+
+simulation_config sim_config;
+
+volatile uint32_t run = 1;
+volatile uint32_t multi_run = 1;
+volatile uint32_t iteration = 0;
+
+// Start the time at "-1" so that the first tick will be 0
+uint32_t systicks = UINT32_MAX;
+
+// Pointer to routing table
+volatile routing_info* routing_info_ptr;
+
+void _multicast_packet_received_callback(uint32_t key) {
+ // log_info("spike from key: %d, processed in time step %d\n", key, systicks);
+
+ address_t row_address;
+ size_t n_bytes_to_transfer;
+ if (population_table_get_address(key, &row_address, &n_bytes_to_transfer)) {
+ //log_info("\tfound syn row at address 0x%x\n", row_address);
+
+ synaptic_row_t row = row_address;
+ synapses_process_synaptic_row(systicks, row);
+ } else {
+ //log_info("\tno pop table entry found \n");
+ }
+}
+
+void receive_spikes() {
+ uint32_t n_packets_received = comms[COMMS_DMA_0_N_WORDS];
+
+ for (uint32_t i = 0; i < n_packets_received/PACKET_IN_WORDS ; i++) {
+ _multicast_packet_received_callback(packet_buffer[read_pos]);
+
+ read_pos+=1;
+ if (read_pos > PACKET_BUFFER_LENGTH) {
+ read_pos = 0;
+ }
+ }
+
+ comms[COMMS_DMA_0_READ] = (uint32_t) &(packet_buffer[read_pos]);
+ comms[COMMS_DMA_0_CONFIG] = COMMS_DMA_CONFIG_MC_NO_PAYLOAD ;
+}
+
+void timer_callback(){
+
+ systicks++;
+ if (systicks>= sim_config.n_simulation_ticks) {
+ timer[TIMER1_CTL] = 0;
+ finished = 1;
+ run=0;
+ set_mask_feedthrough_irq(qpe_x, qpe_y, pe_id, 1+2);
+ return;
+ }
+ log_info("reached timer_callback()\n");
+ receive_spikes();
+ synapses_do_timestep_update(systicks);
+ neuron_do_timestep_update();
+    // profiling log disabled; keep the whole `if` commented out so the
+    // record_time_done block below is not captured as its dangling body
+    // if (global_params_ptr->profiling)
+    //     log_info("Profiling [%d]: %i, %i\n", systicks, t1, t4);
+
+ if (global_params_ptr->record_time_done == 1){
+ if (systicks < sim_config.n_simulation_ticks) {
+ time_done_records_all_timesteps[systicks] = timer[TIMER1_VALUE];
+ }
+ }
+}
+
+void log_prepare(){
+ status = &(data[{{ neuronName|upper }}_STATUS]);
+ *status = {{ neuronName|upper }}_STATUS_RUNNING;
+ log_init(
+ (uint32_t *) &(data[{{ neuronName|upper }}_DEBUG_START]), {{ neuronName|upper }}_DEBUG_SIZE);
+}
+
+void timer_init(){
+ timer[TIMER1_CTL] = 0;
+ timer[TIMER1_LOAD] = sim_config.timer_period; // TODO: decrease by 1?
+}
+
+void timer_start(){
+ timer[TIMER1_CTL] = 0xE2;
+ NVIC_SetPriority(Timer_Int1_IRQn, (1UL << __NVIC_PRIO_BITS) - 2UL);
+ NVIC_EnableIRQ(Timer_Int1_IRQn);
+}
+
+void comms_init()
+{
+ read_pos = 0;
+
+ comms_configure_rcv_mc_no_payload(comms, packet_buffer,
+ &(packet_buffer[PACKET_BUFFER_LENGTH]));
+}
+
+static inline bool initialise_common_regions(data_specification_metadata_t **ds_regions) {
+
+ // Get the address this core's DTCM data starts at from SRAM
+ //*ds_regions = data_specification_get_data_address();
+ *ds_regions = (data_specification_metadata_t*) {{ neuronName|upper }}_DATA_SPEC;
+
+ // Read the header
+ if (!data_specification_read_header(*ds_regions)) {
+ return false;
+ }
+
+ return true;
+}
+
+void print_routing_table() {
+ log_info("routing_info addr: %p\n", routing_info_ptr);
+ log_info("key_offset: %d\n", routing_info_ptr->key_offset);
+ log_info("n_routing_targets: %d\n", routing_info_ptr->n_routing_targets);
+
+ for (uint32_t i=0; i < routing_info_ptr->n_routing_targets; ++i) {
+ routing_target tgt = routing_info_ptr->routing_targets[i];
+ log_info("\tx=%d,y=%d,pes=%x\n", tgt.qpe_x, tgt.qpe_y, tgt.pes);
+ }
+}
+
+void initialise_routing_table(address_t region_address) {
+ routing_info_ptr = (routing_info *) region_address;
+ // print_routing_table();
+}
+
+
+void initialise_population_table(address_t region_address){
+ pop_table_info = (population_table_info *) region_address;
+
+ log_info("pop_table_info addr: 0x%x\n", &pop_table_info);
+ log_info("pop_table_info value: 0x%x\n", pop_table_info);
+ log_info("pop_table addr: 0x%x\n", region_address);
+ log_info("pop_table_info.address: 0x%x\n", pop_table_info->address);
+ log_info("pop_table_info.length: %d\n", pop_table_info->length);
+
+ population_table_initialise();
+ // print_population_table();
+}
+
+void initialise_simulation(address_t region_address){
+ simulation_config* sim_config_ptr = (simulation_config*) region_address;
+ sim_config.timer_period = sim_config_ptr->timer_period;
+ sim_config.n_simulation_ticks = sim_config_ptr->n_simulation_ticks;
+}
+
+void initialise_global_params(address_t region_address) {
+ global_params_ptr = (struct global_params*) region_address;
+ log_info("global params addr: 0x%x\n", region_address);
+ log_info("n_used_neurons: %d\n", global_params_ptr->n_used_neurons);
+ log_info("record_spikes: %d\n", global_params_ptr->record_spikes);
+ log_info("record_v: %d\n", global_params_ptr->record_v);
+ log_info("record_time_done: %d\n", global_params_ptr->record_time_done);
+ log_info("profiling: %d\n", global_params_ptr->profiling);
+ log_info("calc_step_raw: %d\n", f2ui(global_params_ptr->calc_step_raw));
+ log_info("weight_scaling_factor: %d\n", f2ui(global_params_ptr->weight_scaling_factor));
+ global_neuron_params.calc_step_raw = global_params_ptr->calc_step_raw;
+ global_neuron_params.weight_scaling_factor = global_params_ptr->weight_scaling_factor;
+
+}
+
+void reset_all() {
+ // disable FT IRQ 1 and enable FT IRQ 0
+ enable_mask_feedthrough_irq_0(qpe_x, qpe_y, pe_id);
+
+ // reset input buffer and neuron states
+ input_buffer_initialise();
+
+ neuron_initialise(); // reset neuron state variables
+
+ // reset pointers for spike and voltage recording
+ spike_records_all_timesteps = data_specification_get_region(NEURON_RECORDING_REGION, ds_regions);
+ voltage_records_all_timesteps = data_specification_get_region(VOLTAGE_RECORDING_REGION, ds_regions);
+ time_done_records_all_timesteps = data_specification_get_region(TIME_DONE_RECORDING_REGION, ds_regions);
+
+ // reset systick
+ systicks = UINT32_MAX;
+
+ // reset logging
+ log_prepare();
+
+ iteration++;
+ run = 1;
+
+ log_info("reset_all()\n");
+}
+
+int main() {
+
+ qpe_x = (getMyPEID () >> 5)&0x7;
+ qpe_y = (getMyPEID () >> 2)&0x7;
+ pe_id = (getMyPEID () )&0x3;
+
+ finished = 0;
+ srand();
+ log_prepare();
+ comms_init();
+ neuron_initialise();
+
+
+ initialise_common_regions(&ds_regions);
+
+ // routing table
+ address_t rt_region = data_specification_get_region(ROUTING_TABLE_REGION, ds_regions);
+ initialise_routing_table(rt_region);
+
+ // master population table
+ address_t mpt_region = data_specification_get_region(POPULATION_TABLE_REGION, ds_regions);
+ initialise_population_table(mpt_region);
+
+ // global params
+ initialise_global_params(
+ data_specification_get_region(GLOBAL_PARAMS_REGION, ds_regions));
+
+ // spike recording region
+ spike_records_all_timesteps = data_specification_get_region(NEURON_RECORDING_REGION, ds_regions);
+ //log_info("spike_record_addr: 0x%x\n", spike_records_all_timesteps);
+
+ // voltage recording region
+ voltage_records_all_timesteps = data_specification_get_region(VOLTAGE_RECORDING_REGION, ds_regions);
+ //log_info("v_record_addr: 0x%x\n", &voltage_records_all_timesteps);
+
+ // time done recording region
+ time_done_records_all_timesteps = data_specification_get_region(TIME_DONE_RECORDING_REGION, ds_regions);
+ //log_info("time_done_record_addr: 0x%x\n", &time_done_records_all_timesteps);
+
+ // debug
+ log_info("test\n");
+ print_neuron_params();
+
+ // timer config
+ initialise_simulation(
+ data_specification_get_region(SIMULATION_REGION, ds_regions));
+ timer_init();
+
+ // enable own feedthrough mask for IRQ 0 and 1
+ set_mask_feedthrough_irq(qpe_x, qpe_y, pe_id, 1+2);
+
+ NVIC_SetPriority (FT_INT_0_IRQn, (1UL << __NVIC_PRIO_BITS) - 2UL);
+ NVIC_EnableIRQ (FT_INT_0_IRQn);
+ NVIC_SetPriority (FT_INT_1_IRQn, (1UL << __NVIC_PRIO_BITS) - 2UL);
+ NVIC_EnableIRQ (FT_INT_1_IRQn);
+
+ while(run || multi_run){
+ __WFI();
+ }
+ return 0xcafebabe;
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.h.jinja2
new file mode 100644
index 000000000..c108389d2
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.h.jinja2
@@ -0,0 +1,24 @@
+#include {{ '<' }}stdint.h{{ '>' }}
+#include {{ '<' }}math.h{{ '>' }}
+#include {{ '<' }}float.h{{ '>' }}
+
+#include {{ '<' }}spinn2.h{{ '>' }}
+#include {{ '<' }}spinn_log.h{{ '>' }}
+
+#define {{ neuronName|upper }}_DATA_SPEC 0x00010200
+#define {{ neuronName|upper }}_DATA_BASE 0x0001b000
+
+#define {{ neuronName|upper }}_STATUS 0
+#define {{ neuronName|upper }}_DEBUG_START 3
+#define {{ neuronName|upper }}_DEBUG_SIZE (4000 * 4)
+
+#define {{ neuronName|upper }}_STATUS_RUNNING 0
+
+#define {{ neuronName|upper }}_N_SYNAPSES 2
+
+
+// enums data specification
+
+inline uint32_t {{ neuronName }}_mem_pos(uint32_t offset) {
+ return {{ neuronName|upper }}_DATA_BASE + (offset * 4);
+}
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.py.jinja2 b/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.py.jinja2
new file mode 100644
index 000000000..79f99344c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/@NEURON_NAME@.py.jinja2
@@ -0,0 +1,487 @@
+#
+# {{neuronName}}.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+#
+# Generated from NESTML {{ nestml_version }} at time: {{ now }}
+
+import numpy as np
+import math
+import re
+
+from spinnaker2.neuron_models.application import BaseApplication
+from spinnaker2.coordinates import ByteAddr, align_addr_to_next_multiple_of_other
+from spinnaker2.configuration import MemoryRegion, PEConfig
+from spinnaker2.coordinates import ByteAddr
+from spinnaker2.mapper import SynapseWordSize, SynapseWordSpec
+from spinnaker2.neuron_models.application import BaseApplication
+from spinnaker2.neuron_models.common import (
+ N_WORDS_MPT_ENTRY,
+ add_log_memory_region,
+ collect_routing_targets,
+ format_routing_targets,
+)
+
+
+
+
+
+class {{neuronName}}Application(BaseApplication):
+    sw_spec = SynapseWordSpec(  # packing layout of one 16-bit synapse word
+        word_size=SynapseWordSize.SIZE_16,
+        weight=4,
+        delay=3,
+        synapse_type=1,
+        target=8,
+    )
+    profiling = False  # forwarded to the core via global_params in pe_config()
+
+    # here the default parameters from .nestml
+    # and the internal parameters with evaluated, numerical values have to be printed
+    default_parameters = {
+        {% set p = neuron.get_parameter_value_dict() | dictsort %}
+        {%- for name, value in p %}
+        "{{ name }}": {{ value | float }},
+        {%- endfor %}
+{#        {%- for parameter in neuron.get_parameter_value_dict() %}#}
+{#        "{{ parameter }}": {{ neuron.get_parameter_value_dict()[parameter] | float }},#}
+{#        {%- endfor %}#}
+        "calculation_timestep_in_s": 10.0,  # NOTE(review): 10.0 looks large for seconds — confirm unit
+        "weight_scaling_factor": 0.0,  # forwarded to the core via global_params
+    }
+
+    propagators = {  # propagator name -> symbolic expression (parameters substituted in pe_config)
+        {%- for name, value in propagators | dictsort %}
+        "{{ name }}": "{{ value }}",
+        {%- endfor %}
+    }
+
+    max_atoms_per_core = 48  # upper bound on neurons mapped to one PE
+    splittable = True
+    recordables = ["spikes", "v", "v_last", "time_done"]
+    # fixed addresses
+    neuron_params_addr = ByteAddr(0xE400)  # start of the neuron parameter block
+    data_spec_addr = ByteAddr(0x10200)  # matches *_DATA_SPEC in the generated C header
+    log_addr = ByteAddr(0x1B000)  # matches *_DATA_BASE in the C header; regions must end below this
+    data_spec_max_size = 16  # in words
+
+    def __init__(self):
+        app_name = "{{ neuronName }}"
+        BaseApplication.__init__(self, name=app_name)
+
+ def calculate_propagator_expressions(self, default_parameters, propagators, user_params):
+ """
+ Calculate propagator expressions with support for both single values and lists of values.
+
+ Args:
+ default_parameters: Dictionary of default parameter values
+ propagators: Dictionary of propagator expressions
+ pop_slice: Population slice with params attribute
+
+ Returns:
+ Dictionary of propagator expressions with parameters substituted
+ """
+ # Get user parameters
+ user_parameters = user_params
+ user_parameters_has_lists = any(isinstance(value, list) for value in user_parameters.values())
+ # Create updated parameters dictionary
+ updated_parameters = {}
+ for key in default_parameters:
+ if key in user_parameters:
+ if user_parameters_has_lists:
+ updated_parameters[key] = list(map(str, user_parameters[key]))
+ else:
+
+ updated_parameters[key] = str(user_parameters[key])
+ else:
+ updated_parameters[key] = str(default_parameters[key])
+
+ if not user_parameters_has_lists:
+ propagators_as_math_expressions = {}
+ for propagator_name in propagators:
+ expression = propagators[propagator_name]
+ for symbol, value in updated_parameters.items():
+ expression = expression.replace(symbol, str(value))
+ propagators_as_math_expressions[propagator_name] = expression
+ return updated_parameters|propagators_as_math_expressions
+ else:
+ # Handle lists of parameters
+ list_lengths = [len(value) for value in updated_parameters.values()
+ if isinstance(value, list)]
+
+ if not list_lengths:
+ return self.calculate_propagator_expressions(default_parameters, propagators, user_params)
+
+ if len(set(list_lengths)) > 1:
+ raise ValueError("All parameter lists must have the same length")
+
+ list_length = list_lengths[0]
+
+ # Create list of parameter sets
+ parameter_sets = []
+ for i in range(list_length):
+ param_set = {}
+ for key, value in updated_parameters.items():
+ if isinstance(value, list):
+ param_set[key] = value[i]
+ else:
+ param_set[key] = value
+ parameter_sets.append(param_set)
+
+ # Calculate expressions for each parameter set
+ all_expressions = []
+ for param_set in parameter_sets:
+ expressions = {}
+ for propagator_name in propagators:
+ expression = propagators[propagator_name]
+ for symbol, value in param_set.items():
+ expression = expression.replace(symbol, str(value))
+ expressions[propagator_name] = expression
+ all_expressions.append(expressions)
+
+ return [{**d1, **d2} for d1, d2 in zip(parameter_sets, all_expressions)]
+
+    def evaluate_propagator_expressions(self, propagators_as_math_expressions):
+        """
+        Evaluate propagator expressions with support for both single expressions and lists of expressions.
+
+        Args:
+            propagators_as_math_expressions: Dictionary of expressions or list of dictionaries
+
+        Returns:
+            Dictionary of evaluated expressions or list of dictionaries
+        """
+        # Define supported math functions and constants (whitelist for eval below)
+        safe_dict = {
+            # Basic math functions
+            'exp': math.exp,
+            'ln': math.log,  # NESTML's 'ln' is the natural logarithm
+            'log10': math.log10,
+            'pow': math.pow,
+            'sqrt': math.sqrt,
+            # Trigonometric functions
+            'sin': math.sin,
+            'cos': math.cos,
+            'tan': math.tan,
+            'asin': math.asin,
+            'acos': math.acos,
+            'atan': math.atan,
+            'atan2': math.atan2,
+            # Hyperbolic functions
+            'sinh': math.sinh,
+            'cosh': math.cosh,
+            'tanh': math.tanh,
+            # Math functions
+            'abs': abs,
+            'ceil': math.ceil,
+            'floor': math.floor,
+            'round': round,
+            'erf': math.erf,
+            'erfc': math.erfc,
+            # Constants
+            'e': math.e,
+            'pi': math.pi,
+            'inf': float('inf'),
+        }
+
+        if isinstance(propagators_as_math_expressions, list):  # one dict per neuron (list-valued params)
+            all_results = []
+            for expressions_dict in propagators_as_math_expressions:
+                results = {}
+                for key, expression in expressions_dict.items():
+                    results[key] = self._evaluate_single_expression(expression, safe_dict)
+                all_results.append(results)
+            return all_results
+        else:
+            name_value_dict = {}
+            for key, expression in propagators_as_math_expressions.items():
+                name_value_dict[key] = self._evaluate_single_expression(expression, safe_dict)
+            return name_value_dict
+
+ def _evaluate_single_expression(self, expression, safe_dict):
+ """
+ Helper method to evaluate a single expression safely.
+
+ Args:
+ expression: Math expression as string
+ safe_dict: Dictionary of allowed functions and constants
+
+ Returns:
+ Evaluated result
+ """
+ # Check if the expression contains only allowed characters and function names
+ allowed_pattern = r'^[\s\d\.\+\-\*\/\(\)\,\^\%]+$'
+ cleaned_expr = expression
+
+ # Remove all function names from the expression before checking the pattern
+ for func_name in safe_dict.keys():
+ cleaned_expr = cleaned_expr.replace(func_name, '')
+
+ if not re.match(allowed_pattern, cleaned_expr):
+ raise ValueError(f"Expression '{expression}' contains disallowed characters or functions")
+
+ try:
+ result = eval(expression, {"__builtins__": {}}, safe_dict)
+ return result
+ except Exception as e:
+ raise ValueError(f"Error evaluating expression '{expression}': {str(e)}")
+
+ def convert_calculated_propagators_to_raw_data(self, name_value_dict, pop_slice=None):
+ """
+ Coverts calculated propagator values to 32bit data, which can be sent over to SpiNNaker2
+
+ Args:
+ name_value_dict: Dictionary of propagator name and attached value
+
+ Returns:
+ List of 32bit values representing bits of attached propagator values
+ """
+ if isinstance(name_value_dict, list):
+ all_raw_data = []
+ for single_dict in name_value_dict:
+ del single_dict['calculation_timestep_in_s']
+ del single_dict['weight_scaling_factor']
+ values = list(single_dict.values())
+ float32_array = np.array(values, dtype=np.float32)
+ raw_data = np.frombuffer(float32_array.data, dtype=np.uint32)
+ all_raw_data.append(raw_data.tolist())
+ return [item for sublist in all_raw_data for item in sublist]
+ else:
+ n_neurons = pop_slice.pop.size
+ del name_value_dict['calculation_timestep_in_s']
+ del name_value_dict['weight_scaling_factor']
+ values = list(name_value_dict.values())
+ copy = values.copy()
+ values.extend(copy * (n_neurons - 1))
+ float32_array = np.array(values, dtype=np.float32)
+ raw_data = np.frombuffer(float32_array.data, dtype=np.uint32)
+ return raw_data.tolist()
+
+
+ def pe_config(self, pe, mapper, sim_cfg, debug=True):
+ """
+ return PE configuration for a given PE
+ """
+
+ config = PEConfig(pe, self.name, self.mem_file)
+ pop_slice = mapper.mapping.get_population_slice(pe)
+
+ if debug:
+ add_log_memory_region(config, self.log_addr, 4000)
+
+ neuron_params = pop_slice.pop.params
+ weight_scaling_factor = neuron_params.get("weight_scaling_factor", self.default_parameters["weight_scaling_factor"])
+ calculation_timestep_in_s = neuron_params.get("calculation_timestep_in_s", self.default_parameters["calculation_timestep_in_s"])
+ for propagator_expr in self.propagators:
+ if "__h" in self.propagators[propagator_expr]:
+ self.propagators[propagator_expr] = self.propagators[propagator_expr].replace("__h", "calculation_timestep_in_s")
+
+ #####################
+ # neuron parameters #
+ #####################
+
+ propagator_expressions = self.calculate_propagator_expressions(default_parameters=self.default_parameters, propagators=self.propagators, user_params=neuron_params)
+ evaluated_expression = self.evaluate_propagator_expressions(propagator_expressions)
+ neuron_params_raw = self.convert_calculated_propagators_to_raw_data(evaluated_expression, pop_slice)
+
+ ################
+ # routing info #
+ ################
+ target_cores = mapper.routing_targets.get(pe, set())
+ tgt_qpes_and_pes = collect_routing_targets(target_cores)
+ n_targets = len(tgt_qpes_and_pes)
+ routing_targets_raw = format_routing_targets(tgt_qpes_and_pes)
+ key_offset = mapper.key_offsets[pe]
+
+ rt_addr = ByteAddr(self.data_spec_addr + self.data_spec_max_size * 4)
+ routing_targets_addr = ByteAddr(rt_addr + 3 * 4) # start address of routing table
+ rt_data = [
+ key_offset,
+ n_targets,
+ routing_targets_addr,
+ ] + routing_targets_raw
+ config.add_mem_data_to_send(rt_addr.to_WordAddr(), rt_data)
+
+ ################
+ # timer config #
+ ################
+ timer_config_addr = align_addr_to_next_multiple_of_other(ByteAddr(rt_addr + len(rt_data) * 4), ByteAddr(0x10))
+ timer_config_addr = ByteAddr(timer_config_addr)
+ sim_config = [sim_cfg["timer_period"], sim_cfg["n_simulation_ticks"]]
+ config.add_mem_data_to_send(timer_config_addr.to_WordAddr(), sim_config)
+
+ #################
+ # global params #
+ #################
+ if calculation_timestep_in_s is not None:
+ calc_step_list = list([calculation_timestep_in_s])
+ calc_step_array = np.array(calc_step_list, dtype=np.float32)
+ calc_step_raw = np.frombuffer(calc_step_array.data, dtype=np.uint32).tolist()
+ weight_scaling_factor_list = list([weight_scaling_factor])
+ weight_scaling_factor_array = np.array(weight_scaling_factor_list, dtype=np.float32)
+ weight_scaling_factor_raw = np.frombuffer(weight_scaling_factor_array.data, dtype=np.uint32).tolist()
+ n_neurons = pop_slice.size()
+ record_spikes = "spikes" in pop_slice.pop.record
+ record_v_all = "v" in pop_slice.pop.record
+ record_v_last = "v_last" in pop_slice.pop.record
+ record_v = 1 if record_v_all else 2 if record_v_last else 0
+ time_done_flag = "time_done" in pop_slice.pop.record
+ global_params_raw = [
+ n_neurons,
+ int(record_spikes),
+ int(record_v),
+ int(self.profiling),
+ int(time_done_flag),
+ calc_step_raw[0],
+ weight_scaling_factor_raw[0]
+ ]
+
+ else:
+ n_neurons = pop_slice.size()
+ record_spikes = "spikes" in pop_slice.pop.record
+ record_v_all = "v" in pop_slice.pop.record
+ record_v_last = "v_last" in pop_slice.pop.record
+ record_v = 1 if record_v_all else 2 if record_v_last else 0
+ time_done_flag = "time_done" in pop_slice.pop.record
+ global_params_raw = [
+ n_neurons,
+ int(record_spikes),
+ int(record_v),
+ int(self.profiling),
+ int(time_done_flag),
+ ]
+ global_params_addr = align_addr_to_next_multiple_of_other(
+ ByteAddr(timer_config_addr + len(sim_config) * 4), ByteAddr(0x10)
+ )
+ global_params_addr = ByteAddr(global_params_addr)
+ config.add_mem_data_to_send(global_params_addr.to_WordAddr(), global_params_raw)
+
+ ############################################
+ # master population table and synapse rows #
+ ############################################
+
+ # Master population table info
+ mpt_info_addr = align_addr_to_next_multiple_of_other(
+ ByteAddr(global_params_addr + len(global_params_raw) * 4),
+ ByteAddr(0x10),
+ )
+ mpt_info_addr = ByteAddr(mpt_info_addr)
+ mpt_info_len = 2
+
+ # 1: estimate size of MPT
+ mpt_length = mapper.estimate_master_pop_table_length(pe)
+
+ mpt_addr = align_addr_to_next_multiple_of_other(ByteAddr(mpt_info_addr + mpt_info_len * 4), ByteAddr(0x10))
+ mpt_addr = ByteAddr(mpt_addr)
+ mpt_n_bytes = mpt_length * N_WORDS_MPT_ENTRY * 4
+
+ syn_row_addr = align_addr_to_next_multiple_of_other(ByteAddr(mpt_addr + mpt_n_bytes), ByteAddr(0x10))
+ syn_row_addr = ByteAddr(syn_row_addr)
+
+ all_syn_rows_raw, pop_table_raw = mapper.synapse_rows_and_master_pop_table(pe, self.sw_spec, syn_row_addr)
+ syn_row_addr_end = ByteAddr(syn_row_addr + len(all_syn_rows_raw) * 4)
+
+ # Master population table info
+ mpt_info_raw = [mpt_addr, mpt_length]
+ assert len(mpt_info_raw) == mpt_info_len # make sure that the addresses don't overlap
+ config.add_mem_data_to_send(mpt_info_addr.to_WordAddr(), mpt_info_raw)
+
+ ###################
+ # spike recording #
+ ###################
+ if record_spikes:
+ SPIKE_RECORD_LENGTH = (self.max_atoms_per_core + 31) // 32 + 2
+ timesteps_to_record = sim_cfg["n_simulation_ticks"]
+ spike_recording_total_words = SPIKE_RECORD_LENGTH * timesteps_to_record
+ spike_record_addr = align_addr_to_next_multiple_of_other(syn_row_addr_end, ByteAddr(0x10))
+ spike_record_addr = ByteAddr(spike_record_addr)
+ config.add_mem_region_to_read(
+ "spike_record",
+ MemoryRegion(spike_record_addr.to_WordAddr(), spike_recording_total_words),
+ )
+ spike_record_addr_end = ByteAddr(spike_record_addr + spike_recording_total_words * 4)
+ else:
+ spike_record_addr = syn_row_addr_end
+ spike_record_addr_end = syn_row_addr_end
+
+ #####################
+ # voltage recording #
+ #####################
+ if record_v_all or record_v_last:
+ timesteps_to_record = sim_cfg["n_simulation_ticks"] if record_v_all else 1
+ voltage_record_addr = align_addr_to_next_multiple_of_other(spike_record_addr_end, ByteAddr(0x10))
+ voltage_record_addr = ByteAddr(voltage_record_addr)
+ # for each timestep: 1 word header + n_neurons*voltages
+ voltage_recording_total_words = (1 + n_neurons) * timesteps_to_record
+ config.add_mem_region_to_read(
+ "voltage_record", MemoryRegion(voltage_record_addr.to_WordAddr(), voltage_recording_total_words)
+ )
+ voltage_record_addr_end = ByteAddr(voltage_record_addr + voltage_recording_total_words * 4)
+ else:
+ voltage_record_addr = spike_record_addr_end
+ voltage_record_addr_end = spike_record_addr_end
+
+ #######################
+ # time done recording #
+ #######################
+
+ if time_done_flag:
+ timesteps_to_record = sim_cfg["n_simulation_ticks"]
+ time_done_addr = align_addr_to_next_multiple_of_other(voltage_record_addr_end, ByteAddr(0x10))
+ time_done_addr = ByteAddr(time_done_addr)
+ config.add_mem_region_to_read(
+ "time_done_record",
+ MemoryRegion(time_done_addr.to_WordAddr(), timesteps_to_record),
+ )
+ time_done_addr_end = ByteAddr(time_done_addr + timesteps_to_record * 4)
+
+ else:
+ time_done_addr = voltage_record_addr
+ time_done_addr_end = voltage_record_addr_end
+
+ if time_done_addr_end > self.log_addr:
+ raise MemoryError(
+ f"synapse rows too large in population {pop_slice.pop.name}: "
+ f"{hex(time_done_addr_end)}, {hex(self.log_addr)} "
+ f"by {time_done_addr_end- self.log_addr} byte"
+ )
+
+ ######################
+ # data specification #
+ ######################
+ data_spec = [
+ 0xAD130AD6, # magic_number
+ 0x00010000, # version
+ rt_addr, # start of routing table
+ timer_config_addr, # start of timer config
+ global_params_addr, # start of global params
+ mpt_info_addr, # master population table info address
+ syn_row_addr, # start of synapse rows
+ self.neuron_params_addr, # start of neuron params
+ spike_record_addr, # start of spike records
+ voltage_record_addr, # start of voltage records
+ time_done_addr, # start of time_done records
+ self.log_addr, # start of log (dummy)
+ ]
+
+ config.add_mem_data_to_send(self.data_spec_addr.to_WordAddr(), data_spec)
+ config.add_mem_data_to_send(mpt_addr.to_WordAddr(), pop_table_raw)
+ config.add_mem_data_to_send(syn_row_addr.to_WordAddr(), all_syn_rows_raw)
+ config.add_mem_data_to_send(self.neuron_params_addr.to_WordAddr(), neuron_params_raw)
+
+ return config
diff --git a/pynestml/codegeneration/resources_spinnaker2/@SYNAPSE_NAME@.py.jinja2 b/pynestml/codegeneration/resources_spinnaker2/@SYNAPSE_NAME@.py.jinja2
new file mode 100644
index 000000000..516b26082
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/@SYNAPSE_NAME@.py.jinja2
@@ -0,0 +1,13 @@
+# TODO: This should be class of SynapseDynamicSTDP with default values set to generated timing and weight
+from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_stdp import SynapseDynamicsSTDP
+
+from python_models8.neuron.builds.{{synapseName}}_timing import MyTimingDependence
+from python_models8.neuron.builds.{{synapseName}}_weight import MyWeightDependence
+
+class {{synapseName}}_synapse_impl(SynapseDynamicsSTDP):
+    # NOTE(review): STDP constants below are hard-coded; presumably they should come from the generated NESTML synapse model — confirm
+    def __init__(self):
+        super().__init__(
+            timing_dependence=MyTimingDependence(my_potentiation_parameter=2., my_depression_parameter=0.1),
+            weight_dependence=MyWeightDependence(w_min=0., w_max=10., my_weight_parameter=0.5),
+        )
diff --git a/pynestml/codegeneration/resources_spinnaker2/Makefile.jinja2 b/pynestml/codegeneration/resources_spinnaker2/Makefile.jinja2
new file mode 100644
index 000000000..ed7b1baf8
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/Makefile.jinja2
@@ -0,0 +1,7 @@
+LINKERSCRIPT = qpe.ld
+
+LIB_S2L = $(PREFIX_LIB_DIR)/s2-lib
+LIBRARIES += $(LIB_S2L)
+LIBS += -ls2
+#include ../Makefile.default.with_s2lib
+include $(S2SIM2LABAPP_ROOT_PATH)/chip/app-pe/s2app/Makefile.default # has to be adjusted to point to default from any directory the generated code is placed in
diff --git a/pynestml/codegeneration/resources_spinnaker2/common/maths-util.h b/pynestml/codegeneration/resources_spinnaker2/common/maths-util.h
new file mode 100644
index 000000000..7eaa38e80
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/common/maths-util.h
@@ -0,0 +1,93 @@
+#ifndef _MATHS_UTIL_
+#define _MATHS_UTIL_
+
+#define FLOATING_POINT
+
+typedef unsigned int Card;
+
+#define START 0
+
+#ifdef FLOATING_POINT
+
+#include <math.h>
+
+typedef float REAL;
+typedef float UREAL;
+typedef float FRACT;
+typedef float UFRACT;
+#define REAL_CONST(x) x
+#define UREAL_CONST(x) x
+#define FRACT_CONST(x) x
+#define UFRACT_CONST(x) x
+
+
+#define ONE 1.00000000000000000
+#define HALF 0.50000000000000000
+#define ZERO 0.00000000000000000
+
+#define POW( x, p ) pow( (x), (p) )
+
+#define SQRT( x ) sqrt( x )
+#define EXP( x ) exp( x )
+#define LN( x ) log( x )
+#define ABS( x ) fabs(x)
+
+
+#define MAX( x, y ) MAX_HR( (x), (y) )
+#define SIGN( x, y ) ( (macro_arg_1=(y)) >= ZERO ? ABS( x ) : -ABS( x ) )  // NOTE(review): relies on a 'macro_arg_1' lvalue declared at the use site — confirm
+
+#define ACS_DBL_TINY 1.0e-300  // NOTE(review): below FLT_MIN; flushes to 0 when stored in REAL (float) — confirm intended
+
+#else
+
+#include <stdfix.h>
+#define REAL_CONST(x) x##k
+#define UREAL_CONST(x) x##uk
+#define FRACT_CONST(x) x##lr
+#define UFRACT_CONST(x) x##ulr
+
+#define ONE REAL_CONST(1.0000)
+#define HALF REAL_CONST(0.5000)
+#define ZERO REAL_CONST(0.0000)
+#define ACS_DBL_TINY REAL_CONST(0.000001)
+
+#define ABS( x ) absfx( x )
+
+#define SIGN( x, y ) ( (macro_arg_1=(y)) >= ZERO ? ABS( x ) : -ABS( x ) )
+
+#endif
+
+#ifdef FLOATING_POINT
+
+#define REAL_COMPARE( x, op, y ) ( (x) op (y) )
+#define REAL_TWICE( x ) ((x) * 2.00000 )
+#define REAL_HALF( x ) ((x) * 0.50000 )
+
+#else
+
+#define REAL_COMPARE( x, op, y ) ( bitsk( (x) ) op bitsk( (y) ) )
+#define REAL_TWICE( x ) ((x) * 2.000000k )
+#define REAL_HALF( x ) ((x) * 0.500000k )
+
+#endif
+
+#define MIN_HR(a, b) ({\
+    __typeof__(a) _a = (a); \
+    __typeof__(b) _b = (b); \
+    _a <= _b? _a : _b;})
+
+#define MAX_HR(a, b) ({\
+    __typeof__(a) _a = (a); \
+    __typeof__(b) _b = (b); \
+    _a > _b? _a : _b;})
+
+#define SQR(a) ({\
+    __typeof__(a) _a = (a); \
+    _a == ZERO? ZERO: _a * _a;})
+
+#define CUBE(a) ({\
+    __typeof__(a) _a = (a); \
+    _a == ZERO? ZERO: _a * _a * _a;})
+
+#endif // _MATHS_UTIL_
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/common/maths-util.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/common/maths-util.h.jinja2
new file mode 100644
index 000000000..cfa26ef2c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/common/maths-util.h.jinja2
@@ -0,0 +1,93 @@
+#ifndef _MATHS_UTIL_
+#define _MATHS_UTIL_
+
+#define FLOATING_POINT
+
+typedef unsigned int Card;
+
+#define START 0
+
+#ifdef FLOATING_POINT
+
+#include {{ '<' }}math.h{{ '>' }}
+
+typedef float REAL;
+typedef float UREAL;
+typedef float FRACT;
+typedef float UFRACT;
+#define REAL_CONST(x) x
+#define UREAL_CONST(x) x
+#define FRACT_CONST(x) x
+#define UFRACT_CONST(x) x
+
+
+#define ONE 1.00000000000000000
+#define HALF 0.50000000000000000
+#define ZERO 0.00000000000000000
+
+#define POW( x, p ) pow( (x), (p) )
+
+#define SQRT( x ) sqrt( x )
+#define EXP( x ) exp( x )
+#define LN( x ) log( x )
+#define ABS( x ) fabs(x)
+
+
+#define MAX( x, y ) MAX_HR( (x), (y) )
+#define SIGN( x, y ) ( (macro_arg_1=(y)) >= ZERO ? ABS( x ) : -ABS( x ) )  // NOTE(review): relies on a 'macro_arg_1' lvalue declared at the use site — confirm
+
+#define ACS_DBL_TINY 1.0e-300  // NOTE(review): below FLT_MIN; flushes to 0 when stored in REAL (float) — confirm intended
+
+#else
+
+#include {{ '<' }}stdfix.h{{ '>' }}
+#define REAL_CONST(x) x##k
+#define UREAL_CONST(x) x##uk
+#define FRACT_CONST(x) x##lr
+#define UFRACT_CONST(x) x##ulr
+
+#define ONE REAL_CONST(1.0000)
+#define HALF REAL_CONST(0.5000)
+#define ZERO REAL_CONST(0.0000)
+#define ACS_DBL_TINY REAL_CONST(0.000001)
+
+#define ABS( x ) absfx( x )
+
+#define SIGN( x, y ) ( (macro_arg_1=(y)) >= ZERO ? ABS( x ) : -ABS( x ) )
+
+#endif
+
+#ifdef FLOATING_POINT
+
+#define REAL_COMPARE( x, op, y ) ( (x) op (y) )
+#define REAL_TWICE( x ) ((x) * 2.00000 )
+#define REAL_HALF( x ) ((x) * 0.50000 )
+
+#else
+
+#define REAL_COMPARE( x, op, y ) ( bitsk( (x) ) op bitsk( (y) ) )
+#define REAL_TWICE( x ) ((x) * 2.000000k )
+#define REAL_HALF( x ) ((x) * 0.500000k )
+
+#endif
+
+#define MIN_HR(a, b) ({\
+    __typeof__(a) _a = (a); \
+    __typeof__(b) _b = (b); \
+    _a <= _b? _a : _b;})
+
+#define MAX_HR(a, b) ({\
+    __typeof__(a) _a = (a); \
+    __typeof__(b) _b = (b); \
+    _a > _b? _a : _b;})
+
+#define SQR(a) ({\
+    __typeof__(a) _a = (a); \
+    _a == ZERO? ZERO: _a * _a;})
+
+#define CUBE(a) ({\
+    __typeof__(a) _a = (a); \
+    _a == ZERO? ZERO: _a * _a * _a;})
+
+#endif // _MATHS_UTIL_
+
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/common/neuron-typedefs.h b/pynestml/codegeneration/resources_spinnaker2/common/neuron-typedefs.h
new file mode 100644
index 000000000..88c7f20c5
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/common/neuron-typedefs.h
@@ -0,0 +1,53 @@
+#ifndef __NEURON_TYPEDEFS_H__
+#define __NEURON_TYPEDEFS_H__
+
+#include <common-typedefs.h>
+#include "maths-util.h"
+
+#ifndef __SPIKE_T__
+
+typedef uint32_t payload_t;
+
+#ifdef SPIKES_WITH_PAYLOADS
+
+typedef uint64_t spike_t;
+
+static inline payload_t spike_payload (spike_t s) {
+    return ((payload_t)(s & UINT32_MAX));
+}
+
+#else
+
+typedef uint32_t spike_t;
+
+
+static inline payload_t spike_payload(spike_t s) {
+    use(s);  // use() presumably defined in common-typedefs.h; marks 's' deliberately unused — confirm
+    return (0);
+}
+#endif
+#endif
+
+typedef address_t synaptic_row_t;
+
+typedef REAL input_t;
+
+typedef struct input_struct_t{
+    input_t exc;
+    input_t inh;
+} input_struct_t;
+
+typedef struct timed_input_t {
+    uint32_t time;
+    input_struct_t inputs[];
+} timed_input_t;
+
+typedef float state_t;
+
+typedef struct timed_state_t {
+    uint32_t time;
+    state_t states[];
+} timed_state_t;
+
+
+#endif /* __NEURON_TYPEDEFS_H__ */
diff --git a/pynestml/codegeneration/resources_spinnaker2/common/neuron-typedefs.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/common/neuron-typedefs.h.jinja2
new file mode 100644
index 000000000..2daa6d2b3
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/common/neuron-typedefs.h.jinja2
@@ -0,0 +1,53 @@
+#ifndef __NEURON_TYPEDEFS_H__
+#define __NEURON_TYPEDEFS_H__
+
+#include {{ '<' }}common-typedefs.h{{ '>' }}
+#include "maths-util.h"
+
+#ifndef __SPIKE_T__
+
+typedef uint32_t payload_t;
+
+#ifdef SPIKES_WITH_PAYLOADS
+
+typedef uint64_t spike_t;
+
+static inline payload_t spike_payload (spike_t s) {
+ return ((payload_t)(s & UINT32_MAX));
+}
+
+#else
+
+typedef uint32_t spike_t;
+
+
+static inline payload_t spike_payload(spike_t s) {
+ use(s);
+ return (0);
+}
+#endif
+#endif
+
+typedef address_t synaptic_row_t;
+
+typedef REAL input_t;
+
+typedef struct input_struct_t{
+ input_t exc;
+ input_t inh;
+} input_struct_t;
+
+typedef struct timed_input_t {
+ uint32_t time;
+ input_struct_t inputs[];
+} timed_input_t;
+
+typedef float state_t;
+
+typedef struct timed_state_t {
+ uint32_t time;
+ state_t states[];
+} timed_state_t;
+
+
+#endif /* __NEURON_TYPEDEFS_H__ */
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/decay.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/decay.h.jinja2
new file mode 100644
index 000000000..e010a0337
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/decay.h.jinja2
@@ -0,0 +1,25 @@
+
+#ifndef _DECAY_H_
+#define _DECAY_H_
+
+#include "common/maths-util.h"
+#include "common/neuron-typedefs.h"
+
+typedef UFRACT decay_t;
+#define decay(x,d) \
+ ({ \
+ __typeof__ (x) tmp = (x); \
+ if (__builtin_types_compatible_p (__typeof__(x), s1615)) \
+ tmp = decay_s1615 (x,d); \
+ else if (__builtin_types_compatible_p (__typeof__(x), u1616)) \
+ tmp = decay_u1616 (x,d); \
+ else if (__builtin_types_compatible_p (__typeof__(x), s015)) \
+ tmp = decay_s015 (x,d); \
+ else if (__builtin_types_compatible_p (__typeof__(x), u016)) \
+ tmp = decay_u016 (x,d); \
+ else \
+ abort (1); \
+ tmp; \
+})
+
+#endif // _DECAY_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AnalyticIntegrationStep_begin.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AnalyticIntegrationStep_begin.jinja2
new file mode 100644
index 000000000..63ace4f1a
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AnalyticIntegrationStep_begin.jinja2
@@ -0,0 +1,16 @@
+{#
+ Generates a series of C++ statements which perform one integration step of all ODEs that are solved by the analytic integrator.
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if uses_analytic_solver %}
+{%- for variable_name in analytic_state_variables_ %}
+{%- set update_expr = update_expressions[variable_name] %}
+{%- set var_ast = utils.get_variable_by_name(astnode, variable_name)%}
+{%- set var_symbol = var_ast.get_scope().resolve_to_symbol(variable_name, SymbolKind.VARIABLE)%}
+{%- if use_gap_junctions %}
+const {{ type_symbol_printer.print(var_symbol.type_symbol) }} {{variable_name}}__tmp = {{ printer.print(update_expr) | replace("B_." + gap_junction_port + "_grid_sum_", "(B_." + gap_junction_port + "_grid_sum_ + __I_gap)") }};
+{%- else %}
+const {{ type_symbol_printer.print(var_symbol.type_symbol) }} {{variable_name}}__tmp = {{ printer.print(update_expr) }};
+{%- endif %}
+{%- endfor %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AnalyticIntegrationStep_end.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AnalyticIntegrationStep_end.jinja2
new file mode 100644
index 000000000..89c524dba
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AnalyticIntegrationStep_end.jinja2
@@ -0,0 +1,11 @@
+{#
+ Generates a series of C++ statements which perform one integration step of all ODEs that are solved by the analytic integrator.
+#}
+/* replace analytically solvable variables with precisely integrated values */
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if uses_analytic_solver %}
+{%- for variable_name in analytic_state_variables_: %}
+{%- set variable_symbol = variable_symbols[variable_name] %}
+{{ printer.print(utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name())) }} = {{ variable_name }}__tmp;
+{%- endfor %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ApplySpikesFromBuffers.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ApplySpikesFromBuffers.jinja2
new file mode 100644
index 000000000..881257451
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ApplySpikesFromBuffers.jinja2
@@ -0,0 +1,6 @@
+{% if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- for spike_updates_for_port in spike_updates.values() %}
+{%- for ast in spike_updates_for_port -%}
+{%- include "directives_cpp/Assignment.jinja2" %}
+{%- endfor %}
+{%- endfor %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AssignTmpDictionaryValue.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AssignTmpDictionaryValue.jinja2
new file mode 100644
index 000000000..584404bb3
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/AssignTmpDictionaryValue.jinja2
@@ -0,0 +1,10 @@
+{#
+ Assigns a tmp value which was read from the dictionary to the corresponding block variable.
+
+ @param variable VariableSymbol
+ @result C++ Block
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if not variable_symbol.is_inline_expression %}
+set_{{ printer_no_origin.print(variable) }}(tmp_{{ printer_no_origin.print(variable) }});
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Assignment.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Assignment.jinja2
new file mode 100644
index 000000000..8cd4f5bd7
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Assignment.jinja2
@@ -0,0 +1,7 @@
+{#
+ Generates C++ declaration
+ @grammar: Assignment = variableName:QualifiedName "=" Expr;
+ @param ast ASTAssignment
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{{ printer.print(ast.get_variable()) }} {{ assignments.print_assignments_operation(ast) }} {{ printer.print(ast.get_expression()) }};
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferDeclaration.jinja2
new file mode 100644
index 000000000..15f94ecad
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferDeclaration.jinja2
@@ -0,0 +1,9 @@
+{%- macro BufferDeclaration(node) -%}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if node.has_vector_parameter() %}
+std::vector< {{ type_symbol_printer.print(node.get_type_symbol()) }} >
+{%- else %}
+{{ type_symbol_printer.print(node.get_type_symbol()) }}
+{%- endif %}
+ {{ node.get_symbol_name() }}; //!< Buffer for input (type: {{ node.get_type_symbol().get_symbol_name() }})
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferDeclarationValue.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferDeclarationValue.jinja2
new file mode 100644
index 000000000..e2fdbad16
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferDeclarationValue.jinja2
@@ -0,0 +1,9 @@
+{%- macro BufferDeclarationValue(node) -%}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if node.has_vector_parameter() %}
+std::vector {{ node.name }}_grid_sum_;
+std::vector {{ node.name }}_spike_input_received_grid_sum_;
+{%- else %}
+double {{ node.name }}_grid_sum_;
+{%- endif %}
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferInitialization.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferInitialization.jinja2
new file mode 100644
index 000000000..506d060d2
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/BufferInitialization.jinja2
@@ -0,0 +1,5 @@
+{%- macro BufferInitialization(node) -%}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+get_{{ node.get_symbol_name() }}().clear();
+B_.{{ node.get_symbol_name() }}_grid_sum_ = 0;
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryMemberInitialization.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryMemberInitialization.jinja2
new file mode 100644
index 000000000..7c544f5eb
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryMemberInitialization.jinja2
@@ -0,0 +1,18 @@
+{#
+ In general case creates an
+ @param variable VariableSymbol Variable for which the initialization should be done
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif -%}
+{%- if variable_symbol.has_declaring_expression() and not variable_symbol.is_kernel() %}
+{%- if variable_symbol.has_vector_parameter() %}
+this->{{ printer_no_origin.print(variable) }}.resize(P_.{{ variable.get_vector_parameter() }}, {{ printer.print_expression(variable_symbol.get_declaring_expression()) }}); // as {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- else %}
+this->{{ printer_no_origin.print(variable) }} = {{ printer.print_expression(variable_symbol.get_declaring_expression()) }}; // as {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- endif %}
+{%- else %}
+{%- if variable_symbol.has_vector_parameter() %}
+this->{{ printer_no_origin.print(variable) }}.resize(0); // as {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- else %}
+this->{{ printer_no_origin.print(variable) }} = 0; // as {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- endif %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryReader.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryReader.jinja2
new file mode 100644
index 000000000..2dbb9af03
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryReader.jinja2
@@ -0,0 +1,10 @@
+{#
+ In general case creates an
+ @param variable VariableSymbol Variable for which the initialization should be done
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if variable_symbol.has_vector_parameter() %}
+{{ raise('Vector parameters not supported in common properties dictionary.') }}
+{%- endif %}
+updateValue< {{ declarations.print_variable_type(variable_symbol) }} >(d, nest::{{ synapseName }}_names::_{{ printer_no_origin.print(variable) }}, this->{{ printer_no_origin.print(variable) }} );
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryWriter.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryWriter.jinja2
new file mode 100644
index 000000000..9791b58d7
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CommonPropertiesDictionaryWriter.jinja2
@@ -0,0 +1,9 @@
+{#
+ In general case creates an
+ @param variable VariableSymbol Variable for which the initialization should be done
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if variable_symbol.has_vector_parameter() %}
+{{ raise('Vector parameters not supported in common properties dictionary.') }}
+{%- endif %}
+def< {{ declarations.print_variable_type(variable_symbol) }} >(d, nest::{{ synapseName }}_names::_{{ printer_no_origin.print(variable) }}, this->{{ printer_no_origin.print(variable) }} );
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CompoundStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CompoundStatement.jinja2
new file mode 100644
index 000000000..15d91ee64
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/CompoundStatement.jinja2
@@ -0,0 +1,18 @@
+{#
+ Handles the compound statement.
+ @grammar: Compound_Stmt = IF_Stmt | FOR_Stmt | WHILE_Stmt;
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if stmt.is_if_stmt() %}
+{%- with ast = stmt.get_if_stmt() %}
+{%- include "directives_cpp/IfStatement.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_for_stmt() %}
+{%- with ast = stmt.get_for_stmt() %}
+{%- include "directives_cpp/ForStatement.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_while_stmt() %}
+{%- with ast = stmt.get_while_stmt() %}
+{%- include "directives_cpp/WhileStatement.jinja2" %}
+{%- endwith %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ContinuousInputBufferGetter.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ContinuousInputBufferGetter.jinja2
new file mode 100644
index 000000000..501bb92ab
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ContinuousInputBufferGetter.jinja2
@@ -0,0 +1,10 @@
+{%- macro ContinuousInputBufferGetter(node, is_in_struct) -%}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+inline {% if node.has_vector_parameter() %}std::vector< {{ type_symbol_printer.print(node.get_type_symbol()) }} >&{%- else %}{{ type_symbol_printer.print(node.get_type_symbol()) }}&{%- endif %} get_{{ node.get_symbol_name() }}() {
+{%- if is_in_struct %}
+ return {{ node.get_symbol_name() }};
+{%- else %}
+ return B_.get_{{ node.get_symbol_name() }}();
+{%- endif %}
+}
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Declaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Declaration.jinja2
new file mode 100644
index 000000000..3a732eb17
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Declaration.jinja2
@@ -0,0 +1,21 @@
+{#
+ Generates C++ declaration
+ @param ast ASTDeclaration
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- for variable in declarations.get_variables(ast) %}
+{%- if ast.has_size_parameter() %}
+{{declarations.print_variable_type(variable)}} {{variable.get_symbol_name()}}(P_.{{declarations.print_size_parameter(ast)}});
+{%- if ast.has_expression() %}
+for (long i=0; i < get_{{declarations.print_size_parameter(ast)}}(); i++) {
+ {{variable.get_symbol_name()}}[i] = {{printer.print(ast.getExpr())}};
+}
+{%- endif %}
+{%- else %}
+{%- if ast.has_expression() %}
+{{declarations.print_variable_type(variable)}} {{variable.get_symbol_name()}} = {{printer.print(ast.get_expression())}};
+{%- else %}
+{{declarations.print_variable_type(variable)}} {{variable.get_symbol_name()}};
+{%- endif %}
+{%- endif %}
+{%- endfor -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DelayVariablesDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DelayVariablesDeclaration.jinja2
new file mode 100644
index 000000000..19e6184a4
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DelayVariablesDeclaration.jinja2
@@ -0,0 +1,8 @@
+{#
+ Generates C++ declaration of vector for variables with delay
+ @param variable VariableSymbol
+ @result C++ declaration
+#}
+ size_t delay_{{variable.get_symbol_name()}}_steps;
+ std::vector< double > delayed_{{variable.get_symbol_name()}};
+ size_t delayed_{{variable.get_symbol_name()}}_idx;
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DelayVariablesInitialization.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DelayVariablesInitialization.jinja2
new file mode 100644
index 000000000..1454f1e28
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DelayVariablesInitialization.jinja2
@@ -0,0 +1,8 @@
+{#
+ Generates C++ initialization of variables related to delay variables
+ @param variable VariableSymbol
+ @result C++ declaration
+#}
+ DV_.delayed_{{ variable.get_symbol_name() }}_idx = 0;
+ DV_.delay_{{ variable.get_symbol_name() }}_steps = nest::Time::delay_ms_to_steps( {{ declarations.print_delay_parameter(variable) }} ) + 1;
+ DV_.delayed_{{ variable.get_symbol_name() }}.resize( DV_.delay_{{ variable.get_symbol_name() }}_steps );
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DynamicStateElement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DynamicStateElement.jinja2
new file mode 100644
index 000000000..d7ad88053
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/DynamicStateElement.jinja2
@@ -0,0 +1,48 @@
+{#
+ Generates get_state_element function to get elements state variables based on how they are inserted into the DynamicRecordablesMap
+ @param neuron ASTModel: the neuron model
+-#}
+{%- import 'directives_cpp/VectorSizeParameter.jinja2' as vector_size_parameter with context %}
+inline double get_state_element(size_t elem)
+ {
+{%- set len = recordable_state_variables | length %}
+{%- for variable in recordable_state_variables %}
+{%- if loop.index == 1 %}
+ if
+{%- elif loop.index == len %}
+ else
+{%- else %}
+ else if
+{%- endif %}
+
+{%- if len == 1 or loop.index < len %}
+{%- if variable.has_vector_parameter() %}
+{%- set size = variable.get_vector_parameter() %}
+{%- if size|int == 0 %}
+{%- set size = vector_size_parameter.VectorSizeParameter(variable, true) %}
+{%- endif -%}
+ (elem >= State_::{{ printer_no_origin.print(variable).upper() }} and elem < State_::{{ printer_no_origin.print(variable).upper() }} + {{ size }})
+ {
+ return S_.{{ printer_no_origin.print(variable) }}[ elem - State_::{{ printer_no_origin.print(variable).upper() }} ];
+ }
+{%- else %}
+ (elem == State_::{{ printer_no_origin.print(utils.get_state_variable_by_name(astnode, variable.get_complete_name())).upper() }})
+ {
+ return {{ printer.print(utils.get_state_variable_by_name(astnode, variable.get_complete_name())) }};
+ }
+{%- endif %}
+{%- else %}
+{%- if variable.has_vector_parameter() %}
+{%- set variable_symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE) %}
+
+ {
+ return {{ nest_codegen_utils.print_symbol_origin(variable_symbol, variable) % printer_no_origin.print(variable) }}[ elem - State_::{{ printer_no_origin.print(variable).upper() }} ];
+ }
+{%- else %}
+ {
+ return {{ printer.print(variable) }};
+ }
+{%- endif %}
+{%- endif %}
+{%- endfor %}
+ }
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ForStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ForStatement.jinja2
new file mode 100644
index 000000000..53fd412a5
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ForStatement.jinja2
@@ -0,0 +1,22 @@
+{#
+ Generates C++ statements that implement for loop
+ @param ast ASTForStmt
+#}
+{%- if tracing %}/* generated by {{ self._TemplateReference__context.name }} */ {% endif %}
+for ( {{ ast.get_variable() }} = {{ printer.print(ast.get_start_from()) }};
+ {{ ast.get_variable() }}
+{%- if ast.get_step() < 0 -%}
+>
+{%- elif ast.get_step() > 0 -%}
+<
+{%- else -%}
+!=
+{%- endif -%} {{ printer.print(ast.get_end_at()) }};
+ {{ ast.get_variable() }} += {{ ast.get_step() }} )
+{
+{%- filter indent(2) %}
+{%- with ast = ast.get_stmts_body() %}
+{%- include "directives_cpp/StmtsBody.jinja2" %}
+{%- endwith %}
+{%- endfilter %}
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/FunctionCall.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/FunctionCall.jinja2
new file mode 100644
index 000000000..1e9bb2947
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/FunctionCall.jinja2
@@ -0,0 +1,13 @@
+{#
+ Generates C++ function call
+ @param ast ASTFunctionCall
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if ast.get_name() == PredefinedFunctions.INTEGRATE_ODES %}
+{%- include "directives_cpp/PredefinedFunction_integrate_odes.jinja2" %}
+{%- elif ast.get_name() == PredefinedFunctions.EMIT_SPIKE %}
+ {{ printer.print(ast) }};
+{%- else %}
+{# call to a non-predefined function #}
+{{ printer.print(ast) }};
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/FunctionDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/FunctionDeclaration.jinja2
new file mode 100644
index 000000000..13c8c5ab1
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/FunctionDeclaration.jinja2
@@ -0,0 +1,20 @@
+{%- macro FunctionDeclaration(ast_function, namespace_prefix) -%}
+{%- with function_symbol = ast_function.get_scope().resolve_to_symbol(ast_function.get_name(), SymbolKind.FUNCTION) -%}
+{%- if function_symbol is none -%}
+{{ raise('Cannot resolve the method ' + ast_function.get_name()) }}
+{%- endif %}
+{{ ast_function.print_comment('//') }}
+{{ type_symbol_printer.print(function_symbol.get_return_type()) | replace('.', '::') }} {{ namespace_prefix }}{{ ast_function.get_name() }} (
+{%- for param in ast_function.get_parameters() %}
+{%- with typeSym = param.get_data_type().get_type_symbol() -%}
+{%- filter indent(1, True) -%}
+{{ type_symbol_printer.print(typeSym) }} {{ param.get_name() }}
+{%- if not loop.last -%}
+,
+{%- endif -%}
+{%- endfilter -%}
+{%- endwith -%}
+{%- endfor -%}
+) const
+{%- endwith -%}
+{%- endmacro -%}
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/GSLDifferentiationFunction.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/GSLDifferentiationFunction.jinja2
new file mode 100644
index 000000000..e2495a676
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/GSLDifferentiationFunction.jinja2
@@ -0,0 +1,73 @@
+{#
+ Creates GSL implementation of the differentiation step for the system of ODEs.
+-#}
+extern "C" inline int {{neuronName}}_dynamics{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %}(double __time, const double ode_state[], double f[], void* pnode)
+{
+ typedef {{neuronName}}::State_ State_;
+ // get access to node so we can almost work as in a member function
+ assert( pnode );
+ const {{neuronName}}& node = *( reinterpret_cast< {{neuronName}}* >( pnode ) );
+
+ // ode_state[] here is---and must be---the state vector supplied by the integrator,
+ // not the state vector in the node, node.S_.ode_state[].
+
+{%- for eq_block in neuron.get_equations_blocks() %}
+{%- for ode in eq_block.get_declarations() %}
+{%- for inline_expr in utils.get_inline_expression_symbols(ode) %}
+{%- if not inline_expr.is_equation() %}
+{%- set declaring_expr = inline_expr.get_declaring_expression() %}
+ double {{ printer.print(utils.get_state_variable_by_name(astnode, inline_expr)) }} = {{ gsl_printer.print(declaring_expr) }};
+{%- endif %}
+{%- endfor %}
+{%- endfor %}
+{%- endfor %}
+
+{%- if use_gap_junctions %}
+ // set I_gap depending on interpolation order
+ double __I_gap = 0.0;
+
+ const double __t_gap = node.gap_junction_step / nest::Time::get_resolution().get_ms();
+
+ switch ( nest::kernel().simulation_manager.get_wfr_interpolation_order() )
+ {
+ case 0:
+ __I_gap = -node.B_.sumj_g_ij_ * ode_state[State_::{{ gap_junction_membrane_potential_variable }}] + node.B_.interpolation_coefficients[ node.B_.lag_ ];
+ break;
+
+ case 1:
+ __I_gap = -node.B_.sumj_g_ij_ * ode_state[State_::{{ gap_junction_membrane_potential_variable }}] + node.B_.interpolation_coefficients[ node.B_.lag_ * 2 + 0 ]
+ + node.B_.interpolation_coefficients[ node.B_.lag_ * 2 + 1 ] * __t_gap;
+ break;
+
+ case 3:
+ __I_gap = -node.B_.sumj_g_ij_ * ode_state[State_::{{ gap_junction_membrane_potential_variable }}] + node.B_.interpolation_coefficients[ node.B_.lag_ * 4 + 0 ]
+ + node.B_.interpolation_coefficients[ node.B_.lag_ * 4 + 1 ] * __t_gap
+ + node.B_.interpolation_coefficients[ node.B_.lag_ * 4 + 2 ] * __t_gap * __t_gap
+ + node.B_.interpolation_coefficients[ node.B_.lag_ * 4 + 3 ] * __t_gap * __t_gap * __t_gap;
+ break;
+
+ default:
+ throw nest::BadProperty( "Interpolation order must be 0, 1, or 3." );
+ }
+{%- endif %}
+
+{% set numeric_state_variables_to_be_integrated = numeric_state_variables + purely_numeric_state_variables_moved %}
+{%- if ast.get_args() | length > 0 %}
+{%- set numeric_state_variables_to_be_integrated = utils.filter_variables_list(numeric_state_variables_to_be_integrated, ast.get_args()) %}
+{%- endif %}
+{%- for variable_name in numeric_state_variables + numeric_state_variables_moved %}
+{%- set update_expr = numeric_update_expressions[variable_name] %}
+{%- set variable_symbol = variable_symbols[variable_name] %}
+{%- if use_gap_junctions %}
+ f[State_::{{ variable_symbol.get_symbol_name() }}] = {% if ast.get_args() | length > 0 %}{% if variable_name in numeric_state_variables_to_be_integrated + utils.all_convolution_variable_names(astnode) %}{{ gsl_printer.print(update_expr)|replace("node.B_." + gap_junction_port + "_grid_sum_", "(node.B_." + gap_junction_port + "_grid_sum_ + __I_gap)") }}{% else %}0{% endif %}{% else %}{{ gsl_printer.print(update_expr) }}{% endif %};
+{%- else %}
+ f[State_::{{ variable_symbol.get_symbol_name() }}] = {% if ast.get_args() | length > 0 %}{% if variable_name in numeric_state_variables_to_be_integrated + utils.all_convolution_variable_names(astnode) %}{{ gsl_printer.print(update_expr) }}{% else %}0{% endif %}{% else %}{{ gsl_printer.print(update_expr) }}{% endif %};
+{%- endif %}
+{%- endfor %}
+
+{%- if numeric_solver == "rk45" %}
+ return GSL_SUCCESS;
+{%- else %}
+ return 0;
+{%- endif %}
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/GSLIntegrationStep.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/GSLIntegrationStep.jinja2
new file mode 100644
index 000000000..4a8090537
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/GSLIntegrationStep.jinja2
@@ -0,0 +1,50 @@
+{#
+ Generates a series of C++ statements which perform one integration step of
+ all odes defined the neuron.
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if numeric_solver == "rk45" %}
+double __t = 0;
+B_.__sys.function = {{neuronName}}_dynamics{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %};
+// numerical integration with adaptive step size control:
+// ------------------------------------------------------
+// gsl_odeiv_evolve_apply performs only a single numerical
+// integration step, starting from t and bounded by step;
+// the while-loop ensures integration over the whole simulation
+// step (0, step] if more than one integration step is needed due
+// to a small integration step size;
+// note that (t+IntegrationStep > step) leads to integration over
+// (t, step] and afterwards setting t to step, but it does not
+// enforce setting IntegrationStep to step-t; this is of advantage
+// for a consistent and efficient integration across subsequent
+// simulation intervals
+while ( __t < B_.__step )
+{
+{%- if use_gap_junctions %}
+ gap_junction_step = B_.__step;
+{%- endif %}
+
+ const int status = gsl_odeiv_evolve_apply(B_.__e,
+ B_.__c,
+ B_.__s,
+ &B_.__sys, // system of ODE
+ &__t, // from t
+ B_.__step, // to t <= step
+ &B_.__integration_step, // integration step size
+ S_.ode_state); // neuronal state
+
+ if ( status != GSL_SUCCESS )
+ {
+ throw nest::GSLSolverFailure( get_name(), status );
+ }
+}
+{%- elif numeric_solver == "forward-Euler" %}
+double f[State_::STATE_VEC_SIZE];
+{{neuronName}}_dynamics{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %}( get_t(), S_.ode_state, f, reinterpret_cast< void* >( this ) );
+for (size_t i = 0; i < State_::STATE_VEC_SIZE; ++i)
+{
+ S_.ode_state[i] += __timestep * f[i];
+}
+{%- else %}
+{{ raise('Unknown numeric ODE solver requested.') }}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/IfStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/IfStatement.jinja2
new file mode 100644
index 000000000..cf4a45608
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/IfStatement.jinja2
@@ -0,0 +1,33 @@
+{#
+ Generates C++ if..then..else statement
+ @param ast ASTIfStmt
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+if ({{ printer.print(ast.get_if_clause().get_condition()) }})
+{
+{%- filter indent(2, True) %}
+{%- with ast = ast.get_if_clause().get_stmts_body() %}
+{%- include "directives_cpp/StmtsBody.jinja2" %}
+{%- endwith %}
+{%- endfilter %}
+{%- for elif in ast.get_elif_clauses() %}
+}
+else if ({{ printer.print(elif.get_condition()) }})
+{
+{%- filter indent(2, True) %}
+{%- with ast = elif.get_stmts_body() %}
+{%- include "directives_cpp/StmtsBody.jinja2" %}
+{%- endwith %}
+{%- endfilter %}
+{%- endfor %}
+{%- if ast.has_else_clause() %}
+}
+else
+{
+{%- filter indent(2, True) %}
+{%- with ast = ast.get_else_clause().get_stmts_body() %}
+{%- include "directives_cpp/StmtsBody.jinja2" %}
+{%- endwith %}
+{%- endfilter %}
+{%- endif %}
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberDeclaration.jinja2
new file mode 100644
index 000000000..59a0a77b6
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberDeclaration.jinja2
@@ -0,0 +1,10 @@
+{#
+ Generates C++ declaration for a variable
+ @param variable VariableSymbol
+ @result C++ declaration
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if variable_symbol.has_comment() %}
+{{ variable_symbol.print_comment("//! ") }}
+{%- endif %}
+{{ declarations.print_variable_type(variable_symbol) }} {{ printer_no_origin.print(variable) }};
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberInitialization.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberInitialization.jinja2
new file mode 100644
index 000000000..eaf462507
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberInitialization.jinja2
@@ -0,0 +1,20 @@
+{#
+ In general case creates an
+ @param variable ASTVariable Variable for which the initialization should be done
+#}
+{%- import 'directives_cpp/VectorDeclaration.jinja2' as vector_declaration with context %}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif -%}
+{%- set sym = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE) %}
+{%- if sym.has_declaring_expression() and not sym.is_kernel() %}
+{%- if sym.has_vector_parameter() %}
+{{ vector_declaration.VectorDeclaration(variable) }}
+{%- else %}
+{{printer.print(variable)}} = {{printer.print(sym.get_declaring_expression())}}; // as {{sym.get_type_symbol().print_symbol()}}
+{%- endif %}
+{%- else %}
+{%- if sym.has_vector_parameter() %}
+{{printer.print(variable)}}.resize(0); // as {{sym.get_type_symbol().print_symbol()}}
+{%- else %}
+{{printer.print(variable)}} = 0; // as {{sym.get_type_symbol().print_symbol()}}
+{%- endif %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberVariableGetterSetter.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberVariableGetterSetter.jinja2
new file mode 100644
index 000000000..cca363e85
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/MemberVariableGetterSetter.jinja2
@@ -0,0 +1,16 @@
+{% if variable_symbol.is_inline_expression and not utils.contains_convolve_call(variable_symbol) -%}
+inline {{ declarations.print_variable_type(variable_symbol) }} {{ printer_no_origin.print(variable) }} const
+{
+ return {{ printer.print(variable_symbol.get_declaring_expression()) }};
+}
+{%- else -%}
+inline {{ declarations.print_variable_type(variable_symbol) }} get_{{ printer_no_origin.print(variable) }}() const
+{
+ return {{ nest_codegen_utils.print_symbol_origin(variable_symbol, variable) % printer_no_origin.print(variable) }};
+}
+
+inline void set_{{ printer_no_origin.print(variable) }}(const {{ declarations.print_variable_type(variable_symbol) }} __v)
+{
+ {{ nest_codegen_utils.print_symbol_origin(variable_symbol, variable) % printer_no_origin.print(variable) }} = __v;
+}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/OutputEvent.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/OutputEvent.jinja2
new file mode 100644
index 000000000..ab97b0eff
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/OutputEvent.jinja2
@@ -0,0 +1,18 @@
+{%- macro OutputEvent(node) -%}
+{%- with outputs = neuron.get_body().get_output_blocks() -%}
+{%- if outputs|length == 0 -%}
+{#- no output port defined in the model: pretend dummy spike output port to obtain usable model -#}
+nest::SpikeEvent
+{%- elif outputs|length == 1 -%}
+{%- with output = outputs[0] -%}
+{%- if output.is_spike() -%}
+nest::SpikeEvent
+{%- elif output.is_continuous() -%}
+nest::CurrentEvent
+{%- else -%}
+{{ raise('Unexpected output type. Must be continuous or spike, is %s.' % output) }}
+{%- endif -%}
+{%- endwith -%}
+{%- endif -%}
+{%- endwith -%}
+{%- endmacro -%}
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/PredefinedFunction_emit_spike.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/PredefinedFunction_emit_spike.jinja2
new file mode 100644
index 000000000..658c80f45
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/PredefinedFunction_emit_spike.jinja2
@@ -0,0 +1,28 @@
+{#
+ Generates code for emit_spike() function call
+ @param ast ASTFunctionCall
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+
+// begin generated code for emit_spike() function
+{% if ast.get_args() | length == 0 %}
+{#- no parameters -- emit_spike() called from within neuron #}
+#ifdef DEBUG
+std::cout << "Emitting a spike at t = " << nest::Time(nest::Time::step(origin.get_steps() + lag + 1)).get_ms() << "\n";
+#endif
+set_spiketime(nest::Time::step(origin.get_steps() + lag + 1));
+nest::SpikeEvent se;
+nest::kernel().event_delivery_manager.send(*this, se, lag);
+{%- else %}
+{#- weight and delay parameters given -- emit_spike() called from within synapse #}
+set_delay( {{ printer.print(ast.get_args()[1]) }} );
+const long __delay_steps = nest::Time::delay_ms_to_steps( get_delay() );
+set_delay_steps(__delay_steps);
+e.set_receiver( *__target );
+e.set_weight( {{ printer.print(ast.get_args()[0]) }} );
+// use accessor functions (inherited from Connection< >) to obtain delay in steps and rport
+e.set_delay_steps( get_delay_steps() );
+e.set_rport( get_rport() );
+e();
+{%- endif %}
+// end generated code for emit_spike() function
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/PredefinedFunction_integrate_odes.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/PredefinedFunction_integrate_odes.jinja2
new file mode 100644
index 000000000..b630f329a
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/PredefinedFunction_integrate_odes.jinja2
@@ -0,0 +1,64 @@
+{#
+ Generates code for integrate_odes() function call
+ @param ast ASTFunctionCall
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+
+// start rendered code for integrate_odes({{ ", ".join(utils.integrate_odes_args_strs_from_function_call(ast)) }})
+
+{%- if uses_analytic_solver %}
+{% set analytic_state_variables_ = analytic_state_variables.copy() %}
+
+{%- if paired_synapse is defined %}
+{% set tmp = analytic_state_variables_.extend(analytic_state_variables_moved) %}
+{%- endif %}
+
+{%- if ast.get_args() | length > 0 %}
+{%- set analytic_state_variables_ = utils.filter_variables_list(analytic_state_variables_, ast.get_args()) %}
+{%- endif %}
+
+{%- if analytic_state_variables_ | length > 0 %}
+// analytic solver: integrating state variables (first step): {% for variable_name in analytic_state_variables_ %}{{ variable_name }}, {% endfor %}
+{%- include "directives_cpp/AnalyticIntegrationStep_begin.jinja2" %}
+{%- endif %}
+{%- endif %}
+
+{%- if uses_numeric_solver %}
+
+{% set numeric_state_variables_to_be_integrated = numeric_state_variables + purely_numeric_state_variables_moved %}
+{%- if ast.get_args() | length > 0 %}
+{%- set numeric_state_variables_to_be_integrated = utils.filter_variables_list(numeric_state_variables_to_be_integrated, ast.get_args()) %}
+{%- endif %}
+{%- if numeric_state_variables_to_be_integrated | length > 0 %}
+// numeric solver: integrating state variables: {% for variable_name in numeric_state_variables_to_be_integrated %}{{ variable_name }}, {% endfor %}
+
+{%- if analytic_state_variables_from_convolutions | length > 0 %}
+// solver step should update state of convolutions internally, but not change ode_state[] pertaining to convolutions; convolution integration should be independent of integrate_odes() calls
+// buffer the old values
+{%- for variable_name in analytic_state_variables_from_convolutions %}
+{%- set update_expr = update_expressions[variable_name] %}
+{%- set variable_symbol = variable_symbols[variable_name] %}
+const double {{ variable_name }}__orig = {{ printer.print(utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name())) }};
+{%- endfor %}
+{%- endif %}
+
+{%- include "directives_cpp/GSLIntegrationStep.jinja2" %}
+
+{%- if analytic_state_variables_from_convolutions | length > 0 %}
+// restore the old values for convolutions
+{%- for variable_name in analytic_state_variables_from_convolutions %}
+{%- set variable_symbol = variable_symbols[variable_name] %}
+{{ printer.print(utils.get_state_variable_by_name(astnode, variable_symbol.get_symbol_name())) }} = {{ variable_name }}__orig;
+{%- endfor %}
+{% endif %}
+
+{%- endif %}
+{%- endif %}
+
+{%- if uses_analytic_solver %}
+{%- if analytic_state_variables_ | length > 0 %}
+// analytic solver: integrating state variables (second step): {% for variable_name in analytic_state_variables_ %}{{ variable_name }}, {% endfor %}
+
+{%- include "directives_cpp/AnalyticIntegrationStep_end.jinja2" %}
+{%- endif %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ReadFromDictionaryToTmp.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ReadFromDictionaryToTmp.jinja2
new file mode 100644
index 000000000..607b8c8d4
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ReadFromDictionaryToTmp.jinja2
@@ -0,0 +1,64 @@
+{#
+ Generates a code snippet that retrieves a value from the dictionary and sets it to the model variable.
+ @param variable VariableSymbol
+#}
+{%- import 'directives_cpp/VectorSizeParameter.jinja2' as vector_size_parameter with context %}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+
+{%- if not variable_symbol.is_inline_expression %}
+{%- if not variable_symbol.is_state() %}
+{{ declarations.print_variable_type(variable_symbol) }} tmp_{{ printer_no_origin.print(variable) }} = get_{{ printer_no_origin.print(variable) }}();
+{%- if synapse is defined or variable_symbol in vector_symbols or nest_version.startswith("v2") %}
+{#- setting with probability distributions is not supported for synapses after they have been created; for vectors; or when using NEST 2 #}
+updateValue<{{declarations.print_variable_type(variable_symbol)}}>(__d, nest::{{names_namespace}}::_{{printer_no_origin.print(variable)}}, tmp_{{printer_no_origin.print(variable)}});
+{%- else %}
+nest::updateValueParam<{{declarations.print_variable_type(variable_symbol)}}>(__d, nest::{{names_namespace}}::_{{printer_no_origin.print(variable)}}, tmp_{{printer_no_origin.print(variable)}}, this);
+{%- endif %}
+
+{%- if vector_symbols|length > 0 %}
+// Resize vectors
+if (tmp_{{ printer_no_origin.print(variable) }} != get_{{ printer_no_origin.print(variable) }}())
+{
+{%- for vector_var in vector_symbols %}
+{%- if vector_var.get_vector_parameter().is_variable() and vector_var.get_vector_parameter().get_variable().get_complete_name() == variable_symbol.get_symbol_name() %}
+ {{declarations.print_variable_type(vector_var)}} _tmp_{{ printer_no_origin.print(utils.get_variable_by_name(astnode, vector_var.get_symbol_name())) }} = get_{{printer_no_origin.print(utils.get_variable_by_name(astnode, vector_var.get_symbol_name()))}}();
+ _tmp_{{ vector_var.get_symbol_name() }}.resize(tmp_{{ printer_no_origin.print(variable) }}, 0.);
+ set_{{ vector_var.get_symbol_name() }}(_tmp_{{ vector_var.get_symbol_name() }});
+{%- endif %}
+{%- endfor %}
+}
+{%- endif %}
+
+{%- else %}
+{{declarations.print_variable_type(variable_symbol)}} tmp_{{ printer_no_origin.print(variable) }} = get_{{printer_no_origin.print(variable)}}();
+{%- if synapse is defined or variable_symbol in vector_symbols or nest_version.startswith("v2") %}
+{#- setting with probability distributions is not supported for synapses after they have been created; for vectors; or when using NEST 2 #}
+updateValue<{{declarations.print_variable_type(variable_symbol)}}>(__d, nest::{{names_namespace}}::_{{variable_symbol.get_symbol_name()}}, tmp_{{printer_no_origin.print(variable)}});
+{%- else %}
+nest::updateValueParam<{{declarations.print_variable_type(variable_symbol)}}>(__d, nest::{{names_namespace}}::_{{variable_symbol.get_symbol_name()}}, tmp_{{printer_no_origin.print(variable)}}, this);
+{%- endif %}
+{%- endif %}
+
+{%- if variable.has_vector_parameter() %}
+ {#
+Typecast the vector parameter to an int. If the typecast fails with a return value of 0, the vector parameter is a
+variable
+ #}
+{%- if variable.get_vector_parameter().is_numeric_literal() -%}
+{%- set vector_size = vector_size_parameter.VectorSizeParameter(variable, true) %}
+{%- elif variable.get_vector_parameter().is_variable() -%}
+{%- set vector_size = "tmp_" + vector_size_parameter.VectorSizeParameter(variable, false) %}
+{%- else %}
+{{ raise("Vector size expression needs to be numeric literal or variable") }}
+{%- endif %}
+// Check if the new vector size matches its original size
+if ( tmp_{{ printer_no_origin.print(variable) }}.size() != {{vector_size}} )
+{
+ std::stringstream msg;
+ msg << "The vector \"{{ printer_no_origin.print(variable) }}\" does not match its size: " << {{vector_size}};
+ throw nest::BadProperty(msg.str());
+}
+{%- endif %}
+{%- else %}
+ // ignores '{{ printer_no_origin.print(variable) }}' {{ declarations.print_variable_type(variable_symbol) }}' since it is a function and setter isn't defined
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ReturnStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ReturnStatement.jinja2
new file mode 100644
index 000000000..26001711f
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/ReturnStatement.jinja2
@@ -0,0 +1,10 @@
+{#
+ Generates a single return statement in C++ syntax.
+ @param ast ASTReturnStmt: a single return statement object
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if ast.has_expression() %}
+return {{ printer.print(ast.get_expression()) }};
+{%- else %}
+return;
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/RportToBufferIndexEntry.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/RportToBufferIndexEntry.jinja2
new file mode 100644
index 000000000..0e6a13c28
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/RportToBufferIndexEntry.jinja2
@@ -0,0 +1,20 @@
+{%- macro RportToBufferIndexEntry(ports, rport, index=-1) -%}
+{%- if index >= 0 -%}
+{%- set name = "{}_" ~ index|string %}
+{%- else -%}
+{%- set name = "{}" %}
+{%- endif -%}
+
+{%- if ports|length > 1 -%}
+{%- if ports[0].is_excitatory() %}
+{%- set exc_port = ports[0] %}
+{%- set inh_port = ports[1] %}
+{%- else %}
+{%- set exc_port = ports[1] %}
+{%- set inh_port = ports[0] %}
+{%- endif %}
+ { {{neuronName}}::{{ name.format(exc_port.get_symbol_name().upper()) }}, {{neuronName}}::{{ name.format(inh_port.get_symbol_name().upper()) }} },
+{%- else -%}
+ { {{neuronName}}::{{ name.format(ports[0].get_symbol_name().upper()) }}, {{neuronName}}::PORT_NOT_AVAILABLE },
+{%- endif -%}
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/SmallStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/SmallStatement.jinja2
new file mode 100644
index 000000000..5dd6d6c2c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/SmallStatement.jinja2
@@ -0,0 +1,22 @@
+{#
+ Converts a single small statement into equivalent C++ syntax.
+ @param stmt ASTSmallStmt
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if stmt.is_assignment() %}
+{%- with ast = stmt.get_assignment() %}
+{%- include "directives_cpp/Assignment.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_function_call() %}
+{%- with ast = stmt.get_function_call() %}
+{%- include "directives_cpp/FunctionCall.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_declaration() %}
+{%- with ast = stmt.get_declaration() %}
+{%- include "directives_cpp/Declaration.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_return_stmt() %}
+{%- with ast = stmt.get_return_stmt() %}
+{%- include "directives_cpp/ReturnStatement.jinja2" %}
+{%- endwith %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/SpikeBufferGetter.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/SpikeBufferGetter.jinja2
new file mode 100644
index 000000000..e5f519248
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/SpikeBufferGetter.jinja2
@@ -0,0 +1,29 @@
+{%- macro SpikeBufferGetter(is_in_struct) -%}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+
+{% set _tuples = [
+ ("nest::RingBuffer", "spike_inputs_", "Buffer containing the incoming spikes"),
+ ("double", "spike_inputs_grid_sum_", "Buffer containing the sum of all the incoming spikes"),
+ ("nest::RingBuffer", "spike_input_received_", "Buffer containing a flag whether incoming spikes have been received on a given port"),
+ ("double", "spike_input_received_grid_sum_", "Buffer containing a flag whether incoming spikes have been received on a given port")
+] %}
+
+{%- for data_type, variable_name, comment_string in _tuples %}
+
+/**
+ * {{ comment_string }}
+**/
+inline std::vector< {{data_type}} >& get_{{variable_name}}()
+{
+{%- if is_in_struct %}
+ return {{variable_name}};
+{%- else %}
+ return B_.get_{{variable_name}}();
+{%- endif %}
+}
+{%- if is_in_struct %}
+std::vector< {{data_type}} > {{variable_name}};
+{%- endif %}
+{%- endfor %}
+
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/StateVariablesEnum.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/StateVariablesEnum.jinja2
new file mode 100644
index 000000000..93e71ce1e
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/StateVariablesEnum.jinja2
@@ -0,0 +1,20 @@
+{#
+ Generates an Enum with state variables that are recordable when the neuron model uses vectors
+ @param neuron ASTModel: the neuron model
+-#}
+
+enum StateVecVars {
+{%- set ns = namespace(count=0) %}
+{%- for variable in neuron.get_state_symbols() %}
+{%- set varDomain = declarations.get_domain_from_type(variable.get_type_symbol()) %}
+{%- if varDomain == "double" and variable.is_recordable %}
+ {{ printer_no_origin.print(utils.get_state_variable_by_name(astnode, variable.get_symbol_name())).upper() }} = {{ ns.count }},
+{%- if variable.has_vector_parameter() %}
+{%- set size = utils.get_numeric_vector_size(variable) %}
+{%- set ns.count = ns.count + size %}
+{%- else %}
+{%- set ns.count = ns.count + 1 %}
+{%- endif %}
+{%- endif %}
+{%- endfor %}
+};
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Statement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Statement.jinja2
new file mode 100644
index 000000000..f20cbe90a
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/Statement.jinja2
@@ -0,0 +1,16 @@
+{#
+ Generates a single statement, either a simple or compound, to equivalent C++ syntax.
+ @param stmt ASTStmt (either a small or a compound statement)
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if stmt.has_comment() %}
+{{stmt.print_comment('//')}}{%- endif %}
+{%- if stmt.is_small_stmt() %}
+{%- with stmt = stmt.small_stmt %}
+{%- include "directives_cpp/SmallStatement.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_compound_stmt() %}
+{%- with stmt = stmt.compound_stmt %}
+{%- include "directives_cpp/CompoundStatement.jinja2" %}
+{%- endwith %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/StmtsBody.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/StmtsBody.jinja2
new file mode 100644
index 000000000..dc1419aaf
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/StmtsBody.jinja2
@@ -0,0 +1,9 @@
+{#
+ Handles an ASTStmtsBody
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- for statement in ast.get_stmts() %}
+{%- with stmt = statement %}
+{%- include "directives_cpp/Statement.jinja2" %}
+{%- endwith %}
+{%- endfor %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/UpdateDelayVariables.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/UpdateDelayVariables.jinja2
new file mode 100644
index 000000000..56242b741
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/UpdateDelayVariables.jinja2
@@ -0,0 +1,6 @@
+{#
+ Generates C++ statements that update the delay variables
+ @param variable VariableSymbol
+#}
+ DV_.delayed_{{ variable_symbol.get_symbol_name() }} [DV_.delayed_{{ variable_symbol.get_symbol_name() }}_idx] = {{ nest_codegen_utils.print_symbol_origin(variable_symbol, variable) % printer_no_origin.print(variable) }};
+ DV_.delayed_{{ variable_symbol.get_symbol_name() }}_idx = (DV_.delayed_{{ variable_symbol.get_symbol_name() }}_idx + 1) % DV_.delay_{{ variable_symbol.get_symbol_name() }}_steps;
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/VectorDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/VectorDeclaration.jinja2
new file mode 100644
index 000000000..5bedf7164
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/VectorDeclaration.jinja2
@@ -0,0 +1,5 @@
+{%- import "directives_cpp/VectorSizeParameter.jinja2" as vector_size_parameter with context %}
+{%- macro VectorDeclaration(node) -%}
+
+{{ nest_codegen_utils.print_symbol_origin(variable_symbol, variable) % variable_symbol.get_symbol_name() }}.resize({{ vector_size_parameter.VectorSizeParameter(variable, true) }}, {{ printer.print(variable_symbol.get_declaring_expression()) }});
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/VectorSizeParameter.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/VectorSizeParameter.jinja2
new file mode 100644
index 000000000..51a575c86
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/VectorSizeParameter.jinja2
@@ -0,0 +1,13 @@
+{%- macro VectorSizeParameter(node, with_origin) -%}
+{%- set vector_parameter = node.get_vector_parameter() -%}
+{%- if vector_parameter.is_variable() -%}
+{%- set symbol = vector_parameter.get_scope().resolve_to_symbol(vector_parameter.get_variable().get_complete_name(), SymbolKind.VARIABLE) -%}
+{%- if with_origin %}
+{{ nest_codegen_utils.print_symbol_origin(symbol, vector_parameter.get_variable()) % vector_parameter.get_variable().get_complete_name() }}
+{%- else -%}
+{{ vector_parameter.get_variable().get_complete_name() }}
+{%- endif -%}
+{%- elif vector_parameter.is_numeric_literal() -%}
+{{ vector_parameter.get_numeric_literal() }}
+{%- endif -%}
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/WhileStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/WhileStatement.jinja2
new file mode 100644
index 000000000..ef171ac81
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/WhileStatement.jinja2
@@ -0,0 +1,13 @@
+{#
+ Generates a C++ while statement
+ @param ast ASTWhileStmt
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+while ( {{ printer.print(ast.get_condition()) }})
+{
+{%- filter indent(2) %}
+{%- with ast = ast.get_stmts_body() %}
+{%- include "directives_cpp/StmtsBody.jinja2" %}
+{%- endwith %}
+{%- endfilter %}
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/WriteInDictionary.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/WriteInDictionary.jinja2
new file mode 100644
index 000000000..2d8d62d40
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/WriteInDictionary.jinja2
@@ -0,0 +1,8 @@
+{#
+ Generates an instruction to write a value into the dictionary of registered variables.
+ @param variable VariableSymbol
+#}
+{%- if tracing %}/* generated by {{self._TemplateReference__context.name}} */ {% endif %}
+{%- if not variable_symbol.is_internals() %}
+def< {{declarations.print_variable_type(variable_symbol)}} >(__d, nest::{{names_namespace}}::_{{variable_symbol.get_symbol_name()}}, get_{{printer_no_origin.print(variable)}}());
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_cpp/__init__.py b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/__init__.py
new file mode 100644
index 000000000..ec6cd5167
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_cpp/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+#
+# __init__.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+# ---------------------------------------------------------------
+# Caution: This file is required to enable Python to also include the templates
+# ---------------------------------------------------------------
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/AnalyticIntegrationStep_begin.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/AnalyticIntegrationStep_begin.jinja2
new file mode 100644
index 000000000..da7b5cb9b
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/AnalyticIntegrationStep_begin.jinja2
@@ -0,0 +1,10 @@
+{#
+ Generates a series of statements which perform one integration step of all ODEs that are solved by the analytic integrator.
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if uses_analytic_solver %}
+{%- for variable_name in analytic_state_variables_ %}
+{%- set update_expr = update_expressions[variable_name] %}
+{{variable_name}}__tmp: float = {{ printer.print(update_expr) }}
+{%- endfor %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/AnalyticIntegrationStep_end.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/AnalyticIntegrationStep_end.jinja2
new file mode 100644
index 000000000..41f7c8c73
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/AnalyticIntegrationStep_end.jinja2
@@ -0,0 +1,12 @@
+{#
+ Generates a series of Python statements which perform one integration step of all ODEs that are solved by the analytic integrator.
+#}
+# replace analytically solvable variables with precisely integrated values
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}} {% endif %}
+{%- if uses_analytic_solver %}
+{%- for variable_name in analytic_state_variables_ %}
+{%- set variable_symbol = variable_symbols[variable_name] %}
+{%- set variable = utils.get_variable_by_name(astnode, variable_symbol.get_symbol_name()) %}
+{{ printer.print(variable) }} = {{ variable_name }}__tmp
+{%- endfor %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/ApplySpikesFromBuffers.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/ApplySpikesFromBuffers.jinja2
new file mode 100644
index 000000000..c0952b2f5
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/ApplySpikesFromBuffers.jinja2
@@ -0,0 +1,6 @@
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- for spike_updates_for_port in spike_updates.values() %}
+{%- for ast in spike_updates_for_port -%}
+{%- include "directives_py/Assignment.jinja2" %}
+{%- endfor %}
+{%- endfor %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/Assignment.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/Assignment.jinja2
new file mode 100644
index 000000000..3e52995eb
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/Assignment.jinja2
@@ -0,0 +1,25 @@
+{#
+ Generates Python assignment
+ @param ast ASTAssignment
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- set lhs_variable_symbol = assignments.lhs_variable(ast) %}
+{%- set lhs_variable = utils.get_variable_by_name(astnode, lhs_variable_symbol.get_symbol_name()) %}
+{%- if lhs_variable_symbol is none %}
+{{ raise('Symbol with name "%s" could not be resolved' % ast.lhs.get_complete_name()) }}
+{%- endif %}
+{%- if assignments.is_vectorized_assignment(ast) %}
+{%- if lhs_variable_symbol.has_vector_parameter() %}
+{%- set lhs_vector_variable = assignments.lhs_vector_variable(ast) %}
+{%- if lhs_vector_variable is none %}
+{{ printer.print(lhs_variable) }}[{{ ast.get_variable().get_vector_parameter() }}]
+{%- else %}
+{{ printer.print(lhs_variable) }}[{{ printer.print(lhs_vector_variable) }}]
+{%- endif %}
+{%- else %}
+{{ printer.print(lhs_variable) }}
+{%- endif %}
+{{assignments.print_assignments_operation(ast)}} {{printer.print(ast.get_expression())}}
+{%- else %}
+{{ printer.print(lhs_variable) }} {{assignments.print_assignments_operation(ast)}} {{printer.print(ast.get_expression())}}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/CompoundStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/CompoundStatement.jinja2
new file mode 100644
index 000000000..12f2e2b89
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/CompoundStatement.jinja2
@@ -0,0 +1,18 @@
+{#
+ Handles the compound statement.
+ @grammar: Compound_Stmt = IF_Stmt | FOR_Stmt | WHILE_Stmt;
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if stmt.is_if_stmt() %}
+{%- with ast = stmt.get_if_stmt() %}
+{%- include "directives_py/IfStatement.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_for_stmt() %}
+{%- with ast = stmt.get_for_stmt() %}
+{%- include "directives_py/ForStatement.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_while_stmt() %}
+{%- with ast = stmt.get_while_stmt() %}
+{%- include "directives_py/WhileStatement.jinja2" %}
+{%- endwith %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/ConstructorParameterWithDefault.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/ConstructorParameterWithDefault.jinja2
new file mode 100644
index 000000000..98486bbc2
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/ConstructorParameterWithDefault.jinja2
@@ -0,0 +1,28 @@
+{#
+ In the general case, creates a constructor parameter with a default value.
+ @param variable_symbol VariableSymbol: the symbol for which the initialization should be done
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if variable_symbol.has_declaring_expression() and not variable_symbol.is_kernel() %}
+
+
+{%- if variable_symbol.has_vector_parameter() %}
+{{ printer.print_vector_declaration(variable_symbol) }},
+
+{%- else %}
+{%- set variable_initial_value = utils.get_variable_by_name(astnode, variable_symbol.get_initial_value()|string()) %}
+{%- if not variable_initial_value %}
+{{ printer.print(variable) }} = {{ printer.print_expression(variable_symbol.get_declaring_expression()) }} , # type: {{variable_symbol.get_type_symbol().print_symbol()}}
+{%- else %}
+{%- set variable_symbol = variable_initial_value.resolve_in_own_scope()%}
+{%- include "directives_py/ConstructorParameterWithDefault.jinja2" %}
+{%- endif %}
+
+{%- endif %}
+{%- else %}
+{%- if variable_symbol.has_vector_parameter() %}
+{{ printer.print(variable) }} , # type: {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- else %}
+{{ printer.print(variable) }} , # type: {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- endif -%}
+{%- endif -%}
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/Declaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/Declaration.jinja2
new file mode 100644
index 000000000..5d676c425
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/Declaration.jinja2
@@ -0,0 +1,20 @@
+{#
+ Generates Python declaration
+ @param ast ASTDeclaration
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- for variable in declarations.get_variables(ast) %}
+{%- if ast.has_size_parameter() %}
+{{variable.get_symbol_name()}}: Sequence[{{declarations.print_variable_type(variable)}}] = P_.{{declarations.print_size_parameter(ast)}} * [None]
+{%- if ast.has_expression() %}
+for i in range(0, get_{{declarations.print_size_parameter(ast)}}()):
+ {{variable.get_symbol_name()}}[i] = {{printer.print(ast.get_expression())}}
+{%- endif %}
+{%- else %}
+{%- if ast.has_expression() %}
+{{variable.get_symbol_name()}}: {{declarations.print_variable_type(variable)}} = {{printer.print(ast.get_expression())}}
+{%- else %}
+{{variable.get_symbol_name()}}: {{declarations.print_variable_type(variable)}}
+{%- endif %}
+{%- endif %}
+{%- endfor -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/DynamicStateElement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/DynamicStateElement.jinja2
new file mode 100644
index 000000000..4056e715b
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/DynamicStateElement.jinja2
@@ -0,0 +1,46 @@
+{#
+ Generates the get_state_element function to access elements of state variables based on how they are inserted into the DynamicRecordablesMap
+ @param neuron ASTModel: the neuron model
+-#}
+
+inline double get_state_element(size_t elem)
+ {
+{%- set len = recordable_state_variables | length %}
+{%- for variable in recordable_state_variables %}
+{%- if loop.index == 1 %}
+ if
+{%- elif loop.index == len %}
+ else
+{%- else %}
+ else if
+{%- endif %}
+
+{%- if loop.index != len %}
+{%- if variable.has_vector_parameter() %}
+{%- set size = variable.get_vector_parameter() %}
+{%- if size|int == 0 %}
+{%- set size = printer.print_vector_size_parameter(variable) %}
+{%- endif -%}
+ (elem >= State_::STATE_VEC_VAR_{{ printer.print(utils.get_variable_by_name(astnode, variable)).upper() }} && elem < State_::STATE_VEC_VAR_{{ printer.print(utils.get_variable_by_name(astnode, variable)).upper() }} + {{size}})
+ {
+ return S_.{{ printer.print(utils.get_variable_by_name(astnode, variable)) }}[ elem - State_::STATE_VEC_VAR_{{ printer.print(utils.get_variable_by_name(astnode, variable)).upper() }}];
+ }
+{%- else %}
+ (elem == State_::STATE_VEC_VAR_{{ printer.print(utils.get_variable_by_name(astnode, variable)).upper() }})
+ {
+ return S_.{{ printer.print(utils.get_variable_by_name(astnode, variable)) }};
+ }
+{%- endif %}
+{%- else %}
+{%- if variable.has_vector_parameter() %}
+ {
+ return S_.{{ printer.print(utils.get_variable_by_name(astnode, variable)) }}[ elem - State_::STATE_VEC_VAR_{{ printer.print(utils.get_variable_by_name(astnode, variable)).upper() }}];
+ }
+{%- else %}
+ {
+ return S_.{{ printer.print(utils.get_variable_by_name(astnode, variable)) }};
+ }
+{%- endif %}
+{%- endif %}
+{%- endfor %}
+ }
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/ForStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/ForStatement.jinja2
new file mode 100644
index 000000000..262038f16
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/ForStatement.jinja2
@@ -0,0 +1,13 @@
+{#
+ Generates Python statements that implement a for loop
+ @param ast ASTForStmt
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+for {{ ast.get_variable() }} in range({{ printer.print(ast.get_start_from()) }},
+ {{ printer.print(ast.get_end_at()) }}):
+{%- filter indent(2) %}
+{%- with ast = ast.get_stmts_body() %}
+{%- include "directives_py/StmtsBody.jinja2" %}
+{%- endwith %}
+{%- endfilter %}
+ {{ ast.get_variable() }} += {{ ast.get_step() }}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/FunctionCall.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/FunctionCall.jinja2
new file mode 100644
index 000000000..290038632
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/FunctionCall.jinja2
@@ -0,0 +1,12 @@
+{#
+ Generates Python function call
+ @param ast ASTFunctionCall
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if ast.get_name() == PredefinedFunctions.INTEGRATE_ODES %}
+self._integrate_odes{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %}(origin, timestep)
+{%- elif ast.get_name() == PredefinedFunctions.EMIT_SPIKE %}
+self.emit_spike(origin)
+{%- else %}
+{{ printer.print(ast) }}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/FunctionDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/FunctionDeclaration.jinja2
new file mode 100644
index 000000000..b8ebc8ce0
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/FunctionDeclaration.jinja2
@@ -0,0 +1,19 @@
+{%- macro FunctionDeclaration(ast_function, namespace_prefix) -%}
+{%- with function_symbol = ast_function.get_scope().resolve_to_symbol(ast_function.get_name(), SymbolKind.FUNCTION) -%}
+{%- if function_symbol is none -%}
+{{ raise('Cannot resolve the method ' + ast_function.get_name()) }}
+{%- endif %}
+{{ ast_function.print_comment('#') }}
+def {{ namespace_prefix }}{{ ast_function.get_name() }} (
+{%- filter indent(6) %}
+{%- for param in ast_function.get_parameters() %}
+{%- set typeSym = param.get_data_type().get_type_symbol() -%}
+{{ types_printer.convert(typeSym) }} {{ param.get_name() }}
+{%- if not loop.last -%}
+,
+{%- endif -%}
+{%- endfor -%}
+{%- endfilter -%}
+) -> {{ types_printer.print(function_symbol.get_return_type()) }}:
+{%- endwith -%}
+{%- endmacro -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/GSLDifferentiationFunction.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/GSLDifferentiationFunction.jinja2
new file mode 100644
index 000000000..938a695c3
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/GSLDifferentiationFunction.jinja2
@@ -0,0 +1,36 @@
+{#
+ Creates scipy odeint implementation of the differentiation step for the system of ODEs.
+#}
+@staticmethod
+def dynamics{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %}(t: float, ode_state: List[float], args: Tuple[Any]) -> List[float]:
+ r"""Numerical integrator stepping function for _integrate_odes{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %}"""
+ # ode_state[] here is---and must be---the state vector supplied by the integrator, not the state vector in the node, node.S_.ode_state[].
+ node = args
+
+ dim: int = len(ode_state)
+ f = np.empty(dim, dtype=float)
+
+{%- for equations_block in neuron.get_equations_blocks() %}
+{%- for ode in equations_block.get_declarations() %}
+{%- for inline_expr in utils.get_inline_expression_symbols(ode) %}
+{%- if not inline_expr.is_equation() %}
+{%- set declaring_expr = inline_expr.get_declaring_expression() %}
+ {{ printer.print(utils.get_variable_by_name(astnode, inline_expr)) }} = {{ gsl_printer.print(declaring_expr) }}
+{%- endif %}
+{%- endfor %}
+{%- endfor %}
+{%- endfor %}
+
+{%- set var_names = numeric_state_variables %}
+{%- if paired_synapse is defined %}
+{%- set tmp = var_names.extend(numeric_state_variables_moved) %}
+{%- endif %}
+
+{%- for variable_name in var_names %}
+{%- set update_expr = numeric_update_expressions[variable_name] %}
+{%- set variable_symbol = astnode.get_scope().resolve_to_symbol(variable_name, SymbolKind.VARIABLE) %}
+ f[node.S_.ode_state_variable_name_to_index["{{ variable_symbol.name }}"]] = {% if ast.get_args() | length > 0 %}{% if variable_name in utils.integrate_odes_args_strs_from_function_call(ast) + utils.all_convolution_variable_names(astnode) %}{{ gsl_printer.print(update_expr) }}{% else %}0{% endif %}{% else %}{{ gsl_printer.print(update_expr) }}{% endif %}
+
+{%- endfor %}
+
+ return f
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/GSLIntegrationStep.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/GSLIntegrationStep.jinja2
new file mode 100644
index 000000000..3bfab7253
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/GSLIntegrationStep.jinja2
@@ -0,0 +1,12 @@
+{#
+ Generates a series of statements which perform one integration step of all ODEs defined in the model.
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if numeric_solver == "rk45" %}
+res = scipy.integrate.solve_ivp(Neuron_{{neuronName}}.dynamics{% if ast.get_args() | length > 0 %}_{{ utils.integrate_odes_args_str_from_function_call(ast) }}{% endif %}, t_span=(origin, origin + timestep), y0=self.S_.ode_state, method="RK45", args=(self, ))
+np.testing.assert_almost_equal(res.t[-1], origin + timestep) # sanity check on the final timestep reached by the solver
+self.S_.ode_state = res.y[:, -1]
+{%- else %}
+{{ raise('Unsupported numerical solver') }}
+{%- endif %}
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/IfStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/IfStatement.jinja2
new file mode 100644
index 000000000..baf9ee70e
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/IfStatement.jinja2
@@ -0,0 +1,24 @@
+{#
+ Generates Python if-then-else statement
+ @param ast ASTIfStmt
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}} {% endif %}
+if {{ printer.print(ast.get_if_clause().get_condition()) }}:
+{%- filter indent(2) %}
+{%- set ast = ast.get_if_clause().get_stmts_body() %}
+{%- include "directives_py/StmtsBody.jinja2" %}
+{%- endfilter %}
+{%- for elif in ast.get_elif_clauses() %}
+elif {{ printer.print(elif.get_condition()) }}:
+{%- filter indent(2) %}
+{%- set ast = elif.get_stmts_body() %}
+{%- include "directives_py/StmtsBody.jinja2" %}
+{%- endfilter %}
+{%- endfor %}
+{%- if ast.has_else_clause() %}
+else:
+{%- filter indent(2) %}
+{%- set ast = ast.get_else_clause().get_stmts_body() %}
+{%- include "directives_py/StmtsBody.jinja2" %}
+{%- endfilter %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/InternalPropagatorValues.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/InternalPropagatorValues.jinja2
new file mode 100644
index 000000000..8ff3a7d5e
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/InternalPropagatorValues.jinja2
@@ -0,0 +1,16 @@
+{#
+ Generates a math expression computable by Python for the internal propagator parameters
+ @param result Initial value expression
+#}
+{%- if result.get_initial_value().__class__.__name__ == "ASTSimpleExpression" %}
+ {%- if result.get_initial_value() is not None %}
+ "{{ result.get_symbol_name() }}": {{ printer_no_origin.print(result.get_initial_value()).numeric_literal }},
+ {%- endif %}
+ {%- else %}
+ {%- for symbol, value in parameters.items() %}
+ result = result.replace(symbol, str(value))
+ {% endfor %}
+ "{{ result.get_symbol_name() }}": {{ result }}
+{%- endif %}
+
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberDeclaration.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberDeclaration.jinja2
new file mode 100644
index 000000000..cf9c60de9
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberDeclaration.jinja2
@@ -0,0 +1,14 @@
+{#
+ Generates declaration for a variable
+ @param variable ASTVariable
+ @param variable_symbol VariableSymbol
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if variable_symbol.has_comment() %}
+{{ variable_symbol.print_comment("# ") }}
+{%- endif %}
+{%- if variable_symbol.get_symbol_name() in numeric_state_variables %}
+ode_state[ode_state_variable_name_to_index["{{ printer_no_origin.print(variable) }}"]] = np.nan # type: {{ declarations.print_variable_type(variable_symbol) }}
+{%- else %}
+{{ printer_no_origin.print(variable) }} = None # type: {{ declarations.print_variable_type(variable_symbol) }}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberInitialization.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberInitialization.jinja2
new file mode 100644
index 000000000..a25f4dd80
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberInitialization.jinja2
@@ -0,0 +1,18 @@
+{#
+ In the general case, creates an initialization statement for the given variable.
+ @param variable_symbol VariableSymbol for which the initialization should be done
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if variable_symbol.has_declaring_expression() and not variable_symbol.is_kernel() %}
+{%- if variable_symbol.has_vector_parameter() %}
+{{ printer.print_vector_declaration(variable_symbol) }}
+{%- else %}
+{{ printer.print(variable) }} = {{ printer.print(variable_symbol.get_declaring_expression()) }} # type: {{variable_symbol.get_type_symbol().print_symbol()}}
+{%- endif %}
+{%- else %}
+{%- if variable_symbol.has_vector_parameter() %}
+{{ printer.print(variable) }} = [] # type: {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- else %}
+{{ printer.print(variable) }} = 0 # type: {{ variable_symbol.get_type_symbol().print_symbol() }}
+{%- endif -%}
+{%- endif -%}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberVariableGetterSetter.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberVariableGetterSetter.jinja2
new file mode 100644
index 000000000..88b6347cd
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/MemberVariableGetterSetter.jinja2
@@ -0,0 +1,10 @@
+{% if variable_symbol.is_inline_expression and not utils.contains_convolve_call(variable_symbol) -%}
+def get_{{ printer_no_origin.print(variable) }}(self) -> {{ declarations.print_variable_type(variable_symbol) }}:
+ return {{ printer_no_origin.print(variable_symbol.get_declaring_expression()) }}
+{%- else %}
+def get_{{ printer_no_origin.print(variable) }}(self) -> {{ declarations.print_variable_type(variable_symbol) }}:
+ return {{ printer.print(variable) }}
+
+def set_{{ printer_no_origin.print(variable) }}(self, __v: {{ declarations.print_variable_type(variable_symbol) }}):
+ {{ printer.print(variable) }} = __v
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/ReturnStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/ReturnStatement.jinja2
new file mode 100644
index 000000000..c9517e331
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/ReturnStatement.jinja2
@@ -0,0 +1,10 @@
+{#
+ Generates a single return statement
+ @param ast ASTReturnStmt
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if ast.has_expression() %}
+return {{ printer.print(ast.get_expression()) }}
+{%- else %}
+return
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/SmallStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/SmallStatement.jinja2
new file mode 100644
index 000000000..d93e3841c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/SmallStatement.jinja2
@@ -0,0 +1,22 @@
+{#
+ Generates a single small statement
+ @param stmt ASTSmallStmt
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if stmt.is_assignment() %}
+{%- with ast = stmt.get_assignment() %}
+{%- include "directives_py/Assignment.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_function_call() %}
+{%- with ast = stmt.get_function_call() %}
+{%- include "directives_py/FunctionCall.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_declaration() %}
+{%- with ast = stmt.get_declaration() %}
+{%- include "directives_py/Declaration.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_return_stmt() %}
+{%- with ast = stmt.get_return_stmt() %}
+{%- include "directives_py/ReturnStatement.jinja2" %}
+{%- endwith %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/Statement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/Statement.jinja2
new file mode 100644
index 000000000..ef4b6bb58
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/Statement.jinja2
@@ -0,0 +1,17 @@
+{#
+ Generates a single statement, either a simple or compound
+ @param ast ASTSmallStmt or ASTCompoundStmt
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+{%- if stmt.has_comment() %}
+{{ stmt.print_comment('#') }}
+{%- endif %}
+{%- if stmt.is_small_stmt() %}
+{%- with stmt = stmt.small_stmt %}
+{%- include "directives_py/SmallStatement.jinja2" %}
+{%- endwith %}
+{%- elif stmt.is_compound_stmt() %}
+{%- with stmt = stmt.compound_stmt %}
+{%- include "directives_py/CompoundStatement.jinja2" %}
+{%- endwith %}
+{%- endif %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/StmtsBody.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/StmtsBody.jinja2
new file mode 100644
index 000000000..a6e4627b0
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/StmtsBody.jinja2
@@ -0,0 +1,11 @@
+{#
+ Handles a sequence of statements
+ @param ast ASTStmtsBody
+#}
+{%- if tracing %}# generated by {{ self._TemplateReference__context.name }}{% endif %}
+
+{%- for statement in ast.get_stmts() %}
+{%- with stmt = statement %}
+{%- include "directives_py/Statement.jinja2" %}
+{%- endwith %}
+{%- endfor %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/WhileStatement.jinja2 b/pynestml/codegeneration/resources_spinnaker2/directives_py/WhileStatement.jinja2
new file mode 100644
index 000000000..324769f8b
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/WhileStatement.jinja2
@@ -0,0 +1,10 @@
+{#
+ Generates Python while statement
+ @param ast ASTWhileStmt
+#}
+{%- if tracing %}# generated by {{self._TemplateReference__context.name}}{% endif %}
+while {{ printer.print(ast.get_condition()) }}:
+{%- filter indent(2) %}
+{%- set ast = ast.get_stmts_body() %}
+{%- include "directives_py/StmtsBody.jinja2" %}
+{%- endfilter %}
diff --git a/pynestml/codegeneration/resources_spinnaker2/directives_py/__init__.py b/pynestml/codegeneration/resources_spinnaker2/directives_py/__init__.py
new file mode 100644
index 000000000..ec6cd5167
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/directives_py/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+#
+# __init__.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+# ---------------------------------------------------------------
+# Caution: This file is required to enable Python to also include the templates
+# ---------------------------------------------------------------
diff --git a/pynestml/codegeneration/resources_spinnaker2/global_params.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/global_params.h.jinja2
new file mode 100644
index 000000000..0c55a18fd
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/global_params.h.jinja2
@@ -0,0 +1,11 @@
+#pragma once
+
+struct global_params {
+ uint32_t n_used_neurons; // number of neurons to generate
+ uint32_t record_spikes; // use like bool
+ uint32_t record_v; // 0: recording off, 1: record voltage in all timesteps, 2: record last voltage
+ uint32_t profiling; // use like bool
+ uint32_t record_time_done; // use like bool
+ REAL calc_step_raw; // numerical value of '__h' in s, timestep used for ODE integration, e.g. 0.001 for 1ms steps
+ REAL weight_scaling_factor; // scaling factor for 4 bit weights, e.g. 1e-9 for NanoAmpere
+};
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/maths-util.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/maths-util.h.jinja2
new file mode 100644
index 000000000..cfa26ef2c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/maths-util.h.jinja2
@@ -0,0 +1,93 @@
+#ifndef _MATHS_UTIL_
+#define _MATHS_UTIL_
+
+#define FLOATING_POINT
+
+typedef unsigned int Card;
+
+#define START 0
+
+#ifdef FLOATING_POINT
+
+#include {{ '<' }}math.h{{ '>' }}
+
+typedef float REAL;
+typedef float UREAL;
+typedef float FRACT;
+typedef float UFRACT;
+#define REAL_CONST(x) x
+#define UREAL_CONST(x) x
+#define FRACT_CONST(x) x
+#define UFRACT_CONST(x) x
+
+
+#define ONE 1.00000000000000000
+#define HALF 0.50000000000000000
+#define ZERO 0.00000000000000000
+
+#define POW( x, p ) pow( (x), (p) )
+
+#define SQRT( x ) sqrt( x )
+#define EXP( x ) exp( x )
+#define LN( x ) log( x )
+#define ABS( x ) fabs(x)
+
+
+#define MAX( x, y ) MAX_HR( (x), (y) )
+#define SIGN( x, y ) ( (macro_arg_1=(y)) >= ZERO ? ABS( x ) : -ABS( x ) )
+
+#define ACS_DBL_TINY 1.0e-300
+
+#else
+
+#include {{ '<' }}stdfix.h{{ '>' }}
+#define REAL_CONST(x) x##k
+#define UREAL_CONST(x) x##uk
+#define FRACT_CONST(x) x##lr
+#define UFRACT_CONST(x) x##ulr
+
+#define ONE REAL_CONST(1.0000)
+#define HALF REAL_CONST(0.5000)
+#define ZERO REAL_CONST(0.0000)
+#define ACS_DBL_TINY REAL_CONST(0.000001)
+
+#define ABS( x ) absfx( x )
+
+#define SIGN( x, y ) ( (macro_arg_1=(y)) >= ZERO ? ABS( x ) : -ABS( x ) )
+
+#endif
+
+#ifdef FLOATING_POINT
+
+#define REAL_COMPARE( x, op, y ) ( (x) op (y) )
+#define REAL_TWICE( x ) ((x) * 2.00000 )
+#define REAL_HALF( x ) ((x) * 0.50000 )
+
+#else
+
+#define REAL_COMPARE( x, op, y ) ( bitsk( (x) ) op bitsk( (y) ) )
+#define REAL_TWICE( x ) ((x) * 2.000000k )
+#define REAL_HALF( x ) ((x) * 0.500000k )
+
+#endif
+
+#define MIN_HR(a, b) ({\
+    __typeof__(a) _a = (a); \
+    __typeof__(b) _b = (b); \
+    _a <= _b? _a : _b;})
+
+#define MAX_HR(a, b) ({\
+    __typeof__(a) _a = (a); \
+    __typeof__(b) _b = (b); \
+    _a > _b? _a : _b;})
+
+#define SQR(a) ({\
+    __typeof__(a) _a = (a); \
+    _a == ZERO? ZERO: _a * _a;})
+
+#define CUBE(a) ({\
+    __typeof__(a) _a = (a); \
+    _a == ZERO? ZERO: _a * _a * _a;})
+
+#endif // _MATHS_UTIL_
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/neuron-typedefs.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/neuron-typedefs.h.jinja2
new file mode 100644
index 000000000..2daa6d2b3
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/neuron-typedefs.h.jinja2
@@ -0,0 +1,53 @@
+#ifndef __NEURON_TYPEDEFS_H__
+#define __NEURON_TYPEDEFS_H__
+
+#include {{ '<' }}common-typedefs.h{{ '>' }}
+#include "maths-util.h"
+
+#ifndef __SPIKE_T__
+
+typedef uint32_t payload_t;
+
+#ifdef SPIKES_WITH_PAYLOADS
+
+typedef uint64_t spike_t;
+
+static inline payload_t spike_payload (spike_t s) {
+ return ((payload_t)(s & UINT32_MAX));
+}
+
+#else
+
+typedef uint32_t spike_t;
+
+
+static inline payload_t spike_payload(spike_t s) {
+ use(s);
+ return (0);
+}
+#endif
+#endif
+
+typedef address_t synaptic_row_t;
+
+typedef REAL input_t;
+
+typedef struct input_struct_t{
+ input_t exc;
+ input_t inh;
+} input_struct_t;
+
+typedef struct timed_input_t {
+ uint32_t time;
+ input_struct_t inputs[];
+} timed_input_t;
+
+typedef float state_t;
+
+typedef struct timed_state_t {
+ uint32_t time;
+ state_t states[];
+} timed_state_t;
+
+
+#endif /* __NEURON_TYPEDEFS_H__ */
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/neuron.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/neuron.c.jinja2
new file mode 100644
index 000000000..6e0ea9579
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/neuron.c.jinja2
@@ -0,0 +1,182 @@
+#include "neuron.h"
+#include "global_params.h"
+#include "simulation.h"
+
+uint32_t n_neurons;
+extern input_t input_buffers[INPUT_BUFFER_SIZE];
+neuron_impl_t neuron_array[N_NEURONS];
+neuron_params_t neuron_params_array[N_NEURONS] __attribute__((section(".myNeuronParamsSection")));
+global_neuron_params_t global_neuron_params;
+REAL weight_scaling_factor;
+
+// spike records for current timestep
+uint32_t spike_records[SPIKE_RECORD_LENGTH] __attribute__((aligned(0x10)));
+uint32_t spike_records_count[SPIKE_RECORD_COUNTER_LENGTH];
+
+extern uint32_t* spike_records_all_timesteps;
+extern uint32_t* voltage_records_all_timesteps;
+extern uint32_t systicks;
+extern uint32_t pe_id;
+extern uint32_t qpe_x;
+extern uint32_t qpe_y;
+extern volatile routing_info* routing_info_ptr;
+extern volatile struct global_params* global_params_ptr;
+extern simulation_config sim_config;
+
+
+bool neuron_initialise() {
+ n_neurons = N_NEURONS;
+ clear_spike_record();
+
+ // neuron states
+ for (index_t neuron_index = 0; neuron_index < n_neurons; neuron_index++) {
+{# neuron_params_t* neuron_params = &neuron_params_array[neuron_index];#}
+ neuron_pointer_t neuron = &neuron_array[neuron_index];
+ neuron_state_t* state = &neuron->state;
+ {% set updated_state_dict = utils.generate_updated_state_dict(initial_values, neuron.get_parameter_value_dict()) %}
+ {%- for name, value in updated_state_dict | dictsort %}
+ state->{{ name }} = {{ value }};
+ {%- endfor %}
+ }
+
+ return true;
+}
+
+void print_neuron_params() {
+ log_info("Neuron params:\n");
+ uint32_t n_used_neurons = global_params_ptr->n_used_neurons;
+ for (index_t neuron_index = 0; neuron_index < n_used_neurons; neuron_index++) {
+ neuron_params_t* neuron_params = &neuron_params_array[neuron_index];
+
+ {%- for name, value in neuron.get_parameter_value_dict() | dictsort %}
+ log_info("%u: {{ name }}=%i\n",
+ neuron_index,(int32_t) (f2ui(neuron_params->{{ name }})));
+ {%- endfor %}
+{% if uses_analytic_solver == True %}
+ {%- for name, value in propagators | dictsort %}
+ log_info("%u: {{ name }}=%i\n",
+ neuron_index,(int32_t) (f2ui(neuron_params->{{ name }})));
+ {%- endfor %}
+{%- endif %}
+ }
+}
+
+void record_voltages() {
+ if (global_params_ptr->record_v == 1) { // record all voltages
+ *voltage_records_all_timesteps = systicks;
+ voltage_records_all_timesteps++;
+
+ uint32_t n_used_neurons = global_params_ptr->n_used_neurons;
+ // neuron states
+ for (index_t neuron_index = 0; neuron_index < n_used_neurons; neuron_index++) {
+ *voltage_records_all_timesteps = f2ui(neuron_array[neuron_index].state.V_m);
+ voltage_records_all_timesteps++;
+ }
+ }
+ else if (global_params_ptr->record_v == 2) { // record last voltage only
+ if (systicks == sim_config.n_simulation_ticks - 1) {
+ log_info("Recording voltages at last time step %i", systicks);
+ *voltage_records_all_timesteps = systicks;
+ voltage_records_all_timesteps++;
+
+ uint32_t n_used_neurons = global_params_ptr->n_used_neurons;
+ // neuron states
+ for (index_t neuron_index = 0; neuron_index < n_used_neurons; neuron_index++) {
+ *voltage_records_all_timesteps = f2ui(neuron_array[neuron_index].state.V_m);
+ voltage_records_all_timesteps++;
+ }
+ }
+ }
+}
+
+void neuron_do_timestep_update() {
+ clear_spike_record();
+ uint32_t n_used_neurons = global_params_ptr->n_used_neurons;
+ for (index_t neuron_index = 0; neuron_index < n_used_neurons; neuron_index++) {
+
+ neuron_pointer_t neuron = &neuron_array[neuron_index];
+ neuron_params_t* neuron_params = &neuron_params_array[neuron_index];
+ neuron_state_t* state = &neuron->state;
+{# neuron_input_t* input = &neuron->input;#}
+{# GENERATE THIS!#}
+ state->I_syn_exc = synapse_types_get_excitatory_input(input_buffers, neuron_index);
+ state->I_syn_inh = synapse_types_get_inhibitory_input(input_buffers, neuron_index);
+{# GENERATE THIS!#}
+ {%- if neuron.get_update_blocks() %}
+ {%- filter indent(8) %}
+ {%- for block in neuron.get_update_blocks() %}
+ {%- set ast = block.get_stmts_body() %}
+ {%- if ast.print_comment('*')|length > 1 %}
+ /*
+ {{ast.print_comment('*')}}
+ */
+ {%- endif %}
+ {%- include "directives_cpp/StmtsBody.jinja2" %}
+ {%- endfor %}
+ {%- endfilter %}
+ {%- endif %}
+ neuron_check_and_spike(state, neuron_params, neuron_index);
+ }
+ send_spike_record();
+ record_voltages();
+}
+
+void neuron_check_and_spike(neuron_state_t* state, neuron_params_t* neuron_params, index_t neuron_index) { // onCondition block processing
+ {%- if neuron.get_on_condition_blocks() %}
+ {%- filter indent(8) %}
+ {%- for block in neuron.get_on_condition_blocks() %}
+if ({{ printer.print(block.get_cond_expr()) }}){
+ {%- set ast = block.get_stmts_body() %}
+ {%- if ast.print_comment('*')|length > 1 %}
+ /*
+ {{ast.print_comment('*')}}
+ */
+ {%- endif %}
+ {%- include "directives_cpp/StmtsBody.jinja2" %}
+ {%- endfor %}
+ {%- endfilter %}
+ {%- endif %}
+{# record_spike(neuron_index);#}
+{# log_info("spike should be recorded\n");#}
+{# send_spikes_to_all_targets(routing_info_ptr->key_offset + neuron_index);#}
+ }
+}
+
+void record_spike(uint32_t neuron_id){
+ spike_records[neuron_id/32+2] |= 1 << ( neuron_id - neuron_id/32*32);
+ log_info("neuron id %u: value=%i\n", neuron_id/32+2, spike_records[neuron_id/32+2]);
+ spike_records_count[neuron_id/32]++;
+}
+
+
+void neuron_reset(){
+ clear_spike_record();
+}
+
+void send_spike_record(){
+ if (global_params_ptr->record_spikes) {
+ spike_records[0]=pe_id;
+ spike_records[1]=systicks;
+ for(uint32_t i = 0; i < SPIKE_RECORD_LENGTH; i++){
+ *spike_records_all_timesteps = spike_records[i];
+ spike_records_all_timesteps++;
+ }
+ }
+}
+
+void clear_spike_record(){
+ for (uint32_t i = 0; i < SPIKE_RECORD_LENGTH ; i++){
+ spike_records[i]=0;
+ }
+ for (uint32_t i = 0 ; i < SPIKE_RECORD_COUNTER_LENGTH ; i++){
+ spike_records_count[i]=0;
+ }
+}
+
+bool input_buffer_initialise() {
+ for (uint32_t i = 0; i < INPUT_BUFFER_SIZE; i++) {
+ input_buffers[i] = 0;
+ }
+ return true;
+}
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/neuron.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/neuron.h.jinja2
new file mode 100644
index 000000000..407babb14
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/neuron.h.jinja2
@@ -0,0 +1,36 @@
+#ifndef _NEURON_H_
+#define _NEURON_H_
+
+#include "s2app.h"
+#include "common/neuron-typedefs.h"
+#include "neuron_model_{{ neuronName }}_impl.h"
+#include "synapse_types/synapse_types_exponential_impl.h"
+#include "neuron_model.h"
+#include "param_defs.h"
+#include {{ '<' }}string.h{{ '>' }}
+#include "spinn_log.h"
+#include "spike_processing.h"
+
+extern input_t input_buffers[INPUT_BUFFER_SIZE];
+
+bool neuron_initialise();
+
+void neuron_reset();
+
+void neuron_do_timestep_update();
+
+void neuron_check_and_spike(neuron_state_t* state, neuron_params_t* params, index_t neuron_index);
+
+void clear_spike_record();
+
+void send_spike_record();
+
+void record_spike(uint32_t neuron_id);
+
+void record_voltages();
+
+bool input_buffer_initialise();
+
+void print_neuron_params();
+
+#endif // _NEURON_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/neuron_model.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/neuron_model.h.jinja2
new file mode 100644
index 000000000..5194a182e
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/neuron_model.h.jinja2
@@ -0,0 +1,12 @@
+
+#ifndef _NEURON_MODEL_H_
+#define _NEURON_MODEL_H_
+#include "common/neuron-typedefs.h"
+#include "s2app.h"
+
+typedef struct neuron_impl_t* neuron_pointer_t;
+typedef struct neuron_params_t neuron_params_t;
+
+void neuron_model_print_parameters(restrict neuron_pointer_t neuron);
+
+#endif // _NEURON_MODEL_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/neuron_model_@NEURON_NAME@_impl.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/neuron_model_@NEURON_NAME@_impl.c.jinja2
new file mode 100644
index 000000000..37d93ac4d
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/neuron_model_@NEURON_NAME@_impl.c.jinja2
@@ -0,0 +1,7 @@
+#include "neuron_model_{{ neuronName }}_impl.h"
+
+uint32_t rand_count=0;
+
+extern volatile uint32_t systicks;
+extern global_neuron_params_t global_neuron_params __attribute__((aligned(0x10)));
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/neuron_model_@NEURON_NAME@_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/neuron_model_@NEURON_NAME@_impl.h.jinja2
new file mode 100644
index 000000000..65f04740c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/neuron_model_@NEURON_NAME@_impl.h.jinja2
@@ -0,0 +1,40 @@
+#ifndef _NEURON_MODEL_{{ neuronName|upper }}_IMPL_H_
+#define _NEURON_MODEL_{{ neuronName|upper }}_IMPL_H_
+
+#include "neuron_model.h"
+#include "common/neuron-typedefs.h"
+
+typedef struct global_neuron_params_t {
+ REAL calc_step_raw; // ode calc timestep
+ REAL weight_scaling_factor; // scaling factor for syn weights
+} global_neuron_params_t;
+
+// neuron state variables
+typedef struct neuron_state_t {
+ {%- for state in neuron.get_state_symbols() %}
+ {{ declarations.get_domain_from_type(state.get_type_symbol()) }} {{ state.name }};
+ {%- endfor %}
+} neuron_state_t;
+
+// neuron parameters from .nestml file and internal parameters
+typedef struct neuron_params_t {
+
+ {%- for parameter in neuron.get_parameter_symbols() |sort(attribute='name') %}
+ {{ declarations.get_domain_from_type(parameter.get_type_symbol()) }} {{ parameter.name }};
+ {%- endfor %}
+{% if uses_numeric_solver == False %}
+ {%- for propagator in neuron.get_internal_symbols() %}
+ {% if propagator.name != "__h" %}
+ {{ declarations.get_domain_from_type(propagator.get_type_symbol()) }} {{ propagator.name }};
+ {% endif %}
+ {%- endfor %}
+{% endif %}
+} neuron_params_t;
+
+typedef struct neuron_impl_t {
+ neuron_state_t state;
+ neuron_params_t parameter;
+} neuron_impl_t;
+
+#endif // _NEURON_MODEL_{{ neuronName|upper }}_IMPL_H_
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/param_defs.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/param_defs.h.jinja2
new file mode 100644
index 000000000..fd4c994a0
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/param_defs.h.jinja2
@@ -0,0 +1,20 @@
+#ifndef PARAM_DEFS
+#define PARAM_DEFS
+
+#include "s2app.h"
+
+#define SYNAPSE_WEIGHTS_REAL // changed to REAL such that shifted weight value can be multiplied with scaling factor
+
+#define PACKET_BUFFER_LENGTH 512
+#define PACKET_IN_WORDS 1
+#define N_NEURONS 48 // TODO: change this to whatever the default should be for generated models
+#define INPUT_BUFFER_SIZE (1 << (SYNAPSE_TYPE_BITS + SYNAPSE_INDEX_BITS))
+#define RING_BUFFER_SIZE (1 << (SYNAPSE_DELAY_BITS + SYNAPSE_TYPE_BITS\
+ + SYNAPSE_INDEX_BITS))
+#define SPIKE_RECORD_LENGTH (((N_NEURONS + 31) / 32) + 2)
+#define SPIKE_RECORD_COUNTER_LENGTH ((N_NEURONS + 31) / 32)
+
+static volatile uint32_t* const timer = (uint32_t *) TIMER1_BASE;
+static volatile uint32_t* const comms = (uint32_t *) COMMS_BASE;
+
+#endif
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/population_table.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/population_table.h.jinja2
new file mode 100644
index 000000000..25ec6433c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/population_table.h.jinja2
@@ -0,0 +1,23 @@
+#ifndef _POPULATION_TABLE_H_
+#define _POPULATION_TABLE_H_
+
+#include "common/neuron-typedefs.h"
+#include "param_defs.h"
+typedef struct master_population_table_entry {
+ uint32_t key;
+ uint32_t mask;
+ uint32_t address_and_row_length;
+} master_population_table_entry;
+
+typedef struct population_table_info{
+ uint32_t address;
+ uint32_t length;
+} population_table_info;
+
+bool population_table_initialise();
+
+bool population_table_get_address(spike_t spike, address_t* row_address,
+ size_t* n_bytes_to_transfer);
+
+void print_population_table();
+#endif // _POPULATION_TABLE_H_
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/population_table_binary_search_impl.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/population_table_binary_search_impl.c.jinja2
new file mode 100644
index 000000000..bc6537a14
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/population_table_binary_search_impl.c.jinja2
@@ -0,0 +1,54 @@
+#include "population_table.h"
+#include "synapse_row.h"
+#include "spinn_log.h"
+
+extern volatile population_table_info* pop_table_info;
+extern volatile master_population_table_entry* master_population_table;
+
+static uint32_t master_population_table_length;
+
+static inline uint32_t _get_address(master_population_table_entry entry) {
+ return entry.address_and_row_length {{ '>>' }} 8;
+}
+
+static inline uint32_t _get_row_length(master_population_table_entry entry) {
+ return entry.address_and_row_length & 0xFF;
+}
+
+bool population_table_initialise( ) {
+ master_population_table_length = pop_table_info->length;
+// log_info("pop table address 0: %p\n", pop_table_info->address);
+// log_info("pop table address 1: %p\n", master_population_table);
+ master_population_table = (volatile master_population_table_entry*) pop_table_info->address;
+// log_info("pop table address 2: %p\n", master_population_table);
+ return true;
+}
+
+bool population_table_get_address(spike_t spike, address_t* row_address,
+ size_t* n_bytes_to_transfer) {
+ uint32_t imin = 0;
+ uint32_t imax = master_population_table_length;
+ while (imin {{ '<' }} imax) {
+ int imid = (imax + imin) {{ '>>' }} 1;
+ master_population_table_entry entry = master_population_table[imid];
+ if ((spike & entry.mask) == entry.key) {
+ *row_address =(address_t) _get_address(entry);
+ *n_bytes_to_transfer =(size_t) _get_row_length(entry);
+ return true;
+ } else if (entry.key {{ '<' }} spike) {
+ imin = imid + 1;
+ } else {
+ imax = imid;
+ }
+ }
+ return false;
+}
+
+
+void print_population_table() {
+ log_info("== Population table (%d entries) at address 0x%x ==\n", master_population_table_length, (uint32_t) master_population_table);
+ for (uint32_t i=0; i{{ '<' }}master_population_table_length; ++i) {
+ master_population_table_entry entry = master_population_table[i];
+ log_info("Entry %d: key=%d, mask=0x%x, address=0x%x, row_length=%d\n", i, entry.key, entry.mask, _get_address(entry), _get_row_length(entry));
+ }
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/qpe.ld.jinja2 b/pynestml/codegeneration/resources_spinnaker2/qpe.ld.jinja2
new file mode 100644
index 000000000..9a412073e
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/qpe.ld.jinja2
@@ -0,0 +1,149 @@
+
+/* Entry Point */
+ENTRY (Reset_Handler)
+
+/* Memory map of one processing element: 32K instruction TCM at 0x0 and
+   96K data TCM at 0x8000. */
+MEMORY
+{
+ ITCM (rx): ORIGIN = 0x00000000, LENGTH = 32K
+ DTCM (rwx): ORIGIN = 0x00008000, LENGTH = 96K
+}
+
+/* Initial stack pointer: top of DTCM. */
+_estack = ORIGIN (DTCM) + LENGTH (DTCM);
+
+/* Generate a link error if heap and stack don't fit into RAM */
+_Stack_Heap_Limit_Size = 4K;
+
+/* Specify the memory areas */
+SECTIONS
+{
+ . = ORIGIN (ITCM);
+
+ /* The startup code goes first */
+ .isr_vector :
+ {
+ . = ALIGN (4);
+ KEEP (*( .isr_vector )) /* Startup code */
+ . = ALIGN (4);
+ } >ITCM
+ .text :
+ {
+ . = ALIGN (4);
+ *( .text ) /* .text sections (code) */
+ *( .text* ) /* .text* sections (code) */
+ *( .rodata ) /* .rodata sections (constants, strings, etc. ) */
+ *( .rodata* ) /* .rodata* sections (constants, strings, etc. ) */
+ *( .glue_7 ) /* glue arm to thumb code */
+ *( .glue_7t ) /* glue thumb to arm code */
+
+ . = ALIGN (4);
+ } >ITCM
+ PROVIDE ( __etext = . );
+
+ /* ARM exception-handling tables */
+ .ARM.extab :
+ {
+ *( .ARM.extab* .gnu.linkonce.armextab.* )
+ } >ITCM
+
+ .ARM :
+ {
+ PROVIDE_HIDDEN ( __exidx_start = . );
+ *( .ARM.exidx* .gnu.linkonce.armexidx.* )
+ PROVIDE_HIDDEN ( __exidx_end = . );
+
+ } >ITCM
+
+
+ /* C/C++ static initialiser / finaliser arrays */
+ .preinit_array :
+ {
+ PROVIDE_HIDDEN ( __preinit_array_start = . );
+ KEEP ( *( .preinit_array* ) )
+ PROVIDE_HIDDEN ( __preinit_array_end = . );
+ } >ITCM
+ .init_array :
+ {
+ PROVIDE_HIDDEN ( __init_array_start = . );
+ KEEP ( *( SORT (.init_array.* ) ) )
+ KEEP ( *( .init_array* ) )
+ PROVIDE_HIDDEN ( __init_array_end = . );
+ } >ITCM
+ .fini_array :
+ {
+ PROVIDE_HIDDEN ( __fini_array_start = . );
+ KEEP ( *( .fini_array* ) )
+ KEEP ( *( SORT (.fini_array.* ) ) )
+ PROVIDE_HIDDEN ( __fini_array_end = . );
+ } >ITCM
+
+ .ctors :
+ {
+
+ KEEP ( *crtbegin.o (.ctors) )
+ KEEP ( *crtbegin?.o (.ctors) )
+ KEEP ( *( EXCLUDE_FILE ( *crtend.o *crtend?.o ) .ctors) )
+ KEEP ( *( SORT ( .ctors.* ) ) )
+ KEEP ( *( .ctors ) )
+ } >ITCM
+ .dtors :
+ {
+ KEEP ( *crtbegin.o( .dtors ) )
+ KEEP ( *crtbegin?.o( .dtors ) )
+ KEEP ( *( EXCLUDE_FILE ( *crtend.o *crtend?.o ) .dtors ) )
+ KEEP ( *( SORT (.dtors.* ) ) )
+ KEEP ( *( .dtors ) )
+ } >ITCM
+
+
+ . = ALIGN (4);
+ _sidata = .;
+ PROVIDE ( _sidata = . );
+
+ /* used by the startup to initialize data */
+ .exit_value :
+ {
+ *( .exit_value )
+ } > DTCM
+
+
+ /* Initialised data: load address in ITCM (_sidata), run address in DTCM */
+ .data : AT (_sidata)
+ {
+ . = ALIGN (4);
+ _sdata = .;
+ PROVIDE ( _sdata = . );
+ *( .data )
+ *( .data* ) /* data sections */
+ . = ALIGN (4);
+ _edata = .;
+ PROVIDE ( _edata = . );
+ } >DTCM
+
+
+
+ .bss :
+ {
+ . = ALIGN (4);
+ __bss_start__ = .;
+ PROVIDE ( __bss_start__ = . );
+ *( .bss )
+ *( .bss* )
+ *( COMMON )
+ . = ALIGN (4);
+ __bss_end__ = .;
+ PROVIDE ( __bss_end__ = . );
+ } >DTCM
+
+ /* Heap/stack guard region pinned at a fixed DTCM offset; _heap_top
+    leaves 2k of headroom below the initial stack pointer. */
+ ._heap_stack 0x1F000:
+ {
+ . = ALIGN (4);
+ PROVIDE ( _heap_base = . );
+ . = . + _Stack_Heap_Limit_Size;
+ PROVIDE ( _heap_top = _estack - 2k);
+
+ } >DTCM
+
+ .ARM.attributes 0 : { *( .ARM.attributes ) }
+
+ /* custom sections */
+ /* NOTE(review): these absolute addresses must match the host-side
+    memory-map constants -- confirm when changing either side */
+ .mySegment1 0x10200(NOLOAD) : {KEEP(*(.myDataSpecSection))}
+ .mySegment3 0x1b000(NOLOAD) : {KEEP(*(.myLogInfoSection))}
+ .mySegment4 0xe400(NOLOAD) : {KEEP(*(.myNeuronParamsSection))}
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/qpe_isr.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/qpe_isr.c.jinja2
new file mode 100644
index 000000000..8d40c280c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/qpe_isr.c.jinja2
@@ -0,0 +1,24 @@
+#include {{ '<' }}s2app.h{{ '>' }}
+#include "spinn2.h"
+#include "param_defs.h"
+
+extern void timer_callback();
+extern void timer_start();
+extern void reset_all();
+
+// Timer 1 interrupt: acknowledge the interrupt, then run the per-tick
+// simulation callback.
+void Timer_Int1_Handler (void) {
+ timer[TIMER1_INT_CLR] = 0xFFFFFFFF;
+ timer_callback();
+}
+
+// Feed-through interrupt 0: clear the pending IRQ and start the
+// simulation timer (host-triggered run start).
+void FT_INT_0_Handler(void)
+{
+ NVIC_ClearPendingIRQ (FT_INT_0_IRQn);
+ timer_start();
+}
+
+// Feed-through interrupt 1: clear the pending IRQ and reset the whole
+// simulation state (host-triggered reset).
+void FT_INT_1_Handler(void)
+{
+ NVIC_ClearPendingIRQ (FT_INT_1_IRQn);
+ reset_all();
+}
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/regions.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/regions.h.jinja2
new file mode 100644
index 000000000..8188188fa
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/regions.h.jinja2
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017-2019 The University of Manchester
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+//! \file
+//! \brief Standard layout of DSG regions in neuron code.
+//!
+//! Note that not all models use all of these regions, but they all use the same
+//! region identifier mapping.
+#pragma once
+
+//! DSG region identifiers
+//! The enumerators take the default consecutive values 0..9; host-side
+//! tooling addresses regions by these indices, so do not reorder or
+//! insert entries.
+typedef enum neuron_regions_e {
+ ROUTING_TABLE_REGION, //!< routing table, 0
+ SIMULATION_REGION, //!< simulation (tick & duration), 1
+ GLOBAL_PARAMS_REGION, //!< global parameters, 2
+ POPULATION_TABLE_REGION, //!< master population table; 3
+ SYNAPSE_ROWS_REGION, //!< synaptic matrix; 4
+ NEURON_PARAMS_REGION, //!< neuron parameters; 5
+ NEURON_RECORDING_REGION, //!< spike recording; 6
+ VOLTAGE_RECORDING_REGION, //!< voltage recording; 7
+ TIME_DONE_RECORDING_REGION, //!< time done recording; 8
+ LOG_INFO_REGION //!< log_info start address; 9
+} regions_e;
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/simulation.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/simulation.h.jinja2
new file mode 100644
index 000000000..cbf6fd2f3
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/simulation.h.jinja2
@@ -0,0 +1,6 @@
+#pragma once
+
+// Run-control parameters loaded from the SIMULATION_REGION data spec.
+typedef struct simulation_config {
+ uint32_t timer_period; ///< timer period: nr of clock cycles of timer reference clock
+ uint32_t n_simulation_ticks; ///< number of simulation ticks
+} simulation_config;
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/spike_processing.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/spike_processing.c.jinja2
new file mode 100644
index 000000000..f6c65e478
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/spike_processing.c.jinja2
@@ -0,0 +1,47 @@
+#include "spike_processing.h"
+#include "param_defs.h"
+#include {{ '<' }}comms.h{{ '>' }}
+#include {{ '<' }}spinn2.h{{ '>' }}
+#include "spinn_log.h"
+
+extern volatile routing_info* routing_info_ptr;
+
+/// Reconfigure the comms with the destination for sending the next SpiNNaker packet
+///
+/// @param dest: 4 bit vector representing the targeted PEs on the target QPE.
+/// The MSB (index 3) is for PE0, the LSB (index 0) is for PE3 following
+/// the format in NoC Packets.
+/// @param qpex: x coordinate of target QPE
+/// @param qpey: y coordinate of target QPE
+void comms_reconfigure(uint8_t dest, uint8_t qpex, uint8_t qpey){
+ // log_info("comms_reconfigure(%d, %d, %d)\n", dest, qpex, qpey);
+ // Assemble the transmit control register: destination coordinates,
+ // PE-select vector, SpiNNaker packet type, and no forward routing.
+ uint32_t tcr =
+ (0x3 <<29) | // NOTE(review): undocumented control bits [30:29] -- confirm meaning against the comms register spec
+ ((qpex << COMMS_TCR_DEST_X_SHIFT) & COMMS_TCR_DEST_X_MASK)|
+ ((qpey<< COMMS_TCR_DEST_Y_SHIFT) & COMMS_TCR_DEST_Y_MASK);
+ tcr |= (dest << COMMS_TCR_DEST_P_SHIFT) & COMMS_TCR_DEST_P_MASK;
+
+ tcr |= COMMS_TCR_TYPE_SPINN;
+ tcr |= COMMS_TCR_SPINN_FROUTE_NONE;
+ comms[COMMS_TCR_A] = tcr;
+ comms[COMMS_TCTL] = 0xE; // NOTE(review): magic TCTL value -- confirm against comms spec
+}
+
+// Transmit one SpiNNaker packet carrying `key` to the currently
+// configured destination.  Busy-waits until the TX control register
+// reports ready (bit 31 set) before writing the key register.
+void send_spikes(uint32_t key) {
+ // log_info("send_spikes(%d)\n", key);
+ while ((comms[COMMS_TCTL] & 0x80000000) == 0) {
+ }
+ comms[COMMS_TKR_A] = key;
+}
+
+/// send spikes to all targets
+/// For each routing target listed in routing_info_ptr, reconfigure the
+/// comms destination and transmit one packet with the given key.
+void send_spikes_to_all_targets(uint32_t key) {
+ // log_info("send_spikes_to_all_targets(%u)\n", key);
+ uint32_t n_targets = routing_info_ptr->n_routing_targets;
+ for (uint32_t i=0; i{{ '<' }}n_targets; ++i) {
+ routing_target tgt = routing_info_ptr->routing_targets[i];
+ comms_reconfigure(tgt.pes, tgt.qpe_x, tgt.qpe_y);
+ send_spikes(key);
+ }
+}
+
diff --git a/pynestml/codegeneration/resources_spinnaker2/spike_processing.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/spike_processing.h.jinja2
new file mode 100644
index 000000000..93677c367
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/spike_processing.h.jinja2
@@ -0,0 +1,22 @@
+#ifndef _SPIKE_PROCESSING_H_
+#define _SPIKE_PROCESSING_H_
+#include {{ '<' }}stdint.h{{ '>' }}
+
+// One multicast routing destination: a QPE coordinate plus a 4-bit
+// vector selecting the target PEs on that QPE (see comms_reconfigure()).
+typedef struct routing_target {
+ uint8_t qpe_x; // x coordinate of target QPE
+ uint8_t qpe_y; // y coordinate of target QPE
+ uint8_t pes; // 4-bit PE-select vector
+ uint8_t _pad0; // padding to a 4-byte boundary
+
+} routing_target;
+
+// Routing metadata loaded from the data spec: the key offset for this
+// core plus the list of destinations each spike must be sent to.
+typedef struct routing_info {
+ uint32_t key_offset;
+ uint32_t n_routing_targets;
+ routing_target* routing_targets;
+} routing_info;
+
+
+// Send one spike packet carrying `key` to every routing target.
+void send_spikes_to_all_targets(uint32_t key);
+
+#endif // _SPIKE_PROCESSING_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapse_row.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapse_row.h.jinja2
new file mode 100644
index 000000000..da3c67988
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapse_row.h.jinja2
@@ -0,0 +1,104 @@
+#ifndef _SYNAPSE_ROW_H_
+#define _SYNAPSE_ROW_H_
+
+#include "common/neuron-typedefs.h"
+#include "param_defs.h"
+
+/* Bit widths of the fields packed into one synaptic word, overridable
+ * per model.  Layout (LSB first): index | type | delay | weight. */
+#ifndef SYNAPSE_WEIGHT_BITS
+#define SYNAPSE_WEIGHT_BITS 4
+#endif
+
+#ifndef SYNAPSE_DELAY_BITS
+#define SYNAPSE_DELAY_BITS 3
+
+#endif
+
+#ifndef SYNAPSE_TYPE_BITS
+#define SYNAPSE_TYPE_BITS 1
+#endif
+
+#ifndef SYNAPSE_TYPE_COUNT
+#define SYNAPSE_TYPE_COUNT 2
+#endif
+
+#ifndef SYNAPSE_INDEX_BITS
+#define SYNAPSE_INDEX_BITS 8
+#endif
+
+#define SYNAPSE_TYPE_INDEX_BITS (SYNAPSE_TYPE_BITS + SYNAPSE_INDEX_BITS)
+
+#define SYNAPSE_DELAY_MASK ((1 << SYNAPSE_DELAY_BITS) - 1)
+#define SYNAPSE_TYPE_MASK ((1 << SYNAPSE_TYPE_BITS) - 1)
+#define SYNAPSE_INDEX_MASK ((1 << SYNAPSE_INDEX_BITS) - 1)
+#define SYNAPSE_TYPE_INDEX_MASK ((1 << SYNAPSE_TYPE_INDEX_BITS) - 1)
+
+/* Weight representation, selected at compile time. */
+#ifdef SYNAPSE_WEIGHTS_SIGNED
+typedef __int_t(SYNAPSE_WEIGHT_BITS) weight_t;
+#else
+#ifdef SYNAPSE_WEIGHTS_REAL
+typedef REAL weight_t;
+
+#else
+#ifdef SYNAPSE_WEIGHTS_UINT
+typedef uint32_t weight_t;
+
+#else
+typedef __uint_t(SYNAPSE_WEIGHT_BITS) weight_t;
+#endif
+#endif
+
+#endif
+
+typedef uint16_t control_t;
+
+#define N_SYNAPSE_ROW_HEADER_WORDS 3
+
+/* A synaptic row starts with the plastic-region size word, followed by
+ * the plastic region, then the fixed region. */
+static inline size_t synapse_row_plastic_size(address_t row) {
+ return (size_t) row[0];
+}
+
+static inline address_t synapse_row_plastic_region(address_t row) {
+ return ((address_t) (&(row[1])));
+}
+
+static inline address_t synapse_row_fixed_region(address_t row) {
+ return ((address_t) (&(row[synapse_row_plastic_size(row) + 1])));
+}
+
+static inline size_t synapse_row_num_fixed_synapses(address_t fixed) {
+ return ((size_t) (fixed[0]));
+}
+
+static inline size_t synapse_row_num_plastic_controls(address_t fixed) {
+ return ((size_t) (fixed[1]));
+}
+
+static inline control_t* synapse_row_plastic_controls(address_t fixed) {
+ return ((control_t*) (&(fixed[2 + synapse_row_num_fixed_synapses(fixed)])));
+}
+
+static inline uint32_t *synapse_row_fixed_weight_controls(address_t fixed) {
+ return (&(fixed[2]));
+}
+
+/* Field extractors for one packed synaptic word. */
+static inline index_t synapse_row_sparse_index(uint32_t x) {
+ return (x & SYNAPSE_INDEX_MASK);
+}
+
+static inline index_t synapse_row_sparse_type(uint32_t x) {
+ return ((x >> SYNAPSE_INDEX_BITS) & SYNAPSE_TYPE_MASK);
+}
+
+static inline index_t synapse_row_sparse_type_index(uint32_t x) {
+ return (x & SYNAPSE_TYPE_INDEX_MASK);
+}
+
+static inline index_t synapse_row_sparse_delay(uint32_t x) {
+ return ((x >> SYNAPSE_TYPE_INDEX_BITS) & SYNAPSE_DELAY_MASK);
+}
+
+static inline weight_t synapse_row_sparse_weight(uint32_t x) {
+ return ((weight_t)((uint32_t)(x >> (SYNAPSE_TYPE_INDEX_BITS+SYNAPSE_DELAY_BITS))));
+}
+
+#endif // _SYNAPSE_ROW_H_
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapse_types.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapse_types.h.jinja2
new file mode 100644
index 000000000..1a46f61b6
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapse_types.h.jinja2
@@ -0,0 +1,21 @@
+#ifndef _SYNAPSE_TYPES_H_
+#define _SYNAPSE_TYPES_H_
+
+#include "../common/neuron-typedefs.h"
+#include "../synapse_row.h"
+
+// Map (synapse type, neuron index) to a flat input-buffer slot: the
+// type selects the bank, the neuron index the slot within it.
+static inline index_t synapse_types_get_input_buffer_index(
+ index_t synapse_type_index, index_t neuron_index) {
+ return ((synapse_type_index << SYNAPSE_INDEX_BITS) | neuron_index);
+}
+
+// Interface to be provided by a concrete synapse-type implementation.
+static void synapse_types_add_neuron_input(
+ input_t *input_buffers, index_t synapse_type_index, index_t neuron_index,
+ input_t input);
+
+static input_t synapse_types_get_excitatory_input(
+ input_t *input_buffers, index_t neuron_index);
+
+static input_t synapse_types_get_inhibitory_input(
+ input_t *input_buffers, index_t neuron_index);
+#endif // _SYNAPSE_TYPES_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapse_types/synapse_types.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapse_types/synapse_types.h.jinja2
new file mode 100644
index 000000000..7f65bac21
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapse_types/synapse_types.h.jinja2
@@ -0,0 +1,24 @@
+#ifndef _SYNAPSE_TYPES_H_
+#define _SYNAPSE_TYPES_H_
+
+#include "../common/neuron-typedefs.h"
+#include "../synapse_row.h"
+
+// Variant of the synapse-type interface whose callbacks additionally
+// receive the per-neuron synapse_param_t array.
+
+// Map (synapse type, neuron index) to a flat input-buffer slot.
+static inline index_t synapse_types_get_input_buffer_index(
+ index_t synapse_type_index, index_t neuron_index) {
+ return ((synapse_type_index << SYNAPSE_INDEX_BITS) | neuron_index);
+}
+
+// Apply one timestep of shaping (e.g. exponential decay) to a neuron's
+// input buffers; provided by the concrete implementation.
+static void synapse_types_shape_input(
+ input_t *input_buffers, index_t neuron_index, synapse_param_t* parameters);
+
+// Add a weighted input to the buffer of the given type and neuron.
+static void synapse_types_add_neuron_input(
+ input_t *input_buffers, index_t synapse_type_index, index_t neuron_index,
+ synapse_param_t* parameters, input_t input);
+
+static input_t synapse_types_get_excitatory_input(
+ input_t *input_buffers, index_t neuron_index);
+
+static input_t synapse_types_get_inhibitory_input(
+ input_t *input_buffers, index_t neuron_index);
+#endif // _SYNAPSE_TYPES_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapse_types/synapse_types_exponential_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapse_types/synapse_types_exponential_impl.h.jinja2
new file mode 100644
index 000000000..091b919fd
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapse_types/synapse_types_exponential_impl.h.jinja2
@@ -0,0 +1,70 @@
+
+// Exponential current-based synapse-type implementation with one
+// excitatory and one inhibitory channel per neuron.
+#ifndef _SYNAPSE_TYPES_EXPONENTIAL_IMPL_H_
+#define _SYNAPSE_TYPES_EXPONENTIAL_IMPL_H_
+#define SYNAPSE_TYPE_BITS 1
+#define SYNAPSE_TYPE_COUNT 2
+
+#include "../decay.h"
+// Per-neuron decay factors for the two channels.
+typedef struct synapse_param_t {
+ decay_t exc_decay;
+ decay_t inh_decay;
+} synapse_param_t;
+
+#include "synapse_types.h"
+
+// Channel indices; used as the synapse-type part of buffer offsets.
+typedef enum input_buffer_regions {
+ EXCITATORY, INHIBITORY,
+} input_buffer_regions;
+
+// Input-buffer slot of the excitatory channel of a neuron.
+static inline index_t _ex_offset(index_t neuron_index) {
+ return synapse_types_get_input_buffer_index(EXCITATORY, neuron_index);
+}
+
+// Input-buffer slot of the inhibitory channel of a neuron.
+static inline index_t _in_offset(index_t neuron_index) {
+ return synapse_types_get_input_buffer_index(INHIBITORY, neuron_index);
+}
+
+static inline decay_t _ex_decay(
+ synapse_param_t *parameters, index_t neuron_index) {
+ return (parameters[neuron_index].exc_decay);
+}
+
+static inline decay_t _in_decay(
+ synapse_param_t *parameters, index_t neuron_index) {
+ return (parameters[neuron_index].inh_decay);
+}
+
+// Apply one timestep of exponential decay to both channels.
+// NOTE(review): the decay factor is read from parameters[0], not
+// parameters[neuron_index] -- confirm the parameters are intended to be
+// shared across all neurons on this core.
+static inline void synapse_types_shape_input(
+ input_t *input_buffers, index_t neuron_index,
+ synapse_param_t* parameters) {
+ input_buffers[_ex_offset(neuron_index)] =
+ input_buffers[_ex_offset(neuron_index)]*
+ _ex_decay(parameters, 0);
+ input_buffers[_in_offset(neuron_index)] =
+ input_buffers[_in_offset(neuron_index)]*
+ _in_decay(parameters, 0);
+}
+
+// Accumulate `input` into the buffer of the given channel; unknown
+// synapse types are silently ignored.
+static inline void synapse_types_add_neuron_input(
+ input_t *input_buffers, index_t synapse_type_index,
+ index_t neuron_index, synapse_param_t* parameters, input_t input) {
+ use(parameters);
+ if (synapse_type_index == EXCITATORY) {
+ uint32_t index = _ex_offset(neuron_index);
+ input_buffers[index] = input_buffers[index] + input;
+ } else if (synapse_type_index == INHIBITORY) {
+ uint32_t index = _in_offset(neuron_index);
+ input_buffers[index] = input_buffers[index] + input;
+ }
+}
+
+static inline input_t synapse_types_get_excitatory_input(
+ input_t *input_buffers, index_t neuron_index) {
+ return input_buffers[_ex_offset(neuron_index)];
+}
+
+static inline input_t synapse_types_get_inhibitory_input(
+ input_t *input_buffers, index_t neuron_index) {
+ return input_buffers[_in_offset(neuron_index)];
+}
+#endif // _SYNAPSE_TYPES_EXPONENTIAL_IMPL_H_
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapse_types_exponential_impl.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapse_types_exponential_impl.h.jinja2
new file mode 100644
index 000000000..61523780c
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapse_types_exponential_impl.h.jinja2
@@ -0,0 +1,61 @@
+
+// Exponential current-based synapse-type implementation (flat layout).
+// NOTE(review): largely duplicates
+// synapse_types/synapse_types_exponential_impl.h, but decay_t here is
+// the UFRACT fixed-point type and synapse_types_add_neuron_input takes
+// no parameter array -- consider consolidating the two.
+#ifndef _SYNAPSE_TYPES_EXPONENTIAL_IMPL_H_
+#define _SYNAPSE_TYPES_EXPONENTIAL_IMPL_H_
+#define SYNAPSE_TYPE_BITS 1
+#define SYNAPSE_TYPE_COUNT 2
+typedef UFRACT decay_t;
+
+
+// Per-neuron decay factors for the two channels.
+typedef struct synapse_param_t {
+ decay_t exc_decay;
+ decay_t inh_decay;
+} synapse_param_t;
+
+#include "synapse_types.h"
+
+// Channel indices; used as the synapse-type part of buffer offsets.
+typedef enum input_buffer_regions {
+ EXCITATORY, INHIBITORY,
+} input_buffer_regions;
+
+// Input-buffer slot of the excitatory channel of a neuron.
+static inline index_t _ex_offset(index_t neuron_index) {
+ return synapse_types_get_input_buffer_index(EXCITATORY, neuron_index);
+}
+
+// Input-buffer slot of the inhibitory channel of a neuron.
+static inline index_t _in_offset(index_t neuron_index) {
+ return synapse_types_get_input_buffer_index(INHIBITORY, neuron_index);
+}
+
+static inline decay_t _ex_decay(
+ synapse_param_t *parameters, index_t neuron_index) {
+ return (parameters[neuron_index].exc_decay); //->__P__I_syn_exc__I_syn_exc
+}
+
+static inline decay_t _in_decay(
+ synapse_param_t *parameters, index_t neuron_index) {
+ return (parameters[neuron_index].inh_decay); //->__P__I_syn_inh__I_syn_inh
+}
+
+// Accumulate `input` into the buffer of the given channel; unknown
+// synapse types are silently ignored.
+static inline void synapse_types_add_neuron_input(
+ input_t *input_buffers, index_t synapse_type_index,
+ index_t neuron_index, input_t input) {
+ if (synapse_type_index == EXCITATORY) {
+ uint32_t index = _ex_offset(neuron_index);
+ // log_info("input:%u\n", input);
+
+ input_buffers[index] = input_buffers[index] + input;
+ } else if (synapse_type_index == INHIBITORY) {
+ uint32_t index = _in_offset(neuron_index);
+ input_buffers[index] = input_buffers[index] + input;
+ }
+}
+
+static inline input_t synapse_types_get_excitatory_input(
+ input_t *input_buffers, index_t neuron_index) {
+ return input_buffers[_ex_offset(neuron_index)];
+}
+
+static inline input_t synapse_types_get_inhibitory_input(
+ input_t *input_buffers, index_t neuron_index) {
+ return input_buffers[_in_offset(neuron_index)];
+}
+#endif // _SYNAPSE_TYPES_EXPONENTIAL_IMPL_H_
\ No newline at end of file
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapses.c.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapses.c.jinja2
new file mode 100644
index 000000000..b6807a8f0
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapses.c.jinja2
@@ -0,0 +1,99 @@
+#include "synapses.h"
+#include "global_params.h"
+#include "neuron_model_{{ neuronName }}_impl.h"
+#include "spinn_log.h"
+
+// Ring buffer accumulating weighted incoming spikes, one slot per
+// (delay, synapse type, neuron) combination.
+static weight_t ring_buffers[RING_BUFFER_SIZE];
+// Per-(synapse type, neuron) input currents consumed by the neuron update.
+input_t input_buffers[INPUT_BUFFER_SIZE];
+
+extern neuron_params_t neuron_params_array[N_NEURONS];
+extern global_neuron_params_t global_neuron_params __attribute__((aligned(0x10)));
+
+extern volatile struct global_params* global_params_ptr;
+
+// Process the fixed (non-plastic) synapses of one synaptic row: decode
+// each 16-bit synaptic word into delay, combined type+neuron index and
+// weight, scale the weight by the global weight-scaling factor, and
+// accumulate it into the ring-buffer slot for timestep (time + delay).
+static inline void _process_fixed_synapses(address_t fixed_region_address,
+ uint32_t time) {
+ register uint16_t *synaptic_words = (uint16_t*) synapse_row_fixed_weight_controls(
+ fixed_region_address);
+ register uint32_t fixed_synapse = synapse_row_num_fixed_synapses(
+ fixed_region_address);
+
+ for (; fixed_synapse > 0; fixed_synapse--) {
+
+ uint16_t synaptic_word = *synaptic_words++;
+
+ uint32_t delay = synapse_row_sparse_delay((uint32_t)synaptic_word);
+ uint32_t combined_synapse_neuron_index = synapse_row_sparse_type_index(
+ (uint32_t)synaptic_word);
+ weight_t weight =(weight_t)((uint32_t) synapse_row_sparse_weight((uint32_t)synaptic_word));
+
+ //log_info("\t\tweight: %i, delay %i, index %i\n", weight, delay, combined_synapse_neuron_index);
+
+ uint32_t ring_buffer_index = synapses_get_ring_buffer_index_combined(
+ delay + time, combined_synapse_neuron_index);
+
+ weight_t accumulation = ring_buffers[ring_buffer_index] + (weight * global_neuron_params.weight_scaling_factor); // added weight_scaling factor here
+ ring_buffers[ring_buffer_index] = accumulation;
+
+ }
+}
+
+// Zero the input and ring buffers.  Always returns true.
+bool synapses_initialise() {
+
+ for (uint32_t i = 0; i < INPUT_BUFFER_SIZE; i++) {
+ input_buffers[i] = 0;
+ }
+
+ for (uint32_t i = 0; i < RING_BUFFER_SIZE; i++) {
+ ring_buffers[i] = 0;
+ }
+
+ return true;
+}
+
+// Clear all buffered synaptic input between runs; same effect as
+// synapses_initialise() but without the status return.
+void synapses_reset(){
+
+ for (uint32_t i = 0; i < INPUT_BUFFER_SIZE; i++) {
+ input_buffers[i] = 0;
+ }
+ for (uint32_t i = 0; i < RING_BUFFER_SIZE; i++) {
+ ring_buffers[i] = 0;
+ }
+
+}
+
+// Per-tick synapse update, run with IRQs disabled so spike processing
+// cannot touch the ring buffers concurrently: first decay each used
+// neuron's input buffers, then drain this tick's ring-buffer slots into
+// the input buffers and clear them for reuse.
+void synapses_do_timestep_update(uint32_t time) {
+
+ __disable_irq();
+
+ uint32_t n_used_neurons = global_params_ptr->n_used_neurons;
+ for (index_t neuron_index = 0; neuron_index < n_used_neurons; neuron_index++) {
+ neuron_params_t* neuron_params = &neuron_params_array[neuron_index];
+
+ // decay both channels with the per-neuron propagator constants
+ // (named after the NESTML-generated __P__ symbols)
+ input_buffers[_ex_offset(neuron_index)] =
+ input_buffers[_ex_offset(neuron_index)] * neuron_params->__P__I_syn_exc__I_syn_exc;
+ input_buffers[_in_offset(neuron_index)] =
+ input_buffers[_in_offset(neuron_index)] * neuron_params->__P__I_syn_inh__I_syn_inh;
+
+ for (uint32_t synapse_type_index = 0;
+ synapse_type_index < SYNAPSE_TYPE_COUNT; synapse_type_index++)
+ {
+ uint32_t ring_buffer_index = synapses_get_ring_buffer_index(
+ time, synapse_type_index, neuron_index);
+
+ // NOTE(review): this 4-argument call matches the flat
+ // synapse_types_exponential_impl.h, but synapses.h includes the
+ // synapse_types/ variant whose add_neuron_input takes an extra
+ // parameters argument -- confirm which header is in effect
+ synapse_types_add_neuron_input(input_buffers, synapse_type_index,
+ neuron_index,
+ synapses_convert_weight_to_input(
+ ring_buffers[ring_buffer_index],0));
+ ring_buffers[ring_buffer_index] = 0;
+ }
+ }
+
+ __enable_irq();
+}
+
+// Process one synaptic row for a received spike.  Only the fixed
+// (non-plastic) region is handled; always returns true.
+bool synapses_process_synaptic_row(uint32_t time, synaptic_row_t row) {
+ address_t fixed_region_address = synapse_row_fixed_region(row);
+ _process_fixed_synapses(fixed_region_address, time);
+ return true;
+}
diff --git a/pynestml/codegeneration/resources_spinnaker2/synapses.h.jinja2 b/pynestml/codegeneration/resources_spinnaker2/synapses.h.jinja2
new file mode 100644
index 000000000..63b2b8d9f
--- /dev/null
+++ b/pynestml/codegeneration/resources_spinnaker2/synapses.h.jinja2
@@ -0,0 +1,46 @@
+#ifndef _SYNAPSES_H_
+#define _SYNAPSES_H_
+
+#include "common/neuron-typedefs.h"
+#include "synapse_row.h"
+#include "param_defs.h"
+#include "synapse_types/synapse_types_exponential_impl.h"
+
+
+// One input-buffer slot per (synapse type, neuron).
+#define INPUT_BUFFER_SIZE (1 {{ '<<' }} (SYNAPSE_TYPE_BITS + SYNAPSE_INDEX_BITS))
+// One ring-buffer slot per (delay, synapse type, neuron).
+#define RING_BUFFER_SIZE (1 {{ '<<' }} (SYNAPSE_DELAY_BITS + SYNAPSE_TYPE_BITS\
+ + SYNAPSE_INDEX_BITS))
+// Ring-buffer slot for a timestep (wrapped by the delay mask), synapse
+// type and neuron index.
+static inline index_t synapses_get_ring_buffer_index(
+ uint32_t simuation_timestep, uint32_t synapse_type_index,
+ uint32_t neuron_index) {
+ return (((simuation_timestep & SYNAPSE_DELAY_MASK)
+ {{ '<<' }} SYNAPSE_TYPE_INDEX_BITS)
+ | (synapse_type_index {{ '<<' }} SYNAPSE_INDEX_BITS)
+ | neuron_index);
+}
+
+// Same as above, with type and neuron already combined into one field.
+static inline index_t synapses_get_ring_buffer_index_combined(
+ uint32_t simulation_timestep, uint32_t combined_synapse_neuron_index) {
+ return (((simulation_timestep & SYNAPSE_DELAY_MASK)
+ {{ '<<' }} SYNAPSE_TYPE_INDEX_BITS)
+ | combined_synapse_neuron_index);
+}
+
+// Convert a fixed-point ring-buffer weight to an input current by
+// undoing `left_shift` bits of scaling.
+static inline input_t synapses_convert_weight_to_input(weight_t weight,
+ uint32_t left_shift) {
+ return ((REAL)weight)/((uint32_t)1{{ '<<' }}left_shift);
+}
+bool synapses_initialise();
+
+void synapses_reset();
+
+// process received spikes
+void synapses_process_spikes();
+void synapses_do_timestep_update(uint32_t time);
+
+// process on synaptic row based on the received spike
+// adds the weights to respective positions in the ring buffer
+bool synapses_process_synaptic_row(uint32_t time, synaptic_row_t row);
+
+
+#endif // _SYNAPSES_H_
\ No newline at end of file
diff --git a/pynestml/codegeneration/spinnaker2TargetTools.py b/pynestml/codegeneration/spinnaker2TargetTools.py
new file mode 100644
index 000000000..fb9f1b4eb
--- /dev/null
+++ b/pynestml/codegeneration/spinnaker2TargetTools.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+#
+# spinnaker2TargetTools.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+from pynestml.meta_model.ast_node import ASTNode
+from pynestml.meta_model.ast_model import ASTModel
+
+from pynestml.meta_model.ast_variable import ASTVariable
+from pynestml.symbols.variable_symbol import VariableSymbol
+from pynestml.symbols.variable_symbol import BlockType
+
+
+class Spinnaker2TargetTools:
+ @classmethod
+ def get_propagators_as_math_expressions(cls, neuron:ASTNode, parameters:dict) -> dict:
+ propagators_as_math_expressions = {}
+ propagator_expressions = neuron.analytic_solver["propagators"]
+ for propagator_expression in propagator_expressions:
+ # propagator_expressions[propagator_expression] = propagator_expressions[propagator_expression].replace(
+ # '__h', str(1))
+ # for symbol, value in parameters.items():
+ # propagator_expressions[propagator_expression] = propagator_expressions[propagator_expression].replace(symbol, str(value))
+ # propagators_as_math_expressions.update({propagator_expression: propagator_expressions[propagator_expression]})
+ propagators_as_math_expressions[propagator_expression] = propagator_expressions[propagator_expression]
+ return propagators_as_math_expressions
\ No newline at end of file
diff --git a/pynestml/codegeneration/spinnaker2_builder.py b/pynestml/codegeneration/spinnaker2_builder.py
new file mode 100644
index 000000000..46625983a
--- /dev/null
+++ b/pynestml/codegeneration/spinnaker2_builder.py
@@ -0,0 +1,133 @@
+# -*- coding: utf-8 -*-
+#
+# spinnaker2_builder.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import annotations
+
+from typing import Any, Mapping, Optional
+
+import fnmatch
+import os
+import shutil
+import subprocess
+import sys
+
+from pynestml.codegeneration.builder import Builder
+from pynestml.exceptions.generated_code_build_exception import GeneratedCodeBuildException
+from pynestml.exceptions.invalid_path_exception import InvalidPathException
+from pynestml.frontend.frontend_configuration import FrontendConfiguration
+
+
+class SpiNNaker2Builder(Builder):
+ r"""
+ Compiles and build the SpiNNaker2 Python Class and generated C code.
+ """
+
+
+ def __init__(self, options: Optional[Mapping[str, Any]] = None):
+ super().__init__(options)
+
+ def build(self) -> None:
+ r"""
+ This method can be used to build the generated code.
+
+ Raises
+ ------
+ GeneratedCodeBuildException
+ If any kind of failure occurs during compile or build.
+ InvalidPathException
+ If a failure occurs while trying to access the target path or the SpiNNaker installation path.
+ """
+ target_path = FrontendConfiguration.get_target_path()
+
+ if not os.path.isdir(target_path):
+ raise InvalidPathException('Target path (' + target_path + ') is not a directory!')
+
+ install_path = FrontendConfiguration.get_install_path()
+
+ if install_path is None or not os.path.isdir(install_path):
+ raise InvalidPathException('Installation path (' + str(install_path) + ') is not a directory!')
+
+ generated_file_names = os.listdir(target_path)
+ generated_file_names_neuron_py = [fn for fn in generated_file_names if fnmatch.fnmatch(fn, "*.py") and not "impl.py" in fn and not "example" in fn]
+ generated_file_names_synapse_types = [fn for fn in generated_file_names if fn in {'synapse_types.h', 'synapse_types_exponential_impl.h'}]
+ generated_file_names_common = [fn for fn in generated_file_names if fn in {'maths-util.h', 'neuron-typedefs.h'}]
+
+ old_cwd = os.getcwd()
+ try:
+ os.chdir(install_path)
+
+ # check if we run on win
+ if sys.platform.startswith('win'):
+ shell = True
+ else:
+ shell = False
+
+ try:
+ os.mkdir(os.path.join(install_path, "PySpiNNaker2Application"))
+ for fn in generated_file_names_neuron_py:
+ subprocess.check_call(["cp", "-v", fn, os.path.join(install_path, "PySpiNNaker2Application")],
+ stderr=subprocess.STDOUT,
+ shell=shell,
+ cwd=target_path)
+ subprocess.check_call(["rm", "-rf", fn, os.path.join(target_path, fn)],
+ stderr=subprocess.STDOUT,
+ shell=shell,
+ cwd=target_path)
+
+ except:
+ pass
+
+ try:
+ os.mkdir(os.path.join(target_path, "common"))
+ for fn in generated_file_names_common:
+ subprocess.check_call(["cp", "-v", fn, os.path.join(target_path, "common")],
+ stderr=subprocess.STDOUT,
+ shell=shell,
+ cwd=target_path)
+ subprocess.check_call(["rm", "-rf", fn, os.path.join(target_path, fn)],
+ stderr=subprocess.STDOUT,
+ shell=shell,
+ cwd=target_path)
+ except:
+ pass
+
+ try:
+ os.mkdir(os.path.join(target_path, "synapse_types"))
+ for fn in generated_file_names_synapse_types:
+ subprocess.check_call(["cp", "-v", fn, os.path.join(target_path, "synapse_types")],
+ stderr=subprocess.STDOUT,
+ shell=shell,
+ cwd=target_path)
+ subprocess.check_call(["rm", "-rf", fn, os.path.join(target_path, fn)],
+ stderr=subprocess.STDOUT,
+ shell=shell,
+ cwd=target_path)
+ except:
+ pass
+ try:
+ for name in os.listdir(old_cwd):
+ if name.startswith("nestml_python_target_"):
+ subprocess.check_call(["rm", "-rf", os.path.join(old_cwd, name)],
+ stderr=subprocess.STDOUT,
+ shell=shell)
+ except Exception as e:
+ print(f"Error deleting temporary directories: {e}")
+ finally:
+ os.chdir(old_cwd)
diff --git a/pynestml/codegeneration/spinnaker2_code_generator.py b/pynestml/codegeneration/spinnaker2_code_generator.py
new file mode 100644
index 000000000..8f9db0805
--- /dev/null
+++ b/pynestml/codegeneration/spinnaker2_code_generator.py
@@ -0,0 +1,257 @@
+import os
+import copy
+import pynestml
+
+from typing import Sequence, Optional, Mapping, Any, Dict
+
+
+from pynestml.cocos.co_cos_manager import CoCosManager
+from pynestml.codegeneration.code_generator import CodeGenerator
+
+from pynestml.visitors.ast_parent_visitor import ASTParentVisitor
+
+
+from pynestml.codegeneration.code_generator import CodeGenerator
+from pynestml.codegeneration.nest_code_generator import NESTCodeGenerator
+from pynestml.codegeneration.printers.cpp_expression_printer import CppExpressionPrinter
+from pynestml.codegeneration.printers.cpp_printer import CppPrinter
+from pynestml.codegeneration.printers.c_simple_expression_printer import CSimpleExpressionPrinter
+from pynestml.codegeneration.printers.gsl_variable_printer import GSLVariablePrinter
+from pynestml.codegeneration.printers.ode_toolbox_expression_printer import ODEToolboxExpressionPrinter
+from pynestml.codegeneration.printers.ode_toolbox_function_call_printer import ODEToolboxFunctionCallPrinter
+from pynestml.codegeneration.printers.ode_toolbox_variable_printer import ODEToolboxVariablePrinter
+
+from pynestml.codegeneration.printers.spinnaker2_c_function_call_printer import Spinnaker2CFunctionCallPrinter
+from pynestml.codegeneration.printers.spinnaker_c_type_symbol_printer import SpinnakerCTypeSymbolPrinter
+from pynestml.codegeneration.printers.spinnaker2_c_variable_printer import Spinnaker2CVariablePrinter
+from pynestml.codegeneration.printers.spinnaker_gsl_function_call_printer import SpinnakerGSLFunctionCallPrinter
+
+
+from pynestml.codegeneration.printers.constant_printer import ConstantPrinter
+from pynestml.codegeneration.printers.python_expression_printer import PythonExpressionPrinter
+from pynestml.codegeneration.printers.python_standalone_printer import PythonStandalonePrinter
+from pynestml.codegeneration.printers.python_stepping_function_function_call_printer import PythonSteppingFunctionFunctionCallPrinter
+from pynestml.codegeneration.printers.python_stepping_function_variable_printer import PythonSteppingFunctionVariablePrinter
+from pynestml.codegeneration.printers.python_variable_printer import PythonVariablePrinter
+from pynestml.codegeneration.printers.spinnaker_python_function_call_printer import SpinnakerPythonFunctionCallPrinter
+from pynestml.codegeneration.printers.spinnaker_python_simple_expression_printer import SpinnakerPythonSimpleExpressionPrinter
+from pynestml.codegeneration.printers.spinnaker_python_type_symbol_printer import SpinnakerPythonTypeSymbolPrinter
+from pynestml.codegeneration.python_standalone_code_generator import PythonStandaloneCodeGenerator
+from pynestml.codegeneration.python_code_generator_utils import PythonCodeGeneratorUtils
+from pynestml.meta_model.ast_model import ASTModel
+from pynestml.visitors.ast_symbol_table_visitor import ASTSymbolTableVisitor
+
+from pynestml.codegeneration.python_standalone_target_tools import PythonStandaloneTargetTools
+from pynestml.codegeneration.spinnaker2TargetTools import Spinnaker2TargetTools
+
+
+
+class CustomNESTCodeGenerator(NESTCodeGenerator):
+ def _get_model_namespace(self, astnode: ASTModel) -> Dict:
+ namespace = super()._get_model_namespace(astnode)
+ namespace["python_codegen_utils"] = PythonCodeGeneratorUtils
+ namespace["gsl_printer"] = self._gsl_printer
+ namespace["neuronName"] = astnode.get_name()
+ namespace["neuron"] = astnode
+ # namespace["parameters"], namespace["state"] = PythonStandaloneTargetTools.get_neuron_parameters_and_state(astnode.file_path)
+ # namespace["propagators_as_math_expressions"] = Spinnaker2TargetTools.get_propagators_as_math_expressions(
+ # namespace["neuron"], namespace["parameters"])
+ return namespace
+
+ def setup_printers(self):
+ self._constant_printer = ConstantPrinter()
+
+ # C/Spinnaker API printers
+ self._type_symbol_printer = SpinnakerCTypeSymbolPrinter()
+ self._nest_variable_printer = Spinnaker2CVariablePrinter(expression_printer=None, with_origin=True,
+ with_vector_parameter=True)
+ self._nest_function_call_printer = Spinnaker2CFunctionCallPrinter(None)
+ self._nest_function_call_printer_no_origin = Spinnaker2CFunctionCallPrinter(None)
+
+ self._printer = CppExpressionPrinter(
+ simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._nest_function_call_printer))
+ self._nest_variable_printer._expression_printer = self._printer
+ self._nest_function_call_printer._expression_printer = self._printer
+ self._nest_printer = CppPrinter(expression_printer=self._printer)
+
+ self._nest_variable_printer_no_origin = Spinnaker2CVariablePrinter(None, with_origin=False,
+ with_vector_parameter=False)
+ self._printer_no_origin = CppExpressionPrinter(
+ simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._nest_variable_printer_no_origin,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._nest_function_call_printer_no_origin))
+ self._nest_variable_printer_no_origin._expression_printer = self._printer_no_origin
+ self._nest_function_call_printer_no_origin._expression_printer = self._printer_no_origin
+
+ # GSL printers
+ self._gsl_variable_printer = GSLVariablePrinter(None)
+ self._gsl_function_call_printer = SpinnakerGSLFunctionCallPrinter(None)
+
+ self._gsl_printer = CppExpressionPrinter(
+ simple_expression_printer=CSimpleExpressionPrinter(variable_printer=self._gsl_variable_printer,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._gsl_function_call_printer))
+ self._gsl_function_call_printer._expression_printer = self._gsl_printer
+
+ # ODE-toolbox printers
+ self._ode_toolbox_variable_printer = ODEToolboxVariablePrinter(None)
+ self._ode_toolbox_function_call_printer = ODEToolboxFunctionCallPrinter(None)
+ self._ode_toolbox_printer = ODEToolboxExpressionPrinter(
+ simple_expression_printer=CSimpleExpressionPrinter(
+ variable_printer=self._ode_toolbox_variable_printer,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._ode_toolbox_function_call_printer))
+ self._ode_toolbox_variable_printer._expression_printer = self._ode_toolbox_printer
+ self._ode_toolbox_function_call_printer._expression_printer = self._ode_toolbox_printer
+
+
+
+class CustomPythonStandaloneCodeGenerator(PythonStandaloneCodeGenerator):
+ def _get_model_namespace(self, astnode: ASTModel) -> Dict:
+ namespace = super()._get_model_namespace(astnode)
+ namespace["python_codegen_utils"] = PythonCodeGeneratorUtils
+ namespace["gsl_printer"] = self._gsl_printer
+ namespace["neuronName"] = astnode.get_name()
+ namespace["neuron"] = astnode
+ namespace["parameters"], namespace["state"] = PythonStandaloneTargetTools.get_neuron_parameters_and_state(astnode.file_path)
+ namespace["propagators_as_math_expressions"] = Spinnaker2TargetTools.get_propagators_as_math_expressions(
+ namespace["neuron"], namespace["parameters"])
+ return namespace
+
+
+
+
+
+ def setup_printers(self):
+ super().setup_printers()
+
+ self._type_symbol_printer = SpinnakerPythonTypeSymbolPrinter()
+ self._constant_printer = ConstantPrinter()
+
+ # Python/mini simulation environment API printers
+ self._nest_variable_printer = PythonVariablePrinter(expression_printer=None, with_origin=False,
+ with_vector_parameter=True)
+ self._nest_function_call_printer = SpinnakerPythonFunctionCallPrinter(None)
+ self._nest_function_call_printer_no_origin = SpinnakerPythonFunctionCallPrinter(None)
+
+ self._printer = PythonExpressionPrinter(simple_expression_printer=SpinnakerPythonSimpleExpressionPrinter(
+ variable_printer=self._nest_variable_printer,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._nest_function_call_printer))
+ self._nest_variable_printer._expression_printer = self._printer
+ self._nest_function_call_printer._expression_printer = self._printer
+ self._nest_printer = PythonStandalonePrinter(expression_printer=self._printer)
+
+ self._nest_variable_printer_no_origin = PythonVariablePrinter(None, with_origin=False,
+ with_vector_parameter=False)
+ self._printer_no_origin = PythonExpressionPrinter(
+ simple_expression_printer=SpinnakerPythonSimpleExpressionPrinter(
+ variable_printer=self._nest_variable_printer_no_origin,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._nest_function_call_printer_no_origin))
+ self._nest_variable_printer_no_origin._expression_printer = self._printer_no_origin
+ self._nest_function_call_printer_no_origin._expression_printer = self._printer_no_origin
+
+ # GSL printers
+ self._gsl_variable_printer = PythonSteppingFunctionVariablePrinter(None)
+ self._gsl_function_call_printer = PythonSteppingFunctionFunctionCallPrinter(None)
+ self._gsl_printer = PythonExpressionPrinter(simple_expression_printer=SpinnakerPythonSimpleExpressionPrinter(
+ variable_printer=self._gsl_variable_printer,
+ constant_printer=self._constant_printer,
+ function_call_printer=self._gsl_function_call_printer))
+ self._gsl_function_call_printer._expression_printer = self._gsl_printer
+ self._gsl_variable_printer._expression_printer = self._gsl_printer
+
+class Spinnaker2CodeGenerator(CodeGenerator):
+ """
+ Code generator for Spinnaker 2
+ """
+
+ _default_options = {
+ # "neuron_synapse_pairs": [
+ # {"neuron": 'nestml/models/neurons/iaf_psc_exp_neuron.nestml',# "iaf_psc_exp_neuron",
+ # "synapse": 'nestml/models/synapses/stdp_synapse.nestml', #"stdp_synapse",
+ # "post_ports": ["post_spikes"]}
+ # ],
+ "templates": {
+ "path": os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "resources_spinnaker2"))), #, os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "resources_spinnaker2/common"))), os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "resources_spinnaker2/synapse_types")))],
+ "model_templates": {
+ "neuron": ["@NEURON_NAME@.py.jinja2",
+ "@NEURON_NAME@.c.jinja2",
+ "@NEURON_NAME@.h.jinja2",
+ # "decay.h.jinja2",
+ "global_params.h.jinja2",
+ "neuron.c.jinja2",
+ "neuron.h.jinja2",
+ "neuron_model.h.jinja2",
+ # "neuron_model_@NEURON_NAME@_impl.c.jinja2",
+ "neuron_model_@NEURON_NAME@_impl.h.jinja2",
+ "param_defs.h.jinja2",
+ "population_table.h.jinja2",
+ "population_table_binary_search_impl.c.jinja2",
+ "regions.h.jinja2",
+ "simulation.h.jinja2",
+ "maths-util.h.jinja2",
+ "neuron-typedefs.h.jinja2",
+ "spike_processing.c.jinja2",
+ "spike_processing.h.jinja2",
+ "qpe.ld.jinja2",
+ "qpe_isr.c.jinja2",
+ "synapse_row.h.jinja2",
+ "synapses.c.jinja2",
+ "synapses.h.jinja2",
+ "synapse_types.h.jinja2",
+ "synapse_types_exponential_impl.h.jinja2",
+ ],
+ "synapse": [
+ # "synapse_row.h.jinja2",
+ # "synapses.c.jinja2",
+ # "synapses.h.jinja2",
+ # "synapse_types.h.jinja2",
+ # "synapse_types_exponential_impl.h.jinja2",
+ ],
+ },
+ "module_templates": ["Makefile.jinja2",]
+ }
+ }
+
+ def __init__(self, options: Optional[Mapping[str, Any]] = None):
+ super().__init__(options)
+
+ options_cpp = copy.deepcopy(NESTCodeGenerator._default_options)
+ # options_cpp["neuron_synapse_pairs"] = self._options["neuron_synapse_pairs"]
+ options_cpp["templates"]["model_templates"]["neuron"] = [fname for fname in
+ self._options["templates"]["model_templates"]["neuron"]
+ if ((fname.endswith(".h.jinja2") or fname.endswith(".c.jinja2") or fname.endswith(".ld.jinja2")
+ or ("Makefile" in fname)))] # and "@NEURON_NAME@" in fname)]
+ options_cpp["templates"]["model_templates"]["synapse"] = [fname for fname in
+ self._options["templates"]["model_templates"]["synapse"]
+ if ((fname.endswith(".h.jinja2") or fname.endswith(".c.jinja2") or ("Makefile" in fname)))] # and "@SYNAPSE_NAME@" in fname)]
+ options_cpp["nest_version"] = pynestml.__version__
+ options_cpp["templates"]["module_templates"] = self._options["templates"]["module_templates"]
+ options_cpp["templates"]["path"] = self._options["templates"]["path"]
+ self.codegen_cpp = CustomNESTCodeGenerator(options_cpp)
+
+ options_py = copy.deepcopy(PythonStandaloneCodeGenerator._default_options)
+ options_py["templates"]["model_templates"]["neuron"] = [fname for fname in
+ self._options["templates"]["model_templates"]["neuron"]
+ if (fname.endswith(".py.jinja2")) and ("@NEURON_NAME@" in fname or fname == "__init__.py.jinja2")]
+ options_py["templates"]["model_templates"]["synapse"] = [fname for fname in
+ self._options["templates"]["model_templates"][
+ "synapse"] if (fname.endswith(".py.jinja2")) and "@SYNAPSE_NAME@" in fname]
+ options_py["nest_version"] = pynestml.__version__
+ options_py["templates"]["module_templates"] = []
+ options_py["templates"]["path"] = self._options["templates"]["path"]
+ self.codegen_py = CustomPythonStandaloneCodeGenerator(options_py)
+
+ def generate_code(self, models: Sequence[ASTModel]) -> None:
+ cloned_models = []
+ for model in models: # TODO: check if this can be removed
+ cloned_model = model.clone()
+ cloned_models.append(cloned_model)
+
+ self.codegen_cpp.generate_code(models)
+ self.codegen_py.generate_code(cloned_models)
+ # self.codegen_py.generate_code(models)
diff --git a/pynestml/codegeneration/spinnaker2_code_generator_utils.py b/pynestml/codegeneration/spinnaker2_code_generator_utils.py
new file mode 100644
index 000000000..b80c10e82
--- /dev/null
+++ b/pynestml/codegeneration/spinnaker2_code_generator_utils.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+#
+# spinnaker2_code_generator_utils.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+from pynestml.meta_model.ast_model import ASTModel
+from pynestml.symbols.variable_symbol import VariableSymbol
+from pynestml.symbols.variable_symbol import BlockType
+
+
+
+
+class SPINNAKER2CodeGeneratorUtils:
+
+ @classmethod
+ def print_symbol_origin(cls, variable_symbol: VariableSymbol, numerical_state_symbols=None) -> str:
+ """
+ Returns a prefix corresponding to the origin of the variable symbol.
+ :param variable_symbol: a single variable symbol.
+ :return: the corresponding prefix
+ """
+ if variable_symbol.block_type in [BlockType.STATE, BlockType.EQUATION]:
+ if numerical_state_symbols and variable_symbol.get_symbol_name() in numerical_state_symbols:
+ return 'NUMERICAL STATE SYMBOL' #'S_.ode_state[State_::%s]'
+
+ return 'state->%s'
+
+ if variable_symbol.block_type == BlockType.PARAMETERS:
+ return 'neuron_params->%s'
+
+ if variable_symbol.block_type == BlockType.COMMON_PARAMETERS:
+ return 'neuron_params->%s'
+
+ if variable_symbol.block_type == BlockType.INTERNALS: # and not variable_symbol.name == "__h":
+ return 'neuron_params->%s'
+
+
+ if variable_symbol.block_type == BlockType.INPUT:
+ return 'input->%s'
+
+ return ''
+
+ # @classmethod
+ # def get_propagators_as_python_expression(cls, propagators:dict) -> dict:
+ # import math
+ #
+ # # Define supported math functions and constants
+ # safe_dict = {
+ # # Basic math functions
+ # 'exp': math.exp,
+ # 'ln': math.log,
+ # 'log10': math.log10,
+ # 'pow': math.pow,
+ # 'sqrt': math.sqrt,
+ # # Trigonometric functions
+ # 'sin': math.sin,
+ # 'cos': math.cos,
+ # 'tan': math.tan,
+ # 'asin': math.asin,
+ # 'acos': math.acos,
+ # 'atan': math.atan,
+ # 'atan2': math.atan2,
+ # # Hyperbolic functions
+ # 'sinh': math.sinh,
+ # 'cosh': math.cosh,
+ # 'tanh': math.tanh,
+ # # Math functions
+ # 'abs': abs,
+ # 'ceil': math.ceil,
+ # 'floor': math.floor,
+ # 'round': round,
+ # 'erf': math.erf,
+ # 'erfc': math.erfc,
+ # # Constants
+ # 'e': math.e,
+ # 'pi': math.pi,
+ # 'inf': float('inf'),
+ # '__h': '__h',
+ # }
+ #
+ # propagators_as_python_expressions = dict()
+ # for key, expression in propagators.items():
+ # # Remove all function names from the expression before checking the pattern
+ # result = eval(expression, {"__builtins__": {}}, safe_dict)
+ # propagators_as_python_expressions[key] = result
+ # pass
diff --git a/pynestml/codegeneration/spinnaker_unit_converter.py b/pynestml/codegeneration/spinnaker_unit_converter.py
deleted file mode 100644
index fb5b12b29..000000000
--- a/pynestml/codegeneration/spinnaker_unit_converter.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# spinnaker_unit_converter.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see .
-
-from astropy import units
-
-
-class SpinnakerUnitConverter:
- r"""
- NEST Simulator uses a set of default physical units internally. This class calculates the factor needed to convert any given physical unit to its NEST counterpart.
- """
-
- @classmethod
- def get_factor(cls, unit: units.UnitBase) -> float:
- """
- Gives a factor for a given unit that transforms it to a "neuroscience" scale. If the given unit is not listed as a neuroscience unit, the factor is 1.
-
- :param unit: an astropy unit
- :type unit: IrreducibleUnit or Unit or CompositeUnit
- :return: a factor to that unit, converting it to "neuroscience" scales.
- """
- assert (isinstance(unit, units.IrreducibleUnit) or isinstance(unit, units.CompositeUnit)
- or isinstance(unit, units.Unit) or isinstance(unit, units.PrefixUnit)), \
- "UnitConverter: given parameter is not a unit (%s)!" % type(unit)
-
- # check if it is dimensionless, thus only a prefix
- if unit.physical_type == 'dimensionless':
- return unit.si
-
- # otherwise check if it is one of the base units
- target_unit = None
- if unit.physical_type == 'electrical conductance':
- target_unit = units.nS
-
- if unit.physical_type == 'electrical resistance':
- target_unit = units.Gohm
-
- if unit.physical_type == 'time':
- target_unit = units.ms
-
- if unit.physical_type == 'electrical capacitance':
- target_unit = units.pF
-
- if unit.physical_type == 'electrical potential':
- target_unit = units.mV
-
- if unit.physical_type == 'electrical current':
- target_unit = units.nA
-
- if target_unit is not None:
- return (unit / target_unit).si.scale
-
- if unit == unit.bases[0] and len(unit.bases) == 1:
- # this case means that we stuck in a recursive definition
- # just return the factor 1.0
- return 1.0
-
- # now if it is not a base unit, it has to be a combined one, e.g. s**2, decompose it
- factor = 1.0
- for i in range(0, len(unit.bases)):
- factor *= cls.get_factor(unit.bases[i]) ** unit.powers[i]
- return factor
diff --git a/pynestml/frontend/pynestml_frontend.py b/pynestml/frontend/pynestml_frontend.py
index 89e2bb7db..94a54b5f2 100644
--- a/pynestml/frontend/pynestml_frontend.py
+++ b/pynestml/frontend/pynestml_frontend.py
@@ -46,7 +46,7 @@
def get_known_targets():
- targets = ["NEST", "NEST_compartmental", "python_standalone", "autodoc", "pretty_render", "spinnaker", "NEST_DESKTOP", "none"]
+ targets = ["NEST", "NEST_compartmental", "python_standalone", "autodoc", "pretty_render", "spinnaker", "spinnaker2", "NEST_DESKTOP", "none"]
targets = [s.upper() for s in targets]
return targets
@@ -61,7 +61,7 @@ def transformers_from_target_name(target_name: str, options: Optional[Mapping[st
if options is None:
options = {}
- if target_name.upper() in ["NEST", "SPINNAKER", "PYTHON_STANDALONE", "NEST_COMPARTMENTAL", "NEST_DESKTOP"]:
+ if target_name.upper() in ["SPINNAKER2", "NEST", "SPINNAKER", "PYTHON_STANDALONE", "NEST_COMPARTMENTAL", "NEST_DESKTOP"]:
from pynestml.transformers.add_timestep_to_internals_transformer import AddTimestepToInternalsTransformer
add_timestep_to_internals_transformer = AddTimestepToInternalsTransformer()
@@ -84,6 +84,13 @@ def transformers_from_target_name(target_name: str, options: Optional[Mapping[st
options = synapse_post_neuron_co_generation.set_options(options)
transformers.append(synapse_post_neuron_co_generation)
+ if target_name.upper() in ["SPINNAKER2"]:
+ pass
+ # from pynestml.transformers.spinnaker2_unit_transformer import Spinnaker2UnitTransformer
+
+ # unit_transformer = Spinnaker2UnitTransformer()
+ # transformers.append(unit_transformer)
+
if target_name.upper() == "NEST":
from pynestml.transformers.synapse_post_neuron_transformer import SynapsePostNeuronTransformer
@@ -107,6 +114,13 @@ def transformers_from_target_name(target_name: str, options: Optional[Mapping[st
options = synapse_post_neuron_co_generation.set_options(options)
transformers.append(synapse_post_neuron_co_generation)
+ if target_name.upper() in ["NEST", "SPINNAKER2"]:
+ from pynestml.transformers.non_dimensionalisation_transformer import NonDimensionalisationTransformer
+
+ non_dimensionalisation_transformer = NonDimensionalisationTransformer()
+ options = non_dimensionalisation_transformer.set_options(options)
+ transformers.append(non_dimensionalisation_transformer)
+
return transformers, options
@@ -145,6 +159,10 @@ def code_generator_from_target_name(target_name: str, options: Optional[Mapping[
from pynestml.codegeneration.spinnaker_code_generator import SpiNNakerCodeGenerator
return SpiNNakerCodeGenerator(options)
+ if target_name.upper() == "SPINNAKER2":
+ from pynestml.codegeneration.spinnaker2_code_generator import Spinnaker2CodeGenerator
+ return Spinnaker2CodeGenerator(options)
+
if target_name.upper() == "NONE":
# dummy/null target: user requested to not generate any code (for instance, when just doing validation of a model)
code, message = Messages.get_no_code_generated()
@@ -172,6 +190,12 @@ def builder_from_target_name(target_name: str, options: Optional[Mapping[str, An
remaining_options = builder.set_options(options)
return builder, remaining_options
+ if target_name.upper() == "SPINNAKER2":
+ from pynestml.codegeneration.spinnaker2_builder import SpiNNaker2Builder
+ builder = SpiNNaker2Builder(options)
+ remaining_options = builder.set_options(options)
+ return builder, remaining_options
+
if target_name.upper() == "AUTODOC":
from pynestml.codegeneration.autodoc_builder import AutodocBuilder
builder = AutodocBuilder(options)
@@ -359,6 +383,37 @@ def generate_spinnaker_target(input_path: Union[str, Sequence[str]], target_path
logging_level=logging_level, store_log=store_log, suffix=suffix, dev=dev,
codegen_opts=codegen_opts)
+def generate_spinnaker2_target(input_path: Union[str, Sequence[str]], target_path: Optional[str] = None, install_path: Optional[str] = None,
+ logging_level="ERROR", module_name: str = "nestmlmodule", store_log: bool=False,
+ suffix: str="", dev: bool=False, codegen_opts: Optional[Mapping[str, Any]]=None):
+ r"""Generate and build code for the SpiNNaker 2 target.
+
+ Parameters
+ ----------
+ input_path : str **or** Sequence[str]
+ Path to the NESTML file(s) or to folder(s) containing NESTML files to convert to SpiNNaker 2 code.
+ target_path : str, optional (default: append "target" to `input_path`)
+ Path to the generated C++ code and install files.
+ install_path
+ Path to the directory where the generated code will be installed.
+ logging_level : str, optional (default: "ERROR")
+ Sets which level of information should be displayed during code generation (among "ERROR", "WARNING", "INFO", or "NO").
+ module_name : str, optional (default: "nestmlmodule")
+ The name of the generated Python module.
+ store_log : bool, optional (default: False)
+ Whether the log should be saved to file.
+ suffix : str, optional (default: "")
+ A suffix string that will be appended to the name of all generated models.
+ dev : bool, optional (default: False)
+ Enable development mode: code generation is attempted even for models that contain errors, and extra information is rendered in the generated code.
+ codegen_opts : Optional[Mapping[str, Any]]
+ A dictionary containing additional options for the target code generator.
+ """
+ generate_target(input_path, target_platform="spinnaker2", target_path=target_path,
+ install_path=install_path,
+ logging_level=logging_level, store_log=store_log, suffix=suffix, dev=dev,
+ codegen_opts=codegen_opts)
+
def generate_nest_compartmental_target(input_path: Union[str, Sequence[str]], target_path: Optional[str] = None,
install_path: Optional[str] = None, logging_level="ERROR",
diff --git a/pynestml/meta_model/ast_arithmetic_operator.py b/pynestml/meta_model/ast_arithmetic_operator.py
index 59bc3fd04..9718b159d 100644
--- a/pynestml/meta_model/ast_arithmetic_operator.py
+++ b/pynestml/meta_model/ast_arithmetic_operator.py
@@ -38,7 +38,7 @@ class ASTArithmeticOperator(ASTNode):
is_pow_op = False # type:bool
"""
- def __init__(self, is_times_op: bool, is_div_op: bool, is_modulo_op: bool, is_plus_op: bool, is_minus_op: bool, is_pow_op: bool, *args, **kwargs):
+ def __init__(self, is_times_op: bool = False, is_div_op: bool = False, is_modulo_op: bool = False, is_plus_op: bool = False, is_minus_op: bool = False, is_pow_op: bool = False, *args, **kwargs):
super(ASTArithmeticOperator, self).__init__(*args, **kwargs)
assert ((is_times_op + is_div_op + is_modulo_op + is_plus_op + is_minus_op + is_pow_op) == 1), \
'(PyNESTML.AST.ArithmeticOperator) Type of arithmetic operator not specified!'
@@ -67,8 +67,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_assignment.py b/pynestml/meta_model/ast_assignment.py
index 83e03e3e7..940d4f86f 100644
--- a/pynestml/meta_model/ast_assignment.py
+++ b/pynestml/meta_model/ast_assignment.py
@@ -103,8 +103,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_bit_operator.py b/pynestml/meta_model/ast_bit_operator.py
index fe5af3790..a2a9c37f7 100644
--- a/pynestml/meta_model/ast_bit_operator.py
+++ b/pynestml/meta_model/ast_bit_operator.py
@@ -79,8 +79,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_block_with_variables.py b/pynestml/meta_model/ast_block_with_variables.py
index db2a48084..37d819274 100644
--- a/pynestml/meta_model/ast_block_with_variables.py
+++ b/pynestml/meta_model/ast_block_with_variables.py
@@ -94,8 +94,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_comparison_operator.py b/pynestml/meta_model/ast_comparison_operator.py
index 2c94d59e2..dc6f5a194 100644
--- a/pynestml/meta_model/ast_comparison_operator.py
+++ b/pynestml/meta_model/ast_comparison_operator.py
@@ -91,8 +91,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_compound_stmt.py b/pynestml/meta_model/ast_compound_stmt.py
index 96f497f9c..176a714ce 100644
--- a/pynestml/meta_model/ast_compound_stmt.py
+++ b/pynestml/meta_model/ast_compound_stmt.py
@@ -90,8 +90,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_data_type.py b/pynestml/meta_model/ast_data_type.py
index 07eec76c3..5ecd53367 100644
--- a/pynestml/meta_model/ast_data_type.py
+++ b/pynestml/meta_model/ast_data_type.py
@@ -99,8 +99,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_declaration.py b/pynestml/meta_model/ast_declaration.py
index f7d7aca03..1cf99658f 100644
--- a/pynestml/meta_model/ast_declaration.py
+++ b/pynestml/meta_model/ast_declaration.py
@@ -123,8 +123,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
@@ -136,6 +135,14 @@ def get_variables(self):
"""
return self.variables
+ def set_variables(self, _variables):
+ """
+ Sets set of left-hand side variables.
+ :type: list(ASTVariables)
+ """
+ self.variables = _variables
+
+
def get_decorators(self):
"""
"""
@@ -149,6 +156,13 @@ def get_data_type(self):
"""
return self.data_type
+ def set_data_type(self, data_type):
+ """
+ Sets the data type.
+ :type: ASTDataType
+ """
+ self.data_type = data_type
+
def has_size_parameter(self) -> bool:
"""
Returns whether the declaration has a size parameter or not.
diff --git a/pynestml/meta_model/ast_elif_clause.py b/pynestml/meta_model/ast_elif_clause.py
index 8332409ca..d7b4f0451 100644
--- a/pynestml/meta_model/ast_elif_clause.py
+++ b/pynestml/meta_model/ast_elif_clause.py
@@ -65,8 +65,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_else_clause.py b/pynestml/meta_model/ast_else_clause.py
index a6c604107..586f119cc 100644
--- a/pynestml/meta_model/ast_else_clause.py
+++ b/pynestml/meta_model/ast_else_clause.py
@@ -57,8 +57,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_equations_block.py b/pynestml/meta_model/ast_equations_block.py
index bf6abb75f..6ff9575de 100644
--- a/pynestml/meta_model/ast_equations_block.py
+++ b/pynestml/meta_model/ast_equations_block.py
@@ -73,8 +73,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_expression.py b/pynestml/meta_model/ast_expression.py
index 6f2528b09..5a8a701e7 100644
--- a/pynestml/meta_model/ast_expression.py
+++ b/pynestml/meta_model/ast_expression.py
@@ -152,8 +152,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_external_variable.py b/pynestml/meta_model/ast_external_variable.py
index 5418ffc85..92e2cf510 100644
--- a/pynestml/meta_model/ast_external_variable.py
+++ b/pynestml/meta_model/ast_external_variable.py
@@ -55,8 +55,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
def update_alt_scope(self, scope):
self._altscope = scope
diff --git a/pynestml/meta_model/ast_for_stmt.py b/pynestml/meta_model/ast_for_stmt.py
index 191bf6cd9..6237694b9 100644
--- a/pynestml/meta_model/ast_for_stmt.py
+++ b/pynestml/meta_model/ast_for_stmt.py
@@ -85,8 +85,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_function.py b/pynestml/meta_model/ast_function.py
index 989a7dfae..8eadf9088 100644
--- a/pynestml/meta_model/ast_function.py
+++ b/pynestml/meta_model/ast_function.py
@@ -97,8 +97,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_function_call.py b/pynestml/meta_model/ast_function_call.py
index a07c3483a..019ac66cf 100644
--- a/pynestml/meta_model/ast_function_call.py
+++ b/pynestml/meta_model/ast_function_call.py
@@ -71,8 +71,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_if_clause.py b/pynestml/meta_model/ast_if_clause.py
index 999485f9b..b80c54a0f 100644
--- a/pynestml/meta_model/ast_if_clause.py
+++ b/pynestml/meta_model/ast_if_clause.py
@@ -64,8 +64,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_if_stmt.py b/pynestml/meta_model/ast_if_stmt.py
index 5d2cfcfe3..ebe9cdbce 100644
--- a/pynestml/meta_model/ast_if_stmt.py
+++ b/pynestml/meta_model/ast_if_stmt.py
@@ -84,8 +84,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_inline_expression.py b/pynestml/meta_model/ast_inline_expression.py
index b8af0f928..3e6169bce 100644
--- a/pynestml/meta_model/ast_inline_expression.py
+++ b/pynestml/meta_model/ast_inline_expression.py
@@ -89,8 +89,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_input_block.py b/pynestml/meta_model/ast_input_block.py
index d74dd8c36..afa5529a0 100644
--- a/pynestml/meta_model/ast_input_block.py
+++ b/pynestml/meta_model/ast_input_block.py
@@ -72,8 +72,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_input_port.py b/pynestml/meta_model/ast_input_port.py
index 45bc87dbb..8dc72593e 100644
--- a/pynestml/meta_model/ast_input_port.py
+++ b/pynestml/meta_model/ast_input_port.py
@@ -103,8 +103,7 @@ def clone(self) -> ASTInputPort:
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_input_qualifier.py b/pynestml/meta_model/ast_input_qualifier.py
index 6c34c33ec..0b6c280ff 100644
--- a/pynestml/meta_model/ast_input_qualifier.py
+++ b/pynestml/meta_model/ast_input_qualifier.py
@@ -68,8 +68,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_kernel.py b/pynestml/meta_model/ast_kernel.py
index e152e118f..125e90577 100644
--- a/pynestml/meta_model/ast_kernel.py
+++ b/pynestml/meta_model/ast_kernel.py
@@ -66,8 +66,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_logical_operator.py b/pynestml/meta_model/ast_logical_operator.py
index e3f3a314f..612f300c0 100644
--- a/pynestml/meta_model/ast_logical_operator.py
+++ b/pynestml/meta_model/ast_logical_operator.py
@@ -65,8 +65,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_model.py b/pynestml/meta_model/ast_model.py
index f2fae8021..e049f867e 100644
--- a/pynestml/meta_model/ast_model.py
+++ b/pynestml/meta_model/ast_model.py
@@ -21,6 +21,9 @@
from typing import List, Optional
+import astropy.units as u
+from pynestml.meta_model.ast_expression import ASTExpression
+from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression
from pynestml.meta_model.ast_block_with_variables import ASTBlockWithVariables
from pynestml.meta_model.ast_kernel import ASTKernel
from pynestml.meta_model.ast_on_condition_block import ASTOnConditionBlock
@@ -46,7 +49,7 @@ class ASTModel(ASTNode):
This class is used to stuff common to neurons and synapses
"""
- def __init__(self, name: str, body: ASTModelBody, artifact_name=None, *args, **kwargs):
+ def __init__(self, name: str, body: ASTModelBody, artifact_name=None, file_path=None, *args, **kwargs):
"""
Standard constructor.
@@ -68,6 +71,7 @@ def __init__(self, name: str, body: ASTModelBody, artifact_name=None, *args, **k
self.name = name
self.body = body
self.artifact_name = artifact_name
+ self.file_path = file_path # add file path to ast model as it's needed for use of python_standalone_target_tools with SpiNNaker2
def clone(self):
"""
@@ -79,13 +83,13 @@ def clone(self):
dup = ASTModel(name=self.name,
body=self.body.clone(),
artifact_name=self.artifact_name,
+ file_path=self.file_path,
# ASTNode common attributes:
source_position=self.source_position,
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
@@ -534,6 +538,45 @@ def get_initial_value(self, variable_name: str):
return None
+ def _to_base_value_from_string(self,quantity_str):
+ local_dict = {'u': u}
+ quantity = eval(quantity_str, {"__builtins__": {}}, local_dict)
+ canonical_unit = u.get_physical_type(quantity.unit)._unit
+ # Return the SI base value and unit name
+ return quantity.si.value, str(canonical_unit)
+
+
+ def get_parameter_value_dict(self) -> dict:
+ """
+ Generates a dict which maps the initial parameter values to their variable names from the parameters section
+ :param node: the neuron or synapse containing the parameter
+ :return: a dict {"parameter_names": initial_values}
+ """
+ parameters_block = self.get_parameters_blocks()[0]
+ parameter_value_dict = {}
+ for declarations in parameters_block.get_declarations():
+ if isinstance(declarations.expression, ASTSimpleExpression):
+ # declarations.variables[0].astropy_unit = None
+ # declarations.data_type = ' real'
+ if ((declarations.expression.numeric_literal.real != None) and hasattr(declarations.expression.variable, 'name')):
+ expr = str(declarations.expression.numeric_literal) + '* u.' + declarations.expression.variable.name
+ float_value_in_si, unit_in_si = self._to_base_value_from_string(expr)
+ declarations.expression.numeric_literal = float_value_in_si
+ parameter_value_dict[declarations.variables[0].name] = float_value_in_si
+ declarations.expression.variable.name = unit_in_si
+ pass
+
+ if isinstance(declarations.expression, ASTExpression):
+ expr = str(declarations.expression.unary_operator) + str(
+ declarations.expression.expression.numeric_literal) + '* u.' + declarations.expression.expression.variable.name
+ float_value_in_si, unit_in_si = self._to_base_value_from_string(expr)
+ declarations.expression.expression.numeric_literal = abs(float_value_in_si)
+ parameter_value_dict[declarations.variables[0].name] = float_value_in_si
+ declarations.expression.expression.variable.name = unit_in_si
+ pass
+
+ return parameter_value_dict
+
def has_delay_variables(self) -> bool:
"""
This method indicates if the neuron has variables with a delay parameter.
diff --git a/pynestml/meta_model/ast_model_body.py b/pynestml/meta_model/ast_model_body.py
index 6e32561ce..7aac35cff 100644
--- a/pynestml/meta_model/ast_model_body.py
+++ b/pynestml/meta_model/ast_model_body.py
@@ -72,8 +72,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_namespace_decorator.py b/pynestml/meta_model/ast_namespace_decorator.py
index 33cc63b54..d26c50672 100644
--- a/pynestml/meta_model/ast_namespace_decorator.py
+++ b/pynestml/meta_model/ast_namespace_decorator.py
@@ -47,8 +47,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_nestml_compilation_unit.py b/pynestml/meta_model/ast_nestml_compilation_unit.py
index 6e36f1fe3..a6982d523 100644
--- a/pynestml/meta_model/ast_nestml_compilation_unit.py
+++ b/pynestml/meta_model/ast_nestml_compilation_unit.py
@@ -63,8 +63,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_node.py b/pynestml/meta_model/ast_node.py
index 64234fbce..f8a302f5d 100644
--- a/pynestml/meta_model/ast_node.py
+++ b/pynestml/meta_model/ast_node.py
@@ -44,11 +44,10 @@ class ASTNode(metaclass=ABCMeta):
pre_comments = list()
in_comment = None
#
- implicit_conversion_factor = None
"""
def __init__(self, source_position: ASTSourceLocation = None, scope: Scope = None, comment: Optional[str] = None, pre_comments: Optional[List[str]] = None,
- in_comment: Optional[str] = None, implicit_conversion_factor: Optional[float] = None):
+ in_comment: Optional[str] = None):
"""
The standard constructor.
:param source_position: a source position element.
@@ -56,7 +55,6 @@ def __init__(self, source_position: ASTSourceLocation = None, scope: Scope = Non
:param comment: comment for this node
:param pre_comments: pre-comments for this node
:param in_comment: in-comment for this node
- :param implicit_conversion_factor: see set_implicit_conversion_factor()
"""
self.source_position = source_position
self.scope = scope
@@ -65,7 +63,6 @@ def __init__(self, source_position: ASTSourceLocation = None, scope: Scope = Non
pre_comments = []
self.pre_comments = pre_comments
self.in_comment = in_comment
- self.implicit_conversion_factor = implicit_conversion_factor
@abstractmethod
def clone(self):
@@ -103,22 +100,6 @@ def get_children(self) -> List[ASTNode]:
"""
pass
- def set_implicit_conversion_factor(self, implicit_factor: Optional[float]) -> None:
- """
- Sets a factor that, when applied to the (unit-typed) expression, converts it to the magnitude of the
- context where it is used. eg. Volt + milliVolt needs to either be
- 1000*Volt + milliVolt or Volt + 0.001 * milliVolt
- :param implicit_factor: the factor to be installed
- """
- self.implicit_conversion_factor = implicit_factor
-
- def get_implicit_conversion_factor(self) -> Optional[float]:
- """
- Returns the factor installed as implicitConversionFactor for this expression
- :return: the conversion factor, if present, or None
- """
- return self.implicit_conversion_factor
-
def get_source_position(self):
"""
Returns the source position of the element.
diff --git a/pynestml/meta_model/ast_ode_equation.py b/pynestml/meta_model/ast_ode_equation.py
index 95567b367..f0679d824 100644
--- a/pynestml/meta_model/ast_ode_equation.py
+++ b/pynestml/meta_model/ast_ode_equation.py
@@ -82,8 +82,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_on_condition_block.py b/pynestml/meta_model/ast_on_condition_block.py
index 6f38044d8..5fbd7ad66 100644
--- a/pynestml/meta_model/ast_on_condition_block.py
+++ b/pynestml/meta_model/ast_on_condition_block.py
@@ -60,8 +60,7 @@ def clone(self) -> ASTOnConditionBlock:
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_on_receive_block.py b/pynestml/meta_model/ast_on_receive_block.py
index 9118eea4c..707f806f8 100644
--- a/pynestml/meta_model/ast_on_receive_block.py
+++ b/pynestml/meta_model/ast_on_receive_block.py
@@ -65,8 +65,7 @@ def clone(self) -> ASTOnReceiveBlock:
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_output_block.py b/pynestml/meta_model/ast_output_block.py
index 33cf33245..baaddbc59 100644
--- a/pynestml/meta_model/ast_output_block.py
+++ b/pynestml/meta_model/ast_output_block.py
@@ -68,8 +68,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_parameter.py b/pynestml/meta_model/ast_parameter.py
index 6e30f4af5..c3fc0c967 100644
--- a/pynestml/meta_model/ast_parameter.py
+++ b/pynestml/meta_model/ast_parameter.py
@@ -62,8 +62,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_return_stmt.py b/pynestml/meta_model/ast_return_stmt.py
index 43719747a..ec75c06db 100644
--- a/pynestml/meta_model/ast_return_stmt.py
+++ b/pynestml/meta_model/ast_return_stmt.py
@@ -64,8 +64,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_simple_expression.py b/pynestml/meta_model/ast_simple_expression.py
index 8514f76d2..9172b7fc8 100644
--- a/pynestml/meta_model/ast_simple_expression.py
+++ b/pynestml/meta_model/ast_simple_expression.py
@@ -126,8 +126,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_small_stmt.py b/pynestml/meta_model/ast_small_stmt.py
index e570084f2..eefb54ffa 100644
--- a/pynestml/meta_model/ast_small_stmt.py
+++ b/pynestml/meta_model/ast_small_stmt.py
@@ -88,8 +88,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_stmt.py b/pynestml/meta_model/ast_stmt.py
index 652ad48e8..c38b64aee 100644
--- a/pynestml/meta_model/ast_stmt.py
+++ b/pynestml/meta_model/ast_stmt.py
@@ -69,8 +69,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_stmts_body.py b/pynestml/meta_model/ast_stmts_body.py
index 52e7159d9..d3c55906d 100644
--- a/pynestml/meta_model/ast_stmts_body.py
+++ b/pynestml/meta_model/ast_stmts_body.py
@@ -66,8 +66,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_unary_operator.py b/pynestml/meta_model/ast_unary_operator.py
index 2871956cc..982e3738a 100644
--- a/pynestml/meta_model/ast_unary_operator.py
+++ b/pynestml/meta_model/ast_unary_operator.py
@@ -70,8 +70,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_unit_type.py b/pynestml/meta_model/ast_unit_type.py
index 346b9d7e4..f1f3902a7 100644
--- a/pynestml/meta_model/ast_unit_type.py
+++ b/pynestml/meta_model/ast_unit_type.py
@@ -136,8 +136,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_update_block.py b/pynestml/meta_model/ast_update_block.py
index 6a7f05dc3..1d0cdcbe9 100644
--- a/pynestml/meta_model/ast_update_block.py
+++ b/pynestml/meta_model/ast_update_block.py
@@ -55,8 +55,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/meta_model/ast_variable.py b/pynestml/meta_model/ast_variable.py
index c0645be25..ceca2cf6e 100644
--- a/pynestml/meta_model/ast_variable.py
+++ b/pynestml/meta_model/ast_variable.py
@@ -82,8 +82,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
def resolve_in_own_scope(self):
from pynestml.symbols.symbol import SymbolKind
diff --git a/pynestml/meta_model/ast_while_stmt.py b/pynestml/meta_model/ast_while_stmt.py
index 1a1c51138..413fefe36 100644
--- a/pynestml/meta_model/ast_while_stmt.py
+++ b/pynestml/meta_model/ast_while_stmt.py
@@ -70,8 +70,7 @@ def clone(self):
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
- in_comment=self.in_comment,
- implicit_conversion_factor=self.implicit_conversion_factor)
+ in_comment=self.in_comment)
return dup
diff --git a/pynestml/symbols/unit_type_symbol.py b/pynestml/symbols/unit_type_symbol.py
index 1f9977de0..4c6aa4755 100644
--- a/pynestml/symbols/unit_type_symbol.py
+++ b/pynestml/symbols/unit_type_symbol.py
@@ -137,12 +137,11 @@ def add_or_sub_another_unit(self, other):
def attempt_magnitude_cast(self, other):
if self.differs_only_in_magnitude(other):
- factor = UnitTypeSymbol.get_conversion_factor(other.astropy_unit, self.astropy_unit)
- other.referenced_object.set_implicit_conversion_factor(factor)
- code, message = Messages.get_implicit_magnitude_conversion(self, other, factor)
- Logger.log_message(code=code, message=message,
- error_position=self.referenced_object.get_source_position(),
- log_level=LoggingLevel.INFO)
+ # factor = UnitTypeSymbol.get_conversion_factor(other.astropy_unit, self.astropy_unit)
+ # code, message = Messages.get_implicit_magnitude_conversion(self, other, factor)
+ # Logger.log_message(code=code, message=message,
+ # error_position=self.referenced_object.get_source_position(),
+ # log_level=LoggingLevel.INFO)
return self
diff --git a/pynestml/transformers/non_dimensionalisation_transformer.py b/pynestml/transformers/non_dimensionalisation_transformer.py
new file mode 100644
index 000000000..2985d9087
--- /dev/null
+++ b/pynestml/transformers/non_dimensionalisation_transformer.py
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+#
+# non_dimensionalisation_transformer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST.  If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import annotations
+
+from typing import Any, Dict, Sequence, Mapping, Optional, Union
+
+from quantities.quantity import get_conversion_factor
+from scipy.stats import reciprocal
+
+from pynestml.cocos.co_cos_manager import CoCosManager
+from pynestml.frontend.frontend_configuration import FrontendConfiguration
+from pynestml.meta_model.ast_arithmetic_operator import ASTArithmeticOperator
+from pynestml.meta_model.ast_assignment import ASTAssignment
+from pynestml.meta_model.ast_data_type import ASTDataType
+from pynestml.meta_model.ast_declaration import ASTDeclaration
+from pynestml.meta_model.ast_equations_block import ASTEquationsBlock
+from pynestml.meta_model.ast_expression import ASTExpression
+from pynestml.meta_model.ast_function_call import ASTFunctionCall
+from pynestml.meta_model.ast_inline_expression import ASTInlineExpression
+from pynestml.meta_model.ast_model import ASTModel
+from pynestml.meta_model.ast_node import ASTNode
+from pynestml.meta_model.ast_ode_equation import ASTOdeEquation
+from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression
+from pynestml.meta_model.ast_variable import ASTVariable
+from pynestml.symbols.integer_type_symbol import IntegerTypeSymbol
+from pynestml.symbols.predefined_types import PredefinedTypes
+from pynestml.symbols.real_type_symbol import RealTypeSymbol
+from pynestml.symbols.unit_type_symbol import UnitTypeSymbol
+from pynestml.symbols.error_type_symbol import ErrorTypeSymbol
+from pynestml.symbols.symbol import SymbolKind
+from pynestml.symbols.variable_symbol import BlockType
+from pynestml.transformers.transformer import Transformer
+from pynestml.utils.ast_utils import ASTUtils
+from pynestml.utils.model_parser import ModelParser
+from pynestml.utils.logger import Logger
+from pynestml.utils.logger import LoggingLevel
+from pynestml.utils.string_utils import removesuffix
+from pynestml.visitors.ast_parent_visitor import ASTParentVisitor
+from pynestml.visitors.ast_symbol_table_visitor import ASTSymbolTableVisitor
+from pynestml.visitors.ast_higher_order_visitor import ASTHigherOrderVisitor
+from pynestml.visitors.ast_visitor import ASTVisitor
+import astropy.units as u
+import re
+
+class NonDimVis(ASTVisitor):
+ r"""
+ Base class for non-dimensionalisation transformers.
+ """
+ def __init__(self, preferred_prefix: Dict[str, str]):
+ super().__init__()
+ self.preferred_prefix = preferred_prefix
+ # self.variable_original_metric_prefix_dict = dict()
+
+ PREFIX_FACTORS = {
+ 'Y': 1e24, # yotta
+ 'Z': 1e21, # zetta
+ 'E': 1e18, # exa
+ 'P': 1e15, # peta
+ 'T': 1e12, # tera
+ 'G': 1e9, # giga
+ 'M': 1e6, # mega
+ 'k': 1e3, # kilo
+ 'h': 1e2, # hecto
+ 'da': 1e1, # deca
+ '': 1.0, # no prefix
+ '1': 1.0, # no prefix
+ 'd': 1e-1, # deci
+ 'c': 1e-2, # centi
+ 'm': 1e-3, # milli
+ 'u': 1e-6, # micro (μ)
+ 'n': 1e-9, # nano
+ 'p': 1e-12, # pico
+ 'f': 1e-15, # femto
+ 'a': 1e-18, # atto
+ 'z': 1e-21, # zepto
+ 'y': 1e-24, # yocto
+ }
+
+ def get_conversion_factor_to_si(self, from_unit_str):
+ r"""
+ Return the conversion factor from the unit we have in the NESTML file to SI units.
+ """
+
+ from_unit = u.Unit(from_unit_str)
+ scale = from_unit.si.scale
+
+ return scale
+
+class NonDimensionalisationVarToRealTypeVisitor(NonDimVis):
+ r"""
+ This visitor changes the variable type on the LHS to "real"
+ E.g.: My_declaration V = (30 * 1.0E-03) -> My_declaration real = (30 * 1.0E-03)
+ This visitor has to be called last in the transformation process as the unit type information is needed before
+ """
+ def __init__(self, preferred_prefix: Dict[str, str]):
+ super().__init__(preferred_prefix)
+
+ def visit_variable(self, node: ASTVariable):
+ if (isinstance(node.get_type_symbol(), RealTypeSymbol) or isinstance(node.get_type_symbol(), UnitTypeSymbol)):
+ if(isinstance(node.get_type_symbol(), RealTypeSymbol)):
+ print("\tReal number, no unit\n")
+ elif (isinstance(node.get_type_symbol(), UnitTypeSymbol)):
+ print("The unit is: "+str(node.get_type_symbol().unit.unit))
+ print("The quantity is: "+str(node.get_type_symbol().unit.unit.physical_type))
+
+ parent_node = node.get_parent()
+ new_node_type = RealTypeSymbol()
+ new_variable = ASTVariable(name=node.name, type_symbol=node.get_type_symbol(), scope=node.get_scope())
+ new_data_type = ASTDataType(is_real=True, type_symbol=new_node_type, scope=node.get_scope())
+
+ if isinstance(parent_node, ASTDeclaration):
+ parent_node.variables[0] = new_variable
+ parent_node.data_type = new_data_type
+ pass
+
+
+ def visit_input_port(self, node):
+ # return super().visit_input_port(node)
+ if node.data_type is not None:
+ new_node_type = RealTypeSymbol()
+ new_data_type = ASTDataType(is_real=True, type_symbol=new_node_type, scope=node.get_scope())
+ parent_node = node.get_parent()
+ for index, inputportexpression in enumerate(parent_node.input_definitions):
+ if inputportexpression.name == node.name:
+ new_node = node.clone()
+ new_node.data_type = new_data_type
+ parent_node.input_definitions[index] = new_node
+ pass
+
+
+ def visit_inline_expression(self, node):
+ # return super().visit_inline_expression(node)
+ if (isinstance(node.data_type.type_symbol, RealTypeSymbol) or isinstance(node.data_type.type_symbol, UnitTypeSymbol)):
+ if(isinstance(node.data_type.type_symbol, RealTypeSymbol)):
+ print("\tReal number, no unit\n")
+ elif (isinstance(node.data_type.type_symbol, UnitTypeSymbol)):
+ print("The unit is: "+str(node.data_type.type_symbol.unit.unit))
+ print("The quantity is: "+str(node.data_type.type_symbol.unit.unit.physical_type))
+
+ parent_node = node.get_parent()
+ new_node_type = RealTypeSymbol()
+ # new_variable = ASTVariable(name=node.name, type_symbol=node.get_type_symbol(), scope=node.get_scope())
+ new_data_type = ASTDataType(is_real=True, type_symbol=new_node_type, scope=node.get_scope())
+
+ if isinstance(parent_node, ASTEquationsBlock):
+ for declaration in parent_node.declarations:
+ if declaration.variable_name == node.variable_name:
+ declaration.data_type = new_data_type
+ pass
+
+
+
+
+class NonDimensionalisationPreferredPrefixFactorOnRhsVisitor(NonDimVis):
+ r"""
+ This visitor inserts the inverse value of the preferred prefix in scientific notation as a factor for the old encapsulated RHS expression for declarations and ODE equations
+    E.g.: V_m V = -70 * 1.0E-03, preferred prefix of milli for 'electric potential' -> V_m V = (1.0E+03 * (-70.0 * 1.0E-03))
+ """
+ def __init__(self, preferred_prefix: Dict[str, str], model):
+ super().__init__(preferred_prefix)
+ self.model = model
+
+ def visit_declaration(self, node: ASTVariable) -> None:
+
+ # get preferred prefix that declaring variable has
+ if not node.data_type.is_real:
+ if str(node.data_type.type_symbol.astropy_unit.physical_type) != "unknown":
+ if node.variables[0].name != "__h":
+ for physical_type_string in self.preferred_prefix:
+ if physical_type_string in str(node.data_type.type_symbol.astropy_unit.physical_type):
+ variable_physical_type_string = physical_type_string
+ # variable_physical_type_string = str(node.data_type.type_symbol.astropy_unit.physical_type)
+ inverse_preferred_prefix_this_node_string = f"{1/self.PREFIX_FACTORS[self.preferred_prefix[variable_physical_type_string]]:.1E}"
+ # modify the node.expression to include the metric prefix as a factor in scientific notation on the lhs
+ cloned_node = node.clone()
+ lhs_expression = ASTSimpleExpression(numeric_literal=float(inverse_preferred_prefix_this_node_string), scope=node.get_scope())
+ rhs_expression = node.expression
+ new_sub_node = ASTExpression(is_encapsulated=False,
+ binary_operator=ASTArithmeticOperator(is_times_op=True),
+ lhs=lhs_expression, rhs=rhs_expression, scope=node.get_scope())
+ cloned_node.expression = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope())
+
+
+ for declaration in node.get_parent().declarations:
+ if declaration.variables[0].name == node.variables[0].name:
+ declaration.expression = cloned_node.expression
+ pass
+
+
+ @staticmethod
+ def _derivate_regex(var_names:list)->re:
+ # escaped = map(re.escape, var_names)
+ pattern = rf"^({'|'.join(map(re.escape, var_names))})('+)?$"
+ return re.compile(pattern)
+
+ def visit_ode_equation(self, node: ASTOdeEquation):
+ # insert preferred prefix conversion factor for LHS on rhs
+ var_names = [str(obj) for obj in ASTUtils.all_variables_defined_in_block(self.model.get_state_blocks()+self.model.get_parameters_blocks())]
+ regex = self._derivate_regex(var_names)
+ corresponding_non_diff_variable = regex.match(node.lhs.name).group()
+ if hasattr(ASTUtils.get_variable_by_name(self.model, corresponding_non_diff_variable).type_symbol, "astropy_unit"):
+ corresponding_non_diff_variable_physical_type_string = str(ASTUtils.get_variable_by_name(self.model, corresponding_non_diff_variable).type_symbol.astropy_unit.physical_type)
+ inverse_preferred_prefix_this_node_string = f"{(1.0E-3)*1/self.PREFIX_FACTORS[self.preferred_prefix[corresponding_non_diff_variable_physical_type_string]]:.1E}"
+ # inverse_preferred_prefix_this_node_string = f"{1:.1E}"
+ cloned_node = node.clone()
+ lhs_expression = ASTSimpleExpression(numeric_literal=float(inverse_preferred_prefix_this_node_string), scope=node.get_scope())
+ rhs_expression = ASTExpression(is_encapsulated=True, expression=node.rhs)
+ new_sub_node = ASTExpression(is_encapsulated=False,
+ binary_operator=ASTArithmeticOperator(is_times_op=True),
+ lhs=lhs_expression, rhs=rhs_expression, scope=node.get_scope())
+ cloned_node.rhs = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope())
+ for declaration in node.get_parent().declarations:
+ if declaration.lhs.name == node.lhs.name:
+ declaration.rhs = cloned_node.rhs
+ return
+ else:
+ return
+
+
+ def visit_inline_expression(self, node):
+ if not node.data_type.is_real:
+ if str(node.data_type.type_symbol.astropy_unit.physical_type) != "unknown":
+ for physical_type_string in self.preferred_prefix:
+ if physical_type_string in str(node.data_type.type_symbol.astropy_unit.physical_type):
+ variable_physical_type_string = physical_type_string
+ # variable_physical_type_string = str(node.data_type.type_symbol.astropy_unit.physical_type)
+ inverse_preferred_prefix_this_node_string = f"{1/self.PREFIX_FACTORS[self.preferred_prefix[variable_physical_type_string]]:.1E}"
+ # modify the node.expression to include the metric prefix as a factor in scientific notation on the lhs
+ cloned_node = node.clone()
+ lhs_expression = ASTSimpleExpression(numeric_literal=float(inverse_preferred_prefix_this_node_string), scope=node.get_scope())
+ rhs_expression = node.expression
+ new_sub_node = ASTExpression(is_encapsulated=False,
+ binary_operator=ASTArithmeticOperator(is_times_op=True),
+ lhs=lhs_expression, rhs=rhs_expression, scope=node.get_scope())
+ cloned_node.expression = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope())
+ pass
+ for declaration in node.get_parent().declarations:
+ if declaration.variable_name == node.variable_name:
+ declaration.expression = cloned_node.expression
+ # return super().visit_inline_expression(node)
+ pass
+
+
+class NonDimensionalisationVariableVisitor(NonDimVis):
+ r"""
+ This visitor changes unit symbols and numeric prefixes to numerical factors in expressions on RHSs, where the numerical prefix and unit are positioned after an expression
+ E.g.: Var_a V = .... + (4 + 3) * mV -> Var_a V = .... + ((4 + 3) * 1.0E-03)
+ """
+ def __init__(self, preferred_prefix: Dict[str, str]):
+ super().__init__(preferred_prefix)
+
+ def _is_valid_astropy_unit(self, unit_string):
+ """Check if a string can be interpreted as an astropy unit"""
+ try:
+ u.Unit(str(unit_string))
+ return True
+ except (ValueError, TypeError, u.UnitTypeError):
+ return False
+
+ def visit_variable(self, node: ASTVariable) -> None:
+ # if not ((isinstance(node.get_type_symbol(), RealTypeSymbol)) or (isinstance(node.get_type_symbol(), UnitTypeSymbol)) or (node.get_type_symbol() is None)):
+ if hasattr(node.get_parent(), "variable"):
+ if self._is_valid_astropy_unit(node.name):
+ if (isinstance(node, ASTVariable) and node.get_parent().variable.name == node.get_name() and node.get_parent().numeric_literal == None):
+ # Then the variable encountered is something like mV, without a numeric literal in front, e.g. (4 + 3) * mV
+ conversion_factor = f"{super().get_conversion_factor_to_si(node.get_name()):.1E}"
+ parent_node = node.get_parent()
+ grandparent_node = parent_node.get_parent()
+ new_expression = ASTSimpleExpression(numeric_literal=float(str(conversion_factor)), scope=node.get_scope())
+ if grandparent_node.binary_operator is not None:
+ if grandparent_node.rhs == parent_node:
+ grandparent_node.rhs = new_expression
+ elif grandparent_node.lhs == parent_node:
+ grandparent_node.lhs = new_expression
+ pass
+ else:
+ pass
+ # raise Exception("This case has not yet been implemented!")
+
+
+
+class NonDimensionalisationSimpleExpressionVisitor(NonDimVis):
+ r"""
+ This Visitor converts unit-ful simple expressions with metric prefixes to real type expressions in the corresponding SI base unit in RHSs
+ E.g.: Var_a V = ...... * 3MV -> Var_a V = ...... * (3 * 1.0E+06)
+ """
+ def __init__(self, preferred_prefix: Dict[str, str], model):
+ super().__init__(preferred_prefix)
+ self.model = model
+
+ def _is_valid_astropy_unit(self, unit_string):
+ """Check if a string can be interpreted as an astropy unit"""
+ try:
+ u.Unit(str(unit_string))
+ return True
+ except (ValueError, TypeError, u.UnitTypeError):
+ return False
+
+ def visit_simple_expression(self, node):
+ if hasattr(node, "variable"):
+ if str(node.variable) == "spikes":
+ return
+ if str(node.variable) == "I_stim":
+ pass
+ if node.get_numeric_literal() is not None:
+ print("Numeric literal: " + str(node.get_numeric_literal()))
+ if(isinstance(node.type, RealTypeSymbol)):
+ print("\tReal number, no unit\n")
+ return
+ elif (isinstance(node.type, UnitTypeSymbol)):
+ # the expression 3 MV is a SimpleExpression for example
+ parent_node = node.get_parent()
+ print("\tUnit: " + str(node.type.unit.unit))
+ conversion_factor = f"{super().get_conversion_factor_to_si(node.variable.name):.1E}"
+ numeric_literal = node.get_numeric_literal()
+ lhs_expression = ASTSimpleExpression(numeric_literal=float(numeric_literal), scope=node.get_scope())
+ rhs_expression = ASTSimpleExpression(numeric_literal=float(str(conversion_factor)), scope=node.get_scope())
+ if isinstance(parent_node, ASTExpression):
+ new_sub_node = ASTExpression(is_encapsulated=False,
+ binary_operator=ASTArithmeticOperator(is_times_op=True),
+ lhs=lhs_expression, rhs=rhs_expression, scope=node.get_scope())
+ new_node = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope(),
+ unary_operator=parent_node.unary_operator)
+ if parent_node.binary_operator is not None:
+ parent_node.binary_operator = parent_node.binary_operator
+ if parent_node.rhs == node:
+ parent_node.rhs = new_node
+ elif parent_node.lhs == node:
+ parent_node.lhs = new_node
+ else:
+ raise Exception("Node is neither lhs nor rhs of parent, possibly expression - should not execute until here.")
+ elif parent_node.binary_operator is None:
+ parent_node.rhs = None
+ parent_node.expression = new_node
+ parent_node.unary_operator = None
+ else:
+ raise Exception("This case is also possible and needs handling")
+ if isinstance(parent_node, ASTDeclaration):
+ new_sub_node = ASTExpression(is_encapsulated=False,
+ binary_operator=ASTArithmeticOperator(is_times_op=True),
+ lhs=lhs_expression, rhs=rhs_expression, scope=node.get_scope())
+ new_node = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope())
+ parent_node.expression = new_node
+ pass
+
+
+ elif (isinstance(node.type, IntegerTypeSymbol)):
+ print("\tInteger type number, no unit\n")
+ else:
+ raise Exception("Node type is neither RealTypeSymbol nor UnitTypeSymbol")
+ return
+ if node.function_call is None:
+ if isinstance(node.get_parent(), ASTFunctionCall):
+ return
+ if node.get_numeric_literal() is None:
+ # get physical type of node
+ if isinstance(node.type, UnitTypeSymbol):
+ if (self._is_valid_astropy_unit(node.variable.name) and
+ (node.get_parent().binary_operator is not None or node.get_parent().unary_operator is not None)):
+ # This should be handled by visit_variable instead - return early
+ return
+ if not (hasattr(node.get_parent(), "type") and isinstance(node.get_parent().type, ErrorTypeSymbol)):
+ if str(node.type.astropy_unit.physical_type) != 'unknown':
+ for physical_type_string in self.preferred_prefix:
+ if physical_type_string in str(node.type.astropy_unit.physical_type):
+ variable_physical_type_string = physical_type_string
+ # variable_physical_type_string = str(node.type.astropy_unit.physical_type)
+ # get preferred prefix for this node
+ preferred_prefix_this_node_string = f"{self.PREFIX_FACTORS[self.preferred_prefix[variable_physical_type_string]]:.1E}"
+ # create a new sub node that multiplies the variable with the reciprocal of the preferred prefix
+ lhs_expression = node.clone()
+ rhs_expression = ASTSimpleExpression(numeric_literal=float(preferred_prefix_this_node_string), scope=node.get_scope())
+ new_sub_node = ASTExpression(is_encapsulated=False, binary_operator=ASTArithmeticOperator(is_times_op=True),
+ lhs=lhs_expression, rhs=rhs_expression, scope=node.get_scope())
+ parent_node = node.get_parent()
+ if hasattr(parent_node, "unary_operator"):
+ # create new node encapsulating multiplication
+
+ new_node = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope(),
+ unary_operator=parent_node.unary_operator)
+ # attach new node to parent node
+ grandparent_node = parent_node.get_parent()
+ if any(hasattr(parent_node, attr) for attr in ["lhs", "rhs"]):
+ if node == parent_node.lhs:
+ if parent_node.binary_operator is not None:
+ parent_node.binary_operator = parent_node.binary_operator
+ parent_node.lhs = new_node
+ parent_node.rhs = parent_node.rhs
+ return
+ elif parent_node.binary_operator is None:
+ parent_node.rhs = None
+ parent_node.expression = new_node
+ parent_node.unary_operator = None
+ return
+ if node == parent_node.rhs:
+ if parent_node.binary_operator is not None:
+ parent_node.binary_operator = parent_node.binary_operator
+ parent_node.rhs = new_node
+ parent_node.lhs = parent_node.lhs
+ return
+ elif parent_node.binary_operator is None:
+ parent_node.rhs = None
+ parent_node.expression = new_node
+ parent_node.unary_operator = None
+ return
+ if parent_node == grandparent_node.lhs:
+ grandparent_node.lhs = new_node
+ return
+ if parent_node == grandparent_node.rhs:
+ grandparent_node.rhs = new_node
+ return
+ elif(node == parent_node.expression):
+ parent_node.expression = new_node
+ return
+ else:
+ raise Exception("Parent node has no attribute lhs, rhs or expression.")
+ elif not (hasattr(parent_node, "unary_operator")):
+ # create new node encapsulating multiplication
+ new_node = ASTExpression(is_encapsulated=True, expression=new_sub_node, scope=node.get_scope())
+ # attach new node to parent node
+ if any(hasattr(parent_node, attr) for attr in ["lhs", "rhs"]):
+ if node == parent_node.lhs:
+ if parent_node.binary_operator is not None:
+ parent_node.binary_operator = parent_node.binary_operator
+ parent_node.lhs = new_node
+ parent_node.rhs = parent_node.rhs
+ return
+ elif parent_node.binary_operator is None:
+ parent_node.rhs = None
+ parent_node.expression = new_node
+ parent_node.unary_operator = None
+ return
+ if node == parent_node.rhs:
+ if not hasattr(node, "binary_operator"):
+ parent_node.expression = new_node
+ return
+ elif parent_node.binary_operator is not None:
+ parent_node.binary_operator = parent_node.binary_operator
+ parent_node.rhs = new_node
+ parent_node.lhs = parent_node.lhs
+ return
+ elif parent_node.binary_operator is None:
+ parent_node.rhs = None
+ parent_node.expression = new_node
+ parent_node.unary_operator = None
+ return
+ elif(hasattr(parent_node, "expression")):
+ parent_node.expression = new_node
+ return
+ else:
+ raise Exception("Parent node has no rhs or lhs.")
+
+ super().visit_simple_expression(node)
+
+
+class NonDimensionalisationTransformer(Transformer):
+ r"""Remove all units from the model and replace them with real type.
+
+ NESTML model:
+ V_m V = -70 mV
+
+ generated code:
+ float V_m = -0.07 # implicit: units of V
+ float V_m = -70 # implicit: units of mV
+
+
+ """
+
+ # _default_options = {
+ # "quantity_to_preferred_prefix": {
+ # "time": "m",
+ # "voltage": "m"
+ # },
+ # "variable_to_preferred_prefix": {
+ # "V_m": "m",
+ # "V_dend": "u"
+ # }
+ # }
+
+ _default_options = {
+ "quantity_to_preferred_prefix": {
+ },
+ "variable_to_preferred_prefix": {
+ }
+ }
+
+
+
+ def __init__(self, options: Optional[Mapping[str, Any]] = None):
+ super().__init__(options)
+
+ def transform_(self, model: Union[ASTNode, Sequence[ASTNode]]) -> Union[ASTNode, Sequence[ASTNode]]:
+ transformed_model = model.clone()
+
+ variable_visitor = NonDimensionalisationVariableVisitor(self.get_option("quantity_to_preferred_prefix"))
+ simple_expression_visitor = NonDimensionalisationSimpleExpressionVisitor(self.get_option("quantity_to_preferred_prefix"), model)
+ declaration_visitor = NonDimensionalisationPreferredPrefixFactorOnRhsVisitor(self.get_option("quantity_to_preferred_prefix"), model)
+ var_to_real_type_visitor = NonDimensionalisationVarToRealTypeVisitor(self.get_option("quantity_to_preferred_prefix"))
+
+ transformed_model.accept(ASTParentVisitor())
+ transformed_model.accept(variable_visitor)
+ transformed_model.accept(simple_expression_visitor)
+ transformed_model.accept(declaration_visitor)
+ transformed_model.accept(var_to_real_type_visitor)
+ transformed_model.accept(ASTSymbolTableVisitor())
+
+ print("--------------------------------")
+ print("model after transformation:")
+ print("--------------------------------")
+ print(transformed_model)
+ with open("transformed_model_test_exp_in_equation_block.txt", "a") as f:
+ f.write(str(transformed_model))
+
+ return transformed_model
+
+ def transform(self, models: Union[ASTNode, Sequence[ASTNode]]) -> Union[ASTNode, Sequence[ASTNode]]:
+ transformed_models = []
+
+ single = False
+ if isinstance(models, ASTNode):
+ single = True
+ models = [models]
+
+ for model in models:
+ transformed_models.append(self.transform_(model))
+
+ if single:
+ return transformed_models[0]
+
+ return transformed_models
diff --git a/pynestml/utils/ast_utils.py b/pynestml/utils/ast_utils.py
index aea86a3b7..f8e533e31 100644
--- a/pynestml/utils/ast_utils.py
+++ b/pynestml/utils/ast_utils.py
@@ -26,6 +26,8 @@
import odetoolbox
+from astropy import units as u
+
from pynestml.codegeneration.printers.ast_printer import ASTPrinter
from pynestml.codegeneration.printers.cpp_variable_printer import CppVariablePrinter
from pynestml.frontend.frontend_configuration import FrontendConfiguration
@@ -1569,6 +1571,58 @@ def get_parameter_variable_by_name(cls, node: ASTModel, var_name: str) -> ASTVar
return var
return None
+ @staticmethod
+ def _to_base_value_from_string(quantity_str):
+ local_dict = {'u': u}
+ quantity = eval(quantity_str, {"__builtins__": {}}, local_dict)  # NOTE(review): eval on quantity_str — ensure callers only pass trusted, internally generated strings
+ canonical_unit = u.get_physical_type(quantity.unit)._unit
+ # Return the SI base value and unit name
+ return quantity.si.value, str(canonical_unit)
+
+
+ # @classmethod
+ # def generate_updated_state_dict(cls, neuron: ASTModel, parameter_value_dict: dict) -> dict:
+ # state_block = neuron.get_state_blocks()[0]
+ # updated_state_dict = {}
+ # for declarations in state_block.get_declarations():
+ # if isinstance(declarations.expression, ASTSimpleExpression) and declarations.expression.numeric_literal == None:
+ # if declarations.expression.variable.name in parameter_value_dict:
+ # updated_state_dict[declarations.variables[0]] = parameter_value_dict[declarations.expression.variable.name]
+ # pass
+ # if isinstance(declarations.expression, ASTSimpleExpression) and declarations.expression.numeric_literal != None:
+ # expr = str(declarations.expression.numeric_literal) + '* u.' + declarations.expression.variable.name
+ # float_value_in_si, unit_in_si = cls._to_base_value_from_string(cls, expr)
+ # declarations.expression.numeric_literal = float_value_in_si
+ # updated_state_dict[declarations.variables[0]] = float_value_in_si
+ #
+ # return updated_state_dict
+
+ @classmethod
+ def generate_updated_state_dict(cls, initial_values: dict, parameter_value_dict: dict) -> dict:
+ updated_state_dict = {}
+ for key, value in initial_values.items():
+ if value in parameter_value_dict:
+ updated_state_dict[key] = float(parameter_value_dict[value])
+ else:
+ updated_state_dict[key] = float(value)
+ return updated_state_dict
+
+ # @classmethod
+ # def get_propagators_as_math_expressions(cls, neuron: ASTNode, parameters: dict) -> dict:
+ # propagators_as_math_expressions = {}
+ # propagator_expressions = neuron.analytic_solver["propagators"]
+ # for propagator_expression in propagator_expressions:
+ # # propagator_expressions[propagator_expression] = propagator_expressions[propagator_expression].replace(
+ # # '__h', str(1))
+ # # for symbol, value in parameters.items():
+ # # propagator_expressions[propagator_expression] = propagator_expressions[propagator_expression].replace(symbol, str(value))
+ # # propagators_as_math_expressions.update({propagator_expression: propagator_expressions[propagator_expression]})
+ # propagators_as_math_expressions[propagator_expression] = propagator_expressions[propagator_expression]
+ # return propagators_as_math_expressions
+ #
+ # # @classmethod
+ # # def
+
@classmethod
def get_internal_by_name(cls, node: ASTModel, var_name: str) -> ASTDeclaration:
"""
@@ -2256,6 +2310,18 @@ def remove_kernel_definitions_from_equations_block(cls, model: ASTModel) -> ASTD
return decl_to_remove
+ @classmethod
+ def add_timestep_symbol(cls, model: ASTModel) -> None:
+ """
+ Add timestep variable to the internals block
+ """
+ from pynestml.utils.model_parser import ModelParser
+ assert model.get_initial_value(
+ "__h") is None, "\"__h\" is a reserved name, please do not use variables by this name in your NESTML file"
+ assert not "__h" in [sym.name for sym in model.get_internal_symbols(
+ )], "\"__h\" is a reserved name, please do not use variables by this name in your NESTML file"
+ model.add_to_internals_block(ModelParser.parse_declaration('__h ms = resolution()'), index=0)
+
@classmethod
def generate_kernel_buffers(cls, model: ASTModel, equations_block: Union[ASTEquationsBlock, List[ASTEquationsBlock]]) -> Mapping[ASTKernel, ASTInputPort]:
"""
diff --git a/pynestml/utils/cloning_helpers.py b/pynestml/utils/cloning_helpers.py
index 5b9976c93..d1ca3b929 100644
--- a/pynestml/utils/cloning_helpers.py
+++ b/pynestml/utils/cloning_helpers.py
@@ -30,7 +30,7 @@ def clone_numeric_literal(numeric_literal):
# Python basic type
return numeric_literal
- if type(numeric_literal) in [np.int, np.int8, np.int16, np.int32, np.int64]:
+ if type(numeric_literal) in [np.int8, np.int16, np.int32, np.int64, np.float64]:
# NumPy types
return numeric_literal.copy()
diff --git a/pynestml/utils/messages.py b/pynestml/utils/messages.py
index 1930d91e0..220a0e44c 100644
--- a/pynestml/utils/messages.py
+++ b/pynestml/utils/messages.py
@@ -216,12 +216,6 @@ def get_convolve_needs_buffer_parameter(cls):
message = 'Convolve requires a buffer variable as second parameter!'
return MessageCode.CONVOLVE_NEEDS_BUFFER_PARAMETER, message
- @classmethod
- def get_implicit_magnitude_conversion(cls, lhs, rhs, conversion_factor):
- message = 'Implicit magnitude conversion from %s to %s with factor %s ' % (
- lhs.print_symbol(), rhs.print_symbol(), conversion_factor)
- return MessageCode.IMPLICIT_CAST, message
-
@classmethod
def get_function_call_implicit_cast(
cls,
diff --git a/pynestml/utils/model_parser.py b/pynestml/utils/model_parser.py
index f05cdf0e1..497c5acc3 100644
--- a/pynestml/utils/model_parser.py
+++ b/pynestml/utils/model_parser.py
@@ -71,7 +71,6 @@
from pynestml.utils.error_listener import NestMLErrorListener
from pynestml.utils.logger import Logger, LoggingLevel
from pynestml.utils.messages import Messages
-from pynestml.visitors.assign_implicit_conversion_factors_visitor import AssignImplicitConversionFactorsVisitor
from pynestml.visitors.ast_builder_visitor import ASTBuilderVisitor
from pynestml.visitors.ast_higher_order_visitor import ASTHigherOrderVisitor
from pynestml.visitors.ast_parent_visitor import ASTParentVisitor
@@ -153,7 +152,6 @@ def parse_file(cls, file_path=None):
model.accept(ASTSymbolTableVisitor())
SymbolTable.add_model_scope(model.get_name(), model.get_scope())
Logger.set_current_node(model)
- model.accept(AssignImplicitConversionFactorsVisitor())
Logger.set_current_node(None)
# store source paths
diff --git a/pynestml/utils/type_caster.py b/pynestml/utils/type_caster.py
deleted file mode 100644
index 2f7827bad..000000000
--- a/pynestml/utils/type_caster.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# type_caster.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see .
-
-from pynestml.symbols.unit_type_symbol import UnitTypeSymbol
-from pynestml.utils.logger import Logger, LoggingLevel
-from pynestml.utils.messages import Messages
-
-
-class TypeCaster:
- @staticmethod
- def do_magnitude_conversion_rhs_to_lhs(_rhs_type_symbol, _lhs_type_symbol, _containing_expression):
- """
- Determine conversion factor from rhs to lhs, register it with the relevant expression
- """
- _containing_expression.set_implicit_conversion_factor(
- UnitTypeSymbol.get_conversion_factor(_rhs_type_symbol.astropy_unit,
- _lhs_type_symbol.astropy_unit))
- code, message = Messages.get_implicit_magnitude_conversion(_lhs_type_symbol, _rhs_type_symbol,
- _containing_expression.get_implicit_conversion_factor())
- Logger.log_message(code=code, message=message,
- error_position=_containing_expression.get_source_position(),
- log_level=LoggingLevel.INFO)
-
- @staticmethod
- def try_to_recover_or_error(_lhs_type_symbol, _rhs_type_symbol, _containing_expression, set_implicit_conversion_factor_on_lhs=False):
- if _rhs_type_symbol.is_castable_to(_lhs_type_symbol):
- if isinstance(_lhs_type_symbol, UnitTypeSymbol) \
- and isinstance(_rhs_type_symbol, UnitTypeSymbol):
- conversion_factor = UnitTypeSymbol.get_conversion_factor(_rhs_type_symbol.astropy_unit, _lhs_type_symbol.astropy_unit)
-
- if conversion_factor is None:
- # error during conversion
- code, message = Messages.get_type_different_from_expected(_lhs_type_symbol, _rhs_type_symbol)
- Logger.log_message(error_position=_containing_expression.get_source_position(),
- code=code, message=message, log_level=LoggingLevel.ERROR)
- return
-
- if set_implicit_conversion_factor_on_lhs and not conversion_factor == 1.:
- # the units are mutually convertible, but require a factor unequal to 1 (e.g. mV and A*Ohm)
- TypeCaster.do_magnitude_conversion_rhs_to_lhs(_rhs_type_symbol, _lhs_type_symbol, _containing_expression)
-
- # the units are mutually convertible (e.g. V and A*Ohm)
- code, message = Messages.get_implicit_cast_rhs_to_lhs(_rhs_type_symbol.print_symbol(),
- _lhs_type_symbol.print_symbol())
- Logger.log_message(error_position=_containing_expression.get_source_position(),
- code=code, message=message, log_level=LoggingLevel.INFO)
- return
-
- code, message = Messages.get_type_different_from_expected(_lhs_type_symbol, _rhs_type_symbol)
- Logger.log_message(error_position=_containing_expression.get_source_position(),
- code=code, message=message, log_level=LoggingLevel.ERROR)
diff --git a/pynestml/visitors/__init__.py b/pynestml/visitors/__init__.py
index 541cabdd1..f50261123 100644
--- a/pynestml/visitors/__init__.py
+++ b/pynestml/visitors/__init__.py
@@ -35,6 +35,7 @@
'ast_line_operation_visitor.py',
'ast_logical_not_visitor.py',
'ast_numeric_literal_visitor.py',
+ 'ast_parent_visitor.py',
'ast_parent_aware_visitor.py',
'ast_parentheses_visitor.py',
'ast_power_visitor.py',
diff --git a/pynestml/visitors/assign_implicit_conversion_factors_visitor.py b/pynestml/visitors/assign_implicit_conversion_factors_visitor.py
deleted file mode 100644
index bad89d52c..000000000
--- a/pynestml/visitors/assign_implicit_conversion_factors_visitor.py
+++ /dev/null
@@ -1,326 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# assign_implicit_conversion_factors_visitor.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see .
-
-from typing import Sequence, Union
-
-from pynestml.meta_model.ast_compound_stmt import ASTCompoundStmt
-from pynestml.meta_model.ast_declaration import ASTDeclaration
-from pynestml.meta_model.ast_inline_expression import ASTInlineExpression
-from pynestml.meta_model.ast_model import ASTModel
-from pynestml.meta_model.ast_node import ASTNode
-from pynestml.meta_model.ast_small_stmt import ASTSmallStmt
-from pynestml.meta_model.ast_stmt import ASTStmt
-from pynestml.symbols.error_type_symbol import ErrorTypeSymbol
-from pynestml.symbols.predefined_types import PredefinedTypes
-from pynestml.symbols.symbol import SymbolKind
-from pynestml.symbols.template_type_symbol import TemplateTypeSymbol
-from pynestml.symbols.variadic_type_symbol import VariadicTypeSymbol
-from pynestml.utils.ast_source_location import ASTSourceLocation
-from pynestml.utils.ast_utils import ASTUtils
-from pynestml.utils.logger import LoggingLevel, Logger
-from pynestml.utils.logging_helper import LoggingHelper
-from pynestml.utils.messages import Messages
-from pynestml.utils.type_caster import TypeCaster
-from pynestml.visitors.ast_visitor import ASTVisitor
-
-
-class AssignImplicitConversionFactorsVisitor(ASTVisitor):
- r"""
- Assign implicit conversion factors in expressions.
- """
-
- def visit_model(self, model: ASTModel):
- self.__assign_return_types(model)
-
- def visit_declaration(self, node):
- """
- Visits a single declaration and asserts that type of lhs is equal to type of rhs.
- :param node: a single declaration.
- :type node: ASTDeclaration
- """
- assert isinstance(node, ASTDeclaration)
- if node.has_expression():
- if node.get_expression().get_source_position().equals(ASTSourceLocation.get_added_source_position()):
- # no type checks are executed for added nodes, since we assume correctness
- return
- lhs_type = node.get_data_type().get_type_symbol()
- rhs_type = node.get_expression().type
- if isinstance(rhs_type, ErrorTypeSymbol):
- LoggingHelper.drop_missing_type_error(node)
- return
- if self.__types_do_not_match(lhs_type, rhs_type):
- TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
-
- def visit_inline_expression(self, node):
- """
- Visits a single inline expression and asserts that type of lhs is equal to type of rhs.
- """
- assert isinstance(node, ASTInlineExpression)
- lhs_type = node.get_data_type().get_type_symbol()
- rhs_type = node.get_expression().type
- if isinstance(rhs_type, ErrorTypeSymbol):
- LoggingHelper.drop_missing_type_error(node)
- return
-
- if self.__types_do_not_match(lhs_type, rhs_type):
- TypeCaster.try_to_recover_or_error(lhs_type, rhs_type, node.get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
-
- def visit_assignment(self, node):
- """
- Visits a single expression and assures that type(lhs) == type(rhs).
- :param node: a single assignment.
- :type node: ASTAssignment
- """
- from pynestml.meta_model.ast_assignment import ASTAssignment
- assert isinstance(node, ASTAssignment)
-
- if node.get_source_position().equals(ASTSourceLocation.get_added_source_position()):
- # no type checks are executed for added nodes, since we assume correctness
- return
- if node.is_direct_assignment: # case a = b is simple
- self.handle_simple_assignment(node)
- else:
- self.handle_compound_assignment(node) # e.g. a *= b
-
- def handle_compound_assignment(self, node):
- rhs_expr = node.get_expression()
- lhs_variable_symbol = node.get_variable().resolve_in_own_scope()
- rhs_type_symbol = rhs_expr.type
-
- if lhs_variable_symbol is None:
- code, message = Messages.get_equation_var_not_in_state_block(node.get_variable().get_complete_name())
- Logger.log_message(code=code, message=message, error_position=node.get_source_position(),
- log_level=LoggingLevel.ERROR)
- return
-
- if isinstance(rhs_type_symbol, ErrorTypeSymbol):
- LoggingHelper.drop_missing_type_error(node)
- return
-
- lhs_type_symbol = lhs_variable_symbol.get_type_symbol()
-
- if node.is_compound_product:
- if self.__types_do_not_match(lhs_type_symbol, lhs_type_symbol * rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_type_symbol, lhs_type_symbol * rhs_type_symbol,
- node.get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
- return
- return
-
- if node.is_compound_quotient:
- if self.__types_do_not_match(lhs_type_symbol, lhs_type_symbol / rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_type_symbol, lhs_type_symbol / rhs_type_symbol,
- node.get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
- return
- return
-
- assert node.is_compound_sum or node.is_compound_minus
- if self.__types_do_not_match(lhs_type_symbol, rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_type_symbol, rhs_type_symbol,
- node.get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
-
- @staticmethod
- def __types_do_not_match(lhs_type_symbol, rhs_type_symbol):
- if lhs_type_symbol is None:
- return True
-
- return not lhs_type_symbol.equals(rhs_type_symbol)
-
- def handle_simple_assignment(self, node):
- from pynestml.symbols.symbol import SymbolKind
- lhs_variable_symbol = node.get_scope().resolve_to_symbol(node.get_variable().get_complete_name(),
- SymbolKind.VARIABLE)
-
- rhs_type_symbol = node.get_expression().type
- if isinstance(rhs_type_symbol, ErrorTypeSymbol):
- LoggingHelper.drop_missing_type_error(node)
- return
-
- if lhs_variable_symbol is not None and self.__types_do_not_match(lhs_variable_symbol.get_type_symbol(),
- rhs_type_symbol):
- TypeCaster.try_to_recover_or_error(lhs_variable_symbol.get_type_symbol(), rhs_type_symbol,
- node.get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
-
- def visit_function_call(self, node):
- """
- Check consistency for a single function call: check if the called function has been declared, whether the number and types of arguments correspond to the declaration, etc.
-
- :param node: a single function call.
- :type node: ASTFunctionCall
- """
- func_name = node.get_name()
-
- if func_name == 'convolve':
- return
-
- symbol = node.get_scope().resolve_to_symbol(node.get_name(), SymbolKind.FUNCTION)
-
- if symbol is None and ASTUtils.is_function_delay_variable(node):
- return
-
- # first check if the function has been declared
- if symbol is None:
- code, message = Messages.get_function_not_declared(node.get_name())
- Logger.log_message(error_position=node.get_source_position(), log_level=LoggingLevel.ERROR,
- code=code, message=message)
- return
-
- # check if the number of arguments is the same as in the symbol; accept anything for variadic types
- is_variadic: bool = len(symbol.get_parameter_types()) == 1 and isinstance(symbol.get_parameter_types()[0], VariadicTypeSymbol)
- if (not is_variadic) and len(node.get_args()) != len(symbol.get_parameter_types()):
- code, message = Messages.get_wrong_number_of_args(str(node), len(symbol.get_parameter_types()),
- len(node.get_args()))
- Logger.log_message(code=code, message=message, log_level=LoggingLevel.ERROR,
- error_position=node.get_source_position())
- return
-
- # finally check if the call is correctly typed
- expected_types = symbol.get_parameter_types()
- actual_args = node.get_args()
- actual_types = [arg.type for arg in actual_args]
- for actual_arg, actual_type, expected_type in zip(actual_args, actual_types, expected_types):
- if isinstance(actual_type, ErrorTypeSymbol):
- code, message = Messages.get_type_could_not_be_derived(actual_arg)
- Logger.log_message(code=code, message=message, log_level=LoggingLevel.ERROR,
- error_position=actual_arg.get_source_position())
- return
-
- if isinstance(expected_type, VariadicTypeSymbol):
- # variadic type symbol accepts anything
- return
-
- if not actual_type.equals(expected_type) and not isinstance(expected_type, TemplateTypeSymbol):
- TypeCaster.try_to_recover_or_error(expected_type, actual_type, actual_arg,
- set_implicit_conversion_factor_on_lhs=True)
-
- def __assign_return_types(self, _node):
- for userDefinedFunction in _node.get_functions():
- symbol = userDefinedFunction.get_scope().resolve_to_symbol(userDefinedFunction.get_name(),
- SymbolKind.FUNCTION)
- # first ensure that the block contains at least one statement
- if symbol is not None and len(userDefinedFunction.get_stmts_body().get_stmts()) > 0:
- # now check that the last statement is a return
- self.__check_return_recursively(userDefinedFunction,
- symbol.get_return_type(),
- userDefinedFunction.get_stmts_body().get_stmts(),
- False)
- # now if it does not have a statement, but uses a return type, it is an error
- elif symbol is not None and userDefinedFunction.has_return_type() and \
- not symbol.get_return_type().equals(PredefinedTypes.get_void_type()):
- code, message = Messages.get_no_return()
- Logger.log_message(node=_node, code=code, message=message,
- error_position=userDefinedFunction.get_source_position(),
- log_level=LoggingLevel.ERROR)
-
- def __check_return_recursively(self, processed_function, type_symbol=None, stmts=None, ret_defined: bool = False) -> None:
- """
- For a handed over statement, it checks if the statement is a return statement and if it is typed according to the handed over type symbol.
- :param type_symbol: a single type symbol
- :type type_symbol: type_symbol
- :param stmts: a list of statements, either simple or compound
- :type stmts: list(ASTSmallStmt,ASTCompoundStmt)
- :param ret_defined: indicates whether a ret has already been defined after this block of stmt, thus is not
- necessary. Implies that the return has been defined in the higher level block
- """
- # in order to ensure that in the sub-blocks, a return is not necessary, we check if the last one in this
- # block is a return statement, thus it is not required to have a return in the sub-blocks, but optional
- last_statement = stmts[len(stmts) - 1]
- ret_defined = False or ret_defined
- if (len(stmts) > 0 and isinstance(last_statement, ASTStmt)
- and last_statement.is_small_stmt()
- and last_statement.small_stmt.is_return_stmt()):
- ret_defined = True
-
- # now check that returns are there if necessary and correctly typed
- for c_stmt in stmts:
- if c_stmt.is_small_stmt():
- stmt = c_stmt.small_stmt
- else:
- stmt = c_stmt.compound_stmt
-
- # if it is a small statement, check if it is a return statement
- if isinstance(stmt, ASTSmallStmt) and stmt.is_return_stmt():
- # first check if the return is the last one in this block of statements
- if stmts.index(c_stmt) != (len(stmts) - 1):
- code, message = Messages.get_not_last_statement('Return')
- Logger.log_message(error_position=stmt.get_source_position(),
- code=code, message=message,
- log_level=LoggingLevel.WARNING)
-
- # now check that it corresponds to the declared type
- if stmt.get_return_stmt().has_expression() and type_symbol is PredefinedTypes.get_void_type():
- code, message = Messages.get_type_different_from_expected(PredefinedTypes.get_void_type(),
- stmt.get_return_stmt().get_expression().type)
- Logger.log_message(error_position=stmt.get_source_position(),
- message=message, code=code, log_level=LoggingLevel.ERROR)
-
- # if it is not void check if the type corresponds to the one stated
- if not stmt.get_return_stmt().has_expression() and \
- not type_symbol.equals(PredefinedTypes.get_void_type()):
- code, message = Messages.get_type_different_from_expected(PredefinedTypes.get_void_type(),
- type_symbol)
- Logger.log_message(error_position=stmt.get_source_position(),
- message=message, code=code, log_level=LoggingLevel.ERROR)
-
- if stmt.get_return_stmt().has_expression():
- type_of_return = stmt.get_return_stmt().get_expression().type
- if isinstance(type_of_return, ErrorTypeSymbol):
- code, message = Messages.get_type_could_not_be_derived(processed_function.get_name())
- Logger.log_message(error_position=stmt.get_source_position(),
- code=code, message=message, log_level=LoggingLevel.ERROR)
- elif not type_of_return.equals(type_symbol):
- TypeCaster.try_to_recover_or_error(type_symbol, type_of_return,
- stmt.get_return_stmt().get_expression(),
- set_implicit_conversion_factor_on_lhs=True)
- elif isinstance(stmt, ASTCompoundStmt):
- # otherwise it is a compound stmt, thus check recursively
- if stmt.is_if_stmt():
- self.__check_return_recursively(processed_function,
- type_symbol,
- stmt.get_if_stmt().get_if_clause().get_stmts_body().get_stmts(),
- ret_defined)
- for else_ifs in stmt.get_if_stmt().get_elif_clauses():
- self.__check_return_recursively(processed_function,
- type_symbol, else_ifs.get_stmts_body().get_stmts(), ret_defined)
- if stmt.get_if_stmt().has_else_clause():
- self.__check_return_recursively(processed_function,
- type_symbol,
- stmt.get_if_stmt().get_else_clause().get_stmts_body().get_stmts(),
- ret_defined)
- elif stmt.is_while_stmt():
- self.__check_return_recursively(processed_function,
- type_symbol, stmt.get_while_stmt().get_stmts_body().get_stmts(),
- ret_defined)
- elif stmt.is_for_stmt():
- self.__check_return_recursively(processed_function,
- type_symbol, stmt.get_for_stmt().get_stmts_body().get_stmts(),
- ret_defined)
- # now, if a return statement has not been defined in the corresponding higher level block, we have to ensure that it is defined here
- elif not ret_defined and stmts.index(c_stmt) == (len(stmts) - 1):
- if not (isinstance(stmt, ASTSmallStmt) and stmt.is_return_stmt()):
- code, message = Messages.get_no_return()
- Logger.log_message(error_position=stmt.get_source_position(), log_level=LoggingLevel.ERROR,
- code=code, message=message)
diff --git a/pynestml/visitors/ast_power_visitor.py b/pynestml/visitors/ast_power_visitor.py
index 2bd43aec4..26eddf664 100644
--- a/pynestml/visitors/ast_power_visitor.py
+++ b/pynestml/visitors/ast_power_visitor.py
@@ -19,7 +19,6 @@
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see .
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
from pynestml.meta_model.ast_expression import ASTExpression
from pynestml.meta_model.ast_simple_expression import ASTSimpleExpression
from pynestml.symbols.predefined_units import PredefinedUnits
@@ -108,7 +107,7 @@ def calculate_numeric_value(self, expr):
symbol = variable.get_scope().resolve_to_symbol(variable.get_complete_name(), SymbolKind.VARIABLE)
if symbol is None:
if PredefinedUnits.is_unit(variable.get_complete_name()):
- return NESTUnitConverter.get_factor(PredefinedUnits.get_unit(variable.get_complete_name()).get_unit())
+ return PredefinedUnits.get_unit(variable.get_complete_name()).get_unit()
raise Exception("Declaration for symbol '" + str(variable) + "' not found and is not a unit.")
diff --git a/tests/expression_type_calculation_test.py b/tests/expression_type_calculation_test.py
index 02146536a..2469a55bc 100644
--- a/tests/expression_type_calculation_test.py
+++ b/tests/expression_type_calculation_test.py
@@ -22,7 +22,6 @@
import os
import unittest
-from pynestml.codegeneration.nest_unit_converter import NESTUnitConverter
from pynestml.symbol_table.symbol_table import SymbolTable
from pynestml.symbols.predefined_functions import PredefinedFunctions
from pynestml.symbols.predefined_types import PredefinedTypes
@@ -63,7 +62,7 @@ def endvisit_assignment(self, node):
if isinstance(_expr.type, UnitTypeSymbol):
message += " Neuroscience Factor: " + \
- str(NESTUnitConverter.get_factor(_expr.type.astropy_unit))
+ str(_expr.type.astropy_unit)
Logger.log_message(error_position=node.get_source_position(), code=MessageCode.TYPE_MISMATCH,
message=message, log_level=LoggingLevel.INFO)
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/README.md b/tests/nest_tests/non_dimensionalisation_transformer/README.md
new file mode 100644
index 000000000..4db53b8ff
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/README.md
@@ -0,0 +1,127 @@
+# Tests for the Non-Dimensionalisation Transformer
+
+The transformer relates to [PR-1217](https://github.com/nest/nestml/pull/1217) and [Issue-984](https://github.com/nest/nestml/issues/984).
+In a first instance the correct transformation of the expressions inside a NESTML file should be checked.
+
+The tests should include:
+- checking for all metric prefixes
+- checking for nested expressions with metric prefixes
+- checking that transformations occur in every part of the NESTML file where units are specified
+- checking of transformation for derived variables
+- checking for transformation of reciprocal units/ expressions with reciprocal units
+ - does it make sense for these to have the same desired unit?
+ - E.g.: desired unit of 'electrical potential' is mV -> should variables with physical type of '1/V' also be then expressed as '1/mV' post transformation?
+  - see *test_reciprocal_unit_in_parameterblock*
+- checking additional parenthesis are set correctly
+
+In a second instance the unit arithmetic and consistency of physical types needs to be checked pre-transformation after the original AST is built:
+- will the expression on the RHS of an equation yield a unit that is a unit of what is specified on the LHS of the equation?
+- How should exceptions be handled, for example if LHS is 'V' but result on RHS is '1/V'?
+- Are the arguments inside of functions like exp(), log(), sin(), etc. dimensionless or has the user made a mistake?
+- What should happen if unknown units are encountered?
+
+### test_exp_in_equationblock
+This test checks if the transformer can deal with functions like exp() in the equation block.
+The target unit for V_exp'(s) is mV as the 1/s is being carried implicitly by declaring the variable with a tick, signifying that it is a derived unit with respect to time
+
+### test_real_factor_in_stateblock
+This test checks if state block expressions with a RHS with a unit being multiplied by a real factor and a LHS with type 'real' will get processed correctly.
+The target unit JSON file is
+```JSON
+{"quantity_to_preferred_prefix":
+ {
+ "electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ }
+}
+```
+Before the transformation the relevant .NESTML should read
+
+```NESTML
+ state:
+ U_m real = b * V_m_init # Membrane potential recovery variable
+
+ parameters:
+ b real = 0.2 # sensitivity of recovery variable
+ V_m_init mV = -65 mV # Initial membrane potential
+```
+After the transformation it should read
+```NESTML
+ state:
+ U_m real = b * V_m_init # Membrane potential recovery variable
+
+ parameters:
+ b real = 0.2 # sensitivity of recovery variable
+ V_m_init real = 1e3 * (-65e-3) # Initial membrane potential
+```
+
+### test_inline_expression_in_equationblock
+This test checks if the transformer can deal with inline expressions in the equation block. Additionally there is an exp() in the expression
+
+The target unit JSON file is
+```JSON
+{"quantity_to_preferred_prefix":
+ {
+ "electrical potential": "m", # needed for V_m_init
+ "electrical current": "p", # needed for I_spike_test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ }
+}
+```
+
+Before the transformation the relevant .NESTML should read
+```NESTML
+ equations:
+ inline I_spike_test A = 30.0 nS * (-V_m_init / 130e3) * exp(((-80 mV) - (-20 mV)) / 3000 uV)
+
+ parameters:
+ V_m_init mV = -65 mV # Initial membrane potential
+```
+
+After the transformation it should read
+```NESTML
+ equations:
+    inline I_spike_test real = 1e12 * ((30.0 * 1e-9) * ((-V_m_init * 1e-3) / 130e3) * exp(((-80 * 1e-3) - (-20 * 1e-3)) / (3000 * 1e-6)))
+
+ parameters:
+ V_m_init real = 1e3 * (-65 * 1e-3) # Initial membrane potential
+```
+
+### test_reciprocal_unit_in_parameterblock
+This test checks if the transformer can deal with reciprocal units on the LHS of an equation inside the parameter block.
+
+The target unit JSON file is
+```JSON
+{"quantity_to_preferred_prefix":
+ {
+ "electrical potential": "m", # needed for V_exp, alpha_exp
+ "electrical current": "1", # needed for I_spike_test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ }
+}
+```
+Before the transformation the relevant .NESTML should read
+```NESTML
+ state:
+ V_exp V = 2500 uV + V_m_init * exp(alpha_exp * 10 V)
+
+ parameters:
+ V_m_init mV = -65 mV # Initial membrane potential
+    alpha_exp 1/V = 2 /(3 MV)  # this could be a factor for a voltage inside of an exp(), e.g. exp(alpha_exp * V_test)
+```
+
+After the transformation it should read
+```NESTML
+ state:
+ V_exp V = (2500 * 1e-6) + (V_m_init * 1e-3) * exp((alpha_exp * 1e-6) * 10)
+
+ parameters:
+ V_m_init real = 1e3 * (-65 * 1e-3) # Initial membrane potential
+    alpha_exp real = 1e-3 * (2 / (3 * 1e6))  # this could be a factor for a voltage inside of an exp(), e.g. exp(alpha_exp * V_test)
+```
+
+### test_giga - test_atto
+These tests will check if the standardized metric prefixes in the range of Giga- to Atto- can be resolved.
+The prefixes Deci- and Deca- are probably little used in a neuroscience context.
+The test for Femto- includes the use of a combined physical type, the "magnetic field strength".
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/izhikevich_neuron.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/izhikevich_neuron.nestml
new file mode 100644
index 000000000..22f657fe7
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/izhikevich_neuron.nestml
@@ -0,0 +1,101 @@
+# izhikevich - Izhikevich neuron model
+# ####################################
+#
+# Description
+# +++++++++++
+#
+# Implementation of the simple spiking neuron model introduced by Izhikevich [1]_. The dynamics are given by:
+#
+# .. math::
+#
+# dV_{m}/dt &= 0.04 V_{m}^2 + 5 V_{m} + 140 - U_{m} + I\\
+# dU_{m}/dt &= aaa (bbb V_{m} - U_{m})
+#
+#
+# .. math::
+#
+# &\text{if}\;\; V_{m} \geq V_{th}:\\
+# &\;\;\;\; V_{m} \text{ is set to } c\\
+# &\;\;\;\; U_{m} \text{ is incremented by } ddd\\
+# & \, \\
+# &V_{m} \text{ jumps on each spike arrival by the weight of the spike}
+#
+# Incoming spikes cause an instantaneous jump in the membrane potential proportional to the strength of the synapse.
+#
+# As published in [1]_, the numerics differs from the standard forward Euler technique in two ways:
+#
+# 1) the new value of :math:`U_{m}` is calculated based on the new value of :math:`V_{m}`, rather than the previous value
+# 2) the variable :math:`V_{m}` is updated using aaa time step half the size of that used to update variable :math:`U_{m}`.
+#
+# This model will instead be simulated using the numerical solver that is recommended by ODE-toolbox during code generation.
+#
+#
+# References
+# ++++++++++
+#
+# .. [1] Izhikevich, Simple Model of Spiking Neurons, IEEE Transactions on Neural Networks (2003) 14:1569-1572
+#
+#
+# Copyright statement
+# +++++++++++++++++++
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+#
+#
+model izhikevich_neuron:
+ state:
+ V_m mV = V_m_init # Membrane potential
+ U_m real = bbb * V_m_init # Membrane potential recovery variable
+
+ equations:
+ V_m' = ( 0.04 * V_m * V_m / mV + 5.0 * V_m + ( 140 - U_m ) * mV + ( (I_e + I_stim) * GOhm ) ) / ms
+ U_m' = aaa*(bbb*V_m-U_m * mV) / (mV*ms)
+
+ parameters:
+ aaa real = 0.02 # describes time scale of recovery variable
+ bbb real = 0.2 # sensitivity of recovery variable
+ c mV = -65 mV # after-spike reset value of V_m
+ ddd real = 8.0 # after-spike reset value of U_m
+ V_m_init mV = -65 mV # initial membrane potential
+ V_min mV = -inf * mV # Absolute lower value for the membrane potential.
+ V_th mV = 30 mV # Threshold potential
+
+ # constant external input current
+ I_e pA = 0 pA
+
+ input:
+ spikes <- spike
+ I_stim pA <- continuous
+
+ output:
+ spike
+
+ update:
+ integrate_odes()
+
+ # Add synaptic current
+ V_m += spikes * mV * s
+
+ # lower bound of membrane potential
+ V_m = max(V_min, V_m)
+
+ onCondition(V_m >= V_th):
+ # threshold crossing
+ V_m = c
+ U_m += ddd
+ emit_spike()
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/non_dimensionalisation_transformer_test_neuron.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/non_dimensionalisation_transformer_test_neuron.nestml
new file mode 100644
index 000000000..d43ff5604
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/non_dimensionalisation_transformer_test_neuron.nestml
@@ -0,0 +1,63 @@
+model non_dimensionalisation_transformer_test_neuron:
+
+ state:
+ I_foo A = 42 mA
+ I_m A = 10 mA
+ V_3 mV = I_foo / 5 nS
+ V_m mV = E_L
+ U_m real = b * V_m_init # Membrane potential recovery variable
+ V_exp_der mV = 2500 uV + V_m_init * exp(alpha_exp * 10 V)
+ refr_t ms = 2 ms # Refractory period timer
+ I_eq mA = 30 mA
+
+ equations:
+ V_m' = I_eq / C_m
+ refr_t' = -1 / s
+ recordable inline I_spike_test A = 30.0 nS * (-V_m_init / 130e3) * exp(((-80 mV) - (-20 mV)) / 3000 uV)
+ V_exp_der' = (I_foo - 200 uA) / (C_exp_0 * (1+exp(alpha_exp * V_m_init)))
+
+ parameters:
+ E_L mV = -70 mV # Resting potential
+ C_m F = 250 pF * 1.0001 # Test if factor works
+ V_m_init mV = -65 mV # Initial membrane potential
+ C_exp_0 F = 150pF
+        alpha_exp 1/V = 2 / (3 MV)    # this could be a factor for a voltage inside of an exp(), e.g. exp(alpha_exp * V_test)
+ b real = 0.2 # sensitivity of recovery variable
+
+ para_giga Ohm = 0.5 GOhm
+ para_mega Hz = 1.1 * 3MHz
+ para_kilo W = 2 kW
+ para_hecto Pa = 1024 hPa
+ para_deca m = 23 dam # this might cause problems, but also deca- is not used particularly frequently
+ para_deci mol = 8 dmol
+ para_centi m = 67 cm
+ para_milli V = 4 mV
+ para_micro S = 2 uS
+ para_nano F = 11 nF
+ para_pico H = 3 pH
+ para_femto A/m = 77 fA/m
+ para_atto s = 40 as
+
+
+ internals:
+ alpha_m_init real = ( 0.1 * ( V_m_init / mV + 40. ) ) / ( 1. - exp( -( V_m_init / mV + 40. ) / 10. ) )
+
+ update:
+ if refr_t > 2 ms:
+ # this has to do nothing as equations are not a real ODE system
+ integrate_odes(refr_t)
+
+ onCondition(refr_t > 2 ms):
+ refr_t = refr_t
+
+
+
+
+# V = 10 nA * 50 Ohm -> convert nA to mA --- 1E-6
+# = 500 nV
+# ---> V = 10 * 1E-6 * 50 = 500E-6
+
+# V = 10 * 0.00001 * mA * 50 Ohm -> convert mA to mA --- 1
+# ---> V = 10 * 0.00001 * 50 = 500E-6
+
+
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/test_function_call_in_equation_block.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_function_call_in_equation_block.nestml
new file mode 100644
index 000000000..3420f0937
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_function_call_in_equation_block.nestml
@@ -0,0 +1,14 @@
+model test_function_call_in_equation_block_transformation_neuron:
+ state:
+ V_m V = -70 mV
+
+ equations:
+ V_m' = -V_m / (tau_m * (1 + exp(alpha_exp * V_m_init)))
+
+ parameters:
+ V_m_init V = -65 mV # test potential
+ tau_m s = 12.85 ms # test time constant
+ alpha_exp 1/V = 2 /(70 GV) # test factor
+
+ update:
+ integrate_odes()
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/test_inline_expression_in_equation_block.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_inline_expression_in_equation_block.nestml
new file mode 100644
index 000000000..9eb0ee95e
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_inline_expression_in_equation_block.nestml
@@ -0,0 +1,6 @@
+model test_inline_expression_in_equation_block_transformation_neuron:
+ equations:
+ inline I_spike_test A = 30.0 nS * (-V_m_init / 130e3) * exp(((-80 mV) - (-20 mV)) / 3000 uV)
+
+ parameters:
+ V_m_init mV = -65 mV # Initial membrane potential
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/test_internals_block.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_internals_block.nestml
new file mode 100644
index 000000000..8e0f1a4e4
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_internals_block.nestml
@@ -0,0 +1,24 @@
+model non_dimensionalisation_transformer_test_internals_block_neuron:
+ state:
+ Act_n real = alpha_n_init / ( alpha_n_init + beta_n_init )
+
+ parameters:
+ C_m pF = 100 pF # Membrane capacitance
+ g_Na nS = 3500 nS # Sodium peak conductance
+ g_K nS = 900 nS # Potassium peak conductance
+ g_L nS = 10 nS # Leak conductance
+ E_Na mV = 55 mV # Sodium reversal potential
+ E_K mV = -90 mV # Potassium reversal potential
+ E_L mV = -65 mV # Leak reversal potential (aka resting potential)
+ V_Tr mV = -55 mV # Spike threshold
+ refr_T ms = 2 ms # Duration of refractory period
+
+ tau_syn_exc ms = 0.2 ms # Rise time of the excitatory synaptic alpha function
+ tau_syn_inh ms = 10 ms # Rise time of the inhibitory synaptic alpha function
+ E_exc mV = 0 mV # Excitatory synaptic reversal potential
+ E_inh mV = -75 mV # Inhibitory synaptic reversal potential
+
+ internals:
+ alpha_n_init 1/ms = -0.05/(ms*mV) * (E_L + 34.0 mV) / (exp(-0.1 * (E_L + 34.0 mV)) - 1.0)
+ beta_n_init 1/ms = 0.625/ms * exp(-(E_L + 44.0 mV) / 80.0 mV)
+
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/test_metric_prefix_transformation.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_metric_prefix_transformation.nestml
new file mode 100644
index 000000000..bcefe7170
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_metric_prefix_transformation.nestml
@@ -0,0 +1,17 @@
+model test_metric_prefix_transformation_neuron:
+ parameters:
+ para_femto A = 30fA
+ para_atto aH = 40aH
+ para_giga Ohm = 0.5 GOhm
+ para_mega Hz = 1.1 * 3MHz
+ para_kilo W = 2 kW
+ para_hecto Pa = 1024 hPa
+ para_deca m = 23 dam
+ para_deci mol = 8 dmol
+ para_centi m = 67 cm
+ para_milli V = 4 mV
+ para_micro S = 2 uS
+ para_nano F = 11 nF
+ para_pico H = 3 pH
+ # para_femto A / m = 77 fA / m # this causes problems as unit division on the LHS is not allowed currently in the initial parsing
+ # para_atto as = 40 as # This causes problems as the string 'as' is not being added to the predefined units as it is not in dir(u.si), probably because of conflict with python keyword 'as' used in imports
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/test_real_factor_in_state_block.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_real_factor_in_state_block.nestml
new file mode 100644
index 000000000..2b018f6ce
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_real_factor_in_state_block.nestml
@@ -0,0 +1,7 @@
+model test_real_factor_in_state_block_transformation_neuron:
+ state:
+ U_m real = b * V_m_init # Membrane potential recovery variable
+
+ parameters:
+ V_m_init mV = -65 mV # Initial membrane potential
+ b real = 0.2 # sensitivity of recovery variable
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/resources/test_reciprocal_units_in_parameter_block.nestml b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_reciprocal_units_in_parameter_block.nestml
new file mode 100644
index 000000000..e6c3a0d83
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/resources/test_reciprocal_units_in_parameter_block.nestml
@@ -0,0 +1,4 @@
+model test_reciprocal_units_in_parameter_block_transformation_neuron:
+ parameters:
+        alpha_exp 1/V = 2 / (3 MV)    # this could be a factor for a voltage inside of an exp(), e.g. exp(alpha_exp * V_test)
+
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/test_non_dimensionalisation_transformer.py b/tests/nest_tests/non_dimensionalisation_transformer/test_non_dimensionalisation_transformer.py
new file mode 100644
index 000000000..3f4d56fa7
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/test_non_dimensionalisation_transformer.py
@@ -0,0 +1,779 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestNonDimensionalisationTransformer:
+
+ r"""
+
+
+
+ ### test_giga - test_atto
+ These tests will check if the standardized metric prefixes in the range of Giga- to Atto- can be resolved.
+ The prefixes Deci- and Deca- are probably little used in a neuroscience context.
+ The test for Femto- includes the use of a combined physical type, the "magnetic field strength".
+
+ """
+
+ def generate_code(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "resources", "non_dimensionalisation_transformer_test_neuron.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+ def generate_code_metric_prefixes(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "resources", "test_metric_prefix_transformation.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+
+ @pytest.mark.parametrize("preffered_prefix", ["1", "m"])
+ def test_non_dimensionalisation_transformer(self, preffered_prefix: str):
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "M",
+ "electrical current": preffered_prefix}}
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+
+ nest.SetStatus(mm, {"record_from": ["I_foo", "V_3"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ I_foo = mm.get("events")["I_foo"]
+ V_3 = mm.get("events")["V_3"]
+
+ if preffered_prefix == "1":
+ # we want representation in Ampère
+ np.testing.assert_allclose(I_foo, 0.042)
+ elif preffered_prefix == "m":
+ # we want representation in mA
+ np.testing.assert_allclose(I_foo, 42)
+
+ np.testing.assert_allclose(V_3, 8.4)
+
+
+
+
+
+
+ def test_exp_in_equationblock(self):
+ """
+ This test checks if the transformer can deal with functions like exp() in the equation block
+ V_exp_der' (s) is a time dependent voltage
+ original expression: V_exp_der' V = (I_foo - 200uA) / (C_exp_0 * (1+exp(alpha_exp * V_m_init)))
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and V_exp'
+ "electrical current": "n", # needed for I_foo
+ "electrical capacitance": "p", # needed for C_exp_0
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["I_foo", "V_m_init"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ I_foo = mm.get("events")["I_foo"]
+ V_m_init = mm.get("events")["V_m_init"]
+
+ np.testing.assert_allclose(I_foo, 4.2e7) # should be 42.000.000 (nA)
+ np.testing.assert_allclose(V_m_init, -65) # should be -65 (mV)
+
+ lhs_expression_after_transformation = "V_exp_der' real"
+ rhs_expression_after_transformation = "1e3 * ((I_foo * 1e-3) - (200 * 1e-6)) / ((C_exp_0 * 1e-12) * (1 + exp((alpha_exp * 1e-6) * (V_m_init * 1e-3))))"
+
+
+
+ def test_real_factor_in_stateblock(self):
+ r"""
+ This test checks if state block expressions with
+ a RHS with a unit being multiplied by a real factor and
+ a LHS with type 'real'
+ will get processed correctly
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["V_m_init", "U_m"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ V_m_init = mm.get("events")["V_m_init"]
+ U_m = mm.get("events")["U_m"]
+
+ np.testing.assert_allclose(V_m_init, -65)
+        np.testing.assert_allclose(U_m, -13)
+
+ lhs_expression_after_transformation = "U_m real"
+ rhs_expression_after_transformation = "b * (V_m_init * 1e-3)"
+
+
+ def test_inline_expression_in_equationblock(self):
+ """
+ This test checks if the transformer can deal with inline expressions in the equation block
+ Additionally there is an exp() in the expression
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for voltages not part of the test
+ "electrical current": "p", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["I_spike_test", "V_m_init"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ V_m_init = mm.get("events")["V_m_init"]
+ I_spike_test = mm.get("events")["I_spike_test"]
+
+ np.testing.assert_allclose(V_m_init, -65) # should be -65 mV
+ np.testing.assert_allclose(I_spike_test, 60) # should be 60 pA
+
+ lhs_expression_after_transformation = "Inline I_spike_test real"
+ rhs_after_expression = "1e12 *((30.0 * 1e-9) * ((-V_m_init * 1e-3) / 130e3) * exp((((-80 * 1e-3)) - ((-20 * 1e-3))) / (3000 * 1e-6)))"
+
+
+
+
+
+
+ def test_reciprocal_unit_in_parameterblock(self):
+ """
+ This test checks if the transformer can deal with reciprocal units on the LHS of an equation inside the parameter block
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["V_exp", "alpha_exp"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ V_exp = mm.get("events")["V_exp"]
+ alpha_exp = mm.get("events")["alpha_exp"]
+
+ np.testing.assert_allclose(V_exp, -62.5) # should be -62.5004333 mV
+ np.testing.assert_allclose(alpha_exp, 6.667e-10) # should be (2e-10/3) (1/mV)
+
+ lhs_expression_after_transformation_parameter = "alpha_exp real"
+ rhs_expression_after_transformation_parameter = "2 /(3 * 1e6)"
+
+
+ def test_giga(self):
+ """
+ This test checks if the transformer can deal with the Giga- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_giga"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_giga = mm.get("events")["para_giga"]
+
+ np.testing.assert_allclose(para_giga, 500) # should be 500 MOhm
+
+
+ def test_mega(self):
+ """
+ This test checks if the transformer can deal with the Mega- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_mega"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_mega = mm.get("events")["para_mega"]
+
+ np.testing.assert_allclose(para_mega, 3300) # should be 3300 kHz
+
+
+ def test_kilo(self):
+ """
+ This test checks if the transformer can deal with the Kilo- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_kilo"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_kilo = mm.get("events")["para_kilo"]
+
+ np.testing.assert_allclose(para_kilo, 0.002) # should be 0.002 MW
+
+
+ def test_hecto(self):
+ """
+ This test checks if the transformer can deal with the Hecto- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_hecto"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_hecto = mm.get("events")["para_hecto"]
+
+ np.testing.assert_allclose(para_hecto, 102.4) # should be 102.4 kPa
+
+
+ def test_deca(self):
+ """
+ This test checks if the transformer can deal with the Deca- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_deca"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_deca = mm.get("events")["para_deca"]
+
+ np.testing.assert_allclose(para_deca, 2300) # should be 2300 m
+
+
+ def test_deci(self):
+ """
+ This test checks if the transformer can deal with the Deci- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_deci"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_deci = mm.get("events")["para_deci"]
+
+ np.testing.assert_allclose(para_deci, 80) # should be 80 mol
+
+
+ def test_centi(self):
+ """
+ This test checks if the transformer can deal with the Centi- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "m",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_centi"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_centi = mm.get("events")["para_centi"]
+
+ np.testing.assert_allclose(para_centi, 6700) # should be 6700 mM
+
+
+ def test_milli(self):
+ """
+ This test checks if the transformer can deal with the Milli- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "m",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_milli"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_milli = mm.get("events")["para_milli"]
+
+ np.testing.assert_allclose(para_milli, 6700) # should be 6700 mM
+
+
+ def test_micro(self):
+ """
+ This test checks if the transformer can deal with the Micro- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "u", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "m",
+ "amount of substance": "1",
+ "electrical conductance":"m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_micro"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_micro = mm.get("events")["para_micro"]
+
+ np.testing.assert_allclose(para_micro, 0.002) # should be 0.002 mS
+
+
+ def test_nano(self):
+ """
+ This test checks if the transformer can deal with the Nano- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "u", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "u",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "m",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_nano"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_nano = mm.get("events")["para_nano"]
+
+ np.testing.assert_allclose(para_nano, 0.011) # should be 0.011 uF
+
+
+ def test_pico(self):
+ """
+ This test checks if the transformer can deal with the Pico- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "u", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "u",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "m",
+ "amount of substance": "1",
+ "electrical conductance":"m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_pico"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_pico = mm.get("events")["para_pico"]
+
+ np.testing.assert_allclose(para_pico, 0.003) # should be 0.003 nF
+
+
+ def test_femto(self):
+ """
+ This test checks if the transformer can deal with the Femto- prefix
+ si.A/si.m is the unit of magnetic field strength, so there might be problems
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "u", # needed for V_m_init and U_m
+ "electrical current": "p",
+ # needed for currents not part of the test
+ "electrical capacitance": "u",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance":"m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_femto"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_femto = mm.get("events")["para_femto"]
+
+ np.testing.assert_allclose(para_femto, 77000) # should be 77000 pA/m
+
+
+ def test_atto(self):
+ """
+ This test checks if the transformer can deal with the Atto- prefix
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "u", # needed for V_m_init and U_m
+ "electrical current": "p",
+ # needed for currents not part of the test
+ "electrical capacitance": "u",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance":"m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["para_atto"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ para_atto = mm.get("events")["para_atto"]
+
+ np.testing.assert_allclose(para_atto, 0.04) # should be 0.04 fs
+
+
+
+
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/reference_test_non_dim_transformer_function_call_in_equation_block.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/reference_test_non_dim_transformer_function_call_in_equation_block.py
new file mode 100644
index 000000000..29fece2e9
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/reference_test_non_dim_transformer_function_call_in_equation_block.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+
+import numpy as np
+from scipy.integrate import solve_ivp
+import matplotlib.pyplot as plt
+
+# parameters (SI)
+params = {
+ "tau_m": 12.85e-3, # membrane time constant (s)
+ "alpha_exp": 2 / 70e6, # exponential factor (1/V)
+ "V_rest": -65e-3 # resting potential (V)
+}
+
+V_m0 = -70e-3 # start 5mV below rest
+
+tau_eff = params["tau_m"] * (1 + np.exp(params["alpha_exp"] *
+ params["V_rest"]))
+
+# ODE
+def neuron_ode(t, v):
+ return -v / tau_eff
+
+# simulation: 0–50ms
+t_span = (0.0, 0.05) # s
+t_eval = np.linspace(*t_span, 1001)
+
+# sol = solve_ivp(neuron_ode, t_span, [V_m0],
+# t_eval=t_eval, rtol=1e-9, atol=1e-12)
+sol = solve_ivp(neuron_ode, t_span, [V_m0],
+ t_eval=t_eval, rtol=1e-6, atol=1e-6)
+
+# checkpoints
+check_times_ms = np.array([25, 50]) # ms
+check_idx = [np.argmin(np.abs(t_eval * 1e3 - ct)) for ct in check_times_ms]
+check_vm_mV = sol.y[0, check_idx] * 1e3 # mV
+
+# plot
+plt.figure(figsize=(8, 5))
+
+# membrane‑potential trace
+plt.plot(t_eval * 1e3, sol.y[0] * 1e3, label="numeric (solve_ivp)")
+
+# Xs at checkpoints
+plt.plot(check_times_ms, check_vm_mV, "x", markersize=9,
+ markeredgewidth=2, label="checkpoints")
+
+# annotate Xs with their values
+for t, v in zip(check_times_ms, check_vm_mV):
+ offset = 2 if v > 0 else -2
+ plt.text(t, v + offset, f"{v:+.2f}mV",
+ ha="center", va="bottom" if v > 0 else "top",
+ fontsize=9)
+
+plt.xlabel("Time (ms)")
+plt.ylabel("Membrane potential (mV)")
+plt.title("50ms leak‑decay reference trace with bar checkpoints")
+plt.grid(alpha=0.6, linestyle="--")
+plt.legend()
+plt.tight_layout()
+plt.savefig("reference_test_non_dim_transformer_function_call_in_equation_block.png")
+# reference trace and checkpoints saved to PNG above
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_forward_euler_integrator.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_forward_euler_integrator.py
new file mode 100644
index 000000000..d7ab269e2
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_forward_euler_integrator.py
@@ -0,0 +1,117 @@
+# -*- coding: utf-8 -*-
+#
+# test_forward_euler_integrator.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import numpy as np
+import os
+import pytest
+
+import nest
+
+from pynestml.codegeneration.nest_tools import NESTTools
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestForwardEulerIntegrator:
+ """
+ Tests the forward Euler integrator by comparing it to RK45.
+ """
+
+ def generate_target(self, numeric_solver: str):
+ r"""Generate the neuron model code"""
+
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "izhikevich_neuron.nestml")))
+ generate_nest_target(input_path=input_path,
+ logging_level="DEBUG",
+ suffix="_" + numeric_solver.replace("-", "_") + "_nestml",
+ module_name="nestml" + numeric_solver.replace("-", "_") + "module",
+ codegen_opts={"numeric_solver": numeric_solver,
+ "quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "p",
+ "electrical resistance": "G",
+ "time": "m",
+ }})
+
+ return "nestml" + numeric_solver.replace("-", "_") + "module"
+
+ @pytest.mark.skipif(NESTTools.detect_nest_version().startswith("v2"),
+ reason="This test does not support NEST 2")
+ def test_forward_euler_integrator(self):
+ # forward_euler_module_name = self.generate_target("forward-Euler")
+ # rk45_module_name = self.generate_target("rk45")
+
+ # nest.ResetKernel()
+ # # nest.Install(forward_euler_module_name)
+ # nest.Install(rk45_module_name)
+ # nest.resolution = .001
+
+ # nrn1 = nest.Create("izhikevich_neuron_rk45_nestml")
+ # # nrn2 = nest.Create("izhikevich_neuron_forward_Euler_nestml")
+
+ # nrn1.I_e = 10.
+ # # nrn2.I_e = 10.
+
+ # mm1 = nest.Create("multimeter")
+ # mm1.set({"record_from": ["V_m"]})
+
+ # # mm2 = nest.Create("multimeter")
+ # # mm2.set({"record_from": ["V_m"]})
+
+ # nest.Connect(mm1, nrn1)
+ # # nest.Connect(mm2, nrn2)
+ # sr = nest.Create('spike_recorder')
+ # nest.Connect(nrn1, sr)
+
+ # nest.Simulate(100.)
+
+ # v_m1 = mm1.get("events")["V_m"]
+
+ # # v_m2 = mm2.get("events")["V_m"]
+
+ # # np.testing.assert_allclose(v_m1, v_m2, atol=2, rtol=0) # allow max 2 mV difference between the solutions
+ forward_euler_module_name = self.generate_target("forward-Euler")
+ rk45_module_name = self.generate_target("rk45")
+
+ nest.ResetKernel()
+ nest.Install(forward_euler_module_name)
+ nest.Install(rk45_module_name)
+ nest.resolution = .001
+
+ nrn1 = nest.Create("izhikevich_neuron_rk45_nestml")
+ nrn2 = nest.Create("izhikevich_neuron_forward_Euler_nestml")
+
+ nrn1.I_e = 10.
+ nrn2.I_e = 10.
+
+ mm1 = nest.Create("multimeter")
+ mm1.set({"record_from": ["V_m"]})
+
+ mm2 = nest.Create("multimeter")
+ mm2.set({"record_from": ["V_m"]})
+
+ nest.Connect(mm1, nrn1)
+ nest.Connect(mm2, nrn2)
+
+ nest.Simulate(100.)
+
+ v_m1 = mm1.get("events")["V_m"]
+ v_m2 = mm2.get("events")["V_m"]
+
+ np.testing.assert_allclose(v_m1, v_m2, atol=2, rtol=0) # allow max 2 mV difference between the solutions
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_function_call_in_equation_block.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_function_call_in_equation_block.py
new file mode 100644
index 000000000..165bcb8f8
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_function_call_in_equation_block.py
@@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+class TestNonDimensionalisationTransformerEqationBlock:
+
+ def generate_code(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "test_function_call_in_equation_block.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+
+ def test_exp_in_equationblock(self):
+ """
+ This test checks if the transformer can deal with functions like exp() in the equation block
+ V_m' (s) is a time dependent voltage
+ """
+ codegen_opts = {"solver": "numeric",
+ "quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and V_exp'
+ # "electrical current": "n", # needed for I_foo
+ # "electrical capacitance": "p", # needed for C_exp_0
+ # "electrical resistance": "M",
+ # "frequency": "k",
+ # "power": "M",
+ # "pressure": "k",
+ # "length": "1",
+ # "amount of substance": "1",
+ # "electrical conductance": "m",
+ # "inductance": "n",
+ "time": "m",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+ nest.resolution = 1
+ nrn = nest.Create("test_function_call_in_equation_block_transformation_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["V_m"]})
+ nest.Connect(mm, nrn)
+ # assert nrn.V_m == -70 # mV
+ # assert nrn.V_m_init == -65 # mV
+ # assert nrn.tau_m == 12.85 # mS
+ assert nrn.alpha_exp == 2 / ((70.0 * 1.0E+09)) # 1/V
+ nest.Simulate(500.)
+ V_m_end = mm.get("events")["V_m"]
+ print("V_m progression:", V_m_end)
+ print("stop here and inspect V_m_end")
+
+
+ # after transformation: V_m real =
+ # v_m_declaration_rhs_after_transformation="1.0e3 * (-70 * 1.0E-3)"
+
+ # lhs_expression_after_transformation = "V_exp_der' real"
+ # rhs_expression_after_transformation = "1e3 * (((V_m * 1e-3)/ (tau_m * 1e-3)) / (((I_foo * 1e-12) * (1 + exp((alpha_exp * 1e-6) * (V_m_init * 1e-3)))) / (C_exp_0 * 1e-12)))"
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_inline_expression_in_equationblock.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_inline_expression_in_equationblock.py
new file mode 100644
index 000000000..91f7a8089
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_inline_expression_in_equationblock.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestNonDimensionalisationTransformerInlineEquationBlock:
+ r"""
+ This test checks if the transformer can deal with inline expressions in the equation block
+ Additionally there is an exp() in the expression
+ """
+
+ def generate_code(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "test_inline_expression_in_equation_block.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+ def test_inline_expression_in_equationblock(self):
+ """
+ This test checks if the transformer can deal with inline expressions in the equation block
+ Additionally there is an exp() in the expression
+ """
+ codegen_opts = {
+ "quantity_to_preferred_prefix": {"electrical potential": "m", # needed for voltages not part of the test
+ "electrical current": "p", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["I_spike_test", "V_m_init"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ V_m_init = mm.get("events")["V_m_init"]
+ I_spike_test = mm.get("events")["I_spike_test"]
+
+ np.testing.assert_allclose(V_m_init, -65) # should be -65 mV
+ np.testing.assert_allclose(I_spike_test, 60) # should be 60 pA
+
+ lhs_expression_after_transformation = "Inline I_spike_test real"
+ rhs_after_expression = "1e12 *((30.0 * 1e-9) * ((-V_m_init * 1e-3) / 130e3) * exp((((-80 * 1e-3)) - ((-20 * 1e-3))) / (3000 * 1e-6)))"
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_internals_block.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_internals_block.py
new file mode 100644
index 000000000..8ee70fdd2
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_internals_block.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestNonDimensionalisationTransformerInternalsBlock:
+ r"""
+ This test checks if the transformer can deal with transforming the expressions inside the internals block
+ """
+
+ def generate_code(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "test_internals_block.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+ def test_internals_block(self):
+
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("non_dimensionalisation_transformer_test_internals_block_neuron_nestml")
+ mm = nest.Create("multimeter")
+ nest.SetStatus(mm, {"record_from": ["alpha_n_init"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ alpha_m_init = nrn.get("alpha_m_init")
+
+ # np.testing.assert_allclose(alpha_m_init, -20) # should be -20
+
+ lhs_expression_after_transformation = "alpha_m_init real"
+ rhs_expression_after_transformation = "2 * ( (((V_m_init * 1e-3) / (1e-3)) + 40.))"
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_metric_prefixes.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_metric_prefixes.py
new file mode 100644
index 000000000..a07756298
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_metric_prefixes.py
@@ -0,0 +1,92 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer_metric_prefixes.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestNonDimensionalisationTransformer:
+
+ r"""
+ Test Metric Prefixes
+ These tests will check if the standardized metric prefixes in the range of Giga- to Atto- can be resolved.
+ The prefixes Deci- and Deca- are probably little used in a neuroscience context.
+ The test for Femto- includes the use of a combined physical type, the "magnetic field strength".
+
+ """
+ @pytest.fixture(scope="module", autouse=True)
+ def generate_code_metric_prefixes(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "test_metric_prefix_transformation.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+
+ @pytest.mark.parametrize("para_name, expected", [("para_giga", 500) , ("para_mega", 3300), ("para_kilo", 0.002), ("para_hecto", 102.4), ("para_deca", 230), ("para_deci", 0.8), ("para_centi", 0.67), ("para_milli", 4), ("para_micro", 0.002), ("para_nano", 1.1e-8), ("para_pico", 0.003), ("para_femto", 30e-15), ("para_atto", 4e-8)])
+ def test_metric_prefixes(self, para_name, expected):
+ """
+ This test checks whether all metric prefixes in the range of Giga- to Atto- can be resolved and the corresponding conversion factor found.
+ """
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("test_metric_prefix_transformation_neuron_nestml")
+ mm = nest.Create("multimeter")
+ # nest.SetStatus(mm, {"record_from": [para_name]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ res = nrn.get(para_name)
+
+ np.testing.assert_allclose(res, expected)
+ pass
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_real_factor_in_stateblock.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_real_factor_in_stateblock.py
new file mode 100644
index 000000000..428e54626
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_real_factor_in_stateblock.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer_real_factor_in_stateblock.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestNonDimensionalisationTransformerStateBlock:
+ r"""
+ This test checks if state block expressions with
+ a RHS with a unit being multiplied by a real factor and
+ a LHS with type 'real'
+ will get processed correctly
+ """
+
+ def generate_code(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "test_real_factor_in_state"
+ "_block.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = "_nestml"
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+ def test_real_factor_in_stateblock(self):
+ r"""
+ This test checks if state block expressions with
+ a RHS with a unit being multiplied by a real factor and
+ a LHS with type 'real'
+ will get processed correctly
+ """
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "k", # needed for V_m_init and U_m
+ "electrical current": "1",
+ # needed for currents not part of the test
+ "electrical capacitance": "1",
+ # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ nest.ResetKernel()
+ nest.Install("nestmlmodule")
+
+ nrn = nest.Create("test_real_factor_in_state_block_transformation_neuron_nestml")
+ mm = nest.Create("multimeter")
+ # nest.SetStatus(mm, {"record_from": ["V_m_init", "U_m"]})
+
+ nest.Connect(mm, nrn)
+
+ nest.Simulate(10.)
+
+ V_m_init = nrn.get("V_m_init")
+
+
+ np.testing.assert_allclose(V_m_init, -65e-6)
+
+
+ # lhs_expression_after_transformation = "U_m real"
+ # rhs_expression_after_transformation = "b * (V_m_init * 1e-3)"
\ No newline at end of file
diff --git a/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_reciprocal_unit_in_parameterblock.py b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_reciprocal_unit_in_parameterblock.py
new file mode 100644
index 000000000..2a8fa387b
--- /dev/null
+++ b/tests/nest_tests/non_dimensionalisation_transformer/tests/test_non_dimensionalisation_transformer_reciprocal_unit_in_parameterblock.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+#
+# test_non_dimensionalisation_transformer_reciprocal_unit_in_parameterblock.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+import nest
+import numpy as np
+import scipy as sp
+import os
+import re
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+
+class TestNonDimensionalisationTransformerStateBlock:
+ r"""
+ This test checks if the transformer can deal with reciprocal units on the LHS of an equation inside the parameter block
+ TODO: The grammar needs to be changed for reciprocal units to be accepted on LHSs
+ """
+
+ def generate_code(self, codegen_opts=None):
+ input_path = os.path.join(os.path.realpath(os.path.join(os.path.dirname(__file__), "../resources", "test_reciprocal_units_in_parameter_block.nestml")))
+ target_path = "target"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = ""
+
+ nest.set_verbosity("M_ALL")
+ generate_nest_target(input_path,
+ target_path=target_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix,
+ codegen_opts=codegen_opts)
+
+ def test_reciprocal_unit_in_parameterblock(self):
+
+ codegen_opts = {"quantity_to_preferred_prefix": {"electrical potential": "m", # needed for V_m_init and U_m
+ "electrical current": "1", # needed for currents not part of the test
+ "electrical capacitance": "1", # needed for caps not part of the test
+ "electrical resistance": "M",
+ "frequency": "k",
+ "power": "M",
+ "pressure": "k",
+ "length": "1",
+ "amount of substance": "1",
+ "electrical conductance": "m",
+ "inductance": "n",
+ "time": "f",
+ }
+ }
+ self.generate_code(codegen_opts)
+
+ # nest.ResetKernel()
+ # nest.Install("nestmlmodule")
+ #
+ # nrn = nest.Create("test_reciprocal_units_in_parameter_block_transformation_neuron")
+ # mm = nest.Create("multimeter")
+ # nest.SetStatus(mm, {"record_from": ["alpha_exp"]})
+ # nrn.get("alpha_exp") or neuron.alpha_exp
+ # nest.Connect(mm, nrn)
+ #
+ # nest.Simulate(10.)
+ #
+ # alpha_exp = mm.get("events")["alpha_exp"]
+ #
+ # np.testing.assert_allclose(alpha_exp, 6.667e-10) # should be (2e-10/3) (1/mV)
+
+ lhs_expression_after_transformation_parameter = "alpha_exp real"
+ rhs_expression_after_transformation_parameter = "2 / (3.0 * 1.0E+06)"
+
+ with open("transformed_model.txt") as file:
+ lines = file.readlines()
+ for i in range(len(lines)):
+ lines[i] = lines[i].lstrip()
+ for line in lines:
+ if line.startswith("alpha_exp 1/V ="):
+ start = '= '
+ end = ' #'
+ transformed_rhs = re.search('%s(.*)%s' % (start, end), line).group(1)
+ assert(transformed_rhs == rhs_expression_after_transformation_parameter)
+ print("stop")
diff --git a/tests/nest_tests/plot.png b/tests/nest_tests/plot.png
new file mode 100644
index 000000000..e1434f8ee
Binary files /dev/null and b/tests/nest_tests/plot.png differ
diff --git a/tests/nest_tests/plot_timestep.png b/tests/nest_tests/plot_timestep.png
new file mode 100644
index 000000000..14f01035d
Binary files /dev/null and b/tests/nest_tests/plot_timestep.png differ
diff --git a/tests/nest_tests/resources/iaf_psc_exp_neuron.nestml b/tests/nest_tests/resources/iaf_psc_exp_neuron.nestml
new file mode 100644
index 000000000..73f6fe1ff
--- /dev/null
+++ b/tests/nest_tests/resources/iaf_psc_exp_neuron.nestml
@@ -0,0 +1,108 @@
+# iaf_psc_exp - Leaky integrate-and-fire neuron model
+# ###################################################
+#
+# Description
+# +++++++++++
+#
+# iaf_psc_exp is an implementation of a leaky integrate-and-fire model
+# with exponentially decaying synaptic currents according to [1]_.
+# Thus, postsynaptic currents have an infinitely short rise time.
+#
+# The threshold crossing is followed by an absolute refractory period
+# during which the membrane potential is clamped to the resting potential
+# and spiking is prohibited.
+#
+# The general framework for the consistent formulation of systems with
+# neuron like dynamics interacting by point events is described in
+# [1]_. A flow chart can be found in [2]_.
+#
+# Critical tests for the formulation of the neuron model are the
+# comparisons of simulation results for different computation step
+# sizes.
+#
+# .. note::
+# If tau_m is very close to tau_syn_exc or tau_syn_inh, numerical problems
+# may arise due to singularities in the propagator matrices. If this is
+# the case, replace equal-valued parameters by a single parameter.
+#
+# For details, please see ``IAF_neurons_singularity.ipynb`` in
+# the NEST source code (``docs/model_details``).
+#
+#
+# References
+# ++++++++++
+#
+# .. [1] Rotter S, Diesmann M (1999). Exact simulation of
+# time-invariant linear systems with applications to neuronal
+# modeling. Biological Cybernetics 81:381-402.
+# DOI: https://doi.org/10.1007/s004220050570
+# .. [2] Diesmann M, Gewaltig M-O, Rotter S, & Aertsen A (2001). State
+# space analysis of synchronous spiking in cortical neural
+# networks. Neurocomputing 38-40:565-571.
+# DOI: https://doi.org/10.1016/S0925-2312(01)00409-X
+# .. [3] Morrison A, Straube S, Plesser H E, Diesmann M (2006). Exact
+# subthreshold integration with continuous spike times in discrete time
+# neural network simulations. Neural Computation, in press
+# DOI: https://doi.org/10.1162/neco.2007.19.1.47
+#
+#
+# See also
+# ++++++++
+#
+# iaf_psc_delta, iaf_psc_alpha, iaf_cond_exp
+#
+#
+model iaf_psc_exp_neuron:
+
+ state:
+ V_m mV = E_L # Membrane potential
+ refr_t ms = 0 ms # Refractory period timer
+ I_syn_exc pA = 0 pA
+ I_syn_inh pA = 0 pA
+
+ equations:
+ I_syn_exc' = -I_syn_exc / tau_syn_exc
+ I_syn_inh' = -I_syn_inh / tau_syn_inh
+ V_m' = -(V_m - E_L) / tau_m + (I_syn_exc - I_syn_inh + I_e + I_stim) / C_m
+ refr_t' = -1e3 * ms/s # refractoriness is implemented as an ODE, representing a timer counting back down to zero. XXX: TODO: This should simply read ``refr_t' = -1 / s`` (see https://github.com/nest/nestml/issues/984)
+
+ parameters:
+ C_m pF = 250 pF # Capacitance of the membrane
+ tau_m ms = 10 ms # Membrane time constant
+ tau_syn_inh ms = 2 ms # Time constant of inhibitory synaptic current
+ tau_syn_exc ms = 2 ms # Time constant of excitatory synaptic current
+ refr_T ms = 2 ms # Duration of refractory period
+ E_L mV = -70 mV # Resting potential
+ V_reset mV = -70 mV # Reset value of the membrane potential
+ V_th mV = -55 mV # Spike threshold potential
+
+ # constant external input current
+ I_e pA = 0 pA
+
+ input:
+ exc_spikes <- excitatory spike
+ inh_spikes <- inhibitory spike
+ I_stim pA <- continuous
+
+ output:
+ spike
+
+ update:
+ if refr_t > 0 ms:
+ # neuron is absolute refractory, do not evolve V_m
+ integrate_odes(I_syn_exc, I_syn_inh, refr_t)
+ else:
+ # neuron not refractory
+ integrate_odes(I_syn_exc, I_syn_inh, V_m)
+
+ onReceive(exc_spikes):
+ I_syn_exc += exc_spikes * pA * s
+
+ onReceive(inh_spikes):
+ I_syn_inh += inh_spikes * pA * s
+
+ onCondition(refr_t <= 0 ms and V_m >= V_th):
+ # threshold crossing
+ refr_t = refr_T # start of the refractory period
+ V_m = V_reset
+ emit_spike()
diff --git a/tests/nest_tests/resources/iaf_psc_exp_neuron_NO_ISTIM.nestml b/tests/nest_tests/resources/iaf_psc_exp_neuron_NO_ISTIM.nestml
new file mode 100644
index 000000000..36d194a81
--- /dev/null
+++ b/tests/nest_tests/resources/iaf_psc_exp_neuron_NO_ISTIM.nestml
@@ -0,0 +1,130 @@
+# iaf_psc_exp - Leaky integrate-and-fire neuron model
+# ###################################################
+#
+# Description
+# +++++++++++
+#
+# iaf_psc_exp is an implementation of a leaky integrate-and-fire model
+# with exponentially decaying synaptic currents according to [1]_.
+# Thus, postsynaptic currents have an infinitely short rise time.
+# The input current I_stim is removed for code generation testing purposes.
+#
+# The threshold crossing is followed by an absolute refractory period
+# during which the membrane potential is clamped to the resting potential
+# and spiking is prohibited.
+#
+# The general framework for the consistent formulation of systems with
+# neuron like dynamics interacting by point events is described in
+# [1]_. A flow chart can be found in [2]_.
+#
+# Critical tests for the formulation of the neuron model are the
+# comparisons of simulation results for different computation step
+# sizes.
+#
+# .. note::
+#
+# If tau_m is very close to tau_syn_exc or tau_syn_inh, numerical problems
+# may arise due to singularities in the propagator matrices. If this is
+# the case, replace equal-valued parameters by a single parameter.
+#
+# For details, please see ``IAF_neurons_singularity.ipynb`` in
+# the NEST source code (``docs/model_details``).
+#
+#
+# References
+# ++++++++++
+#
+# .. [1] Rotter S, Diesmann M (1999). Exact simulation of
+# time-invariant linear systems with applications to neuronal
+# modeling. Biological Cybernetics 81:381-402.
+# DOI: https://doi.org/10.1007/s004220050570
+# .. [2] Diesmann M, Gewaltig M-O, Rotter S, & Aertsen A (2001). State
+# space analysis of synchronous spiking in cortical neural
+# networks. Neurocomputing 38-40:565-571.
+# DOI: https://doi.org/10.1016/S0925-2312(01)00409-X
+# .. [3] Morrison A, Straube S, Plesser H E, Diesmann M (2006). Exact
+# subthreshold integration with continuous spike times in discrete time
+# neural network simulations. Neural Computation, in press
+# DOI: https://doi.org/10.1162/neco.2007.19.1.47
+#
+#
+# See also
+# ++++++++
+#
+# iaf_psc_delta, iaf_psc_alpha, iaf_cond_exp
+#
+#
+# Copyright statement
+# +++++++++++++++++++
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+#
+#
+model iaf_psc_exp_neuron:
+
+ state:
+ V_m mV = E_L # Membrane potential
+ refr_t ms = 0 ms # Refractory period timer
+ I_syn_exc pA = 0 pA
+ I_syn_inh pA = 0 pA
+
+ equations:
+ I_syn_exc' = -I_syn_exc / tau_syn_exc
+ I_syn_inh' = -I_syn_inh / tau_syn_inh
+ V_m' = -(V_m - E_L) / tau_m + (I_syn_exc - I_syn_inh + I_e) / C_m
+ refr_t' = -1e3 * ms/s # refractoriness is implemented as an ODE, representing a timer counting back down to zero. XXX: TODO: This should simply read ``refr_t' = -1 / s`` (see https://github.com/nest/nestml/issues/984)
+
+ parameters:
+ C_m pF = 250 pF # Capacitance of the membrane
+ tau_m ms = 10 ms # Membrane time constant
+ tau_syn_inh ms = 2 ms # Time constant of inhibitory synaptic current
+ tau_syn_exc ms = 2 ms # Time constant of excitatory synaptic current
+ refr_T ms = 2 ms # Duration of refractory period
+ E_L mV = -70 mV # Resting potential
+ V_reset mV = -70 mV # Reset value of the membrane potential
+ V_th mV = -55 mV # Spike threshold potential
+
+ # constant external input current
+ I_e pA = 0 pA
+
+ input:
+ exc_spikes <- excitatory spike
+ inh_spikes <- inhibitory spike
+
+ output:
+ spike
+
+ update:
+ if refr_t > 0 ms:
+ # neuron is absolute refractory, do not evolve V_m
+ integrate_odes(I_syn_exc, I_syn_inh, refr_t)
+ else:
+ # neuron not refractory
+ integrate_odes(I_syn_exc, I_syn_inh, V_m)
+
+ onReceive(exc_spikes):
+ I_syn_exc += exc_spikes * pA * s
+
+ onReceive(inh_spikes):
+ I_syn_inh += inh_spikes * pA * s
+
+ onCondition(refr_t <= 0 ms and V_m >= V_th):
+ # threshold crossing
+ refr_t = refr_T # start of the refractory period
+ V_m = V_reset
+ emit_spike()
diff --git a/tests/nest_tests/resources/non_dimensionalisation_transformer_test_neuron.nestml b/tests/nest_tests/resources/non_dimensionalisation_transformer_test_neuron.nestml
new file mode 100644
index 000000000..230969bad
--- /dev/null
+++ b/tests/nest_tests/resources/non_dimensionalisation_transformer_test_neuron.nestml
@@ -0,0 +1,66 @@
+model non_dimensionalisation_transformer_test_neuron:
+
+ state:
+ I_foo A = 42 mA
+ I_m A = 10 mA
+ V_3 mV = I_foo / 5 nS
+ V_m mV = E_L
+ U_m real = b * V_m_init # Membrane potential recovery variable
+ V_exp mV = 2500 uV + V_m_init * exp(alpha_exp * 10 V)
+ refr_t ms = 2 ms # Refractory period timer
+ I_eq A = 30 mA
+
+ equations:
+ # V_m' = I_eq / C_m
+ V_m' = I_eq / C_m
+ refr_t' s = -1 / s
+ inline I_spike_test A = 30.0 nS * (-V_m_init / 130e3) * exp(((-80 mV) - (-20 mV)) / 3000 uV)
+ # V_exp_der' = (I_foo - 200uA) / (C_exp_0 * (1+exp(alpha_exp * V_m_init)))
+ V_exp_der' = (I_foo - 200uA) / (C_exp_0 * (1+exp(alpha_exp * V_m_init)))
+
+ parameters:
+ E_L mV = -70 mV # Resting potential
+ C_m F = 250 pF * 1.0001 # Test if factor works
+ V_m_init mV = -65 mV # Initial membrane potential
+ C_exp_0 F = 150pF
+ alpha_exp = 2 /3 MV # this could be a factor for a voltage inside of an exp(), e.g. exp(alpha_exp * V_test)
+ b real = 0.2 # sensitivity of recovery variable
+
+ para_giga Ohm = 0.5 GOhm
+ para_mega Hz = 1.1 * 3MHz
+ para_kilo W = 2 kW
+ para_hecto Pa = 1024 hPa
+ para_deca m = 23 dam # this might cause problems, but also deca- is not used particularly frequently
+ para_deci mol = 8 dmol
+ para_centi m = 67 cm
+ para_milli V = 4 mV
+ para_micro S = 2 uS
+ para_nano F = 11 nF
+ para_pico H = 3 pH
+ # para_femto A/m = 77 fA/m
+ para_atto s = 40 as
+
+
+ internals:
+ alpha_m_init real = ( 0.1 * ( V_m_init / mV + 40. ) ) / ( 1. - exp( -( V_m_init / mV + 40. ) / 10. ) )
+
+ update:
+ if refr_t > 2 ms:
+ # this has to do nothing as equations are not a real ODE system
+ integrate_odes(refr_t)
+
+ onCondition(refr_t > 2 ms):
+ #this should never be reached
+ refr_t = refr_t
+
+
+
+
+# V = 10 nA * 50 Ohm -> convert nA to mA --- 1E-6
+# = 500 nV
+# ---> V = 10 * 1E-6 * 50 = 500E-6
+
+# V = 10 * 0.00001 * mA * 50 Ohm -> convert mA to mA --- 1
+# ---> V = 10 * 0.00001 * 50 = 500E-6
+
+
diff --git a/tests/nest_tests/test_iaf_psc_exp_neuron_no_ISTIM.py b/tests/nest_tests/test_iaf_psc_exp_neuron_no_ISTIM.py
new file mode 100644
index 000000000..8a48efd19
--- /dev/null
+++ b/tests/nest_tests/test_iaf_psc_exp_neuron_no_ISTIM.py
@@ -0,0 +1,73 @@
+import os.path
+
+import nest
+import numpy as np
+import matplotlib
+
+from pynestml.codegeneration.nest_code_generator_utils import NESTCodeGeneratorUtils
+
+matplotlib.use("Agg")
+import matplotlib.pyplot as plt
+import pytest
+from pynestml.codegeneration.nest_tools import NESTTools
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+@pytest.mark.skipif(NESTTools.detect_nest_version().startswith("v2"),
+ reason="This test does not support NEST 2")
+def test_iaf_psc_exp_single_neuron_VS_SpiNNaker2():
+ """
+ A test for iaf_psc_exp model single neuron spiking to compare
+ spike times and v_mem plots with PySpiNNaker2 implementation
+ """
+ target_path = "target_iaf_psc_exp_neuron_NO_ISTIM_VS_spiNNaker2"
+ module_name = "nestml_module"
+ input_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "resources", "iaf_psc_exp_neuron_NO_ISTIM.nestml"))
+ module_name, neuron_model_name = \
+ NESTCodeGeneratorUtils.generate_code_for("iaf_psc_exp_neuron_NO_ISTIM.nestml")
+ # generate_nest_target(input_path=input_path,
+ # target_path=target_path,
+ # logging_level="INFO",
+ # module_name=module_name)
+ nest.Install(module_name)
+ nest.resolution = 1
+ spikeSource = nest.Create("spike_train_injector",
+ params={"spike_times": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]})
+ neuron = nest.Create(neuron_model_name)
+ vm = nest.Create("multimeter", params={"interval": 1, "record_from": ["V_m"], "record_to": "memory", "time_in_steps":True})
+ spikerecorderNeuron = nest.Create("spike_recorder")
+ spikerecorderSource = nest.Create("spike_recorder")
+ nest.Connect(vm, neuron)
+ nest.Connect(neuron, spikerecorderNeuron)
+ nest.Connect(spikeSource, spikerecorderSource)
+ nest.Connect(spikeSource, neuron, syn_spec={"weight": 4000.0})
+ nest.Simulate(60)
+ t_step = [60.]
+ # Plotting
+ membraneVoltage = vm.get("events")
+ spikesNeuron = spikerecorderNeuron.get("events")
+ spikesSource = spikerecorderSource.get("events")
+
+
+ fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, height_ratios=(1, 2, 1))
+
+ indices, times = spikesSource["senders"], spikesSource["times"]
+ ax1.plot(times, indices, "|", ms=20)
+ ax1.set_ylabel("input spikes")
+ ax1.set_ylim((-5, 5))
+
+ times = np.arange(t_step[0])
+ ax2.plot(membraneVoltage["times"].tolist(), membraneVoltage["V_m"].tolist(), label="iaf_psc_exp_neuron")
+ ax2.axhline(-55, ls="--", c="0.5", label="threshold")
+ ax2.axhline(0, ls="-", c="0.8", zorder=0)
+ ax2.set_xlim(0, t_step[0])
+ ax2.set_ylabel("voltage")
+ ax2.legend()
+
+ indices, times = spikesNeuron["senders"].tolist(), spikesNeuron["times"].tolist()
+ ax3.plot(times, indices, "|", ms=20)
+ ax3.set_ylabel("output spikes")
+ ax3.set_xlabel("time step")
+ ax3.set_ylim((-5, 5))
+ fig.suptitle("NESTML iaf_psc_exp_single_neuron_VS_SpiNNaker2")
+ plt.savefig("plot_timestep")
diff --git a/tests/nest_tests/test_iaf_psc_exp_single_neuron_VS_SpiNNaker2.py b/tests/nest_tests/test_iaf_psc_exp_single_neuron_VS_SpiNNaker2.py
new file mode 100644
index 000000000..90014ad04
--- /dev/null
+++ b/tests/nest_tests/test_iaf_psc_exp_single_neuron_VS_SpiNNaker2.py
@@ -0,0 +1,68 @@
+import os.path
+
+import nest
+import numpy as np
+import matplotlib
+matplotlib.use("Agg")
+import matplotlib.pyplot as plt
+import pytest
+from pynestml.codegeneration.nest_tools import NESTTools
+
+from pynestml.frontend.pynestml_frontend import generate_nest_target
+
+@pytest.mark.skipif(NESTTools.detect_nest_version().startswith("v2"),
+ reason="This test does not support NEST 2")
+def test_iaf_psc_exp_single_neuron_VS_SpiNNaker2():
+ """
+ A test for iaf_psc_exp model single neuron spiking to compare
+ spike times and v_mem plots with PySpiNNaker2 implementation
+ """
+ target_path = "target_iaf_psc_exp_single_neuron_VS_spiNNaker2"
+ module_name = "nestml_module"
+ input_path = os.path.realpath(os.path.join(os.path.dirname(__file__), "resources", "iaf_psc_exp_neuron.nestml"))
+ generate_nest_target(input_path=input_path,
+ target_path=target_path,
+ logging_level="INFO",
+ module_name=module_name)
+ nest.Install(module_name)
+ nest.resolution = 1
+ spikeSource = nest.Create("spike_train_injector",
+ params={"spike_times": [1.0, 5.0, 100.0]})
+ neuron = nest.Create("iaf_psc_exp_neuron")
+ vm = nest.Create("multimeter", params={"interval": 1, "record_from": ["V_m"], "record_to": "memory", "time_in_steps":True})
+ spikerecorderNeuron = nest.Create("spike_recorder")
+ spikerecorderSource = nest.Create("spike_recorder")
+ nest.Connect(vm, neuron)
+ nest.Connect(neuron, spikerecorderNeuron)
+ nest.Connect(spikeSource, spikerecorderSource)
+ nest.Connect(spikeSource, neuron, syn_spec={"weight": 2000.0})
+ nest.Simulate(150)
+ t_step = [150.]
+ # Plotting
+ membraneVoltage = vm.get("events")
+ spikesNeuron = spikerecorderNeuron.get("events")
+ spikesSource = spikerecorderSource.get("events")
+
+
+ fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, height_ratios=(1, 2, 1))
+
+ indices, times = spikesSource["senders"], spikesSource["times"]
+ ax1.plot(times, indices, "|", ms=20)
+ ax1.set_ylabel("input spikes")
+ ax1.set_ylim((-5, 5))
+
+ times = np.arange(t_step[0])
+ ax2.plot(membraneVoltage["times"].tolist(), membraneVoltage["V_m"].tolist(), label="iaf_psc_exp_neuron")
+ ax2.axhline(-55, ls="--", c="0.5", label="threshold")
+ ax2.axhline(0, ls="-", c="0.8", zorder=0)
+ ax2.set_xlim(0, t_step[0])
+ ax2.set_ylabel("voltage")
+ ax2.legend()
+
+ indices, times = spikesNeuron["senders"].tolist(), spikesNeuron["times"].tolist()
+ ax3.plot(times, indices, "|", ms=20)
+ ax3.set_ylabel("output spikes")
+ ax3.set_xlabel("time step")
+ ax3.set_ylim((-5, 5))
+ fig.suptitle("NESTML iaf_psc_exp_single_neuron_VS_SpiNNaker2")
+ plt.savefig("plot_timestep")
diff --git a/tests/spinnaker2_tests/test_spinnaker2_iaf_psc_exp.py b/tests/spinnaker2_tests/test_spinnaker2_iaf_psc_exp.py
new file mode 100644
index 000000000..51d82203a
--- /dev/null
+++ b/tests/spinnaker2_tests/test_spinnaker2_iaf_psc_exp.py
@@ -0,0 +1,37 @@
+import os
+import pytest
+
+from pynestml.frontend.pynestml_frontend import generate_spinnaker2_target
+
+
+class TestSpiNNaker2IafPscExp:
+ """SpiNNaker2 code generation tests"""
+
+ # @pytest.fixture(autouse=True,
+ # scope="module")
+ def generate_code(self):
+ # codegen_opts = {"neuron_synapse_pairs": [{"neuron": "iaf_psc_exp_neuron",
+ # "synapse": "stdp_synapse",
+ # "post_ports": ["post_spikes"]}]}
+
+ files = [
+ os.path.join("models", "neurons", "iaf_psc_exp_neuron_NO_ISTIM.nestml"),
+ # os.path.join("models", "synapses", "stdp_synapse.nestml")
+ ]
+ input_path = [os.path.realpath(os.path.join(os.path.dirname(__file__), os.path.join(
+ os.pardir, os.pardir, s))) for s in files]
+ target_path = "spinnaker2-target"
+ install_path = "spinnaker2-install"
+ logging_level = "DEBUG"
+ module_name = "nestmlmodule"
+ suffix = ''#"_nestml"
+ generate_spinnaker2_target(input_path,
+ target_path=target_path,
+ install_path=install_path,
+ logging_level=logging_level,
+ module_name=module_name,
+ suffix=suffix)
+ # codegen_opts=codegen_opts)
+
+ def test_generate_code(self):
+ self.generate_code()
\ No newline at end of file
diff --git a/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py b/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py
index 065d64420..714a10d84 100644
--- a/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py
+++ b/tests/spinnaker_tests/test_spinnaker_iaf_psc_exp.py
@@ -54,6 +54,9 @@ def generate_code(self):
suffix=suffix)
# codegen_opts=codegen_opts)
+ def test_generate_code(self):
+ self.generate_code()
+
def test_iaf_psc_exp(self):
# import spynnaker and plotting stuff
import pyNN.spiNNaker as p