
Models

Models are the core of neurolib. The Model superclass will help you to load, simulate, and analyse models. It also makes it very easy to implement your own neural mass model (see Example 0.6 custom model).

Loading a model

To load a model, we need to import the submodule of a model and instantiate it. This example shows how to load a single node of the ALNModel. See Example 0 aln minimal on how to simulate a whole-brain network using this model.

from neurolib.models.aln import ALNModel # Import the model
model = ALNModel() # Create an instance
model.run() # Run it
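
After a run, all simulated time series are stored in the attribute model.outputs and can be accessed either as attributes or with dictionary-style indexing. A minimal sketch, assuming the ALN model's default output is named rates_exc:

t = model.t                  # time axis of the simulation (in ms)
rates = model.output         # default output, here the excitatory firing rates
rates = model["rates_exc"]   # equivalent dictionary-style access
print(model.outputs.keys())  # names of all stored outputs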

Model base class methods

The Model base class runs models, manages their outputs, parameters and more. This class should serve as the base class for all implemented models.

Source code in neurolib/models/model.py
class Model:
    """The Model base class runs models, manages their outputs, parameters and more.
    This class should serve as the base class for all implemented models.
    """

    def __init__(self, integration, params):
        if hasattr(self, "name"):
            if self.name is not None:
                assert isinstance(self.name, str), "Model name is not a string."
        else:
            self.name = "Noname"

        assert integration is not None, "Model integration function not given."
        self.integration = integration

        assert isinstance(params, dict), "Parameters must be a dictionary."
        self.params = dotdict(params)

        assert hasattr(
            self, "state_vars"
        ), f"Model {self.name} has no attribute `state_vars`, which should be a list of strings containing all variable names."
        assert np.all([type(s) is str for s in self.state_vars]), "All entries in state_vars must be strings."

        assert hasattr(
            self, "default_output"
        ), f"Model {self.name} needs to define a default output variable in `default_output`."

        assert isinstance(self.default_output, str), "`default_output` must be a string."

        # if no output_vars is set, it will be replaced by state_vars
        if not hasattr(self, "output_vars"):
            self.output_vars = self.state_vars

        # create output and state dictionary
        self.outputs = dotdict({})
        self.state = dotdict({})
        self.maxDelay = None
        self.initializeRun()

        self.boldInitialized = False

        logging.info(f"{self.name}: Model initialized.")

    def initializeBold(self):
        """Initialize BOLD model."""
        self.boldInitialized = False

        # function to transform model state before passing it to the bold model
        # Note: This can be used like the parameter \epsilon in Friston2000
        # (neural efficacy) by multiplying the input with a constant via
        # self.boldInputTransform = lambda x: x * epsilon
        if not hasattr(self, "boldInputTransform"):
            self.boldInputTransform = None

        self.boldModel = bold.BOLDModel(self.params["N"], self.params["dt"])
        self.boldInitialized = True
        # logging.info(f"{self.name}: BOLD model initialized.")

    def get_bold_variable(self, variables):
        default_index = self.state_vars.index(self.default_output)
        return variables[default_index]

    def simulateBold(self, bold_variable, append=True):
        """Gets the default output of the model and simulates the BOLD model.
        Adds the simulated BOLD signal to outputs.
        """
        if not self.boldInitialized:
            logging.warn("BOLD model not initialized, not simulating BOLD. Use `run(bold=True)`")
            return

        bold_input = bold_variable[:, self.startindt :]
        # logging.debug(f"BOLD input `{svn}` of shape {bold_input.shape}")
        if not bold_input.shape[1] >= self.boldModel.samplingRate_NDt:
            logging.warn(
                f"Will not simulate BOLD if output {bold_input.shape[1]*self.params['dt']} not at least of duration {self.boldModel.samplingRate_NDt*self.params['dt']}"
            )
            return

        # the downsampled output from the boldModel can only be correctly appended
        # to previous data if the output length is a multiple of the sampling rate,
        # so we are lazy here and simply disable appending otherwise ...
        if append and not bold_input.shape[1] % self.boldModel.samplingRate_NDt == 0:
            append = False
            logging.warn(
                f"Output size {bold_input.shape[1]} is not a multiple of BOLD sampling length { self.boldModel.samplingRate_NDt}, will not append data."
            )
        logging.debug(f"Simulating BOLD: boldModel.run()")

        # transform bold input according to self.boldInputTransform
        if self.boldInputTransform:
            bold_input = self.boldInputTransform(bold_input)

        # simulate bold model
        self.boldModel.run(bold_input)

        t_BOLD = self.boldModel.t_BOLD
        BOLD = self.boldModel.BOLD
        self.setOutput("BOLD.t_BOLD", t_BOLD, append=append)
        self.setOutput("BOLD.BOLD", BOLD, append=append)

    def checkChunkwise(self, chunksize):
        """Checks if the model fulfills requirements for chunkwise simulation.
        Checks whether the sampling rate for outputs fits the chunksize and duration.
        Throws errors if not."""
        assert self.state_vars is not None, "State variable names not given."
        assert self.init_vars is not None, "Initial value variable names not given."
        assert len(self.state_vars) == len(self.init_vars), "State variables are not same length as initial values."

        # warn if chunksize does not evenly divide the total number of steps
        if int(self.params["duration"] / self.params["dt"]) % chunksize != 0:
            logging.warning(
                f"It is strongly advised to use a `chunksize` ({chunksize}) that is a divisor of `duration / dt` ({int(self.params['duration']/self.params['dt'])})."
            )

        # if `sampling_dt` is set, do some checks
        if self.params.get("sampling_dt") is not None:
            # sample_dt checks are required after setting chunksize
            assert (
                chunksize * self.params["dt"] >= self.params["sampling_dt"]
            ), "`chunksize * dt` must be >= `sampling_dt`"

            # ugly floating point modulo hack: instead of float1%float2==0, we do
            # (float1/float2)%1==0
            assert ((chunksize * self.params["dt"]) / self.params["sampling_dt"]) % 1 == 0, (
                f"Chunksize {chunksize * self.params['dt']} must be divisible by sampling dt "
                f"{self.params['sampling_dt']}"
            )
            assert (
                (self.params["duration"] % (chunksize * self.params["dt"])) / self.params["sampling_dt"]
            ) % 1 == 0, (
                f"Last chunk of size {self.params['duration'] % (chunksize * self.params['dt'])} must be divisible by sampling dt "
                f"{self.params['sampling_dt']}"
            )

    def setSamplingDt(self):
        """Checks if sampling_dt is set correctly and sets self.`sample_every`
        1) Check if sampling_dt is multiple of dt
        2) Check if semplind_dt is greater than duration
        """

        if self.params.get("sampling_dt") is None:
            self.sample_every = 1
        elif self.params.get("sampling_dt") > 0:
            assert self.params["sampling_dt"] >= self.params["dt"], "`sampling_dt` needs to be >= `dt`"
            assert (
                self.params["duration"] >= self.params["sampling_dt"]
            ), "`sampling_dt` needs to be lower than `duration`"
            self.sample_every = int(self.params["sampling_dt"] / self.params["dt"])
        else:
            raise ValueError(f"Can't handle `sampling_dt`={self.params.get('sampling_dt')}")

    def initializeRun(self, initializeBold=False):
        """Initialization before each run.

        :param initializeBold: initialize BOLD model
        :type initializeBold: bool
        """
        # get the maxDelay of the system
        self.maxDelay = self.getMaxDelay()

        # length of the initial condition
        self.startindt = self.maxDelay + 1

        # check dt / sampling_dt
        self.setSamplingDt()

        # set up the bold model, if it didn't happen yet
        if initializeBold and not self.boldInitialized:
            self.initializeBold()

    def run(
        self,
        chunkwise=False,
        chunksize=None,
        bold=False,
        append_outputs=False,
        continue_run=False,
    ):
        """
        Main interfacing function to run a model.

        The model can be run in three different ways:
        1) `model.run()` starts a new run.
        2) `model.run(chunkwise=True)` runs the simulation in chunks of length `chunksize`.
        3) `model.run(continue_run=True)` continues the simulation of a previous run. This has no effect during the first run.

        :param chunkwise: simulate model chunkwise or in one single run, defaults to False
        :type chunkwise: bool, optional
        :param chunksize: size of the chunk to simulate in dt, if set will imply chunkwise=True, defaults to 2s
        :type chunksize: int, optional
        :param bold: simulate BOLD signal (only for chunkwise integration), defaults to False
        :type bold: bool, optional
        :param append_outputs: append new and chunkwise outputs to the outputs attribute, defaults to False. Note: BOLD outputs are always appended.
        :type append_outputs: bool, optional
        :param continue_run: continue a simulation by using the initial values from a previous simulation. This has no effect during the first run.
        :type continue_run: bool
        """
        self.initializeRun(initializeBold=bold)

        # if a previous run is not to be continued clear the model's state
        if continue_run:
            self.setInitialValuesToLastState()
        else:
            self.clearModelState()

        # enable chunkwise if chunksize is set
        chunkwise = chunkwise if chunksize is None else True

        if chunkwise is False:
            self.integrate(append_outputs=append_outputs, simulate_bold=bold)

        else:
            if chunksize is None:
                chunksize = int(2000 / self.params["dt"])

            # check if model is safe for chunkwise integration
            # and whether sampling_dt is compatible with duration and chunksize
            self.checkChunkwise(chunksize)

            self.integrateChunkwise(chunksize=chunksize, bold=bold, append_outputs=append_outputs)

        # check if there was a problem with the simulated data
        self.checkOutputs()

    def checkOutputs(self):
        # check nans in output
        if np.isnan(self.output).any():
            logging.error("nan in model output!")
        else:
            EXPLOSION_THRESHOLD = 1e20
            if (self.output > EXPLOSION_THRESHOLD).any():
                logging.error("very large values (explosion) in model output!")

        # check nans in BOLD
        if "BOLD" in self.outputs:
            if np.isnan(self.outputs.BOLD.BOLD).any():
                logging.error("nan in BOLD output!")

    def integrate(self, append_outputs=False, simulate_bold=False):
        """Calls each models `integration` function and saves the state and the outputs of the model.

        :param append: append the chunkwise outputs to the outputs attribute, defaults to False
        :type append: bool, optional
        """
        # run integration
        t, *variables = self.integration(self.params)
        self.storeOutputsAndStates(t, variables, append=append_outputs)

        # bold simulation after integration
        if simulate_bold and self.boldInitialized:
            bold_variable = self.get_bold_variable(variables)
            self.simulateBold(bold_variable, append=True)

    def integrateChunkwise(self, chunksize, bold=False, append_outputs=False):
        """Repeatedly calls the chunkwise integration for the whole duration of the simulation.
        If `bold==True`, the BOLD model is simulated after each chunk.

        :param chunksize: size of each chunk to simulate in units of dt
        :type chunksize: int
        :param bold: simulate BOLD model after each chunk, defaults to False
        :type bold: bool, optional
        :param append_outputs: append the chunkwise outputs to the outputs attribute, defaults to False
        :type append_outputs: bool, optional
        """
        totalDuration = self.params["duration"]

        dt = self.params["dt"]
        lastT = 0
        while totalDuration - lastT >= dt - 1e-6:
            # Determine the size of the next chunk
            # account for floating point errors
            remainingChunkSize = int(round((totalDuration - lastT) / dt))
            currentChunkSize = min(chunksize, remainingChunkSize)

            self.autochunk(chunksize=currentChunkSize, append_outputs=append_outputs, bold=bold)
            # we save the last simulated time step
            lastT += currentChunkSize * dt
            # or
            # lastT = self.state["t"][-1]

        # set duration back to its original value
        self.params["duration"] = totalDuration

    def clearModelState(self):
        """Clears the model's state to create a fresh one"""
        self.state = dotdict({})
        self.outputs = dotdict({})
        # reinitialize bold model
        if self.boldInitialized:
            self.initializeBold()

    def storeOutputsAndStates(self, t, variables, append=False):
        """Takes the simulated variables of the integration and stores it to the appropriate model output and state object.

        :param t: time vector
        :type t: list
        :param variables: variables from the time integration
        :type variables: list[np.ndarray]
        :param append: append output to existing output or overwrite, defaults to False
        :type append: bool, optional
        """
        # save time array
        self.setOutput("t", t, append=append, removeICs=True)
        self.setStateVariables("t", t)
        # save outputs
        for svn, sv in zip(self.state_vars, variables):
            if svn in self.output_vars:
                self.setOutput(svn, sv, append=append, removeICs=True)
            self.setStateVariables(svn, sv)

    def setInitialValuesToLastState(self):
        """Reads the last state of the model and sets the initial conditions to that state for continuing a simulation."""
        if not all([sv in self.state for sv in self.state_vars]):
            return
        for iv, sv in zip(self.init_vars, self.state_vars):
            # if state variables are one-dimensional (in space only)
            if (self.state[sv].ndim == 0) or (self.state[sv].ndim == 1):
                self.params[iv] = self.state[sv]
            # if they are space-time arrays
            else:
                # we set the next initial condition to the last state
                self.params[iv] = self.state[sv][:, -self.startindt :]

    def randomICs(self, min=0, max=1):
        """Generates a new set of uniformly-distributed random initial conditions for the model.

        TODO: All parameters are drawn from the same distribution / range. Allow for independent ranges.

        :param min: Minimum of uniform distribution
        :type min: float
        :param max: Maximum of uniform distribution
        :type max: float
        """
        for iv in self.init_vars:
            if self.params[iv].ndim == 1:
                self.params[iv] = np.random.uniform(min, max, (self.params["N"]))
            elif self.params[iv].ndim == 2:
                self.params[iv] = np.random.uniform(min, max, (self.params["N"], 1))

    def setInputs(self, inputs):
        """Take inputs from a list and store it in the appropriate model parameter for external input.
        TODO: This is not safe yet, checks should be implemented whether the model has inputs defined or not for example.

        :param inputs: list of inputs
        :type inputs: list[np.ndarray]
        """
        for i, iv in enumerate(self.input_vars):
            self.params[iv] = inputs[i].copy()

    def autochunk(self, inputs=None, chunksize=1, append_outputs=False, bold=False):
        """Executes a single chunk of integration, either for a given duration
        or a single timestep `dt`. Gathers all inputs to the model and resets
        the initial conditions as a preparation for the next chunk.

        :param inputs: list of input values, ordered according to self.input_vars, defaults to None
        :type inputs: list[np.ndarray], optional
        :param chunksize: length of a chunk to simulate in dt, defaults 1
        :type chunksize: int, optional
        :param append_outputs: append the chunkwise outputs to the outputs attribute, defaults to False
        :type append_outputs: bool, optional
        """

        # set the duration for this chunk
        self.params["duration"] = chunksize * self.params["dt"]

        # set inputs
        if inputs is not None:
            self.setInputs(inputs)

        # run integration
        self.integrate(append_outputs=append_outputs, simulate_bold=bold)

        # set initial conditions to last state for the next chunk
        self.setInitialValuesToLastState()

    def getMaxDelay(self):
        """Computes the maximum delay of the model. This function should be overloaded
        if the model has internal delays (additional to delay between nodes defined by Dmat)
        such as the delay between an excitatory and inhibitory population within each brain area.
        If this function is not overloaded, the maximum delay is assumed to be defined from the
        global delay matrix `Dmat`.

        Note: Maximum delay is given in units of dt.

        :return: maximum delay of the model in units of dt
        :rtype: int
        """
        dt = self.params.get("dt")
        Dmat = self.params.get("lengthMat")

        if Dmat is not None:
            # divide Dmat by signalV
            signalV = self.params.get("signalV") or 0
            if signalV > 0:
                Dmat = Dmat / signalV
            else:
                # if signalV is 0, eliminate delays
                Dmat = Dmat * 0.0

        # only if Dmat and dt exist, a global max delay can be computed
        if Dmat is not None and dt is not None:
            Dmat_ndt = np.around(Dmat / dt)  # delay matrix in multiples of dt
            max_global_delay = int(np.amax(Dmat_ndt))
        else:
            max_global_delay = 0
        return max_global_delay

    def setStateVariables(self, name, data):
        """Saves the models current state variables.

        TODO: Cut state variables to the length of self.maxDelay;
        however, this could be a time-memory tradeoff.

        :param name: name of the state variable
        :type name: str
        :param data: value of the variable
        :type data: np.ndarray
        """
        # if the data is temporal, cut off initial values
        # NOTE: this should actually check `data.shape[1] > 1` and fall back to
        # `data.copy()` otherwise; there could be (N, 1)-dimensional output,
        # but right now it is required to be of shape (N, )
        if data.ndim == 2:
            self.state[name] = data[:, -self.startindt :].copy()
        else:
            self.state[name] = data.copy()

    def setOutput(self, name, data, append=False, removeICs=False):
        """Adds an output to the model, typically a simulation result.
        :param name: Name of the output in dot.notation, a la "outputgroup.output"
        :type name: str
        :param data: Output data, can't be a dictionary!
        :type data: `numpy.ndarray`
        """
        assert not isinstance(data, dict), "Output data cannot be a dictionary."
        assert isinstance(name, str), "Output name must be a string."
        assert isinstance(data, np.ndarray), "Output must be a `numpy.ndarray`."

        # remove initial conditions from output
        if removeICs and name != "t":
            if data.ndim == 1:
                data = data[self.startindt :]
            elif data.ndim == 2:
                data = data[:, self.startindt :]
            else:
                raise ValueError(f"Don't know how to truncate data of shape {data.shape}.")

        # subsample to sampling dt
        if data.shape[-1] >= self.params["duration"] - self.startindt:
            if data.ndim == 1:
                data = data[:: self.sample_every]
            elif data.ndim == 2:
                data = data[:, :: self.sample_every]
            else:
                raise ValueError(f"Don't know how to subsample data of shape {data.shape}.")

        def save_leaf(node, name, data, append):
            if name in node:
                if data.ndim == 1 and name == "t":
                    # special treatment for time data:
                    # increment the time by the last recorded duration
                    data += node[name][-1]
                if append and data.shape[-1] != 0:
                    data = np.hstack((node[name], data))
            node[name] = data
            return node

        # if the output is a single name (not dot.separated)
        if "." not in name:
            save_leaf(self.outputs, name, data, append)
            # set output as an attribute
            setattr(self, name, self.outputs[name])
        else:
            # build results dictionary and write into self.outputs
            # dot.notation iteration
            keys = name.split(".")
            level = self.outputs  # not copy, reference!
            for i, k in enumerate(keys):
                # if it's the last iteration, store data
                if i == len(keys) - 1:
                    level = save_leaf(level, k, data, append)
                # if key is in outputs, then go deeper
                elif k in level:
                    level = level[k]
                # if it's a new key, create new nested dictionary, set attribute, then go deeper
                else:
                    level[k] = dotdict({})
                    setattr(self, k, level[k])
                    level = level[k]

    def getOutput(self, name):
        """Get an output of a given name (dot.semarated)
        :param name: A key, grouped outputs in the form group.subgroup.variable
        :type name: str

        :returns: Output data
        """
        assert isinstance(name, str), "Output name must be a string."
        keys = name.split(".")
        lastOutput = self.outputs.copy()
        for i, k in enumerate(keys):
            assert k in lastOutput, f"Key {k} not found in outputs."
            lastOutput = lastOutput[k]
        return lastOutput

    def __getitem__(self, key):
        """Index outputs with a dictionary-like key, e.g., `model['rates_exc']`."""
        return self.getOutput(key)

    def getOutputs(self, group=""):
        """Get all outputs of an output group. Examples: `getOutputs("BOLD")` or simply `getOutputs()`

        :param group: Group name, subgroups separated by dots. If left empty (default), all outputs of the root group
            are returned.
        :type group: str
        """
        assert isinstance(group, str), "Group name must be a string."

        def filterOutputsFromGroupDict(groupDict):
            """Return a dictionary with the output data of a group disregarding all other nested dicts.
            :param groupDict: Dictionary of outputs (can include other groups)
            :type groupDict: dict
            """
            assert isinstance(groupDict, dict), "Not a dictionary."
            # make a copy of the dictionary so entries can be deleted from it
            returnDict = groupDict.copy()
            for key, value in groupDict.items():
                if isinstance(value, dict):
                    del returnDict[key]
            return returnDict

        # if a group deeper than the root is given, select the last node
        lastOutput = self.outputs.copy()
        if len(group) > 0:
            keys = group.split(".")
            for i, k in enumerate(keys):
                assert k in lastOutput, f"Key {k} not found in outputs."
                lastOutput = lastOutput[k]
                assert isinstance(lastOutput, dict), f"Key {k} does not refer to a group."
        # filter out all output *groups* that might be in this node and return only output data
        return filterOutputsFromGroupDict(lastOutput)

    @property
    def output(self):
        """Returns value of default output as defined by `self.default_output`.
        Note that all outputs are saved in the attribute `self.outputs`.
        """
        assert self.default_output is not None, "Default output has not been set yet. Use `setDefaultOutput()`."
        return self.getOutput(self.default_output)

    def xr(self, group=""):
        """Converts a group of outputs to xarray. Output group needs to contain an
        element that starts with the letter "t" or it will not recognize any time axis.

        :param group: Output group name, example:  "BOLD". Leave empty for top group.
        :type group: str
        """
        assert isinstance(group, str), "Group name must be a string."
        # take all outputs of one group: disregard all dictionaries because they are subgroups
        outputDict = self.getOutputs(group)
        # make sure that there is a time array
        timeDictKey = ""
        if "t" in outputDict:
            timeDictKey = "t"
        else:
            for k in outputDict:
                if k.startswith("t"):
                    timeDictKey = k
                    logging.info(f"Assuming {k} to be the time axis.")
                    break
        assert len(timeDictKey) > 0, f"No time array found (starting with t) in output group {group}."
        t = outputDict[timeDictKey].copy()
        del outputDict[timeDictKey]

        outputNames, outputs = zip(*outputDict.items())
        outputNames = list(outputNames)

        nNodes = outputs[0].shape[0]
        nodes = list(range(nNodes))
        allOutputsStacked = np.stack(outputs)  # stack all outputs along a new first (output) axis
        result = xr.DataArray(allOutputsStacked, coords=[outputNames, nodes, t], dims=["output", "space", "time"])
        return result

output property

Returns value of default output as defined by self.default_output. Note that all outputs are saved in the attribute self.outputs.

__getitem__(key)

Index outputs with a dictionary-like key, e.g., model['rates_exc'].


autochunk(inputs=None, chunksize=1, append_outputs=False, bold=False)

Executes a single chunk of integration, either for a given duration or a single timestep dt. Gathers all inputs to the model and resets the initial conditions as a preparation for the next chunk.

Parameters:

- inputs (list[np.ndarray], optional): list of input values, ordered according to self.input_vars. Default: None.
- chunksize (int, optional): length of a chunk to simulate in units of dt. Default: 1.
- append_outputs (bool, optional): append the chunkwise outputs to the outputs attribute. Default: False.
- bold (bool, optional): simulate the BOLD model after this chunk. Default: False.
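
A sketch of a manual chunked integration loop, e.g. to feed a new external input every chunk. The required names and shapes of the input arrays are model-specific (one array per entry in model.input_vars, see setInputs) and are not sanity-checked; the zero input below is a hypothetical placeholder:

import numpy as np

chunksize = 100  # length of each chunk in units of dt
for _ in range(10):
    # hypothetical external input for this chunk, one array per model.input_vars entry
    ext = np.zeros((model.params["N"], chunksize))
    model.autochunk(inputs=[ext], chunksize=chunksize, append_outputs=True)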

checkChunkwise(chunksize)

Checks if the model fulfills the requirements for chunkwise simulation. Checks whether the sampling rate for outputs fits the chunksize and duration. Throws errors if not.


clearModelState()

Clears the model's state to create a fresh one


getMaxDelay()

Computes the maximum delay of the model. This function should be overloaded if the model has internal delays (additional to delay between nodes defined by Dmat) such as the delay between an excitatory and inhibitory population within each brain area. If this function is not overloaded, the maximum delay is assumed to be defined from the global delay matrix Dmat.

Note: Maximum delay is given in units of dt.

Returns:

- int: maximum delay of the model in units of dt

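Models with internal delays can overload this method. A sketch, where the intra-areal delay parameter de is a hypothetical example:

from neurolib.models.model import Model

class MyModel(Model):
    def getMaxDelay(self):
        # maximum delay from the global delay matrix, computed by the base class
        global_delay = super().getMaxDelay()
        # hypothetical internal delay parameter `de`, given in ms
        internal_delay_ndt = int(round(self.params["de"] / self.params["dt"]))
        return max(global_delay, internal_delay_ndt)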

getOutput(name)

Get an output of a given name (dot.separated)

Parameters:

- name (str, required): A key; grouped outputs are addressed in the form group.subgroup.variable.

Returns:

- Output data

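A usage sketch, assuming model.run(bold=True) was called before so that the nested BOLD group exists:

bold = model.getOutput("BOLD.BOLD")      # BOLD time series, one row per node
t_bold = model.getOutput("BOLD.t_BOLD")  # corresponding time axis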

getOutputs(group='')

Get all outputs of an output group. Examples: getOutputs("BOLD") or simply getOutputs()

Parameters:

- group (str): Group name, subgroups separated by dots. If left empty (default), all outputs of the root group are returned. Default: ''.
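
For example, to collect the arrays of one group into a plain dictionary (again assuming a BOLD simulation was run):

all_outputs = model.getOutputs()         # all outputs of the root group
bold_outputs = model.getOutputs("BOLD")  # e.g. {"t_BOLD": ..., "BOLD": ...}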

initializeBold()

Initialize BOLD model.


initializeRun(initializeBold=False)

Initialization before each run.

Parameters:

- initializeBold (bool): initialize the BOLD model. Default: False.

integrate(append_outputs=False, simulate_bold=False)

Calls the model's integration function and saves the state and the outputs of the model.

Parameters:

- append_outputs (bool, optional): append the chunkwise outputs to the outputs attribute. Default: False.
- simulate_bold (bool, optional): simulate the BOLD model after the integration. Default: False.

integrateChunkwise(chunksize, bold=False, append_outputs=False)

Repeatedly calls the chunkwise integration for the whole duration of the simulation. If bold==True, the BOLD model is simulated after each chunk.

Parameters:

- chunksize (int, required): size of each chunk to simulate in units of dt.
- bold (bool, optional): simulate BOLD model after each chunk. Default: False.
- append_outputs (bool, optional): append the chunkwise outputs to the outputs attribute. Default: False.

randomICs(min=0, max=1)

Generates a new set of uniformly-distributed random initial conditions for the model.

TODO: All parameters are drawn from the same distribution / range. Allow for independent ranges.

Parameters:

- min (float): Minimum of the uniform distribution. Default: 0.
- max (float): Maximum of the uniform distribution. Default: 1.
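
For example, to draw fresh initial conditions before repeating a simulation (note that all initial conditions share the same range, see the TODO above):

model.randomICs(min=0, max=0.5)  # new uniformly-distributed initial conditions
model.run()                      # simulate from the new initial state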

run(chunkwise=False, chunksize=None, bold=False, append_outputs=False, continue_run=False)

Main interfacing function to run a model.

The model can be run in three different ways:

1) model.run() starts a new run.
2) model.run(chunkwise=True) runs the simulation in chunks of length chunksize.
3) model.run(continue_run=True) continues the simulation of a previous run. This has no effect during the first run.

Parameters:

- chunkwise (bool, optional): simulate the model chunkwise or in one single run. Default: False.
- chunksize (int, optional): size of each chunk to simulate in units of dt; if set, implies chunkwise=True. Defaults to 2s worth of steps.
- bold (bool, optional): simulate the BOLD signal (only for chunkwise integration). Default: False.
- append_outputs (bool, optional): append new and chunkwise outputs to the outputs attribute. Note: BOLD outputs are always appended. Default: False.
- continue_run (bool): continue a simulation by using the initial values from a previous simulation. This has no effect during the first run. Default: False.
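
A sketch of the three ways of running, assuming duration and dt are given in ms as usual:

model.params["duration"] = 10000  # ms

# 1) single run
model.run()

# 2) chunkwise run with BOLD; chunksize is given in units of dt
model.run(chunkwise=True, chunksize=int(2000 / model.params["dt"]), bold=True)

# 3) continue the previous simulation from its last state
model.run(continue_run=True, append_outputs=True)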

setInitialValuesToLastState()

Reads the last state of the model and sets the initial conditions to that state for continuing a simulation.


setInputs(inputs)

Take inputs from a list and store them in the appropriate model parameters for external input. TODO: This is not safe yet; checks should be implemented, for example whether the model has inputs defined or not.

Parameters:

- inputs (list[np.ndarray], required): list of inputs.
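
A minimal sketch; the number, order, and shapes of the arrays must match model.input_vars, and whether the integration expects exactly this shape is model-specific:

import numpy as np

n_steps = int(model.params["duration"] / model.params["dt"])
# one array per entry in model.input_vars, here all-zero inputs of shape (N, steps)
inputs = [np.zeros((model.params["N"], n_steps)) for _ in model.input_vars]
model.setInputs(inputs)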

setOutput(name, data, append=False, removeICs=False)

Adds an output to the model, typically a simulation result.

Parameters:

- name (str, required): Name of the output in dot.notation, a la "outputgroup.output".
- data (numpy.ndarray, required): Output data; can't be a dictionary!

setSamplingDt()

Checks if sampling_dt is set correctly and sets self.sample_every:

1) Check if sampling_dt is a multiple of dt.
2) Check that sampling_dt does not exceed the duration.

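Subsampling of outputs is controlled through the sampling_dt parameter; a sketch:

model.params["dt"] = 0.1            # integration time step (ms)
model.params["sampling_dt"] = 10.0  # store outputs only every 10 ms
model.run()                         # outputs are subsampled by a factor of 100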

setStateVariables(name, data)

Saves the model's current state variables.

TODO: Cut state variables to the length of self.maxDelay; however, this could be a time-memory tradeoff.

Parameters:

- name (str, required): name of the state variable.
- data (np.ndarray, required): value of the variable.

simulateBold(bold_variable, append=True)

Simulates the BOLD model from the given model variable (by default, the model's default output). Adds the simulated BOLD signal to the outputs.


storeOutputsAndStates(t, variables, append=False)

Takes the simulated variables of the integration and stores them in the appropriate model output and state objects.

Parameters:

- t (list, required): time vector.
- variables (list[np.ndarray], required): variables from the time integration.
- append (bool, optional): append output to existing output or overwrite. Default: False.

xr(group='')

Converts a group of outputs to xarray. Output group needs to contain an element that starts with the letter "t" or it will not recognize any time axis.

Parameters:

- group (str): Output group name, example: "BOLD". Leave empty for the top group. Default: ''.
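
A usage sketch converting the outputs to a labeled xarray.DataArray:

da = model.xr()                     # dims: ("output", "space", "time")
rates = da.sel(output="rates_exc")  # assuming an output of that name exists
bold_da = model.xr("BOLD")          # BOLD group, if it was simulated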