Example 5.1: Optimal control of deterministic phenomenological models
This notebook shows how to compute the optimal control (OC) signal for phenomenological models (FHN, Hopf) for a simple example task.
import matplotlib.pyplot as plt
import numpy as np
import os
# move to the neurolib repository root so that the package can be imported
while os.getcwd().split(os.sep)[-1] != "neurolib":
    os.chdir('..')
# We import the model, stimuli, and the optimal control package
from neurolib.models.fhn import FHNModel
from neurolib.models.hopf import HopfModel
from neurolib.utils.stimulus import ZeroInput
from neurolib.control.optimal_control import oc_fhn
from neurolib.control.optimal_control import oc_hopf
from neurolib.utils.plot_oc import plot_oc_singlenode, plot_oc_network
# This will reload all imports as soon as the code changes
%load_ext autoreload
%autoreload 2
We stimulate the system with a known control signal, define the resulting activity as the target, and then compute the optimal control for this target. We choose the weights such that only the deviation from the target (precision) is penalized (w_p=1), while the control energy is not (w_2=0). Hence, the optimal control signal should converge to the known input signal.
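For orientation, the cost functional minimized by the algorithm combines a precision term and an energy term. This is a sketch of the standard formulation; the exact implementation resides in the optimal control package:

$$ J[u] = \frac{w_p}{2} \int_0^T \big(x(t) - x^{\mathrm{target}}(t)\big)^2 \, dt + \frac{w_2}{2} \int_0^T u(t)^2 \, dt $$

With $w_p = 1$ and $w_2 = 0$, only the deviation from the target contributes, so the minimizing control reproduces the stimulus that generated the target.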
# We initialize the model
model = FHNModel()
# model = HopfModel() # OC can be computed for the Hopf model completely analogously
# Some parameters to define stimulation signals
dt = model.params["dt"]
duration = 10.
amplitude = 1.
period = duration/4.
# We define a "zero-input", and a sine-input
zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
input = np.copy(zero_input)
input[0,1:-2] = np.sin(2.*np.pi*np.arange(0,duration-0.2, dt)/period) # other functions or random values can be used as well
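As a quick sanity check, we can visualize the stimulus before applying it; a minimal sketch using the matplotlib import from above, with the horizontal axis in units of time steps:
plt.plot(input[0, :])
plt.xlabel("time step")
plt.ylabel("stimulus amplitude")
plt.show()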
# We set the duration of the simulation and the initial values
model.params["duration"] = duration
x_init = 0.
y_init = 0.
model.params["xs_init"] = np.array([[x_init]])
model.params["ys_init"] = np.array([[y_init]])
# We set the stimulus in x and y variables, and run the simulation
model.params["x_ext"] = input
model.params["y_ext"] = zero_input
model.run()
# Define the result of the stimulation as target
target = np.concatenate((np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
target_input = np.concatenate( (input,zero_input), axis=0)[np.newaxis,:,:]
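The arrays follow the layout (nodes, variables, time); the target additionally contains the initial values as its first time point. A quick shape check (a sketch; the number of time points depends on duration and dt):
print("target shape:", target.shape) # (nodes, variables, time points incl. initial value)
print("target_input shape:", target_input.shape) # (nodes, variables, time points)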
# Remove stimuli and re-run the simulation
model.params["x_ext"] = zero_input
model.params["y_ext"] = zero_input
control = np.concatenate( (zero_input,zero_input), axis=0)[np.newaxis,:,:]
model.run()
# combine initial value and simulation result into one array
state = np.concatenate((np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
plot_oc_singlenode(duration, dt, state, target, control, target_input)
# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
model.params["x_ext"] = zero_input
model.params["y_ext"] = zero_input
# We initialize the optimal control class
# The optional parameter print_array defines the iterations at which intermediate results are printed
# Model parameters are taken from the input model
if model.name == 'fhn':
    model_controlled = oc_fhn.OcFhn(model, target, print_array=np.arange(0,501,25))
elif model.name == 'hopf':
    model_controlled = oc_hopf.OcHopf(model, target, print_array=np.arange(0,501,25))
# By default, the weights are set to w_p = 1 and w_2 = 0, so that energy costs do not contribute. The algorithm then produces a control such that the state matches the target as closely as possible, regardless of the strength of the required control input.
# If you want to adjust the ratio of the precision and energy weights, you can change the values in the weights dictionary
model_controlled.weights["w_p"] = 1. # default value 1
model_controlled.weights["w_2"] = 0. # default value 0
# We run 500 iterations of the optimal control gradient descent algorithm
model_controlled.optimize(500)
state = model_controlled.get_xs()
control = model_controlled.control
plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
# Perform another 100 iterations if you want to improve the result.
# Repeated execution will continue the optimization for a further 100 iterations each time.
model_controlled.optimize(100)
state = model_controlled.get_xs()
control = model_controlled.control
plot_oc_singlenode(duration, dt, state, target, control, target_input, model_controlled.cost_history)
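Since only precision is penalized, the computed control should be close to the stimulus that generated the target. A rough check of the remaining mismatch (a sketch; the residual depends on the number of iterations):
print("maximum deviation from the known stimulus:", np.max(np.abs(control[0, 0, :] - input[0, :])))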
Network of neural populations (no delay)
Let us now study a simple 2-node network of FHN oscillators. We first define the coupling matrix and the distance matrix, and then initialize the model.
cmat = np.array( [[0., 0.5], [1., 0.]] ) # diagonal elements are zero; connection strength is 1 from node 0 to node 1, and 0.5 from node 1 to node 0
dmat = np.array( [[0., 0.], [0., 0.]] ) # no delay
if model.name == 'fhn':
    model = FHNModel(Cmat=cmat, Dmat=dmat)
elif model.name == 'hopf':
    model = HopfModel(Cmat=cmat, Dmat=dmat)
# we define the control input matrix to enable or disable certain channels and nodes
control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
control_mat[0,0] = 1. # only allow inputs in x-channel in node 0
if control_mat[0,0] == 0. and control_mat[1,0] == 0.:
    # if x is an input channel, high connection strength can lead to numerical issues;
    # therefore, the coupling is increased only when the x-channel receives no input
    model.params.K_gl = 5. # increase for stronger connectivity, WARNING: a too high value will cause numerical problems
model.params["duration"] = duration
zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
input = np.copy(zero_input)
input[0,1:-3] = np.sin(np.arange(0,duration-0.3, dt)) # other functions or random values can be used as well
model.params["xs_init"] = np.vstack( [x_init, x_init] )
model.params["ys_init"] = np.vstack( [y_init, y_init] )
# We set the stimulus in x and y variables, and run the simulation
input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
model.params["x_ext"] = input_nw[:,0,:]
model.params["y_ext"] = input_nw[:,1,:]
model.run()
# Define the result of the stimulation as target
target = np.concatenate( (np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
# Remove stimuli and re-run the simulation
model.params["x_ext"] = zero_input_nw[:,0,:]
model.params["y_ext"] = zero_input_nw[:,0,:]
model.run()
# combine initial value and simulation result into one array
state = np.concatenate( (np.concatenate( (model.params["xs_init"], model.params["ys_init"]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
# we define the precision matrix to specify in which nodes and channels deviations from the target are measured
cost_mat = np.zeros( (model.params.N, len(model.output_vars)) )
cost_mat[1,0] = 1. # only measure in the x-channel of node 1
# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
model.params["x_ext"] = zero_input_nw[:,0,:]
model.params["y_ext"] = zero_input_nw[:,0,:]
# We initialize the optimal control class
# The optional parameter print_array defines the iterations at which intermediate results are printed
# Model parameters are taken from the input model
if model.name == 'fhn':
    model_controlled = oc_fhn.OcFhn(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
elif model.name == 'hopf':
    model_controlled = oc_hopf.OcHopf(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
# We run 500 iterations of the optimal control gradient descent algorithm
model_controlled.optimize(500)
state = model_controlled.get_xs()
control = model_controlled.control
plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
# Perform another 100 iterations if you want to improve the result.
# Repeated execution will continue the optimization for a further 100 iterations each time.
model_controlled.optimize(100)
state = model_controlled.get_xs()
control = model_controlled.control
plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
Delayed network of neural populations
We now consider a network topology with delayed signalling between the two nodes.
cmat = np.array( [[0., 0.], [1., 0.]] ) # diagonal elements are zero, connection strength is 1 from node 0 to node 1
dmat = np.array( [[0., 0.], [18., 0.]] ) # distance from node 0 to node 1; the delay is computed by dividing the distance by the signal speed params.signalV
if model.name == 'fhn':
    model = FHNModel(Cmat=cmat, Dmat=dmat)
elif model.name == 'hopf':
    model = HopfModel(Cmat=cmat, Dmat=dmat)
# run a long simulation to obtain a steady state, which will serve as the initial condition below
duration, dt = 2000., 0.1
model.params.duration = duration
model.params.dt = dt
# change coupling parameters for faster and stronger connection between nodes
model.params.K_gl = 1.
model.params.x_ext = np.zeros((1,))
model.params.y_ext = np.zeros((1,))
model.run()
# read out the steady-state values of the x- and y-variables in both nodes
e0 = model.x[0,-1]
e1 = model.x[1,-1]
i0 = model.y[0,-1]
i1 = model.y[1,-1]
maxdelay = model.getMaxDelay()
model.params["xs_init"] = np.array([[e0] * (maxdelay + 1), [e1] * (maxdelay + 1) ])
model.params["ys_init"] = np.array([[i0] * (maxdelay + 1), [i1] * (maxdelay + 1) ])
duration = 6.
model.params.duration = duration
time = np.arange(dt, duration+dt, dt)
# we define the control input matrix to enable or disable certain channels and nodes
control_mat = np.zeros( (model.params.N, len(model.state_vars)) )
control_mat[0,0] = 1. # only allow inputs in the x-channel of node 0
zero_input = ZeroInput().generate_input(duration=duration+dt, dt=dt)
input = np.copy(zero_input)
input[0,10] = 1.
input[0,20] = 1.
input[0,30] = 1. # Three pulses as control input
input_nw = np.concatenate( (np.vstack( [control_mat[0,0] * input, control_mat[0,1] * input] )[np.newaxis,:,:],
np.vstack( [control_mat[1,0] * input, control_mat[1,1] * input] )[np.newaxis,:,:]), axis=0)
zero_input_nw = np.concatenate( (np.vstack( [zero_input, zero_input] )[np.newaxis,:,:],
np.vstack( [zero_input, zero_input] )[np.newaxis,:,:]), axis=0)
model.params["x_ext"] = input_nw[:,0,:]
model.params["y_ext"] = input_nw[:,1,:]
model.params["xs_init"] = np.array([[e0] * (maxdelay + 1), [e1] * (maxdelay + 1) ])
model.params["ys_init"] = np.array([[i0] * (maxdelay + 1), [i1] * (maxdelay + 1) ])
model.run()
# Define the result of the stimulation as target
target = np.concatenate( (np.stack( (model.params["xs_init"][:,-1], model.params["ys_init"][:,-1]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
# Remove stimuli and re-run the simulation
model.params["x_ext"] = zero_input_nw[:,0,:]
model.params["y_ext"] = zero_input_nw[:,0,:]
model.run()
# combine initial value and simulation result into one array
state = np.concatenate( (np.stack( (model.params["xs_init"][:,-1], model.params["ys_init"][:,-1]), axis=1)[:,:, np.newaxis], np.stack( (model.x, model.y), axis=1)), axis=2)
plot_oc_network(model.params.N, duration, dt, state, target, zero_input_nw, input_nw)
# We set the external stimulation to zero. This is the "initial guess" for the OC algorithm
model.params["x_ext"] = zero_input_nw[:,0,:]
model.params["y_ext"] = zero_input_nw[:,0,:]
# We initialize the optimal control class
# The optional parameter print_array defines the iterations at which intermediate results are printed
# Model parameters are taken from the input model
if model.name == "fhn":
model_controlled = oc_fhn.OcFhn(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
elif model.name == "hopf":
model_controlled = oc_hopf.OcHopf(model, target, print_array=np.arange(0,501,25), control_matrix=control_mat, cost_matrix=cost_mat)
# We run 500 iterations of the optimal control gradient descent algorithm
model_controlled.optimize(500)
state = model_controlled.get_xs()
control = model_controlled.control
plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
# Perform another 100 iterations to improve the result.
# Repeated execution will continue the optimization for a further 100 iterations each time.
model_controlled.optimize(100)
state = model_controlled.get_xs()
control = model_controlled.control
plot_oc_network(model.params.N, duration, dt, state, target, control, input_nw, model_controlled.cost_history, model_controlled.step_sizes_history)
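As a rough quality check (a sketch), we can inspect the remaining deviation in the measured channel, the x-variable of node 1:
print("max. deviation in node 1, x-channel:", np.max(np.abs(state[1, 0, :] - target[1, 0, :])))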