Reinforcement learning of targeted movement (Chadderdon et al. 2012)

 Download zip file   Auto-launch 
Help downloading and running models
Accession:144538
"Sensorimotor control has traditionally been considered from a control theory perspective, without relation to neurobiology. In contrast, here we utilized a spiking-neuron model of motor cortex and trained it to perform a simple movement task, which consisted of rotating a single-joint “forearm” to a target. Learning was based on a reinforcement mechanism analogous to that of the dopamine system. This provided a global reward or punishment signal in response to decreasing or increasing distance from hand to target, respectively. Output was partially driven by Poisson motor babbling, creating stochastic movements that could then be shaped by learning. The virtual forearm consisted of a single segment rotated around an elbow joint, controlled by flexor and extensor muscles. ..."
Reference:
1 . Chadderdon GL, Neymotin SA, Kerr CC, Lytton WW (2012) Reinforcement learning of targeted movement in a spiking neuronal model of motor cortex. PLoS ONE 7(10):e47251
Model Information (Click on a link to find other models with that property)
Model Type: Realistic Network;
Brain Region(s)/Organism: Neocortex;
Cell Type(s): Neocortex fast spiking (FS) interneuron; Neocortex spiking regular (RS) neuron; Neocortex spiking low threshold (LTS) neuron;
Channel(s):
Gap Junctions:
Receptor(s): GabaA; AMPA; NMDA;
Gene(s):
Transmitter(s): Dopamine; Gaba; Glutamate;
Simulation Environment: NEURON;
Model Concept(s): Simplified Models; Synaptic Plasticity; Long-term Synaptic Plasticity; Reinforcement Learning; Reward-modulated STDP;
Implementer(s): Neymotin, Sam [samn at neurosim.downstate.edu]; Chadderdon, George [gchadder3 at gmail.com];
Search NeuronDB for information about:  GabaA; AMPA; NMDA; Dopamine; Gaba; Glutamate;
/
arm1d
README
drspk.mod *
infot.mod *
intf6_.mod *
intfsw.mod *
misc.mod *
nstim.mod *
stats.mod *
updown.mod *
vecst.mod *
arm.hoc
basestdp.hoc
col.hoc *
colors.hoc *
declist.hoc *
decmat.hoc *
decnqs.hoc *
decvec.hoc *
default.hoc *
drline.hoc *
filtutils.hoc *
geom.hoc
grvec.hoc *
hinton.hoc *
infot.hoc *
init.hoc
intfsw.hoc *
labels.hoc *
local.hoc *
misc.h *
mosinit.hoc
network.hoc
nload.hoc
nqs.hoc *
nqsnet.hoc *
nrnoc.hoc *
params.hoc
run.hoc
samutils.hoc *
sense.hoc *
setup.hoc *
sim.hoc
simctrl.hoc *
stats.hoc *
stim.hoc
syncode.hoc *
units.hoc *
xgetargs.hoc *
                            
// $Id: geom.hoc,v 1.7 2011/11/22 22:49:07 samn Exp $

// Global geometry/drive parameters shared by all DPC cells (see external list in template).
declare("armLen","d[2]") // length of each arm segment -- fixed throughout sim
declare("MLen","d[4]") // length of each muscle -- varies throughout sim
declare("MFctr",1) // scale factor used by the (commented-out) proportional drive formula
declare("minDPr",0,"maxDPr",1) // baseline and maximal drive probability for DRSPK sources

begintemplate DPC

public soma,drspk,id,type,subtype,xloc,yloc,zloc,mlenmin,mlenmax
public setmlenrange,updatedrive
external armLen,MLen,MFctr,minDPr,maxDPr
objref drspk
double id[1],type[1],subtype[1],xloc[1],yloc[1],zloc[1],mlenmin[1],mlenmax[1]

create soma[1]

// Constructor: optional $1 = cell id, optional $2 = cell type.
proc init () {
  if (numarg()>=1) id=$1
  if (numarg()>=2) type=$2
  initsoma()
}

// Build the single-compartment soma: passive membrane with a DRSPK
// point process (drive-dependent spike source) at its center.
proc initsoma () {
  soma {
    L = 30
    diam = 30
    nseg = 1
    Ra = 1
//    cm = 0
    insert pas
    e_pas = 0
    drspk = new DRSPK(0.5)
  }
}

// Set the preferred muscle-length window: $1 = lower bound, $2 = upper bound.
proc setmlenrange () {
  mlenmin = $1
  mlenmax = $2
}

// Update the DRSPK drive from a muscle length.  Optional $1 overrides the
// length; otherwise the current length of muscle MLen[zloc] is used.
proc updatedrive () { local curlen
  if (numarg() > 0) {
    curlen = $1
  } else {
    curlen = MLen[zloc]
  }

  // Alternative (disabled) scheme: drive proportional to normalized length,
  // i.e. minDPr + (maxDPr-minDPr) * MFctr * MLen[zloc] / armLen[zloc/2]

  // Binary tuning: maximal drive when the length falls inside the cell's
  // preferred [mlenmin,mlenmax] window, baseline drive otherwise.
  drspk.drive = minDPr
  if (curlen >= mlenmin && curlen <= mlenmax) drspk.drive = maxDPr
}

endtemplate DPC

Loading data, please wait...