Reinforcement learning of targeted movement (Chadderdon et al. 2012)

 Download zip file   Auto-launch 
Help downloading and running models
Accession:144538
"Sensorimotor control has traditionally been considered from a control theory perspective, without relation to neurobiology. In contrast, here we utilized a spiking-neuron model of motor cortex and trained it to perform a simple movement task, which consisted of rotating a single-joint “forearm” to a target. Learning was based on a reinforcement mechanism analogous to that of the dopamine system. This provided a global reward or punishment signal in response to decreasing or increasing distance from hand to target, respectively. Output was partially driven by Poisson motor babbling, creating stochastic movements that could then be shaped by learning. The virtual forearm consisted of a single segment rotated around an elbow joint, controlled by flexor and extensor muscles. ..."
Reference:
1 . Chadderdon GL, Neymotin SA, Kerr CC, Lytton WW (2012) Reinforcement learning of targeted movement in a spiking neuronal model of motor cortex. PLoS ONE 7(10):e47251
Model Information (Click on a link to find other models with that property)
Model Type: Realistic Network;
Brain Region(s)/Organism: Neocortex;
Cell Type(s): Neocortex fast spiking (FS) interneuron; Neocortex spiking regular (RS) neuron; Neocortex spiking low threshold (LTS) neuron;
Channel(s):
Gap Junctions:
Receptor(s): GabaA; AMPA; NMDA;
Gene(s):
Transmitter(s): Dopamine; Gaba; Glutamate;
Simulation Environment: NEURON;
Model Concept(s): Simplified Models; Synaptic Plasticity; Long-term Synaptic Plasticity; Reinforcement Learning; Reward-modulated STDP;
Implementer(s): Neymotin, Sam [samn at neurosim.downstate.edu]; Chadderdon, George [gchadder3 at gmail.com];
Search NeuronDB for information about:  GabaA; AMPA; NMDA; Dopamine; Gaba; Glutamate;
/
arm1d
README
drspk.mod *
infot.mod *
intf6_.mod *
intfsw.mod *
misc.mod *
nstim.mod *
stats.mod *
updown.mod *
vecst.mod *
arm.hoc
basestdp.hoc
col.hoc *
colors.hoc *
declist.hoc *
decmat.hoc *
decnqs.hoc *
decvec.hoc *
default.hoc *
drline.hoc *
filtutils.hoc *
geom.hoc
grvec.hoc *
hinton.hoc *
infot.hoc *
init.hoc
intfsw.hoc *
labels.hoc *
local.hoc *
misc.h *
mosinit.hoc
network.hoc
nload.hoc
nqs.hoc *
nqsnet.hoc *
nrnoc.hoc *
params.hoc
run.hoc
samutils.hoc *
sense.hoc *
setup.hoc *
sim.hoc
simctrl.hoc *
stats.hoc *
stim.hoc
syncode.hoc *
units.hoc *
xgetargs.hoc *
                            
// $Id: sense.hoc,v 1.21 2011/11/22 22:55:23 samn Exp $ 

//* proprioceptive inputs
// Globals (via project declare() helper):
//   stimnq  - NQS table of drive signals for DP (proprioceptive) cells
//   DPseed  - random seed for DP threshold/drive randomization (default 9658)
//   DPdt    - playback timestep (ms) for DP threshold vectors
//   ldp     - List keeping per-cell threshold Vectors alive while played
//   minDPr, maxDPr - range for random DP drive/threshold values
declare("stimnq","o[1]","DPseed",9658,"DPdt",1,"ldp","o[1]","minDPr",0,"maxDPr",1)

// 1 => give each DP cell a time-varying random threshold vector (see setDPrand);
// 0 => give each DP cell a single fixed uniform random threshold
rdmthresh_DRSPK = 1

//* mkstimnq([seed]) - make NQS with inputs to DP(proprioceptive) cells
//* mkstimnq([seed]) - make NQS with inputs to DP(proprioceptive) cells
// Builds stimnq with 4 columns ("eflex","eext","wflex","wext") of angle-derived
// drive signals, plays each DP cell its column (selected by xo.zloc) into
// xo.drive, and plays a Poisson-derived random vector into xo.rand.
// $1 (optional) = random seed, default 9658 (same as DPseed default).
proc mkstimnq () { local ii,se,a localobj xo,rdm,ind,vr
  if(numarg()>0) se=$1 else se=9658
  {nqsdel(stimnq) stimnq=new NQS("eflex","eext","wflex","wext")} // discard old table, make fresh one
  rdm=new Random() rdm.ACG(se) a=allocvecs(ind,vr) // ind,vr are pooled temp Vectors; released by dealloc(a)
  ind.indgen(0,PI/2,0.01) // angle ramp 0..pi/2 in 0.01 rad steps
  // 11 back-and-forth sweeps (ramp followed by its reverse) for column 0
  for ii=0,10 {stimnq.v[0].append(ind) stimnq.v[0].append(ind.c.reverse)}
  stimnq.v[1].copy(stimnq.v[0]) // column 1 starts as a copy of column 0 (differs only by sin vs cos below)
  ind.indgen(PI/4,3*PI/4,0.01) // recording at 1e-3 but play back at 0.3
  for ii=0,10 {stimnq.v[2].append(ind) stimnq.v[2].append(ind.c.reverse)}
  stimnq.v[3].copy(stimnq.v[2]) // column 3 mirrors column 2
  for case(&ii,0,2) stimnq.v(ii).apply("sin") // flexor columns: sin(angle)
  for case(&ii,1,3) stimnq.v(ii).apply("cos") // extensor columns: cos(angle)
  for ii=0,2 stimnq.v(ii).scale(0.1,0.5) // NOTE(review): only columns 0-2 are rescaled; column 3 ("wext") is skipped -- looks like an off-by-one (ii=0,3?) given the symmetric construction above; confirm intent
  for ltr(xo,cedp) stimnq.v[xo.zloc].play(&xo.drive, 0.6) // play each DP cell its column at 0.6 ms intervals
  rdm.poisson(8) // switch generator to Poisson(mean 8) for the random drives below
  vr.resize(tstop)
  for ltr(xo,cedp,&ii) {
    vr.setrand(rdm)
    vr.scale(minDPr,maxDPr)
    vr.play(&xo.rand, 0.6) // NOTE(review): the SAME Vector object vr is re-filled and played for every cell, so at playback time all cells would see the contents from the last setrand; if per-cell independent streams were intended, each cell needs its own Vector (cf. ldp usage in setDPrand) -- confirm
  }
  dealloc(a) // return pooled temp Vectors
}

//* setDPrand - set random thresholds for DP cells
//* setDPrand([seed]) - set random thresholds for DP cells
// If rdmthresh_DRSPK is set: gives each DP cell its own time-varying threshold
// Vector (normal(25,10) samples rescaled to [1.5,50]) played into
// xo.drspk.rand at DPdt intervals; the Vectors are stored in ldp so they stay
// alive while being played. Otherwise: assigns each cell one fixed
// uniform(minDPr,maxDPr) value to both drspk.thresh and drspk.rand.
// $1 (optional) = random seed, default 9658.
proc setDPrand () { local ii,se,a localobj xo,rdm,vr,vec
  if(numarg()>0) se=$1 else se=9658
  if(rdmthresh_DRSPK) {
    // NOTE(review): rdm.poisson(8) here is overridden by rdm.normal(25,10)
    // on every loop iteration below, so it appears to have no effect; the
    // locals a and vec are also unused -- likely leftovers
    ldp=new List() rdm=new Random() rdm.ACG(se) rdm.poisson(8)
  } else {rdm=new Random() rdm.ACG(se)}
  for ltr(xo,cedp,&ii) {
   if(rdmthresh_DRSPK) {
     rdm.normal(25,10)
     ldp.append(vr=new Vector(tstop / DPdt + 1)) // one Vector per cell, kept in ldp so play() source persists
     vr.setrand(rdm)
//     vr.scale(minDPr,maxDPr)
     vr.scale(1.5,50) // rescale normal samples into [1.5,50]
     vr.play(&xo.drspk.rand,DPdt)
   } else {
     // single fixed random value; hoc chained assignment puts it in both fields
     xo.drspk.thresh = xo.drspk.rand = rdm.uniform(minDPr,maxDPr)
   }
  }
}

//* func calls

// Initialize DP cell thresholds at load time using the shared DPseed;
// note mkstimnq() is NOT called here -- callers invoke it separately
setDPrand(DPseed)


Loading data, please wait...