In silico hippocampal modeling for multi-target pharmacotherapy in schizophrenia (Sherif et al 2020)

Accession: 258738
"Using a hippocampal CA3 computer model with 1200 neurons, we examined the effects of alterations in NMDAR, HCN (Ih current), and GABAAR on information flow (measured with normalized transfer entropy), and in gamma activity in local field potential (LFP). We found that altering NMDARs, GABAAR, Ih, individually or in combination, modified information flow in an inverted-U shape manner, with information flow reduced at low and high levels of these parameters. Theta-gamma phase-amplitude coupling also had an inverted-U shape relationship with NMDAR augmentation. The strong information flow was associated with an intermediate level of synchrony, seen as an intermediate level of gamma activity in the LFP, and an intermediate level of pyramidal cell excitability"
Reference:
1 . Sherif MA, Neymotin SA, Lytton WW (2020) In silico hippocampal modeling for multi-target pharmacotherapy in schizophrenia. NPJ Schizophr 6:25 [PubMed]
Model Information
Model Type: Realistic Network;
Brain Region(s)/Organism: Hippocampus;
Cell Type(s): Hippocampus CA3 pyramidal GLU cell; Hippocampus CA3 interneuron basket GABA cell; Hippocampus CA3 stratum oriens lacunosum-moleculare interneuron;
Channel(s): I h;
Gap Junctions:
Receptor(s): AMPA; NMDA;
Gene(s): NR2A GRIN2A;
Transmitter(s): Glutamate; GABA;
Simulation Environment: NEURON;
Model Concept(s): Schizophrenia;
Implementer(s): Sherif, Mohamed [mohamed.sherif.md at gmail.com];
/
CA3modelCode_npjSchizophrenia_September2020--main
data
README.md
CA1ih.mod
CA1ika.mod *
CA1ikdr.mod *
CA1ina.mod *
cagk.mod *
caolmw.mod *
capr.mod *
expsynstdp.mod
Gfluctp.mod *
HCN1.mod *
HCN2.mod
IA.mod
icaolmw.mod *
icapr.mod *
iholmkop.mod *
iholmw.mod *
ihpyrkop.mod *
ihstatic.mod *
infot.mod *
kahppr.mod *
kaolmkop.mod *
kapyrkop.mod *
kcaolmw.mod *
kcpr.mod *
kdrbwb.mod *
kdrolmkop.mod *
kdrpr.mod *
kdrpyrkop.mod *
km.mod
misc.mod *
MyExp2Syn.mod *
MyExp2SynAlpha.mod *
MyExp2SynBB.mod *
MyExp2SynNMDA.mod *
MyExp2SynNMDABB.mod *
nafbwb.mod *
nafolmkop.mod *
nafpr.mod *
nafpyrkop.mod *
samnutils.mod
sampen.mod
stats.mod
updown.mod *
vecst.mod *
wrap.mod *
analysisPlottingCode.py
aux_fun.inc *
batch.py
conf.py
declist.hoc *
decmat.hoc *
decnqs.hoc *
decvec.hoc *
default.hoc *
drline.hoc *
fig1sample.png
fig1simulationConfig.cfg
geom.py
grvec.hoc *
init.hoc
labels.hoc *
local.hoc *
misc.h
network.py
nqs.hoc *
nqs_utils.hoc *
nrnoc.hoc *
params.py
psd.py
pyinit.py
pywrap.hoc *
run.py
runone.py
simctrl.hoc *
stats.hoc *
syncode.hoc *
updown.hoc
xgetargs.hoc *
                            
// $Id: stats.hoc,v 1.7 2012/07/20 15:09:22 samn Exp $ 

print "Loading stats.hoc..."

//based on code from:
//http://pdos.csail.mit.edu/grid/sim/capacity-ns.tgz/capacity-sim/new-ns/
//hoc template for sampling from a Pareto (power-law) distribution
//construct with: rd = new rdmpareto($1=avg,$2=shape,[$3=seed])
//then draw single values with .pick()/.repick(), or fill a Vector with .assignv(vec)
//(a brief usage sketch follows the template definition below)
begintemplate rdmpareto
public avg,shape,rd,seed,pick,repick,paretoc,pareto5,assignv,reset,pareto4,pareto3
double avg[1],shape[1],seed[1]
objref rd
proc init () {
  avg=$1 shape=$2
  if(numarg()>2)seed=$3 else seed=1234
  rd=new Random()
  rd.ACG(seed)
}
proc reset () {
  rd.ACG(seed)
}
// paretoc(scale,shape): inverse-CDF draw from a Pareto distribution with minimum
// value 'scale' and tail exponent 'shape': X = scale / U^(1/shape), U ~ Uniform(0,1)
func paretoc () { local scale,shape,U
  scale=$1 shape=$2 U = rd.uniform(0,1)
  return scale * (1.0/ U^(1/shape) )
}
// pareto5(avg,shape): Pareto draw with mean 'avg'; since the Pareto mean is
// scale*shape/(shape-1) (for shape>1), the scale is set to avg*(shape-1)/shape
func pareto5 () { local avg,shape
  avg=$1 shape=$2
  return paretoc( avg * (shape -1)/shape, shape)
}
func pareto4 () { local alpha,u
  alpha=$2
  u = 1 - rd.uniform(0,1)
  return $1 + 1 / u^(1/alpha)
}
func pareto3 () { local x,z,b,a
  b = avg // 1 //min value
  a = shape // 10
  x = rd.uniform(0,1)
  z = x^(-1/a) // parentheses needed: x^-1/a parses as (x^-1)/a rather than the Pareto draw x^(-1/a)
  return 1 + b * z
}
func pick () {
  return pareto5(avg,shape)
}
func repick () {
  return pick()
}
func assignv () { local i localobj vi
  vi=$o1
  for i=0,vi.size-1 vi.x(i)=pick()
  return vi.size // number of values assigned
}
endtemplate rdmpareto
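//** usage sketch with illustrative (hypothetical) parameter values: draw
// Pareto-distributed values with the rdmpareto template above; avg=2, shape=1.5,
// seed=4321 are arbitrary, and the heavy tail means the sample mean converges to avg only slowly
objref rpdemo, vpdemo
rpdemo = new rdmpareto(2, 1.5, 4321)
vpdemo = new Vector(1000)
for ii=0,vpdemo.size-1 vpdemo.x(ii)=rpdemo.pick()
print "pareto sample mean (expect ~2): ", vpdemo.mean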

//* skew(nqs,"column name") - skewness of the named column of an NQS table
// (the Vector moment() method fills v1 with summary statistics; index 4 is taken as the skewness)
func skew () { local a,ret localobj v1
  a=allocvecs(v1)
  $o1.getcol($s2).moment(v1)
  ret=v1.x[4]
  dealloc(a)
  return ret
}

//* skewv(vec) - skewness of the values in a Vector
func skewv () { localobj v1
  v1=new Vector(5)
  $o1.moment(v1)
  return v1.x(4)
}


//** test of Vector.rsampsig (resampling/label-shuffling significance test):
// rsi() builds two normally distributed groups from the parameters below,
// hocmeasure() and compfunc() are the callbacks passed by name to rsampsig,
// and testrs() runs the test and compares its p-value against a t-test
objref vIN0,vIN1,vhsout,myrdm,vrs,VA
R0SZ=30000//size of group 0
R1SZ=30000//size of group 1
RPRC=100 // # of trials (combinations)
RS0M=0 //mean of group 0
RS1M=0 //mean of group 1
RS0SD=1 //sdev of group 0
RS1SD=1 //sdev of group 1
proc rsi () {
  if(myrdm==nil) myrdm=new Random()  
  {myrdm.normal(RS0M,RS0SD) vIN0=new Vector(R0SZ) vIN0.setrand(myrdm)}  
  {myrdm.normal(RS1M,RS1SD) vIN1=new Vector(R1SZ) vIN1.setrand(myrdm)}
  vhsout=new Vector(vIN0.size+vIN1.size)
  if(RPRC>1){
    vrs=new Vector(RPRC)
  } else {
    vrs=new Vector(combs_stats(R0SZ+R1SZ,mmax(R0SZ,R1SZ))*RPRC)
  }
  VA=new Vector()  VA.copy(vIN0) VA.append(vIN1)
}
func hocmeasure () {
  hretval_stats=vhsout.mean
  return vhsout.mean
}
func compfunc () {
  if(verbose_stats>1) printf("$1=%g,$2=%g\n",$1,$2)
  hretval_stats=$1-$2
  return hretval_stats
}
onesided=0
nocmbchk=1
pval=tval=0
func testrs () { local dd localobj str
  if(numarg()>0)dd=$1 else dd=1
  str=new String()
  rsi()
  vhsout.resize(vIN0.size+vIN1.size)
  pval=vrs.rsampsig(vIN0,vIN1,RPRC,"hocmeasure","compfunc",vhsout,onesided,nocmbchk)
  tval=ttest(vIN0,vIN1)
  if(dd){
    sprint(str.s,"p(abs(m0-m1))>%g=%g, t=%g, e=%g",abs(vIN0.mean-vIN1.mean),pval,tval,abs(pval-tval)/tval)
    {ge() ers=0 clr=1 hist(g,VA) clr=2  hist(g,vIN0) clr=3  hist(g,vIN1) g.label(0,0.95,str.s)}
    sprint(str.s,"m0=%g, m1=%g, n0=%g, n1=%g, s0=%g, s1=%g",vIN0.mean,vIN1.mean,vIN0.size,vIN1.size,vIN0.stdev,vIN1.stdev)
    g.label(0.0,0.0,str.s)
    sprint(str.s,"m0-m1=%g",vIN0.mean-vIN1.mean)
    g.label(0,0.9,str.s)
    g.exec_menu("View = plot")
  }
  printf("pval=%g, tval=%g, err=%g\n",pval,tval,abs(pval-tval)/tval)
  return pval
}

//* nhppvec(intensityvec,dt,maxt[,se])
// returns a Vector of spike times generated by a nonhomogeneous Poisson process
// whose intensity function is sampled in intensityvec with time-step dt, up to
// maximum time maxt; se is the seed for the random number generator
// the algorithm used is 'thinning'
obfunc nhppvec () { local i,dt,tt,maxt,maxi,se,tidx localobj tvec,ivec,rdm
  tvec=new Vector(100e3) tvec.resize(0) // preallocate storage for the spike times
  ivec=$o1 dt=$2 maxt=$3
  if(numarg()>3)se=$4 else se=1234
  rdm=new Random()
  rdm.ACG(se)
  tt=0
  maxi=ivec.max // upper bound on the intensity = rate of the homogeneous candidate process
  while(tt<maxt) {
    tt = tt - 1.0/maxi * log(rdm.uniform(0,1)) // next candidate spike time (rate-maxi Poisson)
    tidx = tt / dt
    if(tidx >= ivec.size) break
    if(rdm.uniform(0,1) <= ivec.x(tidx) / maxi) { // accept with probability intensity(tt)/maxi
      tvec.append(tt)
    }
  }
  return tvec
}
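//** usage sketch with illustrative (hypothetical) values: draw spike times from a
// step-like rate function sampled at dt = 1 ms; rates are in spikes/ms so that they
// match the ms time units used for tt and maxt
objref vratedemo, vspkdemo
vratedemo = new Vector(1000)
{vratedemo.fill(0.02) vratedemo.fill(0.08, 500, 999)} // 20 Hz baseline, 80 Hz in the second half
vspkdemo = nhppvec(vratedemo, 1, 1000, 4321)          // dt=1 ms, maxt=1000 ms, seed=4321
print "number of spikes generated: ", vspkdemo.size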

//* cvpsync(vector of spike times, N == number of neurons)
// calculates spike-train synchrony over a population of N neurons, as proposed by
// Tiesinga & Sejnowski in /u/samn/papers/nc_16_251.pdf
// returns a number ranging from 0 to 1, with 0 for asynchronous activity and 1 for synchronous
func cvpsync () { local i,cvp,N,X,X2,sz,a localobj vt,vint
  a=allocvecs(vt,vint)
  {vt.copy($o1) vt.sort() vint.resize(vt.size) vint.resize(0) N = $2}
  if(N < 1) return 0
  sz = vt.size()
  if(sz < 2) return 0
  vint.deriv(vt,1,1) // ISIs (interspike intervals) of the pooled, sorted spike train
  X = vint.sum()/sz  // mean ISI
  if(X <= 0) return 0
  X2 = vint.sumsq()/sz // mean squared ISI
  cvp = sqrt( X2 - X^2 ) / X // coefficient of variation (CV) of the pooled ISIs
  dealloc(a)
  return (cvp - 1.0) / sqrt(N) // CV is ~1 for asynchronous (Poisson-like) firing, ~sqrt(N) for coincident firing
}
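//** usage sketch with illustrative (hypothetical) values: pooled spike times from
// N=50 cells that all fire together at t=100,200,...,1000 ms should give a value
// near 1, whereas asynchronous (Poisson-like) pooled spiking gives a value near 0
objref vsyncdemo
vsyncdemo = new Vector()
for ii=0,9 for jj=0,49 {vsyncdemo.append(100*(ii+1))}
print "synchrony of coincident spikes (expect ~1): ", cvpsync(vsyncdemo, 50)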