In silico hippocampal modeling for multi-target pharmacotherapy in schizophrenia (Sherif et al 2020)

 Download zip file 
Help downloading and running models
Accession:258738
"Using a hippocampal CA3 computer model with 1200 neurons, we examined the effects of alterations in NMDAR, HCN (Ih current), and GABAAR on information flow (measured with normalized transfer entropy), and in gamma activity in local field potential (LFP). We found that altering NMDARs, GABAAR, Ih, individually or in combination, modified information flow in an inverted-U shape manner, with information flow reduced at low and high levels of these parameters. Theta-gamma phase-amplitude coupling also had an inverted-U shape relationship with NMDAR augmentation. The strong information flow was associated with an intermediate level of synchrony, seen as an intermediate level of gamma activity in the LFP, and an intermediate level of pyramidal cell excitability"
Reference:
1 . Sherif MA, Neymotin SA, Lytton WW (2020) In silico hippocampal modeling for multi-target pharmacotherapy in schizophrenia. NPJ Schizophr 6:25 [PubMed]
Model Information (Click on a link to find other models with that property)
Model Type: Realistic Network;
Brain Region(s)/Organism: Hippocampus;
Cell Type(s): Hippocampus CA3 pyramidal GLU cell; Hippocampus CA3 interneuron basket GABA cell; Hippocampus CA3 stratum oriens lacunosum-moleculare interneuron;
Channel(s): I h;
Gap Junctions:
Receptor(s): AMPA; NMDA;
Gene(s): NR2A GRIN2A;
Transmitter(s): Glutamate; Gaba;
Simulation Environment: NEURON;
Model Concept(s): Schizophrenia;
Implementer(s): Sherif, Mohamed [mohamed.sherif.md at gmail.com];
Search NeuronDB for information about:  Hippocampus CA3 pyramidal GLU cell; Hippocampus CA3 interneuron basket GABA cell; AMPA; NMDA; I h; Gaba; Glutamate;
/
CA3modelCode_npjSchizophrenia_September2020--main
data
README.md
CA1ih.mod
CA1ika.mod *
CA1ikdr.mod *
CA1ina.mod *
cagk.mod *
caolmw.mod *
capr.mod *
expsynstdp.mod
Gfluctp.mod *
HCN1.mod *
HCN2.mod
IA.mod
icaolmw.mod *
icapr.mod *
iholmkop.mod *
iholmw.mod *
ihpyrkop.mod *
ihstatic.mod *
infot.mod *
kahppr.mod *
kaolmkop.mod *
kapyrkop.mod *
kcaolmw.mod *
kcpr.mod *
kdrbwb.mod *
kdrolmkop.mod *
kdrpr.mod *
kdrpyrkop.mod *
km.mod
misc.mod *
MyExp2Syn.mod *
MyExp2SynAlpha.mod *
MyExp2SynBB.mod *
MyExp2SynNMDA.mod *
MyExp2SynNMDABB.mod *
nafbwb.mod *
nafolmkop.mod *
nafpr.mod *
nafpyrkop.mod *
samnutils.mod
sampen.mod
stats.mod
updown.mod *
vecst.mod *
wrap.mod *
analysisPlottingCode.py
aux_fun.inc *
batch.py
conf.py
declist.hoc *
decmat.hoc *
decnqs.hoc *
decvec.hoc *
default.hoc *
drline.hoc *
fig1sample.png
fig1simulationConfig.cfg
geom.py
grvec.hoc *
init.hoc
labels.hoc *
local.hoc *
misc.h
network.py
nqs.hoc *
nqs_utils.hoc *
nrnoc.hoc *
params.py
psd.py
pyinit.py
pywrap.hoc *
run.py
runone.py
simctrl.hoc *
stats.hoc *
syncode.hoc *
updown.hoc
xgetargs.hoc *
                            
'''contain functions in python to deal with data from mtlhpc model'''

import numpy
#from mtspec import *
#from neuron import h, rxd, gui # rxd doesn't support threads - simulation in the current state is multi-threaded
from neuron import h, gui

from matplotlib import pyplot as mp
mp.ion() # turn interactive plotting on

import numpy as np
import h5py
# execfile('/usr/site/nrniv/local/python/nqs.py')
from subprocess import check_output
import psd


def powerSum(lfp, sampr, freqLow, freqHigh, numOfSlices, slice_dur, start = 0):
    '''Return a hoc Vector with the summed spectral power in [freqLow, freqHigh]
    for each of numOfSlices consecutive time slices of signal lfp.

    lfp -- hoc Vector holding the LFP signal
    sampr -- sampling rate (Hz)
    freqLow, freqHigh -- frequency band (Hz) over which power is summed
    numOfSlices -- number of consecutive slices to analyze
    slice_dur -- duration of each slice (seconds)
    start -- time offset (seconds) at which the first slice begins

    BUGFIX: the slice offset was computed as (i+start)*slice_dur, which treats
    start as a slice count rather than seconds as documented. It is now
    start*sampr, truly in seconds; behavior is unchanged for the default
    start=0 used by every caller in this file.
    '''
    sumOfFreq_list = []
    sumOfFreq_vec = h.Vector()
    offset = start * sampr          # first sample index of slice 0
    samplesPerSlice = slice_dur * sampr
    for i in range(numOfSlices):
        # cut out one zero-mean time slice of length slice_dur
        lfpSlice = h.Vector()
        lfpSlice.copy(lfp, offset + i*samplesPerSlice, offset + (i+1)*samplesPerSlice - 1)
        lfpSlice.sub(lfpSlice.mean())
        # multitaper PSD of the slice: NQS with columns "f" and "pow"
        PSD_tslice = h.pypmtm(lfpSlice, sampr)
        # sum the power over the selected frequency band
        PSD_tslice.select("f", "()", freqLow, freqHigh)
        sumOfFreq_list.append(PSD_tslice.getcol("pow").sum())
        PSD_tslice.tog()
    sumOfFreq_vec.from_python(sumOfFreq_list)
    return sumOfFreq_vec

def pravgratesTime(fileName, tstart, tstop):
    '''fileName is the NQS file generated by minrunsv; compute and print the
    average firing rate (Hz per cell) of each cell population between tstart
    and tstop (seconds). Population sizes (800 pyr, 200 basket, 200 OLM) are
    hard-coded to match the network. Spike times in the NQS are milliseconds.'''
    nqs = h.NQS(PATH + fileName)
    duration = tstop - tstart  # in seconds
    rates = {}
    # count the spikes of each cell type inside the [tstart, tstop] window
    for label, ty, ncells in (('pyramidal', 0, 800), ('basket', 1, 200), ('olm', 2, 200)):
        nspikes = nqs.select("t", "[]", tstart*1000, tstop*1000, "ty", ty)
        nqs.tog()
        rates[label] = nspikes / duration / ncells
    print ("pyramidal rate:", rates['pyramidal'])
    print ("basket rate:", rates['basket'])
    print ("olm rate:", rates['olm'])


def pravgratesTimeRun(tstart, tstop):
    '''Calculate and print the average firing rates of each cell population
    between tstart and tstop (milliseconds), using the spike NQS generated by
    net.setsnq() - STILL IN PROGRESS.

    Relies on module-level globals net, includeCCKcs, calcPrintAvgRates and the
    *Pop_cNum counts -- presumably set by the simulation scripts (TODO confirm).

    BUGFIX: the presence check used a bare "except:" (which also swallows
    KeyboardInterrupt/SystemExit); it now catches the specific errors a
    missing/invalid net.snq can raise.
    '''
    try:
        nqs = h.NQS(net.snq)
    except (AttributeError, RuntimeError):
        net.setsnq()
        nqs = h.NQS(net.snq)
    # select rows per cell type; select() returns the number of matching rows
    pyr_fire = nqs.select("t", "[]", tstart, tstop, "ty",0)
    nqs.tog('DB')
    bas_fire = nqs.select("t", "[]", tstart, tstop, "ty", 1)
    nqs.tog('DB')
    olm_fire = nqs.select("t", "[]", tstart, tstop, "ty", 2)
    nqs.tog('DB')
    if includeCCKcs:
        cckSoma_fire = nqs.select("t", "[]", tstart, tstop, "ty", 3)
        nqs.tog('DB')
        cckAdend2Pyr_fire = nqs.select("t", "[]", tstart, tstop, "ty", 4)
        nqs.tog('DB')
    else:
        cckSoma_fire = 0
        cckAdend2Pyr_fire = 0
    calcPrintAvgRates(pyr_fire, bas_fire, olm_fire, cckSoma_fire, cckAdend2Pyr_fire, pyrPop_cNum, basPop_cNum, olmPop_cNum, cck_somaPyrPop_cNum, cck_Adend2PyrPop_cNum, tstart, tstop)


def calcCellAvgRateLoaded(myfile, tstart, tstop, celltype):
    '''Compute the average firing rate (Hz per cell) of one cell type from a
    saved simulation.

    myfile -- h5py file generated by saveSimH5py (needs the
              myfile['spikesnqs']['t'/'ty'] datasets and the population-count
              attributes in myfile.attrs)
    tstart, tstop -- analysis window in milliseconds
    celltype -- 0: pyr, 1: bas, 2: olm, 3: cckSomaPyr, 4: cckAdend2Pyr

    Returns the rate in Hz, or None (with a message) when CCK cells are
    requested but were not included in the saved model.
    Raises ValueError for an unknown celltype (previously cellPop_cNum stayed
    unbound and the function crashed with NameError).
    '''
    if celltype in (3, 4):
        if myfile.attrs['includeCCKcs']:
            if celltype == 3:
                cellPop_cNum = myfile.attrs['cck_somaPyrPop_cNum']
            else:
                cellPop_cNum = myfile.attrs['cck_Adend2PyrPop_cNum']
        else:
            print ('There are no CCK cells included in the model')
            return
    elif celltype == 0:
        cellPop_cNum = myfile.attrs['pyrPop_cNum']
    elif celltype == 1:
        cellPop_cNum = myfile.attrs['basPop_cNum']
    elif celltype == 2:
        cellPop_cNum = myfile.attrs['olmPop_cNum']
    else:
        raise ValueError('unknown celltype: %r' % (celltype,))
    duration = (tstop - tstart) / 1000. # in seconds
    # boolean mask of spikes strictly inside the (tstart, tstop) window;
    # read the time dataset once instead of twice
    spikeTimes = myfile['spikesnqs']['t'][()]
    timeIntervalIndex = (spikeTimes > tstart) & (spikeTimes < tstop)
    cell_numSpikes = np.sum(myfile['spikesnqs']['ty'][timeIntervalIndex] == celltype)
    cell_rate = cell_numSpikes / duration / cellPop_cNum
    return cell_rate


def readVecSpect(fileName, subtractMean = 1):
    '''Read a saved hoc Vector file (relative to PATH) and plot its
    spectrogram; intended for interactive use from iPython.'''
    import spectrogram as spect
    # load the saved LFP vector
    lfp_file = h.File()
    lfp_file.ropen(PATH + fileName)
    vec_lfp = h.Vector()
    vec_lfp.vread(lfp_file)
    if subtractMean == 1:
        vec_lfp = vec_lfp.sub(vec_lfp.mean())  # zero-mean the signal
    spect.plotspectrogram(numpy.array(vec_lfp), 10000, 1, 100, 3, 3)

def PSDslices(fileName, start_1, stop_1, start_2, stop_2, start_3, stop_3, subtractMean = 1, sampr = 10000 ):
    '''Load an LFP vector file (relative to PATH), optionally subtract its
    mean, extract three time slices (start/stop pairs given in seconds) and
    plot their PSDs on one hoc Graph with distinct colors and labels.

    BUGFIX: slice indices previously used a hard-coded 10e3 samples/second
    even though sampr is a parameter; they now use sampr. Behavior is
    identical at the default sampr=10000 (== 10e3).
    '''
    lfp_file = h.File()
    lfp_file.ropen(PATH + fileName) # open LFP file
    vec_lfp = h.Vector()
    vec_lfp.vread(lfp_file)
    if subtractMean == 1:
        vec_lfp.sub(vec_lfp.mean())
    g = h.Graph()
    labels = ("PSD before CPP", "PSD during CPP", "PSD after CPP")
    windows = ((start_1, stop_1), (start_2, stop_2), (start_3, stop_3))
    # one slice per window; hoc color index doubles as the label/legend color
    for color, (start, stop) in enumerate(windows, start=1):
        mySlice = h.Vector()
        mySlice.copy(vec_lfp, start * sampr, (stop * sampr) - 1)
        slicePSD = h.pypmtm(mySlice, sampr)
        g.label(0.7, 1.0 - 0.1 * color, labels[color - 1], 2, 1, 0, 0, color)
        slicePSD.gr("pow", "f", g, color, 2)
def pypmtm(vec, samplingRate,nw =4):
    '''code from pywrap.hoc, returns nqs with power and freq to plot PSD. vec is hoc vector
    (or could be numpy array)

    NOTE(review): relies on mtspec() from the mtspec package, whose import is
    commented out at the top of this file -- calling this as-is raises
    NameError unless mtspec has been imported elsewhere.'''
    nArr = numpy.array(vec)
    spc = 1.0 / samplingRate #spacing between samples (seconds)
    [Pxx, w] = mtspec(nArr, spc, nw)  # multitaper estimate: power Pxx at frequencies w
    nqp = h.NQS("f","pow")
    nqp.v[0].from_python(w)
    nqp.v[1].from_python(Pxx)
    return nqp

def npypmtm(vec, samplingRate,nw =4):
    '''code from pywrap.hoc, returns numpy array for freq and numpy array for power
    to plot PSD. vec is hoc vector (or could be numpy array)

    NOTE(review): depends on mtspec() from the mtspec package, whose import is
    commented out at the top of this file -- calling this raises NameError
    unless mtspec has been imported elsewhere.'''
    print ('calculating PSD...')
    nArr = numpy.array(vec)
    spc = 1.0 / samplingRate #spacing between samples (seconds)
    [Pxx, w] = mtspec(nArr, spc, nw)  # multitaper estimate: power Pxx at frequencies w
    return w, Pxx#return freq as first element, power as 2nd element

def mulFilesPsdCalc():
    '''will return 2 nqs (before and after CPP). The nqs is 26 columns in size.
    First column contains frequency, while the rest of 25 contains amplitude data obtained from
    batch run. You can change: lists of random seeds to fit your lists, PATH and file name, values of NMDAR
    or AMPAR conductance (r), sampling rate, periods of time over which to calculate before and washin
    periods

    NOTE(review): slice indices and PSD calls assume a 10e3 (10 kHz) sampling
    rate; pypmtm is the one imported from pythonToolCode_mohdsh, which
    requires the mtspec package. Returns None early if any LFP file fails to
    open.'''
    from pythonToolCode_mohdsh import pypmtm
    # hard-coded batch parameters: data directory, the 5x5 seed grid, and the
    # NMDA/AMPA weight factors embedded in the wash-in file names
    PATH = "/u/mohdsh/Projects/CPP/mtlhpc_CPP/data/"
    liseed = [1234, 6912, 9876, 6789,3219]
    lwseed = [4321, 5012, 9281, 8130,6143]
    OLMnmda = 1 # 1 # 0
    OLMampa = 1.0 # 1.0 # 1.25
    BASnmda =  0 # 1 # 0
    BASampa = 1.25 # 1.25 # 1.0
    PYRnmda =  0 # 1 # 0
    PYRampa = 1.25 # 1.0 # 1.25
    nq_before = h.NQS(26)
    nq_washin = h.NQS(26)
    print ("Calculating...")
    i = 1 #count for location in nq containing powers of various seeds, since col 0 contains freq
    for iseed in liseed:
        for wseed in lwseed:
                #make the string
                fileName = PATH + '12mar28_cppWashInOut_15_sec_iseed_' + str(iseed) + '_wseed_'\
                + str(wseed) + '_washOLM_NMDAr_' + str(OLMnmda) + '_washOLM_AMPAfr_' + str(OLMampa)\
                + '_washBAS_NMDAr_' + str(BASnmda) + '_washBAS_AMPAfr_' + str(BASampa) \
                + '_washPYR_NMDAr_' + str(PYRnmda) + '_washPYR_AMPAfr_' + str(PYRampa) + '_lfp.vec'
                lfp_file = h.File()
                lfp_file.ropen(fileName) #open LFP file
                if not lfp_file.isopen(): 
                    print ("could not open the file, check fileName string")
                    return
                #load file into vector
                vec_lfp = h.Vector()
                vec_lfp.vread(lfp_file)
                vec_lfp.sub(vec_lfp.mean())  # zero-mean the LFP
                #extract lfp before CPP (seconds 10-15 at 10 kHz)
                vec_before = h.Vector()
                vec_before.copy(vec_lfp, 10*10e3, (15*10e3)-1)
                psd_before = pypmtm(vec_before, 10e3)
                nq_before.v[i].copy(psd_before.v[1]) #will copy power vectors into nq starting with column 1. Column zero is frequency
                #extract lfp during washin period (seconds 25-30)
                vec_washin = h.Vector()
                vec_washin.copy(vec_lfp, 25*10e3, (30*10e3)-1)
                psd_washin = pypmtm(vec_washin, 10e3)
                nq_washin.v[i].copy(psd_washin.v[1])
                i += 1

    nq_before.v[0].copy(psd_before.v[0]) #copy freq vector, they are all identical so one will suffice
    nq_washin.v[0].copy(psd_washin.v[0]) #copy freq vector, they are all identical so one will suffice
    return nq_before, nq_washin
def mulFilesPsdPlot(nqPSD, g):
    '''Plot every power column of an NQS produced by mulFilesPsdCalc on hoc
    Graph g; column 0 holds the frequency axis, so plotting starts at column 1
    and cycles through the hoc color indices.'''
    # label example: g.label(x, y, "string", fixtype, scale, x_align, y_align, color)
    ncols = int(nqPSD.getrow(0).size())
    for col in range(1, ncols):
        # rotate colors from black through violet once index 7 is reached
        color = col if col < 7 else (col % 7) + 1
        nqPSD.v[col].plot(g, nqPSD.v[0], color, 1)
def batcheSingleNQSPsdCalc(from_sec, to_sec, batchDateString = '12mar28'):
    '''similar to mulFilesPsdCalc, but will return a single NQS generated from the batch
    string given as a parameter. Helpful to compare files with same settings from different batches

    from_sec / to_sec -- LFP time window (seconds) used for the PSD.
    Returns an NQS of 26 columns: column 0 holds frequency, columns 1-25 the
    power spectra of the 25 iseed/wseed simulations. Returns None early if any
    LFP file cannot be opened. NOTE(review): indices assume 10 kHz sampling;
    pypmtm requires the mtspec package.'''
    from pythonToolCode_mohdsh import pypmtm
    # hard-coded batch parameters: data directory, the 5x5 seed grid, and the
    # NMDA/AMPA weight factors embedded in the wash-in file names
    PATH = "/u/mohdsh/Projects/CPP/mtlhpc_CPP/data/"
    liseed = [1234, 6912, 9876, 6789,3219]
    lwseed = [4321, 5012, 9281, 8130,6143]
    OLMnmda = 1 # 1 # 0
    OLMampa = 1.0 # 1.0 # 1.25
    BASnmda =  0 # 1 # 0
    BASampa = 1.25 # 1.25 # 1.0
    PYRnmda =  0 # 1 # 0
    PYRampa = 1.25 # 1.0 # 1.25
    nqs = h.NQS(26)
    print ("Calculating...")
    i = 1 #count for location in nq containing powers of various seeds, since col 0 contains freq
    for iseed in liseed:
        for wseed in lwseed:
                #make the string
                fileName = PATH + batchDateString + '_cppWashInOut_15_sec_iseed_' + str(iseed) + '_wseed_'\
                + str(wseed) + '_washOLM_NMDAr_' + str(OLMnmda) + '_washOLM_AMPAfr_' + str(OLMampa)\
                + '_washBAS_NMDAr_' + str(BASnmda) + '_washBAS_AMPAfr_' + str(BASampa) \
                + '_washPYR_NMDAr_' + str(PYRnmda) + '_washPYR_AMPAfr_' + str(PYRampa) + '_lfp.vec'
                lfp_file = h.File()
                lfp_file.ropen(fileName) #open LFP file
                if not lfp_file.isopen(): 
                    print ("could not open the file, check fileName string")
                    return
                #load file into vector
                vec_lfp = h.Vector()
                vec_lfp.vread(lfp_file)
                vec_lfp.sub(vec_lfp.mean())  # zero-mean the LFP
                #extract the requested lfp window (indices assume 10 kHz sampling)
                vec_forNQS = h.Vector()
                vec_forNQS.copy(vec_lfp, from_sec*10e3, (to_sec*10e3)-1)
                psd_forNQS = pypmtm(vec_forNQS, 10e3)
                nqs.v[i].copy(psd_forNQS.v[1]) #will copy power vectors into nq starting with column 1. Column zero is frequency
                i += 1

    nqs.v[0].copy(psd_forNQS.v[0]) #copy freq vector, they are all identical so one will suffice
    return nqs
    
    
def mulFilesPsdSEM(nqPSD):
    '''Compute the per-frequency mean and standard error of the mean across
    the power columns of an NQS produced by mulFilesPsdCalc; return an NQS
    with columns freq, mean, SEM. NOTE: destructively removes column 0 (the
    frequency column) from nqPSD.'''
    nqSEM = h.NQS('freq', 'mean', 'SEM')
    nqSEM.v[0].copy(nqPSD.v[0])  # frequency axis copied from nqPSD
    nqSEM.pad()  # size the mean/SEM columns to match the freq column
    nqPSD.delcol(0)  # drop freq so each row holds only power values
    nrows = int(nqPSD.size())
    for row in range(nrows):
        rowvec = nqPSD.getrow(row)
        nqSEM.v[1].x[row] = rowvec.mean()
        nqSEM.v[2].x[row] = rowvec.stderr()  # standard error of the mean
    return nqSEM
def plotPsdSEM(nqPsdSEM, g, color=2):
    '''Plot the mean PSD with SEM error bars on hoc Graph g, using the NQS
    produced by mulFilesPsdSEM (columns: freq, mean, SEM). color picks the hoc
    color index for the mean trace.'''
    freq, meanVec, semVec = nqPsdSEM.v[0], nqPsdSEM.v[1], nqPsdSEM.v[2]
    meanVec.plot(g, freq, color, 1)
    meanVec.ploterr(g, freq, semVec)



    
def genFilename(initStr, drug, loc=''):
    '''Return a list of 25 simulation filename stems (one per iseed/wseed pair).

    initStr -- simulation initial string
    drug -- 'ctrl', 'ket', or 'CPP' (selects the NMDA/AMPA scale factors)
    loc -- any combination of 'pyr', 'olm', 'bas' (cell types the drug acts
           on) or '' for none; unlisted cell types keep the control factors.

    Raises ValueError for an unrecognized drug (previously nmda/ampa stayed
    unbound and the function crashed with NameError when loc was non-empty).
    Also removes the unused ldrug table from the original.
    '''
    liseed = [1234, 6912, 9876, 6789,3219]
    lwseed = [4321, 5012, 9281, 8130,6143]
    if drug == 'ctrl':
        nmda, ampa = 1.0, 1.0
    elif drug == 'ket':
        nmda, ampa = 0.0, 1.0
    elif drug == 'CPP':
        nmda, ampa = 0.0, 1.5
    else:
        raise ValueError("drug must be 'ctrl', 'ket', or 'CPP', got %r" % (drug,))
    if 'olm' in loc:
        s1 = '_'.join(('OLM_NMDAr', str(nmda), 'OLM_AMPAfr', str(ampa)))
    else:
        s1 = 'OLM_NMDAr_1.0_OLM_AMPAfr_1.0'
    if 'bas' in loc:
        s2 = '_'.join(('BAS_NMDAr', str(nmda), 'BAS_AMPAfr', str(ampa)))
    else:
        s2 = 'BAS_NMDAr_1.0_BAS_AMPAfr_1.0'
    if 'pyr' in loc:
        s3 = '_'.join(('PYR_NMDAr', str(nmda), 'PYR_AMPAfr', str(ampa)))
    else:
        s3 = 'PYR_NMDAr_1.0_PYR_AMPAfr_1.0'
    lfnames = []
    for iseed in liseed:
        for wseed in lwseed:
            lfnames.append('_'.join((initStr, 'iseed', str(iseed), 'wseed', str(wseed), s1, s2, s3)))
    return lfnames



def calcFreqVals_svPkl(initStr, drug, cellType, theta=(4,12), gamma=(30,100), dur=10):
    '''Save a pickle containing a dict with a numpy array of theta-band and a
    numpy array of gamma-band power values calculated from the simulations
    named by genFilename.

    initStr -- the sim string; drug -- 'ctrl', 'ket', or 'CPP';
    cellType -- 'pyr'/'olm'/'bas' in any combination, or ''.
    Skips the first 2 seconds of each simulation (stabilization period).
    Relies on module-level PATH and sampr -- presumably set by the simulation
    scripts (TODO confirm).

    BUGFIX: pickle was never imported anywhere in this file, so the final dump
    raised NameError; it is imported locally here. The output file is now also
    closed via a with-block instead of being left open.
    '''
    import pickle
    simNames = genFilename(initStr, drug, cellType)
    print ('working on:', initStr, drug, cellType)
    vecList = []
    for i, fn in enumerate(simNames):
        myfile = h.File()
        myfile.ropen(PATH + fn + '_lfp.vec')
        print ('file ', i, 'Open?', myfile.isopen())
        myvec = h.Vector()
        myvec.vread(myfile)
        mylfp = h.Vector()
        # leave out the first 2 seconds, during which the simulation is stabilizing
        mylfp.copy(myvec, 2*sampr, 12*sampr)
        vecList.append(mylfp)
    thetaList = []
    gammaList = []
    for lfp in vecList:
        print ('lfpDur:', lfp.size()/sampr)
        thetaList += powerSum(lfp, sampr, theta[0], theta[1], 1, dur)
        gammaList += powerSum(lfp, sampr, gamma[0], gamma[1], 1, dur)
    ctrlFreqVals = {'theta':np.array(thetaList), 'gamma':np.array(gammaList)} # dictionary of 2 numpy arrays
    pickleString = genPklStr(drug, cellType)
    print ('saving pickle to:', pickleString)
    with open(PATH+pickleString+'_FreqVals.pkl', 'wb') as pklFile:
        pickle.dump(ctrlFreqVals, pklFile)


def genPklStr(drug, loc=''):
    '''Generate the string used to name the pickles saved by calcFreqVals_svPkl.
    Cell types not affected by the drug appear as 'n'. Note that the control
    string keeps its historical trailing underscore ('ctrl_n_n_n_').'''
    if drug == 'ctrl':
        return 'ctrl_n_n_n_'
    olmPart = '_olm_' if 'olm' in loc else '_n_'
    basPart = 'bas_' if 'bas' in loc else 'n_'
    pyrPart = 'pyr' if 'pyr' in loc else 'n'
    return drug + olmPart + basPart + pyrPart

def mprasterplot(mynqs, rasterax, showInterneuron = True, *args, **kwargs):
        '''will use matplotlib.scatter to plot raster. mynqs is nqs generated by net.setsnq()

        Returns a dict keyed by cell-type name whose values hold the 'id' and
        't' numpy arrays that were scattered. If showInterneuron is True the
        y-axis is restricted to IDs >= 700 at the end.'''
        rasterax.set_title('red:pyr, green:basket, blue:olm')
        colorList = ['red', 'green', 'blue']
        cellTypes = ['pyr', 'bas', 'olm']
        # a max type index of 4 means the CCK cell populations were simulated too
        if mynqs.getcol('ty').max() ==4:
            cellTypes.append('cck_somaPyr')
            cellTypes.append('cck_Adend2Pyr')
            colorList.append('orange') # for soma-targeting cck
            colorList.append('cyan') # for dendrite-targeting cck
            rasterax.set_title(rasterax.get_title() + ', orange:cck_soma, cyan:cck_dendrite')
        colorId = 0  # NOTE(review): unused
        plotDic = {}
        # gather spike ids/times per cell type via NQS selection
        for ctype in enumerate(cellTypes):
            mynqs.tog('DB')
            if mynqs.select('ty', '==', ctype[0]):# will try to do selection (and check if there is selected part) - if not, will pass
              idVec = h.Vector()
              timeVec = h.Vector()
              mynqs.getcol('id', idVec)
              mynqs.getcol('t', timeVec)
              plotDic[ctype[1]] = {}
              plotDic[ctype[1]]['id'] = np.array(idVec)
              plotDic[ctype[1]]['t'] = np.array(timeVec)
            else: pass
        # scatter each collected cell type in its assigned color
        for ctype in enumerate(cellTypes):
            if ctype[1] in plotDic.keys():
                rasterax.scatter(plotDic[ctype[1]]['t'], plotDic[ctype[1]]['id'], s=20, c=colorList[ctype[0]], *args, **kwargs)
            else: pass
        ylim_max = max(plotDic[key]['id'].max() for key in plotDic.keys())
        rasterax.set_ylim(-20,ylim_max+20) #to avoid having negative label on the y-axis for the cell IDs
        xlim_max = max(plotDic[key]['t'].max() for key in plotDic.keys())  # NOTE(review): computed but unused
        rasterax.set_xlabel('time (msec)')
        rasterax.set_ylabel('cell ID')
        mp.draw()
        if showInterneuron: rasterax.set_ylim(700, )
        return plotDic


def mprasterplotH5py(mygroup, rasterax, killms=0, showInterneuron = True, *args, **kwargs):
        '''will use matplotlib.scatter to plot raster. mygroup is an H5Py group, which has been generated and saved by net.setsnq(), usually called spikesnqs in the h5py data structure

        Returns a dict keyed by cell-type name with the plotted 'id'/'t'
        numpy arrays. NOTE(review): the killms parameter is currently unused.
        If showInterneuron is True the y-axis is restricted to IDs >= 700.'''
        rasterax.set_title('red:pyr, green:basket, blue:olm')
        cellTypes = ['pyr', 'bas', 'olm']
        cellTypeIndex = [0, 1, 2]
        colorList = ['red', 'green', 'blue']
        # if mygroup['ty'].value.max() ==4:
        # a max type index of 4 means the CCK cell populations were saved too
        if mygroup['ty'][()].max() ==4:
            cellTypes.append('cck_somaPyr')
            cellTypes.append('cck_Adend2Pyr')
            cellTypeIndex.append(3) # for soma targeting cck
            cellTypeIndex.append(4) # for mid apical targeting cck
            colorList.append('orange') # for soma-targeting cck
            colorList.append('cyan') # for dendrite-targeting cck
            rasterax.set_title(rasterax.get_title() + ', orange:cck_soma, cyan:cck_dendrite')
        colorId = 0  # NOTE(review): unused
        plotDic = {}
        for ctype in enumerate(cellTypes):
            # whereArray = mygroup['ty'].value == [ctype[0]]
            # boolean mask selecting the spikes belonging to this cell type
            whereArray = mygroup['ty'][()] == [ctype[0]]
            timeArray = np.array(mygroup['t'][whereArray])
            idArray = np.array(mygroup['id'][whereArray])
            plotDic[ctype[1]] = {'id':idArray,'t':timeArray}
            rasterax.scatter(plotDic[ctype[1]]['t'], plotDic[ctype[1]]['id'], s=20, c=colorList[ctype[0]], *args, **kwargs)
        ylim_max = max(plotDic[key]['id'].max() for key in plotDic.keys() if plotDic[key]['id'].size > 0)
        rasterax.set_ylim(-20,ylim_max+20) #to avoid having negative label on the y-axis for the cell IDs
        xlim_max = max(plotDic[key]['t'].max() for key in plotDic.keys() if plotDic[key]['t'].size > 0)  # NOTE(review): computed but unused
        rasterax.set_xlabel('time (msec)')
        rasterax.set_ylabel('cell ID')
        # rasterax.set_xlim(0, mygroup['t'].value.max())
        rasterax.set_xlim(0, mygroup['t'][()].max())
        mp.draw()
        if showInterneuron: rasterax.set_ylim(700, )
        return plotDic



def plotLFPOneRun(lfpvec, dt, lfpax, killms = 0, startPlotAt = 0, *args, **kwargs):
    '''Plot an LFP trace on axis lfpax after a simulation run (onerun.py).
    killms -- milliseconds trimmed from the beginning of the signal before
    plotting; startPlotAt -- time (ms) assigned to the first plotted sample.'''
    lfpax.set_title('lfp plot')
    trimmed = np.array(lfpvec)[int(killms/dt):]  # drop the first killms of signal
    timeAxis = np.linspace(startPlotAt, trimmed.size * dt + startPlotAt, trimmed.size)
    lfpax.plot(timeAxis, trimmed, *args, **kwargs)
    lfpax.set_xlabel('time (ms)')
    lfpax.set_ylabel('voltage')
    lfpax.set_xlim(timeAxis[0], timeAxis[-1])
    mp.draw()

def simrunProfiling(lfpvec, rasterdata, killms, lfpax, rastax, psdax, *args, **kwargs):
    '''will plot lfp, raster and psd of lfp for a run, that is either just ran or has been loaded from H5py. lfpvec is a vector or numpy array containing voltage of LFP. killms will be the duration to be removed from the beginning of simulation results before calculating PSD. The remaining duration will be plotted'''
    # h.t != 0 means a simulation has actually been run in this session;
    # otherwise nothing is plotted
    if h.t != 0: # already a run has ran
        plotLFPOneRun(lfpvec, h.dt, lfpax, killms)
        mprasterplot(rasterdata, rastax, showInterneuron = False)
        rastax.set_xlim(killms,h.tstop)
        # plot_psd is presumably provided by the imported psd module -- TODO confirm
        plot_psd(lfpvec, h.dt, killms, psdax, *args, **kwargs)


def plotsimrunProfiling(lfpvec, rasterdata, killms, *args, **kwargs):
    '''Build a 2x4 grid figure and delegate to simrunProfiling: LFP on top,
    raster below (sharing the x axis), PSD spanning the right column.
    Returns (figure, lfp axis, raster axis, psd axis).'''
    theFig = mp.figure()
    grid = mp.GridSpec(2, 4)
    lfpAxis = mp.subplot(grid[0, 0:3])
    rasterAxis = mp.subplot(grid[1, 0:3], sharex=lfpAxis)
    psdAxis = mp.subplot(grid[0:2, 3])
    simrunProfiling(lfpvec, rasterdata, killms, lfpAxis, rasterAxis, psdAxis, *args, **kwargs)
    theFig.tight_layout()
    lfpAxis.grid(True)
    psdAxis.grid(True)
    return theFig, lfpAxis, rasterAxis, psdAxis


def loadedSimProfiling(myfile, killms, lfpax, rastax, psdax, startPlotAt = 0, *args, **kwargs):
    '''will plot lfp, raster and psd of lfp for a simulation result which has been loaded by loadSimH5py.

    myfile -- open h5py file saved by saveSimH5py; killms -- duration (ms)
    removed from the start before the PSD is computed; startPlotAt -- time (ms)
    of the first plotted LFP sample.'''
    h.dt = myfile.attrs['dt']  # restore the saved integration step globally
    mprasterplotH5py(myfile['spikesnqs'], rastax, showInterneuron = False)
    plotLFPOneRun(myfile['lfp'], h.dt, lfpax, killms, startPlotAt)
    # rastax.set_xlim(startPlotAt, myfile['lfp'].size*h.dt + startPlotAt)
    # rastax.set_xlim(killms,myfile['lfp'].size*h.dt)
    # plot_psd is presumably provided by the imported psd module -- TODO confirm
    plot_psd(myfile['lfp'], h.dt, killms, psdax, *args, **kwargs)


def plotloadedsimProfiling(myfile, killms, startPlotAt = 0, *args, **kwargs):
    '''Create a 2x4 grid figure for a simulation loaded by loadSimH5py and
    plot raster (top), LFP (bottom, shared x axis) and PSD (right column) via
    loadedSimProfiling. killms is the duration (ms) removed from the start of
    the results before the PSD is calculated. Example:
        myfile = loadSimH5py(mysimstr)
        plotloadedsimProfiling(myfile, 500)
    Returns (figure, raster axis, lfp axis, psd axis).'''
    theFig = mp.figure()
    grid = mp.GridSpec(2, 4)
    rasterAxis = mp.subplot(grid[0, 0:3])
    lfpAxis = mp.subplot(grid[1, 0:3], sharex=rasterAxis)
    psdAxis = mp.subplot(grid[0:2, 3])
    loadedSimProfiling(myfile, killms, lfpAxis, rasterAxis, psdAxis, startPlotAt, *args, **kwargs)
    theFig.tight_layout()
    rasterAxis.grid(True)
    lfpAxis.grid(True)
    psdAxis.grid(True)
    return theFig, rasterAxis, lfpAxis, psdAxis

def indivCellVoltage(indivcell,indivax):
    '''Plot the recorded somatic voltage (plus basal and apical dendrite
    voltages for a pyramidal cell) of one cell. indivcell is a
    ('cellType', cellID) pair, e.g. ('pyr', 1); indivax is the matplotlib
    axis to draw on.'''
    celltype, cellid = indivcell
    if celltype == 'pyr':
        cell = net.pyr.cell[cellid]
        # (recorded voltage vector, plot color, legend label) per compartment
        traces = [(cell.soma_volt, 'b', 'soma'),
                  (cell.Bdend_volt, 'g', 'bdend'),
                  (cell.Adend3_volt, 'r', 'adend')]
    elif celltype == 'bas':
        traces = [(net.bas.cell[cellid].soma_volt, 'b', 'soma')]
    elif celltype == 'olm':
        traces = [(net.olm.cell[cellid].soma_volt, 'b', 'soma')]
    else:
        traces = []  # unknown type: label the axes only, as before
    if traces:
        firstTrace = np.array(traces[0][0])
        timearr = np.linspace(0, h.tstop, firstTrace.size)
        for voltvec, color, label in traces:
            indivax.plot(timearr, np.array(voltvec), color=color, label=label)
        indivax.set_title(celltype)
    indivax.set_xlabel('time (ms)')
    indivax.set_ylabel('voltage (uV)')
    
def calcTau_mm(voltArr):
    '''Return the membrane time constant estimated from a recorded voltage
    vector or array, assuming injection of a hyperpolarizing current started
    at t0 or earlier and voltArr holds at least ~20000 samples
    (time = index * h.dt).

    BUGFIX: the sample indices were computed as floats (0.25 * size), which
    raises TypeError when used to index a numpy array; they are now ints.
    '''
    voltArr = np.array(voltArr)
    # time = index * h.dt
    def calc_voltDeriv(t_indx):
        '''Central-difference voltage derivative at index t_indx (time = t_indx*h.dt).'''
        v2 = voltArr[t_indx+1]; v1 = voltArr[t_indx-1]
        voltDeriv = (v2 - v1) / (2*h.dt)
        return voltDeriv
    # 2 points are needed (t1,v1), (t2,v2) on the charging curve
    t1_indx = int(0.25 * voltArr.size); t2_indx = int(0.5 * voltArr.size)
    t1 = t1_indx * h.dt; t2 = t2_indx * h.dt
    v1 = voltArr[t1_indx]; v2 = voltArr[t2_indx]
    print ('v1: {0} mV, t1: {1} ms; v2: {2} mV, t2: {3} ms'.format(v1,t1,v2,t2))
    # tau = dV / d(dV/dt) between the two sample points
    tau_mm = (v2 - v1) / (calc_voltDeriv(t1_indx) - calc_voltDeriv(t2_indx))
    return tau_mm


def saveNQS_h5pyGroup(f, mynqs, mynqsName, selected=False, *args, **kwargs):
    '''Save mynqs as a group named mynqsName in the h5py file f (which must be
    open for writing). If selected is False the full NQS is saved, otherwise
    only the current selection. Each NQS column becomes one dataset named
    after the column.'''
    f.create_group(mynqsName)
    if not selected:
        mynqs.tog('DB')  # switch back to the full database
    ncols = int(mynqs.m[0])
    for col in range(ncols):
        colname = mynqs.s[col].s
        f[mynqsName].create_dataset(colname, data=np.array(mynqs.getcol(colname)), *args, **kwargs)


def saveVec_h5pyDataset(f, myvec, myvecName, *args, **kwargs):
    '''Store a hoc Vector (or anything numpy can convert) as a numpy-array
    dataset named myvecName in the h5py file f (opened for writing).'''
    arr = np.array(myvec)
    f.create_dataset(myvecName, data=arr, *args, **kwargs)

def savePyrDrivingSpikes_h5Group(f, *args, **kwargs):
    '''Save the pyramidal-cell driving spike trains (net.NCV) into h5py file f
    as a group named pyrDriveSpikes: one 2-D dataset per synapse type, one row
    per cell, zero-padded to the longest spike train.'''
    f.create_group('pyrDriveSpikes')
    for synName, recvecs in net.NCV.items():
        longest = max(len(vec) for vec in recvecs)
        spikes = np.zeros((len(recvecs), longest))
        for row, vec in enumerate(recvecs):
            spikes[row][0:vec.size()] = np.array(vec)
        f['pyrDriveSpikes'].create_dataset(synName, data=spikes)

def loadNQS_h5pyGroup(group):
    '''Rebuild an NQS from an h5py group saved by saveNQS_h5pyGroup: one NQS
    column (with matching name) per dataset in the group, and return it.

    BUGFIX: h5py groups have no iteritems() under Python 3 (this file uses
    Python 3 print() calls throughout); use items() instead.
    '''
    mynqs = h.NQS(len(group))
    for i, (colname, dataset) in enumerate(group.items()):
        mynqs.v[i].copy(h.Vector(dataset))
        mynqs.s[i].s = colname
    return mynqs



def saveSimH5py(simstr, datadir='./data/', savevoltnq = False, savePyrDrivingSpikes = False, saveSignalSpikes = False, *args, **kwargs):
    '''Save simulation output (spike NQS, LFP, and optionally cell voltages,
    pyramidal driving spikes, and signal spike times) to datadir/simstr.h5py.
    Requires h5py to be installed and imported. net.nqv is obtained by calling
    net.getnqvolt(). The simulation configuration (dconf) and the mercurial
    revision are stored as file attributes.'''
    # check that the vectors and NQS objects to be saved have been computed;
    # catch only AttributeError (attribute not yet created), not every exception
    try:
        net.vlfp
    except AttributeError:
        print('Could not find lfp vector')
        return
    try:
        net.snq
    except AttributeError:
        print ('Could not find spike nqs')
        return
    if savevoltnq:
        try:
            net.nqv
        except AttributeError:
            print ('Could not find cell voltage nqs')
            return
    if savePyrDrivingSpikes:
        try:
            net.NCV
        except AttributeError:
            print ('Could not find pyramidal driving input dictionary net.NCV')
            return
    f = h5py.File(datadir+simstr + '.h5py', 'a') # w')
    try:
        saveVec_h5pyDataset(f, net.vlfp, 'lfp', *args, **kwargs)
        saveNQS_h5pyGroup(f, net.snq, 'spikesnqs', *args, **kwargs)
        if savevoltnq:
            saveVoltNQcells(f, dconf['strCellsRecordVoltage'])
        if savePyrDrivingSpikes:
            savePyrDrivingSpikes_h5Group(f)
        if saveSignalSpikes:
            saveVec_h5pyDataset(f, net.signalVec, 'signalSpikeTimes', *args, **kwargs)
        # store the configuration parameters as file attributes
        for key, val in dconf.items():
            f.attrs.create(key, val)
        # record the code revision that produced this file
        f.attrs['revision'] = check_output('hg id -in', shell=True)
    finally:
        f.close()  # always release the file, even if a write or the hg call fails


def saveVoltNQcells(f, mylist):
    '''Save the voltage traces of the cells whose column IDs are in mylist from
    net.nqv into a 'cellsVoltagenqs' group of the open-for-write h5py file f.
    Also saves the time vector (last NQS column).'''
    # net.nqv presence is checked before calling this saveVoltNQcells
    f.create_group('cellsVoltagenqs')
    mynqs = net.nqv
    for cellid in mylist:
        # one dataset per cell, named after the NQS column (e.g. 'bas_3')
        f['cellsVoltagenqs'].create_dataset(mynqs.s[cellid].s, data = np.array(mynqs.getcol(mynqs.s[cellid].s)))
    # save "time" vector - it is always the last vector in the nqs
    # NOTE(review): relies on negative indexing (s[-1]) of the hoc string array;
    # confirm NEURON's python wrapper supports this on the installed version
    f['cellsVoltagenqs'].create_dataset(mynqs.s[-1].s, data = np.array(mynqs.getcol(mynqs.s[-1].s)))
  


def loadSimH5py (simstr, datadir='./data/', *args, **kwargs):
    '''Open the saved simulation file datadir/simstr.h5py read-only and return
    the h5py File object.'''
    fname = datadir + simstr + '.h5py'
    return h5py.File(fname, 'r')

def loaded_h5py_toDic (myh5pyfile):
    '''Convert an opened h5py simulation file into a plain python dictionary
    containing numpy copies of the cell voltages, LFP, and spikes, plus the
    file attributes (configuration parameters) under 'attrs'.'''
    out = {}
    # per-cell voltage traces
    voltgroup = myh5pyfile['cellsVoltagenqs']
    out['cellsVoltagenqs'] = {name: np.array(voltgroup[name]) for name in voltgroup}
    # local field potential
    out['lfp'] = np.array(myh5pyfile['lfp'])
    # spike NQS columns (id, t, ty)
    spikegroup = myh5pyfile['spikesnqs']
    out['spikesnqs'] = {name: np.array(spikegroup[name]) for name in spikegroup}
    # configuration parameters stored as file attributes
    out['attrs'] = dict(myh5pyfile.attrs.items())
    return out
    
    

def plotIndividualVoltages(cell_type, cellidlist, f, axes, separate = 0, colormap = mp.cm.nipy_spectral, *args, **kwargs):
    '''Plot individual cell voltage traces from the 'cellsVoltagenqs' h5py group
    of file f onto axes. cell_type is one of 'cck', 'pv', 'olm'; cellidlist
    holds the per-type cell indices. separate (mV) vertically offsets successive
    traces (e.g. separate == 5 spaces them 5 mV apart for better viewing).'''
    group = f['cellsVoltagenqs']
    colorid = np.linspace(0,1,len(cellidlist))
    # map per-type indices onto the dataset names used when the file was saved
    if cell_type == 'cck': cell_nameslist = ['cck_'+str(i+20) for i in cellidlist]
    elif cell_type == 'pv': cell_nameslist = ['bas_'+str(i) for i in cellidlist]
    elif cell_type == 'olm': cell_nameslist = ['olm_'+str(i+10) for i in cellidlist]
    else: raise ValueError('unknown cell_type: {0}'.format(cell_type))
    tstop = f.attrs['tstop']
    # BUG FIX: the index increment used to sit OUTSIDE this loop, so every trace
    # was drawn with the same color and no vertical offset; enumerate restores
    # the per-trace color and the 'separate' spacing.
    for i, cellName in enumerate(cell_nameslist):
        axes.plot(np.linspace(0, tstop, group[cellName].size),
                  group[cellName][()] + i*separate,
                  color=colormap(colorid[i]), label=cellName, *args, **kwargs)
    axes.set_ylabel('voltage (mV)')
    axes.set_xlabel('time (ms)')
    if separate != 0:
        # report the actual offset (was hard-coded to 5 regardless of 'separate')
        axes.set_title('plots are translated by {0} mV for better viewing'.format(separate))
    axes.legend(loc=0)
    mp.draw()


def plot_psd(lfpvec, dt, killms, psdax, *args, **kwargs):
    '''Compute and draw the power spectral density of lfpvec on axes psdax.
    dt is the sample interval in ms; the first killms ms are discarded and the
    mean removed before the PSD is computed. x-axis is limited to 0-100 Hz.'''
    signal = np.array(lfpvec)[int(killms/dt):]
    signal = signal - signal.mean()  # remove DC offset
    sampr = 1000/dt  # sampling rate in Hz (dt is in ms)
    freqs, PSDs = psd.calc_psd(signal, fs = sampr, window='hanning', nperseg=500/dt, noverlap=None, nfft=None, detrend='constant')
    psdax.plot(freqs, PSDs, *args, **kwargs)
    psdax.set_xlim(0, 100)
    psdax.set_xlabel('freq (Hz)')
    psdax.set_ylabel('PSD (voltage^2 / Hz)')


def plot_psd2(freqs, psds, psdax, *args, **kwargs):
    '''Draw an already-computed PSD (freqs vs psds) on axes psdax, with the
    x-axis limited to 0-100 Hz and standard PSD axis labels.'''
    psdax.plot(freqs, psds, *args, **kwargs)
    psdax.set_xlim(0, 100)
    for setter, text in ((psdax.set_xlabel, 'freq (Hz)'),
                         (psdax.set_ylabel, 'PSD (voltage^2 / Hz)')):
        setter(text)


def locState(state_0, state_1, mystate):
    '''Map mystate onto 0 (equals state_0) or 1 (equals state_1); print a
    warning and return None otherwise. Useful for plotting the on/off state of
    an intervention, e.g. cb1R agonism: present (1) or absent (0).'''
    for value, state in enumerate((state_0, state_1)):
        if mystate == state:
            return value
    print ("mystate does not correspond to either states")


def getspecg (vlfp,killms=0,sampr=1e3/h.dt,NFFT=256, freqstart=0, freqend=250,*args, **kwargs):
    '''Compute a spectrogram of vlfp with pylab, discarding the first killms ms,
    and return the (Pxx, freqs, tt) entries whose power lies in
    [powerstart, powerend].
    NOTE(review): powerstart/powerend must exist as globals before calling -
    they are not parameters of this function. freqstart/freqend are accepted
    but currently unused.'''
    # BUG FIX: killms / h.dt is a float; Python 3 requires an int slice index
    nsamp = int(killms / h.dt)
    vlfpcopy = np.array(vlfp)[nsamp:vlfp.size]
    vlfpcopy = vlfpcopy - vlfpcopy.mean()  # remove DC offset
    # BUG FIX: noverlap must be an int for modern matplotlib (was NFFT/2.0)
    Pxx,freqs,tt,im = mp.specgram(vlfpcopy,Fs=sampr,NFFT=NFFT,noverlap=NFFT//2)
    # select spectrogram cells whose power falls inside the requested band
    # NOTE(review): position has the 2D shape of Pxx while freqs/tt are 1D;
    # indexing freqs/tt with a 2D mask looks wrong - confirm intended behavior
    position = (Pxx >= powerstart) & (Pxx <= powerend)
    selectedPxx = Pxx[position]
    selectedfreqs = freqs[position]
    selectedtt = tt[position]
    return selectedPxx, selectedfreqs, selectedtt


def synthLFP (sampr=10e3,endt=10e3,nonModAmp=1,PhaseModF=8,AmpModF=30,noiseMU=0,noiseSTD=1,rdmS=1234):
    '''Synthesize an LFP hoc Vector with specified theta/gamma phase-amplitude
    coupling. sampr: sampling rate (Hz); endt: duration (ms); nonModAmp:
    amplitude of the non-modulated component; PhaseModF: phase-modulating
    (theta) frequency (Hz); AmpModF: amplitude-modulated (gamma) frequency (Hz);
    noiseMU/noiseSTD: mean/SD of additive Gaussian noise (skipped when both are
    <= 0); rdmS: random seed for the noise.'''
    sz = sampr*(endt/1e3)  # number of samples
    dt = 1e3/sampr  # sample interval in ms
    vlfp,vtheta,vgamma = Vector(sz),Vector(sz),Vector(sz)
    # build the theta envelope in place on vtheta:
    vtheta.sin(PhaseModF,0,dt)
    vtheta.add(1)
    vtheta.mul(0.2)
    vtheta.add(nonModAmp*0.1)
    # 0.2*(sin(2*pi*t*Phase_Modulating_Freq/srate)+1)+nonmodulatedamplitude*0.1
    vgamma.sin(AmpModF,0,dt)
    vlfp.sin(PhaseModF,0,dt)
    vlfp.add(vgamma)
    # multiply the (theta + gamma) sum by the theta envelope -> phase-amplitude coupling
    vlfp.mul(vtheta)
    #sin(2*pi*t*Amp_Modulated_Freq/srate)+sin(2*pi*t*Phase_Modulating_Freq/srate)
    #lfp=(0.2*(sin(2*pi*t*Phase_Modulating_Freq/srate)+1)+nonmodulatedamplitude*0.1).*sin(2*pi*t*Amp_Modulated_Freq/srate)+sin(2*pi*t*Phase_Modulating_Freq/srate);
    #lfp=lfp+1*randn(1,length(lfp));%adds an element of randomness to lfp %signal  
    if noiseMU > 0 or noiseSTD > 0:
      vrd = RandNormVec(rdmS,sz,noiseMU,noiseSTD)
      vlfp.add(vrd)
    return vlfp


def my_specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mp.mlab.detrend_none,
             window=mp.mlab.window_hanning, noverlap=128,
             cmap=None, xextent=None, pad_to=None, sides='default',
             scale_by_freq=None, minfreq = None, maxfreq = None, **kwargs):
    """
    from:
    http://stackoverflow.com/questions/19468923/cutting-of-unused-frequencies-in-specgram-matplotlib
    call signature::

      specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
               window=mlab.window_hanning, noverlap=128,
               cmap=None, xextent=None, pad_to=None, sides='default',
               scale_by_freq=None, minfreq = None, maxfreq = None, **kwargs)

    Compute a spectrogram of data in *x*.  Data are split into
    *NFFT* length segments and the PSD of each section is
    computed.  The windowing function *window* is applied to each
    segment, and the amount of overlap of each segment is
    specified with *noverlap*.

    %(PSD)s

      *Fc*: integer
        The center frequency of *x* (defaults to 0), which offsets
        the y extents of the plot to reflect the frequency range used
        when a signal is acquired and then filtered and downsampled to
        baseband.

      *cmap*:
        A :class:`matplotlib.cm.Colormap` instance; if *None* use
        default determined by rc

      *xextent*:
        The image extent along the x-axis. xextent = (xmin,xmax)
        The default is (0,max(bins)), where bins is the return
        value from :func:`mlab.specgram`

      *minfreq, maxfreq*
        Limits y-axis. Both required

      *kwargs*:

        Additional kwargs are passed on to imshow which makes the
        specgram image

      Return value is (*Pxx*, *freqs*, *bins*, *im*):

      - *bins* are the time points the spectrogram is calculated over
      - *freqs* is an array of frequencies
      - *Pxx* is a len(times) x len(freqs) array of power
      - *im* is a :class:`matplotlib.image.AxesImage` instance

    Note: If *x* is real (i.e. non-complex), only the positive
    spectrum is shown.  If *x* is complex, both positive and
    negative parts of the spectrum are shown.  This can be
    overridden using the *sides* keyword argument.

    **Example:**

    .. plot:: mpl_examples/pylab_examples/specgram_demo.py

    """

    #####################################
    # modified  axes.specgram() to limit
    # the frequencies plotted
    #####################################

    # this will fail if there isn't a current axis in the global scope
    ax = mp.gca()
    Pxx, freqs, bins = mp.mlab.specgram(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq)
#    Pxx, freqs, bins = mp.specgram(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq)

    # modified here
    #####################################
    # keep only the rows of Pxx (and matching freqs) inside [minfreq, maxfreq]
    if minfreq is not None and maxfreq is not None:
        Pxx = Pxx[(freqs >= minfreq) & (freqs <= maxfreq)]
        freqs = freqs[(freqs >= minfreq) & (freqs <= maxfreq)]
    #####################################

    # convert power to dB
    Z = 10. * np.log10(Pxx)
    # flip rows so low frequencies end up at the bottom of the imshow image
    Z = np.flipud(Z)

    if xextent is None: xextent = 0, np.amax(bins)
    xmin, xmax = xextent
    # offset the frequency axis by the center frequency Fc
    freqs += Fc
    extent = xmin, xmax, freqs[0], freqs[-1]
    im = ax.imshow(Z, cmap, extent=extent, **kwargs)
    ax.axis('auto')
    mp.draw()

    return Pxx, freqs, bins, im

def bootstrap_resample(data, n=None):
    """Bootstrap resample an array_like (sample with replacement).
    from: http://nbviewer.ipython.org/gist/aflaxman/6871948
    Parameters
    ----------
    data : array_like
      data to resample
    n : int, optional
      length of resampled array, equal to len(data) if n is None
    Returns
    -------
    X_resample : np.ndarray of n elements drawn with replacement from data
    """
    # BUG FIX: was isinstance(data, numpy.ndarray) - this file uses the alias
    # 'np' (e.g. np.array elsewhere), so the bare name 'numpy' risks NameError
    if isinstance(data, np.ndarray):
        X = data
    else:
        X = np.array(data)
    if n is None:  # identity check, not ==, per PEP 8
        n = len(X)
    resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)
    X_resample = np.array(X[resample_i])  # TODO: write a test demonstrating why array() is important
    return X_resample


def get_binEdges(min_val, max_val, binSize):
    '''Return ascending bin edges spanning [min_val, max_val] in binSize steps,
    built downward from max_val so the top edge is exact; used to bin the spike
    count vectors for the nTE calculation.'''
    descending = np.arange(max_val, min_val - binSize, -binSize)
    return descending[::-1]  # reverse so the edges ascend


def getPyrSpikeTrains(mygroup, ncells=800):
    '''Return an (ncells x maxspikes) array of spike times for pyramidal cells
    with GIDs 0..ncells-1 (default 800, the model's pyramidal population).
    mygroup is a spikesnqs group containing the datasets 'id', 't', 'ty' - only
    'id' and 't' are used. Rows are zero-padded to the longest spike train.'''
    myt_array = np.asarray(mygroup['t'])
    myid_array = np.asarray(mygroup['id'])
    # per-cell spike-time vectors
    dpyrOutputTrains = {gid: myt_array[myid_array == gid] for gid in range(ncells)}
    # default=0 guards the degenerate ncells == 0 case
    maxLength = max((v.size for v in dpyrOutputTrains.values()), default=0)
    pyrOutputSpikeTrain = np.zeros((ncells, maxLength))
    for gid in range(ncells):
        pyrOutputSpikeTrain[gid][0:dpyrOutputTrains[gid].size] = dpyrOutputTrains[gid]
    return pyrOutputSpikeTrain



def getPyrOutputHistogram(mygroup, binEdges):
    '''Return an (ncells x nbins) array with one spike-count histogram per
    pyramidal cell. mygroup is the spikesnqs group (datasets 'id', 't', 'ty');
    binEdges is the list of bin edges returned by get_binEdges().'''
    outputTrain = getPyrSpikeTrains(mygroup)
    # derive the cell count from the data instead of re-hardcoding 800
    ncells = outputTrain.shape[0]
    outputHistogram = np.zeros((ncells, len(binEdges) - 1))
    for cell in range(ncells):
        outputHistogram[cell], _edges = np.histogram(outputTrain[cell], bins=binEdges)
    return outputHistogram


def getDrivingHisto(mygroup, binEdges):
    '''Return a dict mapping synapse class ('AMPA', 'GABA') to an (800 x nbins)
    array of binned driving-input spike counts, summed over the dendritic and
    somatic synapses of each pyramidal cell.'''
    # dataset names in mygroup for each synapse class
    dsynkeys = {'AMPA': ['Adend3AMPAf', 'somaAMPAf'],
                'GABA': ['Adend3GABAf', 'somaGABAf']}
    ddrivingHisto = {}
    for synclass, lsynkey in dsynkeys.items():
        sumhisto = np.zeros((800, len(binEdges) - 1))
        for mysyn in lsynkey:
            for cell in range(800):
                counts, _edges = np.histogram(mygroup[mysyn][cell], bins=binEdges)
                sumhisto[cell] += counts
        ddrivingHisto[synclass] = sumhisto
    return ddrivingHisto
def compareTwoDictioaries(d1, d2):
    '''Compare two dictionaries and return the tuple (added, removed, modified,
    same): keys only in d1, keys only in d2, a dict {key: (d1_val, d2_val)} for
    shared keys whose values differ, and the set of shared keys with equal
    values. Adapted from:
    http://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python'''
    keys1, keys2 = set(d1), set(d2)
    shared = keys1 & keys2
    added = keys1 - keys2
    removed = keys2 - keys1
    modified = {k: (d1[k], d2[k]) for k in shared if d1[k] != d2[k]}
    same = {k for k in shared if d1[k] == d2[k]}
    return added, removed, modified, same

Loading data, please wait...