Spike-Timing-Based Computation in Sound Localization (Goodman and Brette 2010)

Download zip file
Help downloading and running models
" ... In neuron models consisting of spectro-temporal filtering and spiking nonlinearity, we found that the binaural structure induced by spatialized sounds is mapped to synchrony patterns that depend on source location rather than on source signal. Location-specific synchrony patterns would then result in the activation of location-specific assemblies of postsynaptic neurons. We designed a spiking neuron model which exploited this principle to locate a variety of sound sources in a virtual acoustic environment using measured human head-related transfer functions. ..."
1. Goodman DF, Brette R (2010) Spike-timing-based computation in sound localization. PLoS Comput Biol 6:e1000993 [PubMed]
Model Information (Click on a link to find other models with that property)
Model Type: Realistic Network;
Brain Region(s)/Organism:
Cell Type(s):
Gap Junctions:
Simulation Environment: Brian; Python;
Model Concept(s): Coincidence Detection; Synchronization;
Implementer(s): Goodman, Dan F. M. ;
from shared import *

def hrtfset_itd_ild(hrtfset, cfmin, cfmax, cfN):
    '''
    Compute per-location ITDs and ILDs for an HRTF set, per gammatone channel.

    Parameters:
    hrtfset -- a Brian hears HRTFSet (provides .hrtf, .num_indices, .samplerate)
    cfmin, cfmax, cfN -- centre-frequency range (ERB-spaced) and channel count

    Returns (all_itds, all_ilds): two lists of length hrtfset.num_indices, one
    array of cfN values per source location. ITDs are cross-correlation lags in
    samples (positive when the left channel leads). Results are cached on disk
    keyed by HRTF-set name and frequency parameters.
    '''
    cf = erbspace(cfmin, cfmax, cfN)
    man_name = hrtfset.name+'-'+str((int(cfmin)))+'-'+str(int(cfmax))+'-'+str(cfN)
    fname = datapath+'/itdild/'+man_name+'.pkl'
    if os.path.exists(fname):
        # Use a context manager so the file handle is not leaked.
        with open(fname, 'rb') as f:
            return pickle.load(f)
    all_itds = []
    all_ilds = []
    num_indices = hrtfset.num_indices
    for j in xrange(num_indices):
        # Stereo impulse response for location j, filtered through a gammatone
        # bank: channels [0, cfN) are left, [cfN, 2*cfN) are right.
        hrir = Sound(hrtfset.hrtf[j].fir.T, samplerate=hrtfset.samplerate)
        fb = Gammatone(Repeat(hrir, cfN), hstack((cf, cf)))
        filtered_hrirset = fb.process()
        itds = []
        ilds = []
        for i in xrange(cfN):
            left = filtered_hrirset[:, i]
            right = filtered_hrirset[:, i+cfN]
            # This FFT stuff does a correlate(left, right, 'full')
            Lf = fft(hstack((left, zeros(len(left)))))
            Rf = fft(hstack((right[::-1], zeros(len(right)))))
            C = ifft(Lf*Rf).real
            # BUG FIX: the original assigned the lag to `i`, clobbering the
            # loop variable and discarding the result; itds/ilds stayed empty.
            lag = argmax(C)+1-len(left)  # best-match lag in samples
            itds.append(lag)
            # ILD as the right/left energy ratio in dB.
            # TODO(review): exact ILD definition not visible in this file --
            # confirm against the published model code.
            ilds.append(10*log10(sum(right**2)/sum(left**2)))
        # BUG FIX: per-location results were never accumulated, so the
        # function always returned (and cached) two empty lists.
        all_itds.append(array(itds))
        all_ilds.append(array(ilds))
    with open(fname, 'wb') as f:
        pickle.dump((all_itds, all_ilds), f, -1)
    return all_itds, all_ilds

def hrtfset_attenuations(cfmin, cfmax, cfN, hrtfset, sound=None):
    '''
    Compute per-location, per-channel gain of an HRTF set relative to the
    unfiltered sound.

    Parameters:
    cfmin, cfmax, cfN -- ERB-spaced gammatone centre-frequency range and count
    hrtfset -- a Brian hears HRTFSet
    sound -- optional probe Sound; defaults to a 40 ms unit impulse

    Returns an array of shape (2, num_indices, cfN): peak gammatone output of
    the HRTF-filtered sound, per ear/location/channel, normalised by the peak
    output for the unfiltered sound. Cached on disk.

    NOTE(review): the cache filename does not encode `sound`, so calls with
    different probe sounds share one cache entry -- confirm this is intended.
    '''
    fname = datapath+'/hrtf_attenuation/'+hrtfset.name+'-'+str((int(cfmin)))+'-'+str(int(cfmax))+'-'+str(cfN)+'.pkl'
    if os.path.exists(fname):
        # Context manager avoids leaking the file handle.
        with open(fname, 'rb') as f:
            return pickle.load(f)
    # BUG FIX: `sound` was unconditionally overwritten, silently ignoring a
    # caller-supplied sound; use the standard None-default idiom instead.
    if sound is None:
        sound = Sound(array([1.]))[:40*ms]  # 40 ms unit impulse
    cf = erbspace(cfmin, cfmax, cfN)

    # HRTF filterbank over all locations/ears, then gammatone analysis.
    hrtfset_fb = hrtfset.filterbank(Repeat(sound, 2*hrtfset.num_indices))
    gfb = Gammatone(Repeat(hrtfset_fb, cfN),
                    tile(cf, hrtfset_fb.nchannels))

    # Reference path: gammatone analysis of the raw (unfiltered) sound.
    gfb2 = Gammatone(sound, cf)
    output_fb = Join(gfb, gfb2)

    # Track the running peak of each channel, processing in 32-sample buffers
    # to bound memory use.
    y = zeros(2*hrtfset.num_indices*cfN)
    z = zeros(cfN)
    endpoints = hstack((arange(0, sound.shape[0], 32), sound.shape[0]))
    for start, end in zip(endpoints[:-1], endpoints[1:]):
        output = output_fb.buffer_fetch(start, end)
        output1 = output[:, :-cfN]   # HRTF-filtered channels
        output2 = output[:, -cfN:]   # reference channels
        y = maximum(y, amax(output1, axis=0))
        z = maximum(z, amax(output2, axis=0))
    y.shape = (2, hrtfset.num_indices, cfN)
    z.shape = (1, 1, cfN)
    # Flip the ear axis; presumably to get (left, right) ordering -- TODO
    # confirm against the HRTFSet channel convention.
    y = y[::-1, :, :]
    y /= z  # normalise by the unfiltered peak (broadcasts over ears/locations)
    with open(fname, 'wb') as f:
        pickle.dump(y, f, -1)
    return y

Loading data, please wait...