Reward modulated STDP (Legenstein et al. 2008)

Accession:116837
"... This article provides tools for an analytic treatment of reward-modulated STDP, which allows us to predict under which conditions reward-modulated STDP will achieve a desired learning effect. These analytical results imply that neurons can learn through reward-modulated STDP to classify not only spatial but also temporal firing patterns of presynaptic neurons. They also can learn to respond to specific presynaptic firing patterns with particular spike patterns. Finally, the resulting learning theory predicts that even difficult credit-assignment problems, where it is very hard to tell which synaptic weights should be modified in order to increase the global reward for the system, can be solved in a self-organizing manner through reward-modulated STDP. This yields an explanation for a fundamental experimental result on biofeedback in monkeys by Fetz and Baker. In this experiment monkeys were rewarded for increasing the firing rate of a particular neuron in the cortex and were able to solve this extremely difficult credit assignment problem. ... In addition our model demonstrates that reward-modulated STDP can be applied to all synapses in a large recurrent neural network without endangering the stability of the network dynamics."
Reference:
1 . Legenstein R, Pecevski D, Maass W (2008) A learning theory for reward-modulated spike-timing-dependent plasticity with application to biofeedback. PLoS Comput Biol 4:e1000180 [PubMed]
Model Information
Model Type: Realistic Network;
Brain Region(s)/Organism: Neocortex;
Simulation Environment: Python; PCSIM;
Model Concept(s): Pattern Recognition; Spatio-temporal Activity Patterns; Reinforcement Learning; STDP; Biofeedback; Reward-modulated STDP;
Implementer(s): Pecevski, Dejan [dejan at igi.tugraz.at];
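The learning rule described above can be summarized in a few lines: each synapse accumulates an eligibility trace from ordinary STDP spike pairings, and the weight is changed only when that trace coincides with a (baseline-subtracted) reward signal. The snippet below is a minimal single-synapse sketch of this eligibility-trace form of reward-modulated STDP; it is not the PCSIM implementation used in the script that follows, and all function names and parameter values are illustrative assumptions.

import numpy as np

def rm_stdp_step(w, c, pre_spike, post_spike, x_pre, x_post, reward,
                 dt=1e-4, A_plus=1e-3, A_minus=1.05e-3,
                 tau_plus=0.02, tau_minus=0.02, tau_e=0.5,
                 w_min=0.0, w_max=1.0):
    # Advance one synapse by one time step dt (all times in seconds).
    # x_pre / x_post: exponentially decaying pre-/postsynaptic spike traces
    # c: eligibility trace; reward: reward signal d(t), e.g. with baseline subtracted

    # Decay the spike traces and the eligibility trace
    x_pre *= np.exp(-dt / tau_plus)
    x_post *= np.exp(-dt / tau_minus)
    c *= np.exp(-dt / tau_e)

    if pre_spike:        # presynaptic spike: depression term (post-before-pre pairings)
        x_pre += 1.0
        c -= A_minus * x_post
    if post_spike:       # postsynaptic spike: potentiation term (pre-before-post pairings)
        x_post += 1.0
        c += A_plus * x_pre

    # The weight changes only where the eligibility trace meets a nonzero reward
    w = np.clip(w + reward * c * dt, w_min, w_max)
    return w, c, x_pre, x_post
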
#================================================================================
#  Computer Simulation 4 with weight-dependent RM STDP rule
#    from  Legenstein, R., Pecevski, D. and Maass, W., A Learning Theory
#       for Reward-Modulated Spike-Timing-Dependent Plasticity with 
#       Application to Biofeedback 
# 
#  Author: Dejan Pecevski, dejan@igi.tugraz.at
#
#  Date: February 2008
#
#================================================================================

import sys
from sys import stdout
import os


from pypcsim import *
import pypcsimplus as pcsim
from numpy import *
import random
import numpy
from datetime import datetime
from math import *
from tables import *
from mpi4py import MPI
from ReadoutModel import *
from TemplateInputModelKernelRwd import *


class PatternRewardSTDPExperiment(pcsim.Experiment):
        
    def defaultExpParameters(self):
        ep = self.expParams 
        
        # General simulation parameters        
        ep.DTsim = 1e-4        # simulation time step [s]
        
        ep.nTrainEpochs = 100  # number of training trials
        ep.nTestEpochs = 10    # number of test trials before and after training
        
        ep.trialT = 3          # duration of one trial [s]
        
        # Network distribution parameters
        ep.netType = 'ST'      # 'ST' = single-thread network
        ep.nThreads = 1
        ep.minDelay = 1e-3     # minimum synaptic delay [s]
        ep.maxDelay = 2        # maximum synaptic delay [s]
        
        # Seeds of the experiment
        ep.numpyRandomSeed = 31342
        ep.pyRandomSeed = 124243        
        ep.constructionSeed = 3224356
        ep.simulationSeed = 134252439
        
        ep.runMode = "long"
        ep.input = "TemplateInputModelKernelRwd"
        
        ep.recordReadoutVm = True        
        ep.testWithNoise = True
        
        
        # During the first/last numTrialsWithoutThreshold test trials the readout
        # firing threshold is raised, and during the first/last numTrialsRecordVm
        # test trials the readout membrane potential is recorded
        ep.numTrialsWithoutThreshold = 10
        ep.numTrialsRecordVm = 20
        
        
    def setupModels(self):        
        p = self.modelParams
        ep = self.expParams
        m = self.models
        net = self.net
        
        random.seed(ep.pyRandomSeed)
        numpy.random.seed(ep.numpyRandomSeed)
        
        
        # Instantiate the input model class whose name is given in ep.input
        m.input = eval(ep.input + '(self.net, self.expParams, p.get("input",{}))')        

        
        ep.Tsim = ep.nTrainEpochs * ep.trialT   # total training simulation time [s]
        # Recording sampling interval in time steps (~200 samples over Tsim)
        ep.samplingTime = int(ep.Tsim / (200 * ep.DTsim))
        
        m.input.generate()
                
        # create the readout model
        m.readout = ReadoutModel(self.net, self.expParams, p.get("readout", {}), depModels = m.input.elements)        
        m.readout.generate()
        
        m.input.connectReadout(m.readout)

        
    def setupRecordings(self):
        r = self.recordings        
        r.input = self.models.input.setupRecordings()        
        r.readout = self.models.readout.setupRecordings()
        return r
    
    def simulate(self):
        ep = self.expParams        
        ep.samplingTime = int(ep.Tsim / (200 * ep.DTsim))
        m = self.models
        
        currEpoch = 0  
        
        # Run simulation 
        print 'Running simulation:', datetime.today().strftime('%x %X')
        
        t0=datetime.today()
        
        self.net.reset()
        
        # Pre-learning test phase: put the readout in test mode and raise its
        # firing threshold for the initial test trials
        m.readout.setTestPhase()
        m.readout.increaseThreshold()
        
        print "Test Before Learning:"
        while currEpoch < ep.nTestEpochs:
            if currEpoch % 10 == 0:
                stdout.write(str(currEpoch))
            else:
                stdout.write(".")
            m.input.reset(currEpoch)        
            self.net.advance(int(ep.trialT  / ep.DTsim))
            # Stop recording the readout Vm after the first numTrialsRecordVm trials
            if ep.recordReadoutVm and currEpoch == ep.numTrialsRecordVm:
                m.readout.switchOffRecordVmReadout()
            # Restore the normal readout threshold after the first numTrialsWithoutThreshold trials
            if ep.recordReadoutVm and currEpoch == ep.numTrialsWithoutThreshold:
                m.readout.setNormalThreshold()
            currEpoch += 1
        
        m.readout.setTrainPhase()
        
        print "Train Epoch: "
        while currEpoch < ep.nTrainEpochs + ep.nTestEpochs:
            if currEpoch % 10 == 0:
                stdout.write(str(currEpoch))
            else:
                stdout.write(".")
            m.input.reset(currEpoch)        
            self.net.advance(int(ep.trialT  / ep.DTsim))                                  
            currEpoch += 1
        
        m.readout.setTestPhase()
        
        print "Test Epoch: "
        while currEpoch < ep.nTrainEpochs + 2*ep.nTestEpochs:
            if currEpoch % 10 == 0:
                stdout.write(str(currEpoch))
            else:
                stdout.write(".")
            m.input.reset(currEpoch)
            self.net.advance(int(ep.trialT / ep.DTsim))
            # Record the readout Vm again during the last numTrialsRecordVm trials
            if ep.recordReadoutVm and currEpoch == ep.nTrainEpochs + 2*ep.nTestEpochs - ep.numTrialsRecordVm:
                m.readout.switchOnRecordVmReadout()
            # Raise the readout threshold again for the last numTrialsWithoutThreshold trials
            if ep.recordReadoutVm and currEpoch == ep.nTrainEpochs + 2*ep.nTestEpochs - ep.numTrialsWithoutThreshold:
                m.readout.increaseThreshold()
            currEpoch += 1
        
        t1=datetime.today()
        print 'Done.', (t1-t0).seconds, 'sec wall-clock time for', ep.Tsim, 's simulation time'
        self.expParams.simDuration = (t1 - t0).seconds
        
        
    def scriptList(self):
        return ["PatternRewardSTDPExperiment.py"]
    
if __name__ == "__main__":  
      
    # Predefined numpy seeds; the first command-line argument selects one by index
    numpySeedArray = [1136268, 61886, 68688, 134381, 653434368, 2067876833]
    
    if len(sys.argv) > 1:
        runName = "final_"        
        seedNo = int(sys.argv[1])
        if len(sys.argv) > 2:
            directory = sys.argv[2]
        else:
            directory = "pattern_" + datetime.today().strftime("%Y%m%d_%H%M%S")
        exper = PatternRewardSTDPExperiment('PatternRewardSTDP', 
                                            experParams = {"numpyRandomSeed" : numpySeedArray[seedNo], 
                                            "nTrainEpochs":1000, "nTestEpochs":60}, 
                                            modelParams = {"readout":{"Wscale":0.17}}, 
                                            directory = directory)
        exper.run(runName+ "_" + sys.argv[1])
    else:
        exper = PatternRewardSTDPExperiment('PatternRewardSTDP', 
                                            experParams = {"numpyRandomSeed" : 4468, 
                                                           "nTrainEpochs":100, "nTestEpochs":30, "runMode" : "short",
                                                            "input" : "TemplateInputModelKernelRwd"} )
        exper.run("shortrun")