#plt.show()
#sys.exit()
del robtrange, rateEvolveProbe  # free some memory
if __name__ == "__main__":
    #########################
    ### Create Nengo network
    #########################
    print('building model')
    mainModel = nengo.Network(label="Single layer network", seed=seedR0)
    with mainModel:
        # scale the input to the network by the torque factors
        nodeIn = nengo.Node(size_in=N//2,
                            output=lambda timeval, currval: inpfn(timeval)*varFactors[Nobs:])
        # input layer from which feedforward weights to ratorOut are computed
        ratorIn = nengo.Ensemble(Nexc, dimensions=N//2, radius=reprRadiusIn,
                                 neuron_type=nengo.neurons.LIF(), seed=seedR1, label='ratorIn')
        nengo.Connection(nodeIn, ratorIn, synapse=None)
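        # in Nengo, synapse=None means this connection applies no synaptic filter,
        # so nodeIn's output reaches ratorIn undelayed and unfiltered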
        # no filtering here, as there is no filtering/delay in the plant/arm
        # layer with learning incorporated
        #intercepts = np.append(np.random.uniform(-0.2,0.2,size=Nexc//2),np.random.uniform(-1.,1.,size=Nexc//2))
        ratorOut = nengo.Ensemble(Nexc, dimensions=Nobs, radius=reprRadius,
                                  neuron_type=nengo.neurons.LIF(), seed=seedR2, label='ratorOut')
        # don't use the same seeds across the connections,
        # else they all seem to be evaluated at the same values of the low-dimensional variables,
        # causing seed-dependent convergence issues, possibly due to similar frozen noise across connections
        if trialClamp:
            # clamp ratorOut at the end of each trial (Tperiod) for 100 ms;
            # the error is clamped below during the end of the trial for 100 ms
            clampValsZeros = np.zeros(Nexc)
            clampValsNegs = -100.*np.ones(Nexc)
            endTrialClamp = nengo.Node(lambda t: clampValsZeros if (t % Tperiod) < (Tperiod - Tclamp) else clampValsNegs)
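            # sketch (the wiring is not in this excerpt): such a clamp node is
            # typically connected straight onto the neurons, so the strongly
            # negative drive silences the ensemble during the last Tclamp of
            # each Tperiod, e.g.:
            #   nengo.Connection(endTrialClamp, ratorOut.neurons, synapse=1e-3)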
    # even with python3.4, TypeError: gdbm mappings have byte or string elements only
    ############################
    ### Learn ratorOut EtoE connection
    ############################
    with mainModel:
        if errorLearning:
            ###
            ### copycat layer, only for recurrent learning ###
            ###
            # another layer that produces the expected signal for the above layer to learn;
            # force the encoders, max rates and intercepts to be the same as ratorOut's,
            # so that the weights are directly comparable between netExpect (copycat) and net2.
            # if Wdesired is a function, then this has to be a LIF layer
            if recurrentLearning and copycatLayer:
                expectOut = nengo.Ensemble(Nexc, dimensions=N, radius=reprRadius,
                                           neuron_type=nengo.neurons.LIF(), seed=seedR4)
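                # note (an assumption about the intent above): the matching of encoders,
                # max rates and intercepts is not explicit here; in Nengo one would either
                # reuse ratorOut's seed (seedR2) with identical ensemble arguments, or pass
                # encoders=..., max_rates=..., intercepts=... copied from ratorOut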
                # a node does not do the leaky integration / low-pass filtering that an ensemble does,
                # so a node won't work unless I use the original W (not the one with tau and I),
                # and the input should not be multiplied by tau;
                # even with those changes, it still gave an overflow error (not sure why)
                #expectOut = nengo.Node(size_in=N, size_out=N, output=lambda timeval, x: x)
                if copycatPreLearned:
                    InEtoEexpect = nengo.Connection(ratorIn.neurons, expectOut.neurons,
                                                    transform=Wdyn2, synapse=tau)
                    EtoEexpect = nengo.Connection(expectOut.neurons, expectOut.neurons,
                                                  transform=Wdyn2, synapse=tau)  # synapse is tau_syn for filtering
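                    # connecting .neurons to .neurons with a transform applies Wdyn2
                    # directly as a fixed neuron-level weight matrix, bypassing the
                    # NEF decoder/encoder factorization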
                else:
                    ## the system didn't learn in this case, possibly because the connection
                    ## is ensemble-to-ensemble here but neurons-to-neurons for InEtoE & EtoE
                    InEtoEexpect = nengo.Connection(ratorIn, expectOut, synapse=tau)
                    # ACHTUNG! the feedforward transform, if not unity, must be set here...
                    EtoEexpect = nengo.Connection(expectOut, expectOut,
                                                  function=Wdesired, synapse=tau)  # synapse is tau_syn for filtering
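            # sketch (an assumption, continuing past this excerpt): with errorLearning on,
            # the learned recurrent connection on ratorOut would typically carry Nengo's
            # PES rule, driven by an error signal, roughly:
            #   EtoE = nengo.Connection(ratorOut.neurons, ratorOut.neurons,
            #                           transform=Wdyn2, synapse=tau,
            #                           learning_rule_type=nengo.PES(learning_rate=PES_learning_rate))
            #   nengo.Connection(error, EtoE.learning_rule, synapse=tau)
            # (PES_learning_rate and error are hypothetical names here, not from this file)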