import numpy as np
import psyneulink as pnl

Decision = pnl.DDM(
    function=pnl.DriftDiffusionAnalytical(
        drift_rate=(
            1.0,
            pnl.ControlProjection(
                function=pnl.Linear,
                control_signal_params={
                    pnl.ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                },
            ),
        ),
        threshold=(
            1.0,
            pnl.ControlProjection(
                function=pnl.Linear,
                control_signal_params={
                    pnl.ALLOCATION_SAMPLES: np.arange(0.1, 1.01, 0.3)
                },
            ),
        ),
        noise=0.5,
        starting_point=0,
        t0=0.45,
    ),
    output_ports=[
        pnl.DECISION_VARIABLE,
        pnl.RESPONSE_TIME,
        pnl.PROBABILITY_UPPER_THRESHOLD,
    ],
    name='Decision',
)
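# Note (added): np.arange(0.1, 1.01, 0.3) yields [0.1, 0.4, 0.7, 1.0], so a controller
# sampling these ALLOCATION_SAMPLES searches a 4 x 4 grid of allocation values over
# drift_rate and threshold.
print(np.arange(0.1, 1.01, 0.3))  # -> [0.1 0.4 0.7 1. ]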
runs2 = 4
response_all = []
response_all2 = []
# Create mechanisms ---------------------------------------------------------------------------------------------------
# Linear input units, colors: ('red', 'green'), words: ('RED','GREEN')
colors_input_layer = pnl.TransferMechanism(size=3,
                                           function=pnl.Linear,
                                           name='COLORS_INPUT')
words_input_layer = pnl.TransferMechanism(size=3,
                                          function=pnl.Linear,
                                          name='WORDS_INPUT')
task_input_layer = pnl.TransferMechanism(size=2,
                                         function=pnl.Linear,
                                         name='TASK_INPUT')
# Task layer, tasks: ('name the color', 'read the word')
# NOTE: `inhibition` and `rate` are assumed to be parameters defined earlier in the source script.
task_layer = pnl.RecurrentTransferMechanism(size=2,
                                            function=pnl.Logistic(),
                                            hetero=inhibition,
                                            integrator_mode=True,
                                            smoothing_factor=rate,
                                            name='TASK')
# Hidden layer units, colors: ('red','green') words: ('RED','GREEN')
colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3,
                                                     function=pnl.Logistic(bias=4.0),
                                                     integrator_mode=True,
                                                     hetero=inhibition,
                                                     # noise=pnl.NormalDist(mean=0.0, standard_dev=.0).function,
                                                     smoothing_factor=rate,  # assumed: the original snippet is truncated here
                                                     name='COLORS_HIDDEN')   # assumed name to close the call
thresh = 0.21
x_0 = 0 # starting point
#wTarget = 0.065 # I think this has to do with learning and is constant over trials in Umemoto
costParam1 = 0.35
reconfCostParam1 = 5
#rewardTaskA = 50
#rewardTaskBToA = 0.7
# Control Parameters
signalSearchRange = pnl.SampleSpec(start=0.0, stop=0.5, step=0.2)
# signalSearchRange = pnl.SampleSpec(start=0.0, stop=0.4, step=0.2)
# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear)
Target_Stim.set_log_conditions('value')  # , log_condition=pnl.PROCESSING) # Log Target_Stim
Distractor_Stim = pnl.TransferMechanism(name='Distractor Stimulus', function=pnl.Linear)
Distractor_Stim.set_log_conditions('value')  # , log_condition=pnl.PROCESSING) # Log Distractor_Stim
# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(name='Target Representation')
Target_Rep.set_log_conditions('value')  # , log_condition=pnl.PROCESSING) # Log Target_Rep
Target_Rep.set_log_conditions('mod_slope')  # , log_condition=pnl.PROCESSING)
Target_Rep.set_log_conditions('InputState-0')  # , log_condition=pnl.PROCESSING)
Distractor_Rep = pnl.TransferMechanism(name='Distractor Representation')
Distractor_Rep.set_log_conditions('value')  # , log_condition=pnl.PROCESSING) # Log Distractor_Rep
Distractor_Rep.set_log_conditions('mod_slope')  # , log_condition=pnl.PROCESSING)
LAMBDA = 0.95
alpha = 11.24
beta = 9.46
# Conflict equation:
# C(t+1) = LAMBDA*C(t) + (1-LAMBDA) * (alpha*ENERGY(t) + beta)
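# A minimal sketch (added, not part of the original script) of the conflict update
# above, assuming ENERGY(t) is supplied by the caller (e.g., an ENERGY output port value):
def update_conflict(c_prev, energy, lam=LAMBDA, a=alpha, b=beta):
    """One step of C(t+1) = lam*C(t) + (1-lam)*(a*energy + b)."""
    return lam * c_prev + (1 - lam) * (a * energy + b)
# e.g. starting from C(0)=0 with energy 0.5:
# update_conflict(0.0, 0.5) == 0.95*0.0 + 0.05*(11.24*0.5 + 9.46) ~ 0.754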
# SET UP MECHANISMS
# Linear input units, colors: ('red', 'green'), words: ('RED','GREEN')
colors_input_layer = pnl.TransferMechanism(size=3,
                                           function=pnl.Linear,
                                           name='COLORS_INPUT')
words_input_layer = pnl.TransferMechanism(size=3,
                                          function=pnl.Linear,
                                          name='WORDS_INPUT')
task_input_layer = pnl.TransferMechanism(size=2,
                                         function=pnl.Linear,
                                         name='TASK_INPUT')
# Task layer, tasks: ('name the color', 'read the word')
# changed from Linear to Logistic with control on; Linear for sanity checks
task_layer = pnl.RecurrentTransferMechanism(default_variable=np.array([[0, 0]]),
                                            function=pnl.Logistic(),
                                            size=2,
                                            auto=-2,
                                            smoothing_factor=0.1,
                                            # function=pnl.Logistic(gain=(1.0, pnl.ControlProjection())),
                                            # receiver=response_layer.output_states[1] ('DECISION_ENERGY')
                                            # modulation=pnl.ModulationParam.OVERRIDE,  # what to implement here
                                            name='Activity')
task_layer.set_log_conditions([pnl.RESULT, "mod_gain"])
stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                     size=2,
                                     function=pnl.Linear(slope=1, intercept=0),
                                     output_ports=[pnl.RESULT],
                                     name="Stimulus Info")
stimulusInfo.set_log_conditions([pnl.RESULT])
controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
                                          size=2,
                                          function=pnl.Linear(slope=1, intercept=0),
                                          input_ports=pnl.InputPort(combine=pnl.PRODUCT),
                                          output_ports=[pnl.RESULT],
                                          name='Stimulus Info * Activity')
controlledElement.set_log_conditions([pnl.RESULT])
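# Note (added): with combine=pnl.PRODUCT, this InputPort multiplies the values it
# receives from its afferent projections elementwise (here, gating 'Stimulus Info'
# by 'Activity'), rather than summing them as it would by default.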
ddmCombination = pnl.TransferMechanism(size=1,
                                       function=pnl.Linear(slope=1, intercept=0),
                                       output_ports=[pnl.RESULT],
                                       name="DDM Integrator")
ddmCombination.set_log_conditions([pnl.RESULT])
# DRIFT, STARTING_POINT, THRESHOLD, NOISE and T0 are assumed constants defined earlier
# in the source script; the snippet is truncated, so t0 and the closing calls are an assumed completion.
decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical(drift_rate=DRIFT,
                                                              starting_point=STARTING_POINT,
                                                              threshold=THRESHOLD,
                                                              noise=NOISE,
                                                              t0=T0),
                        name='DDM')
def computeAccuracy(stimulusInput, owner_value):
    # The def line above is an assumed reconstruction; the original snippet begins mid-function.
    # Units 2 and 3 carry the two responses; the larger stimulus entry is the correct one.
    if stimulusInput[2] > stimulusInput[3]:
        CorrectResp = 2
    if stimulusInput[3] > stimulusInput[2]:
        CorrectResp = 3
    # (ties between stimulusInput[2] and [3] are not handled, as in the original)
    Resp = owner_value.index(max(owner_value))  # owner_value is assumed to be a list
    if CorrectResp == Resp:
        Accuracy = 1
    if CorrectResp != Resp:
        Accuracy = 0
    return Accuracy
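# Example use (added), assuming the reconstructed signature above:
# computeAccuracy([0, 0, 0.8, 0.2], [0.1, 0.0, 0.9, 0.3]) -> 1  (max of both at index 2)
# computeAccuracy([0, 0, 0.8, 0.2], [0.1, 0.0, 0.3, 0.9]) -> 0  (response at index 3 is wrong)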
# LEAK, COMPETITION, NOISE, THRESHOLD and TIME_STEP_SIZE are assumed constants defined earlier in the source script.
decisionMaker = pnl.LCAMechanism(default_variable=[[0.0, 0.0, 0.0, 0.0]],
                                 size=4,
                                 function=pnl.Linear(slope=1, intercept=0),
                                 reinitialize_when=pnl.AtTrialStart(),
                                 leak=LEAK,
                                 competition=COMPETITION,
                                 self_excitation=0,
                                 noise=NOISE,
                                 threshold=THRESHOLD,
                                 output_ports=[pnl.RESULT,
                                               {pnl.NAME: "EXECUTION COUNT",
                                                pnl.VARIABLE: pnl.OWNER_EXECUTION_COUNT},
                                               {pnl.NAME: "RESPONSE",
                                                # pnl.VARIABLE: pnl.OWNER_VALUE,
                                                pnl.FUNCTION: pnl.OneHot(mode=pnl.MAX_INDICATOR)}],
                                 time_step_size=TIME_STEP_SIZE,
                                 clip=[0.0, THRESHOLD])
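# Quick illustration (added) of the "RESPONSE" port's OneHot(mode=pnl.MAX_INDICATOR):
# it returns a vector with 1 at the position of the maximum value and 0 elsewhere, e.g.
# pnl.OneHot(mode=pnl.MAX_INDICATOR)([0.2, 0.9, 0.1, 0.4]) -> [0. 1. 0. 0.]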
# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(name='Target Representation',
                                   function=pnl.Linear(
                                       slope=(
                                           1.0,
                                           pnl.ControlProjection(
                                               function=pnl.Linear,
                                               control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange}))))
Target_Rep.set_log_conditions('value')  # Log Target_Rep
Target_Rep.loggable_items  # inspect which items can be logged (only useful interactively)
Distractor_Rep = pnl.TransferMechanism(name='Distractor Representation',
                                       function=pnl.Linear(
                                           slope=(
                                               1.0,
                                               pnl.ControlProjection(
                                                   function=pnl.Linear,
                                                   control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange}))))
Distractor_Rep.set_log_conditions('value')  # Log Distractor_Rep
Distractor_Rep.loggable_items
# Processing Mechanism (Automatic)
Automatic_Component = pnl.TransferMechanism(name='Automatic Component', function=pnl.Linear)
Automatic_Component.loggable_items
Automatic_Component.set_log_conditions('value')
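# Note (added): after a run, logged entries can be inspected with, e.g.,
# Target_Rep.log.print_entries(), or exported as an array with Target_Rep.log.nparray().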
thresh = 0.21
x_0 = 0 # starting point
#wTarget = 0.065 # I think this has to do with learning and is constant over trials in Umemoto
costParam1 = 0.35
reconfCostParam1 = 5
#rewardTaskA = 50
#rewardTaskBToA = 0.7
# Control Parameters
signalSearchRange = np.arange(0.0, 4.1, 0.2)  # as in the MATLAB Umemoto model, 0.0:0.2:4.0; may need adjustment
print(signalSearchRange)
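# Note (added): np.arange(0.0, 4.1, 0.2) produces 21 samples, 0.0 through 4.0 inclusive,
# matching the MATLAB range in the comment above.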
# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear)
Target_Stim.set_log_conditions('value')  # Log Target_Stim
Distractor_Stim = pnl.TransferMechanism(name='Distractor Stimulus', function=pnl.Linear)
Distractor_Stim.set_log_conditions('value')  # Log Distractor_Stim
# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(name='Target Representation',
                                   function=pnl.Linear(
                                       slope=(
                                           1.0,
                                           pnl.ControlProjection(
                                               function=pnl.Linear,
                                               control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange}))))
Target_Rep.set_log_conditions('value')  # Log Target_Rep
t0 = 0.2
c = 0.19
thresh = 0.21
x_0 = 0 # starting point
#wTarget = 0.065 # I think this has to do with learning and is constant over trials in Umemoto
costParam1 = 0.35
reconfCostParam1 = 5
#rewardTaskA = 50
#rewardTaskBToA = 0.7
# Control Parameters
signalSearchRange = pnl.SampleSpec(start=0.0, stop=2.0, step=0.2)
# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear)
Target_Stim.set_log_conditions('value')  # Log Target_Stim
Distractor_Stim = pnl.TransferMechanism(name='Distractor Stimulus', function=pnl.Linear)
Distractor_Stim.set_log_conditions('value')  # Log Distractor_Stim
# Processing Mechanisms (Control)
Target_Rep = pnl.TransferMechanism(name='Target Representation')
Target_Rep.set_log_conditions('value') # Log Target_Rep
Target_Rep.set_log_conditions('mod_slope')
Distractor_Rep = pnl.TransferMechanism(name='Distractor Representation')
Distractor_Rep.set_log_conditions('value')  # Log Distractor_Rep
Distractor_Rep.set_log_conditions('mod_slope')
COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size=3,
                                                    auto=0.0,
                                                    hetero=0.0,  # -2.0
                                                    function=pnl.Linear(),
                                                    integrator_function=pnl.InteractiveActivation(rate=0.0015, decay=0.0, offset=-6),
                                                    integrator_mode=True,
                                                    name='COLOR OUTPUT LAYER')
COLOR_OUTPUT_LAYER.set_log_conditions('value')
TASK_DEMAND_LAYER = pnl.RecurrentTransferMechanism(size=2,
                                                   auto=0.0,
                                                   hetero=0.0,  # -2.0
                                                   function=pnl.Linear(),
                                                   integrator_function=pnl.InteractiveActivation(rate=0.0015, decay=0.0, offset=-4),
                                                   integrator_mode=True,
                                                   name='TASK DEMAND LAYER')
TASK_DEMAND_LAYER.set_log_conditions('value')
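# Note (added): with integrator_mode=True, each mechanism's input is first passed
# through its integrator_function (here pnl.InteractiveActivation) on every time step,
# and the Linear function is then applied to the integrated value.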
### WEIGHTS
# WORD INPUT TO WORD OUTPUT
word_weights = pnl.MappingProjection(matrix=np.matrix([[3.5, 0.0, 0.0],
                                                       [0.0, 3.5, 0.0],
                                                       [0.0, 0.0, 3.5]]),
                                     name='WORD_WEIGHTS')
# COLOR INPUT TO COLOR OUTPUT
color_weights = pnl.MappingProjection(matrix=np.matrix([[1.9, 0.0, 0.0],
                                                        [0.0, 1.9, 0.0],    # assumed completion: the snippet is
                                                        [0.0, 0.0, 1.9]]),  # truncated after the first row
                                      name='COLOR_WEIGHTS')                 # assumed name, mirroring WORD_WEIGHTS
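# Note (added): a MappingProjection's matrix maps sender values to receiver values by
# right-multiplication, so np.array([1.0, 0.0, 0.0]) @ word_weights.matrix passes the
# first word unit through with weight 3.5 and zeroes the others.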