# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_configure_learning(self):
    """Enabling learning on a ContrastiveHebbianMechanism that has no
    LearningMechanism should emit a UserWarning with an explanatory
    message; verify that the expected message was actually emitted.
    """
    # NOTE(review): 'o' is never used below; kept in case constructing a
    # TransferMechanism has registry side effects the test relies on.
    o = pnl.TransferMechanism()
    m = pnl.ContrastiveHebbianMechanism(
        input_size=2, hidden_size=0, target_size=2,
        mode=pnl.SIMPLE_HEBBIAN,
        separated=False,
        matrix=[[0, -.5], [-.5, 0]]
    )
    with pytest.warns(UserWarning) as record:
        m.learning_enabled = True
    # Scan every captured warning for both expected message fragments.
    correct_message_found = any(
        "Learning cannot be enabled" in str(warning.message)
        and "because it has no LearningMechanism" in str(warning.message)
        for warning in record
    )
    # Fix: the original computed this flag but never asserted it, so the
    # test could silently pass even when the warning text was wrong.
    assert correct_message_found
# NOTE(review): excerpt fragment -- original indentation has been stripped,
# and the ControlMechanism(...) call on the last line is cut off
# mid-expression; the controller spec, run, and assertions of this test
# are outside this excerpt.
def test_multilevel_control(self, mode, benchmark):
# Outer-composition processing mechanisms.
oA = pnl.TransferMechanism(
name='OuterA',
)
oB = pnl.TransferMechanism(
name='OuterB',
)
# Inner-composition processing mechanisms.
iA = pnl.TransferMechanism(
name='InnerA',
)
iB = pnl.TransferMechanism(
name='InnerB',
)
# Inner composition: iA -> iB.
iComp = pnl.Composition(name='InnerComp')
iComp.add_node(iA)
iComp.add_node(iB)
iComp.add_projection(pnl.MappingProjection(), iA, iB)
# Outer composition nests InnerComp as a node: oA -> InnerComp -> oB.
oComp = pnl.Composition(name='OuterComp')
oComp.add_node(oA)
oComp.add_node(oB)
oComp.add_node(iComp)
oComp.add_projection(pnl.MappingProjection(), oA, iComp)
oComp.add_projection(pnl.MappingProjection(), iB, oB)
# Truncated below: the ControlMechanism's arguments are missing from this excerpt.
oController = pnl.ControlMechanism(
# NOTE(review): excerpt fragment -- indentation stripped; only the nested
# composition construction is visible. The OCM/gridsearch configuration and
# the benchmark/assertion portion of this test lie outside this excerpt.
def test_multilevel_ocm_gridsearch_conflicting_directions(self, mode, benchmark):
oa = pnl.TransferMechanism(name='oa')
ob = pnl.TransferMechanism(name='ob')
# Outer composition runs its controller BEFORE each trial.
ocomp = pnl.Composition(name='ocomp', controller_mode=pnl.BEFORE)
ia = pnl.TransferMechanism(name='ia')
# ib scores absolute distance from 75 -- presumably the objective the two
# controllers optimize in conflicting directions; confirm in full test.
ib = pnl.ProcessingMechanism(name='ib',
function=lambda x: abs(x - 75))
icomp = pnl.Composition(name='icomp', controller_mode=pnl.BEFORE)
ocomp.add_node(oa, required_roles=pnl.NodeRole.INPUT)
ocomp.add_node(ob)
ocomp.add_node(icomp)
icomp.add_node(ia, required_roles=pnl.NodeRole.INPUT)
icomp.add_node(ib)
ocomp._analyze_graph()
icomp._analyze_graph()
# Wiring: oa -> ia (into icomp), ia -> ib (inside icomp), ib -> ob (back out).
ocomp.add_projection(pnl.MappingProjection(), sender=oa, receiver=ia)
icomp.add_projection(pnl.MappingProjection(), sender=ia, receiver=ib)
ocomp.add_projection(pnl.MappingProjection(), sender=ib, receiver=ob)
# NOTE(review): excerpt fragment -- indentation stripped. Free names used
# below (wa, DRIFT, STARTING_POINT, THRESHOLD, NOISE, T0) are presumably
# defined earlier in the full script -- confirm there; the final
# set_log_conditions call is cut off mid-argument-list.
congruenceWeighting = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
size = 2,
function=pnl.Linear(slope=wa, intercept= 0),
name = 'Congruence * Automatic Component')
# Multiplicatively combines its afferent inputs (InputState(combine=PRODUCT)).
controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]],
size = 2,
function=pnl.Linear(slope=1, intercept= 0),
input_states=pnl.InputState(combine=pnl.PRODUCT),
output_states = [pnl.RESULT],
name = 'Stimulus Info * Activity')
controlledElement.set_log_conditions([pnl.RESULT])
# Scalar combiner feeding the decision stage.
ddmCombination = pnl.TransferMechanism(size = 1,
function = pnl.Linear(slope=1, intercept=0),
output_states = [pnl.RESULT],
name = "DDM Integrator")
ddmCombination.set_log_conditions([pnl.RESULT])
# Analytical drift-diffusion decision mechanism; exposes decision variable,
# response time, and threshold-crossing probabilities as output states.
decisionMaker = pnl.DDM(function=pnl.DriftDiffusionAnalytical(drift_rate = DRIFT,
starting_point = STARTING_POINT,
threshold = THRESHOLD,
noise = NOISE,
t0 = T0),
output_states = [pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME,
pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD],
name='DDM')
# Truncated: this call's argument list continues beyond the excerpt.
decisionMaker.set_log_conditions([pnl.PROBABILITY_UPPER_THRESHOLD, pnl.PROBABILITY_LOWER_THRESHOLD,
# NOTE(review): excerpt fragment -- indentation stripped. nh, num_features,
# output_dims, D_i, D_c, learning_rate, all_tasks, input_dims, and
# generate_training_data are not defined in this excerpt (presumably
# enclosing-function parameters or earlier definitions); confirm against
# the full script.
D_h = nh
D_o = num_features * output_dims
# Weight matrices (defaults provided by Dillon)
# Uniform random initial weights in [-0.01, 0.01) for each projection.
wih = np.random.rand(D_i, D_h) * 0.02 - 0.01
wch = np.random.rand(D_c, D_h) * 0.02 - 0.01
wco = np.random.rand(D_c, D_o) * 0.02 - 0.01
who = np.random.rand(D_h, D_o) * 0.02 - 0.01
# Training params (defaults provided by Dillon)
# patience/min_delt/lr are assigned here but consumed outside this excerpt.
patience = 10
min_delt = 0.00001
lr = learning_rate
# Instantiate layers and projections
il = pnl.TransferMechanism(size=D_i, name='input')
cl = pnl.TransferMechanism(size=D_c, name='control')
hl = pnl.TransferMechanism(size=D_h, name='hidden',
function=pnl.Logistic(bias=-2))
ol = pnl.TransferMechanism(size=D_o, name='output',
function=pnl.Logistic(bias=-2))
pih = pnl.MappingProjection(matrix=wih)
pch = pnl.MappingProjection(matrix=wch)
pco = pnl.MappingProjection(matrix=wco)
pho = pnl.MappingProjection(matrix=who)
# Create training data for network
# We train across all possible inputs, one task at a time
input_examples, output_examples, control_examples = generate_training_data(all_tasks, num_features, input_dims, output_dims)
def figure_5c():
"""
This creates the plot for Figure 5C in the Montague paper. Figure 5C shows
'extinction of response to the sensory cue.' The setup is the same as
Figure 5A, except that reward delivery stops at trial 70
"""
# NOTE(review): excerpt fragment -- indentation stripped, and the function
# body continues beyond this excerpt (the learning components captured on
# the last line are presumably used further down in the full source).
# Create Processing Components
# 60-element sample vector -- presumably one entry per timestep; confirm
# against the full example.
sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
name=pnl.SAMPLE)
action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
function=pnl.Linear(slope=1.0,
intercept=1.0),
name='Action Selection')
# Weights start at zero; the TD learning pathway adjusts them over trials.
sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
receiver=action_selection,
matrix=np.zeros((60, 60)))
# Create Composition
composition_name = 'TD_Learning_Figure_5C'
comp = pnl.Composition(name=composition_name)
# Add Processing Components to the Composition
pathway = [sample_mechanism, sample_to_action_selection, action_selection]
# Add Learning Components to the Composition
learning_related_components = comp.add_td_learning_pathway(pathway, learning_rate=0.3).learning_components
import functools
import numpy as np
import psyneulink as pnl
import psyneulink.core.components.functions.transferfunctions
# Input layer: 3-element TransferMechanism feeding the action-selection layer.
input_layer = pnl.TransferMechanism(
    size=3,
    name='Input Layer'
)
# Action-selection layer: SoftMax over 3 units; its output port samples a
# single action probabilistically via OneHot(mode=PROB) applied to the
# softmax distribution.
action_selection = pnl.TransferMechanism(
    size=3,
    function=psyneulink.core.components.functions.transferfunctions.SoftMax(
        output=pnl.ALL,
        gain=1.0),
    output_ports={pnl.NAME: 'SELECTED ACTION',
                  pnl.VARIABLE: [(pnl.INPUT_PORT_VARIABLES, 0), (pnl.OWNER_VALUE, 0)],
                  pnl.FUNCTION: psyneulink.core.components.functions.selectionfunctions.OneHot(mode=pnl.PROB).function},
    # output_ports={pnl.NAME: "SOFT_MAX",
    #               pnl.VARIABLE: (pnl.OWNER_VALUE,0),
    #               pnl.FUNCTION: pnl.SoftMax(output=pnl.PROB,gain=1.0)},
    # Fix: the original was missing the comma after name=..., which made
    # the constructor call a SyntaxError.
    name='Action Selection',
    prefs={
        pnl.VERBOSE_PREF: pnl.PreferenceEntry(False, pnl.PreferenceLevel.INSTANCE),
        # pnl.REPORT_OUTPUT_PREF: pnl.PreferenceEntry(True, pnl.PreferenceLevel.INSTANCE)
    }
)
# Process-level preferences: suppress output reporting, enable verbose diagnostics.
process_prefs = pnl.BasePreferenceSet(
    reportOutput_pref=pnl.PreferenceEntry(False, pnl.PreferenceLevel.INSTANCE),
    verbose_pref=pnl.PreferenceEntry(True, pnl.PreferenceLevel.INSTANCE)
)
# NOTE(review): excerpt fragment -- indentation stripped; the Target_Rep
# constructor call is cut off (no closing parenthesis) and mechanism_prefs
# is not defined in this excerpt.
# Control Parameters
# Candidate control-signal intensities searched over: 0.8, 1.0, ..., 1.8.
signalSearchRange = np.arange(0.8, 2.0, 0.2)
# Stimulus Mechanisms
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=psyneulink.core.components.functions
.transferfunctions.Linear(slope=0.3324))
Flanker_Stim = pnl.TransferMechanism(name='Flanker Stimulus', function=psyneulink.core.components.functions.transferfunctions.Linear(slope=0.3545221843))
# Processing Mechanisms (Control)
# The Linear slope is a controlled parameter: base value 1.0 modulated by a
# ControlProjection whose allocation samples come from signalSearchRange.
Target_Rep = pnl.TransferMechanism(
name='Target Representation',
function=psyneulink.core.components.functions.transferfunctions.Linear(
slope=(
1.0,
pnl.ControlProjection(
function=psyneulink.core.components.functions.transferfunctions.Linear,
control_signal_params={pnl.ALLOCATION_SAMPLES: signalSearchRange}
)
)
),
prefs=mechanism_prefs
# NOTE(review): excerpt fragment -- model parameters plus mechanism/logging
# setup; thresh, x_0, costParam1, and reconfCostParam1 are assigned here but
# consumed outside this excerpt.
thresh = 0.21
x_0 = 0 # starting point
#wTarget = 0.065 # I think this has to do with learning and is constant over trials in Umemoto
costParam1 = 0.35
reconfCostParam1 = 5
#rewardTaskA = 50
#rewardTaskBToA = 0.7
# Control Parameters
# Control-signal intensities searched over: 1.8, 2.0, 2.2.
signalSearchRange = pnl.SampleSpec(start=1.8, stop=2.2, step=0.2)
# signalSearchRange = pnl.SampleSpec(start=0.0, stop=0.4, step=0.2)
# Stimulus Mechanisms
# Each mechanism logs its 'value' so trial-by-trial traces can be inspected.
Target_Stim = pnl.TransferMechanism(name='Target Stimulus', function=pnl.Linear)
Target_Stim.set_log_conditions('value')#, log_condition=pnl.PROCESSING) # Log Target_Rep
Distractor_Stim = pnl.TransferMechanism(name='Distractor Stimulus', function=pnl.Linear)
Distractor_Stim.set_log_conditions('value')#, log_condition=pnl.PROCESSING) # Log Target_Rep
# Processing Mechanisms (Control)
# 'mod_slope' logs the control-modulated slope parameter over trials.
Target_Rep = pnl.TransferMechanism(name='Target Representation')
Target_Rep.set_log_conditions('value')#, log_condition=pnl.PROCESSING) # Log Target_Rep
Target_Rep.set_log_conditions('mod_slope')#, log_condition=pnl.PROCESSING)
Target_Rep.set_log_conditions('InputState-0')#, log_condition=pnl.PROCESSING)
Distractor_Rep = pnl.TransferMechanism(name='Distractor Representation')
Distractor_Rep.set_log_conditions('value')#, log_condition=pnl.PROCESSING) # Log Flanker_Rep
Distractor_Rep.set_log_conditions('mod_slope')#, log_condition=pnl.PROCESSING)
# NOTE(review): excerpt fragment -- two-layer XOR training tutorial; the
# network construction and training continue beyond this excerpt.
trials=200
# Four binary input patterns; the constant third element acts as a bias unit.
X=[[1,1,1],[1,0,1],[0,1,1],[0,0,1]]
AND_labels_pnl=[[1],[0],[0],[0]]
OR_labels_pnl= [[1],[1],[1],[0]]
XOR_labels_pnl=[[0],[1],[1],[0]]
# Number of full passes through the 4-example set (200 trials / 4 examples).
rat = int(trials / 4)
#Specify which label set you would like to use.
labels=XOR_labels_pnl
#Creating a 2 layer net in PNL:
#First, we create the input layer. This layer is simply a Transfer Mechanism that brings the examples into the network
#We do not have to specify a function (it defaults to linear, slope = 1, intercept = 0),
#but we do need to specify the size, which will be the size of our input array.
input_layer=pnl.TransferMechanism(size=(3), name='INPUT LAYER')
#Next, we specify our output layer. This is where we do our sigmoid transformation, by simply applying the Logistic function.
#The size we specify for this layer is the number of output nodes we want. In this case, we want the network to return a scalar
#for each example (either a 1 or a zero), so our size is 1
output_layer=pnl.TransferMechanism(size=1, function=psyneulink.core.components.functions.transferfunctions.Logistic, name='OUTPUT LAYER')
#Now, we put them together into a process.
#Notice, that we did not need to specify a weighting matrix. One will automatically be generated by psyneulink when we create our
#process.
# JDC ADDED:
# Normally, for learning to occur in a process, we would just specify that learning=pnl.ENABLED.
# However, if we want to specify a specific learning function or error_function to be used, then we must
# specify it by construction a default LearningProjection and giving it the parameters we want. In this
# case it is the error_function, that we will set to CROSS_ENTROPY (using PsyNeulink's Distance Function):