import torch.nn as nn

from snorkel.analysis import Scorer
from snorkel.classification import MultitaskClassifier, Operation, Task


def create_dummy_task(task_name):
    # Create a dummy task: a linear encoder followed by a linear prediction head
    module_pool = nn.ModuleDict(
        {"linear1": nn.Linear(2, 10), "linear2": nn.Linear(10, 2)}
    )
    op_sequence = [
        Operation(name="encoder", module_name="linear1", inputs=["_input_"]),
        Operation(name="prediction_head", module_name="linear2", inputs=["encoder"]),
    ]
    task = Task(name=task_name, module_pool=module_pool, op_sequence=op_sequence)
    return task
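# A quick usage sketch (the task names "task1" and "task2" are made up here):
# dummy tasks built this way can be passed to a MultitaskClassifier like any other task.
dummy_model = MultitaskClassifier(
    tasks=[create_dummy_task("task1"), create_dummy_task("task2")]
)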
module1_name = f"linear1{module_suffixes[0]}"
module2_name = f"linear2{module_suffixes[1]}"
module_pool = nn.ModuleDict(
{
module1_name: nn.Sequential(nn.Linear(2, 20), nn.ReLU()),
module2_name: nn.Linear(20, 2),
}
)
op1 = Operation(module_name=module1_name, inputs=[("_input_", "coordinates")])
op2 = Operation(module_name=module2_name, inputs=[op1.name])
op_sequence = [op1, op2]
task = Task(
name=task_name,
module_pool=module_pool,
op_sequence=op_sequence,
scorer=Scorer(metrics=["f1", "accuracy"]),
)
return task
module1_name = f"linear1{module_suffixes[0]}"
module2_name = f"linear2{module_suffixes[1]}"
module_pool = nn.ModuleDict(
{
module1_name: nn.Sequential(nn.Linear(2, 20), nn.ReLU()),
module2_name: nn.Linear(20, 2),
}
)
op1 = Operation(module_name=module1_name, inputs=[("_input_", "coordinates")])
op2 = Operation(module_name=module2_name, inputs=[op1.name])
op_sequence = [op1, op2]
task = Task(
name=task_name,
module_pool=module_pool,
op_sequence=op_sequence,
scorer=Scorer(metrics=["accuracy"]),
)
return task
module1_name = f"linear1{module_suffixes[0]}"
module2_name = f"linear2{module_suffixes[1]}"
module_pool = nn.ModuleDict(
{
module1_name: nn.Sequential(nn.Linear(2, 10), nn.ReLU()),
module2_name: nn.Linear(10, 2),
}
)
op1 = Operation(module_name=module1_name, inputs=[("_input_", "data")])
op2 = Operation(module_name=module2_name, inputs=[op1.name])
op_sequence = [op1, op2]
task = Task(name=task_name, module_pool=module_pool, op_sequence=op_sequence)
return task
def test_no_input_spec(self):
    # Confirm the model doesn't break when a module does not specify its inputs
    # (create_dataloader is a test helper defined elsewhere in the original test module)
    dataset = create_dataloader("task", shuffle=False).dataset
    task = Task(
        name="task",
        module_pool=nn.ModuleDict({"identity": nn.Identity()}),
        op_sequence=[Operation("identity", [])],
    )
    model = MultitaskClassifier(tasks=[task], dataparallel=False)
    outputs = model.forward(dataset.X_dict, ["task"])
    self.assertIn("_input_", outputs)
# Tail of a model-construction helper; the enclosing signature is assumed, and
# helpers such as init_fc, FlatConcat, WordEmb, get_op_sequence, WEMB_SIZE, and
# feature_extractor are defined elsewhere in the original source.
def create_model(feature_extractor, in_features):
    fc = nn.Linear(in_features * 3 + 2 * WEMB_SIZE, 3)
    init_fc(fc)
    # define layers
    module_pool = nn.ModuleDict(
        {
            "feat_extractor": feature_extractor,
            "prediction_head": fc,
            "feat_concat": FlatConcat(),
            "word_emb": WordEmb(),
        }
    )
    # define task flow through modules
    op_sequence = get_op_sequence()
    pred_cls_task = Task(
        name="visual_relation_task",
        module_pool=module_pool,
        op_sequence=op_sequence,
        scorer=Scorer(metrics=["f1_micro"]),
    )
    return MultitaskClassifier([pred_cls_task])
# The output of the final operation is then passed either into a loss function (e.g., cross-entropy) to compute the training loss, or into an output function (e.g., softmax) that converts the logits into a prediction.
#
# Each `Task` also specifies which metrics it supports, which are bundled together in a `Scorer` object. For this tutorial, we'll just look at accuracy.
# %% [markdown]
# Putting this all together, we define the circle task:
# %%
from functools import partial
import torch.nn.functional as F
from snorkel.analysis import Scorer
from snorkel.classification import Task
circle_task = Task(
    name="circle_task",
    module_pool=module_pool,
    op_sequence=op_sequence,
    loss_func=F.cross_entropy,
    output_func=partial(F.softmax, dim=1),
    scorer=Scorer(metrics=["accuracy"]),
)
# %% [markdown]
# Note that `Task` objects are not dependent on a particular dataset; multiple datasets can be passed through the same modules for pre-training or co-training.
# %% [markdown]
# ### Again, but faster
# %% [markdown]
# We'll now define the square task, but more succinctly—for example, using the fact that the default name for an `Operation` is its `module_name` (since most tasks only use their modules once per forward pass).
#
# We'll also define the square task to share the first module in its task flow (`base_mlp`) with the circle task to demonstrate how to share modules. (Note that this is purely for illustrative purposes; for this toy task, it is quite possible that this is not the optimal arrangement of modules).
#
# Finally, the most common task definitions we see in practice are classification tasks with cross-entropy loss and a softmax applied to the output of the last module, with accuracy as the primary metric of interest. Since these are all the default values, we can drop them here for brevity.
# %%
square_task = Task(
    name="square_task",
    module_pool=nn.ModuleDict({"base_mlp": base_mlp, "square_head": nn.Linear(4, 2)}),
    op_sequence=[
        Operation("base_mlp", [("_input_", "square_data")]),
        Operation("square_head", ["base_mlp"]),
    ],
)
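# %% [markdown]
# For comparison, here is a sketch of the same task with those defaults written out explicitly (the same loss, output function, and scorer used for the circle task above); the name `square_task_explicit` is just for illustration:
# %%
square_task_explicit = Task(
    name="square_task",
    module_pool=nn.ModuleDict({"base_mlp": base_mlp, "square_head": nn.Linear(4, 2)}),
    op_sequence=[
        Operation("base_mlp", [("_input_", "square_data")]),
        Operation("square_head", ["base_mlp"]),
    ],
    loss_func=F.cross_entropy,
    output_func=partial(F.softmax, dim=1),
    scorer=Scorer(metrics=["accuracy"]),
)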
# %% [markdown]
# ## Model
# %% [markdown]
# With our tasks defined, constructing a model is simple: we pass in the list of tasks, and the model constructs itself using information from the task flows.
#
# Note that the model uses the names of modules (not the modules themselves) to determine whether two modules specified by separate tasks are the same module (and should share weights) or different modules (with separate weights).
# So because both the `square_task` and `circle_task` include "base_mlp" in their module pools, this module will be shared between the two tasks.
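# %% [markdown]
# A minimal sketch of that step, using the `MultitaskClassifier` constructor shown in the snippets above (it accepts the list of tasks directly):
# %%
from snorkel.classification import MultitaskClassifier

model = MultitaskClassifier([circle_task, square_task])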