# MAML on mini-ImageNet with learn2learn.
# NOTE: the imports and the parameters above `cuda` are reconstructed from
# how they are used in the body; their defaults are illustrative assumptions.
import random

import numpy as np
import torch as th
from torch import nn, optim

import learn2learn as l2l


def main(
        ways=5,
        shots=5,
        meta_lr=0.003,
        fast_lr=0.5,
        meta_batch_size=32,
        adaptation_steps=1,
        num_iterations=60000,
        cuda=False,
        seed=42,
):
    random.seed(seed)
    np.random.seed(seed)
    th.manual_seed(seed)
    device = th.device('cpu')
    if cuda and th.cuda.device_count():
        th.cuda.manual_seed(seed)
        device = th.device('cuda')
    # Create Datasets
    train_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='train')
    valid_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='validation')
    test_dataset = l2l.vision.datasets.MiniImagenet(root='./data', mode='test')
    train_dataset = l2l.data.MetaDataset(train_dataset)
    valid_dataset = l2l.data.MetaDataset(valid_dataset)
    test_dataset = l2l.data.MetaDataset(test_dataset)
    train_transforms = [
        l2l.data.transforms.NWays(train_dataset, ways),
        l2l.data.transforms.KShots(train_dataset, 2*shots),
        l2l.data.transforms.LoadData(train_dataset),
        l2l.data.transforms.RemapLabels(train_dataset),
        l2l.data.transforms.ConsecutiveLabels(train_dataset),
    ]
    train_tasks = l2l.data.TaskDataset(train_dataset,
                                       task_transforms=train_transforms,
                                       num_tasks=20000)
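    # Each task carries 2*shots samples per class: in this example, half serve
    # as the support (adaptation) set and half as the query (evaluation) set.
    # RemapLabels maps the `ways` sampled classes to 0..ways-1 so the model's
    # output layer can stay fixed; ConsecutiveLabels groups samples by class.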
    valid_transforms = [
        l2l.data.transforms.NWays(valid_dataset, ways),
        l2l.data.transforms.KShots(valid_dataset, 2*shots),
        l2l.data.transforms.LoadData(valid_dataset),
        l2l.data.transforms.RemapLabels(valid_dataset),
        l2l.data.transforms.ConsecutiveLabels(valid_dataset),
    ]
    valid_tasks = l2l.data.TaskDataset(valid_dataset,
                                       task_transforms=valid_transforms,
                                       num_tasks=600)
    test_transforms = [
        l2l.data.transforms.NWays(test_dataset, ways),
        l2l.data.transforms.KShots(test_dataset, 2*shots),
        l2l.data.transforms.LoadData(test_dataset),
        l2l.data.transforms.RemapLabels(test_dataset),
        l2l.data.transforms.ConsecutiveLabels(test_dataset),
    ]
    test_tasks = l2l.data.TaskDataset(test_dataset,
                                      task_transforms=test_transforms,
                                      num_tasks=600)
    # Create model
    model = l2l.vision.models.MiniImagenetCNN(ways)
    model.to(device)
    maml = l2l.algorithms.MAML(model, lr=fast_lr, first_order=False)
    opt = optim.Adam(maml.parameters(), meta_lr)
    loss = nn.CrossEntropyLoss(reduction='mean')
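    # Meta-training: for each task in the meta-batch, clone the MAML wrapper
    # (so adaptation steps do not modify the shared parameters directly),
    # adapt on the support half of the task, and backpropagate the loss on
    # the query half; clones keep the graph, so gradients flow back to maml.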
    for iteration in range(num_iterations):
        opt.zero_grad()
        meta_train_error = 0.0
        meta_train_accuracy = 0.0
        meta_valid_error = 0.0
        meta_valid_accuracy = 0.0
        meta_test_error = 0.0
        meta_test_accuracy = 0.0
        for task in range(meta_batch_size):
            # Compute meta-training loss
            learner = maml.clone()
            batch = train_tasks.sample()
            # NOTE: fast_adapt is a helper defined elsewhere in the original
            # script; the trailing arguments below are assumed.
            evaluation_error, evaluation_accuracy = fast_adapt(batch,
                                                               learner,
                                                               loss,
                                                               adaptation_steps,
                                                               shots,
                                                               ways,
                                                               device)
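            # A minimal sketch of the rest of the meta-training step, assuming
            # the structure of the standard learn2learn MAML example; the
            # meta-validation and meta-test statistics above are accumulated
            # analogously from valid_tasks and test_tasks.
            evaluation_error.backward()
            meta_train_error += evaluation_error.item()
            meta_train_accuracy += evaluation_accuracy.item()

        # Average the accumulated gradients over the meta-batch and optimize.
        for p in maml.parameters():
            p.grad.data.mul_(1.0 / meta_batch_size)
        opt.step()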
download_location="/tmp/mnist"):
    transformations = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
        # Reshape each image into a batch of one (N, C, H, W) so single
        # samples can be fed to the network directly.
        lambda x: x.view(1, 1, 28, 28),
    ])
    mnist_train = l2l.data.MetaDataset(MNIST(download_location, train=True, download=True, transform=transformations))
    # mnist_test = MNIST(download_location, train=False, download=True, transform=transformations)
    train_gen = l2l.data.TaskGenerator(mnist_train, ways=ways, tasks=10000)
    # test_gen = l2l.data.TaskGenerator(mnist_test, ways=ways)
    model = Net(ways)
    model.to(device)
    meta_model = l2l.algorithms.MAML(model, lr=maml_lr)
    opt = optim.Adam(meta_model.parameters(), lr=lr)
    loss_func = nn.NLLLoss(reduction="sum")
    for iteration in range(iterations):
        iteration_error = 0.0
        iteration_acc = 0.0
        for _ in range(tps):
            learner = meta_model.clone()
            train_task = train_gen.sample()
            # Sample the validation data from the same task (same classes).
            valid_task = train_gen.sample(task=train_task.sampled_task)
            # Fast Adaptation
            for step in range(fas):
                train_error, _ = compute_loss(train_task, device, learner, loss_func, batch=shots * ways)
                learner.adapt(train_error)
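            # A minimal sketch of the rest of the loop, assuming the structure
            # of the standard learn2learn MNIST example: evaluate the adapted
            # learner on the validation task, then take the meta-step.
            valid_error, valid_acc = compute_loss(valid_task, device, learner,
                                                  loss_func, batch=shots * ways)
            iteration_error += valid_error
            iteration_acc += valid_acc

        iteration_error /= tps
        iteration_acc /= tps

        # Take the meta-learning step on the averaged validation error.
        opt.zero_grad()
        iteration_error.backward()
        opt.step()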