# NOTE(review): the line below was a scraped advertisement banner, not code;
# commented out so the file can parse.
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Page-locked host memory is disabled for this run.
pin_memory = False

# ImageNet-style per-channel statistics, conventionally reused for CIFAR.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Training pipeline: flip + padded-crop augmentation, then normalization.
augment = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, 4),
    transforms.ToTensor(),
    normalize,
])
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root=scattering_datasets.get_dataset_dir('CIFAR'),
                     train=True, transform=augment, download=True),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)

# Evaluation pipeline: deterministic (no augmentation, no shuffling).
eval_tf = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root=scattering_datasets.get_dataset_dir('CIFAR'),
                     train=False, transform=eval_tf),
    batch_size=128, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
# Optimizer: SGD with momentum, rebuilt every 20 epochs with the learning
# rate decayed by a factor of 5 (lr *= 0.2). Rebuilding the optimizer also
# resets its momentum buffers at each decay step.
lr = 0.1
for epoch in range(0, 90):
    if epoch % 20 == 0:
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9,
                                    weight_decay=0.0005)
        lr *= 0.2
    train(model, device, train_loader, optimizer, epoch + 1, scattering)
    test(model, device, test_loader, scattering)
# No pinned-memory transfers in this configuration.
pin_memory = False

# Channel-wise normalization using the standard ImageNet statistics.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Training loader: CIFAR-10 with horizontal-flip and padded-crop augmentation.
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(
        root=scattering_datasets.get_dataset_dir('CIFAR'),
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ])),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)

# Test loader: plain tensor conversion + normalization, fixed order.
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(
        root=scattering_datasets.get_dataset_dir('CIFAR'),
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
    batch_size=128, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)
# Optimizer / schedule: every 20th epoch a fresh SGD optimizer is created
# with the current learning rate, which is then multiplied by 0.2 for the
# next stage (0.1 -> 0.02 -> 0.004 -> ...).
lr = 0.1
for epoch in range(0, 90):
    stage_start = (epoch % 20 == 0)
    if stage_start:
        optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9,
                                    weight_decay=0.0005)
        lr *= 0.2
    train(model, device, train_loader, optimizer, epoch + 1, scattering)
    test(model, device, test_loader, scattering)
# MNIST dataloaders. Worker processes and pinned (page-locked) host memory
# are enabled only when a GPU is in use.
if use_cuda:
    num_workers = 4
    pin_memory = True
else:
    num_workers = None
    pin_memory = False

# Normalization uses the standard MNIST mean/std (0.1307, 0.3081).
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)

# NOTE(review): shuffle=True on the test set does not affect accuracy
# metrics, but is unusual — confirm it is intentional.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
# Optimizer: plain SGD with momentum and weight decay; the learning rate is
# held fixed at 0.01 for all 15 epochs.
optimizer = torch.optim.SGD(
    model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

for epoch in range(1, 16):
    train(model, device, train_loader, optimizer, epoch, scattering)
    test(model, device, test_loader, scattering)
# Build the hybrid scattering + ResNet classifier on the target device.
model = Scattering2dResNet(K, args.width).to(device)

# DataLoaders
if use_cuda:
    num_workers = 4
    pin_memory = True
else:
    # BUGFIX: was `num_workers = None`. DataLoader's num_workers must be an
    # int (0 = load in the main process); None raises a TypeError. The other
    # copy of this snippet in this file already uses 0.
    num_workers = 0
    pin_memory = False

# ImageNet-style channel statistics, conventionally reused for CIFAR.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Training loader: flip + padded-crop augmentation, shuffled.
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root=scattering_datasets.get_dataset_dir('CIFAR'), train=True, transform=transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor(),
        normalize,
    ]), download=True),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)

# Test loader: deterministic pipeline, no shuffling.
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root=scattering_datasets.get_dataset_dir('CIFAR'), train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=128, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)

# Optimizer
lr = 0.1
# Build the scattering + CNN classifier selected by the CLI on the device.
model = Scattering2dCNN(K, args.classifier).to(device)

# DataLoaders
if use_cuda:
    num_workers = 4
    pin_memory = True
else:
    # BUGFIX: was `num_workers = None`. DataLoader's num_workers must be an
    # int (0 = load in the main process); None raises a TypeError. The other
    # copy of this snippet in this file already uses 0.
    num_workers = 0
    pin_memory = False

# ImageNet-style channel statistics, conventionally reused for CIFAR.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])

# Training loader: flip + padded-crop augmentation, shuffled.
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root=scattering_datasets.get_dataset_dir('CIFAR'), train=True, transform=transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor(),
        normalize,
    ]), download=True),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)

# Test loader: deterministic pipeline, no shuffling.
test_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root=scattering_datasets.get_dataset_dir('CIFAR'), train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])),
    batch_size=128, shuffle=False, num_workers=num_workers, pin_memory=pin_memory)

# Optimizer
lr = 0.1
# Fragment of a weight-initialization routine; the enclosing function's `def`
# (and the branch the first two lines belong to — presumably a conv-layer
# isinstance check, scaling by sqrt(n) fan — TODO confirm against the full
# file) lies outside this view.
m.weight.data.normal_(0, 2./math.sqrt(n))
m.bias.data.zero_()
# Linear layers: normal init scaled by the layer's fan-in, bias zeroed.
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 2./math.sqrt(m.in_features))
m.bias.data.zero_()
# DataLoaders
# Worker processes and pinned host memory only help when a GPU is in use.
if use_cuda:
    num_workers = 4
    pin_memory = True
else:
    # BUGFIX: was `num_workers = None`. DataLoader requires an int
    # (0 = load in the main process); None raises a TypeError. The intact
    # copy of this snippet later in the file already uses 0.
    num_workers = 0
    pin_memory = False

# Standard MNIST normalization (mean 0.1307, std 0.3081).
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)

# NOTE(review): shuffle=True on the test set is harmless for aggregate
# metrics but unusual — confirm it is intentional.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(scattering_datasets.get_dataset_dir('MNIST'), train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
# Optimizer: SGD, lr 0.01, momentum 0.9, weight decay 5e-4.
optimizer = torch.optim.SGD(
    model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
# Fixed seed so the 5000-sample subset below is reproducible across runs.
prng = RandomState(42)

###############################################
# Create dataloaders
from torchvision import datasets, transforms
import kymatio.datasets as scattering_datasets

if use_cuda:
    num_workers = 4
    pin_memory = True
else:
    num_workers = 0
    pin_memory = False

train_data = datasets.MNIST(
    scattering_datasets.get_dataset_dir('MNIST'),
    train=True, download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ]))

# Extract a seed-fixed subset of 5000 of the 60000 MNIST training samples
# by keeping the first 5000 indices of a full permutation.
subset_idx = prng.permutation(np.arange(0, 60000))[0:5000]
train_data.data = train_data.data[subset_idx]
train_data.targets = train_data.targets[subset_idx]

train_loader = torch.utils.data.DataLoader(
    train_data, batch_size=128, shuffle=True,
    num_workers=num_workers, pin_memory=pin_memory)
# Create the test loader on the full MNIST test set.
# BUGFIX: this call was truncated mid-expression — the dataset root,
# the train=False flag, the closing parentheses and the DataLoader
# arguments were all missing, leaving a syntax error. Reconstructed from
# the intact copy of the same snippet later in this file.
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(
        scattering_datasets.get_dataset_dir('MNIST'),
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
    batch_size=128, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)
# Extract a subset of 5000 samples from the MNIST training set, using the
# seeded generator so the subset is reproducible.
keep = prng.permutation(np.arange(0, 60000))[0:5000]
train_data.data = train_data.data[keep]
train_data.targets = train_data.targets[keep]

train_loader = torch.utils.data.DataLoader(
    train_data, batch_size=128, shuffle=True,
    num_workers=num_workers, pin_memory=pin_memory)

# Create the test loader on the full MNIST test set.
mnist_test = datasets.MNIST(
    scattering_datasets.get_dataset_dir('MNIST'),
    train=False,
    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ]))
test_loader = torch.utils.data.DataLoader(
    mnist_test, batch_size=128, shuffle=True,
    num_workers=num_workers, pin_memory=pin_memory)
###############################################################################
# This will help us define networks a bit more cleanly
import torch.nn as nn
class View(nn.Module):
def __init__(self, *args):
super(View, self).__init__()
self.shape = args
def forward(self, x):