def test_padding(self):
    series = [['1', '2', '3 '], ['2', '3'], ['3', '4', '5', '6'], [' 4', '5', '6']]
    target = [[1.0, 2.0, 3.0, 4.0, 0.0], [2.0, 3.0, 4.0, 5.0, 0.0], [3.0, 0.0, 5.0, 6.0, 0.0]]
    result = tensor_from_series(series, get_devices()[0], n_dims=5, pad_value=0.0, max_len=3).tolist()[0]
    self.assertEqual(result, target)
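# A minimal sketch (an assumption inferred from the test above, not the library's
# actual tensor_from_series implementation): each of the first `max_len` timesteps is
# gathered across all sub-series, cast to float, and right-padded with `pad_value`
# up to `n_dims` values.
def pad_series(series, n_dims, pad_value, max_len):
    out = []
    for t in range(max_len):
        row = [float(s[t]) if t < len(s) else pad_value for s in series]
        row += [pad_value] * (n_dims - len(row))
        out.append(row)
    return out

# pad_series([['1', '2', '3 '], ['2', '3'], ['3', '4', '5', '6'], [' 4', '5', '6']],
#            n_dims=5, pad_value=0.0, max_len=3) reproduces the `target` above.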
def __init__(self,
             input_size=None,
             output_size=None,
             nr_outputs=None,
             shape=None,
             selfaware=False,
             size_parameters={},
             pretrained_net=None,
             deterministic=False):
    self.input_size = input_size
    self.output_size = output_size
    self.nr_outputs = nr_outputs
    self.selfaware = selfaware

    # How many devices we can train this network on
    self.available_devices = 1
    self.max_variance = None

    self.device, _ = get_devices()
    if deterministic:
        '''
        Fix the seed (always the same value for the same dataset) and set the CUDA
        options below so that the numbers PyTorch generates randomly are the same
        every time we train on the same dataset.
        '''
        torch.manual_seed(66)
        if 'cuda' in str(self.device):
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False
            self.available_devices = torch.cuda.device_count()
        else:
            self.available_devices = 1
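# Self-contained sketch of the reproducibility setup above: fixing the manual seed
# (and the cuDNN flags when running on CUDA) makes PyTorch's random draws identical
# across runs. Standalone illustration, not part of the class.
import torch

def reproducible_draw(seed=66):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    return torch.rand(3)

assert torch.equal(reproducible_draw(), reproducible_draw())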
def convert_to_device(self, device_str=None):
    if device_str is not None:
        device = torch.device(device_str)
        available_devices = 1
        if device_str == 'cuda':
            available_devices = torch.cuda.device_count()
    else:
        device, available_devices = get_devices()

    self._mixer.to(device, available_devices)
    for e in self._mixer.encoders:
        self._mixer.encoders[e].to(device, available_devices)
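# get_devices() is used throughout these snippets but not shown here; a plausible
# minimal sketch (an assumption, not the project's actual helper) returning the pair
# that callers unpack: a torch.device plus the number of such devices available.
import torch

def get_devices():
    if torch.cuda.is_available():
        return torch.device('cuda'), torch.cuda.device_count()
    return torch.device('cpu'), 1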
def __init__(self, model='resnet-18', layer='default', layer_output_size=512):
    """ Img2Vec
    :param model: String name of requested model
    :param layer: String or Int depending on model. See more docs: https://github.com/christiansafka/img2vec.git
    :param layer_output_size: Int depicting the output size of the requested layer
    """
    self.device, _ = get_devices()
    self.layer_output_size = layer_output_size
    self.model_name = model

    self.model, self.extraction_layer = self._get_model_and_layer(model, layer)
    self.model = self.model.to(self.device)

    self.scaler = transforms.Resize((224, 224))  # transforms.Scale is deprecated in newer torchvision
    self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                          std=[0.229, 0.224, 0.225])
    self.to_tensor = transforms.ToTensor()
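# Hedged usage sketch for the Img2Vec preprocessing above ('example.jpg' is a
# hypothetical input path): resize to 224x224, convert to a tensor and normalize
# with the ImageNet statistics before the forward pass that extracts the embedding.
from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# img = Image.open('example.jpg').convert('RGB')   # hypothetical input image
# batch = preprocess(img).unsqueeze(0)             # shape: (1, 3, 224, 224)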
def __init__(self, encoded_vector_size=4, train_iters=75000, stop_on_error=0.8, learning_rate=0.01, is_target=False):
    super().__init__(is_target)
    self._stop_on_error = stop_on_error
    self._learning_rate = learning_rate
    self._encoded_vector_size = encoded_vector_size
    self._train_iters = train_iters
    self._encoder = None
    self.device, _ = get_devices()
awareness_layers = []
for ind in range(len(awareness_net_shape) - 1):
    rectifier = torch.nn.SELU
    awareness_layers.append(torch.nn.Linear(awareness_net_shape[ind], awareness_net_shape[ind + 1]))
    if ind < len(awareness_net_shape) - 2:
        awareness_layers.append(rectifier())

self.net = torch.nn.Sequential(*awareness_layers)

for layer in self.net:
    if hasattr(layer, 'weight'):
        torch.nn.init.normal_(layer.weight, mean=0., std=1 / math.sqrt(layer.out_features))
    if hasattr(layer, 'bias'):
        torch.nn.init.normal_(layer.bias, mean=0., std=0.1)

self.device, self.available_devices = get_devices()
self.to(self.device, self.available_devices)
    self._pretrained_model_name = 'albert-base-v2'
    self._model_max_len = 768

if self.aim == ENCODER_AIM.BALANCE:
    self._classifier_model_class = DistilBertForSequenceClassification
    self._embeddings_model_class = DistilBertModel
    self._tokenizer_class = DistilBertTokenizer
    self._pretrained_model_name = 'distilbert-base-uncased'
    self._model_max_len = 768

if self.aim == ENCODER_AIM.ACCURACY:
    self._classifier_model_class = DistilBertForSequenceClassification
    self._embeddings_model_class = DistilBertModel
    self._tokenizer_class = DistilBertTokenizer
    self._pretrained_model_name = 'distilbert-base-uncased'
    self._model_max_len = 768

self.device, _ = get_devices()
def to(self, device=None, available_devices=None):
    if device is None or available_devices is None:
        device, available_devices = get_devices()

    self.net = self.net.to(device)
    if self.selfaware:
        self.awareness_net = self.awareness_net.to(device)

    available_devices = 1
    if 'cuda' in str(device):
        available_devices = torch.cuda.device_count()

    if available_devices > 1:
        self._foward_net = torch.nn.DataParallel(self.net)
        if self.selfaware:
            self._foward_awareness_net = torch.nn.DataParallel(self.awareness_net)
    else:
        self._foward_net = self.net
        if self.selfaware:
            self._foward_awareness_net = self.awareness_net
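# Minimal sketch of the multi-GPU wrapping logic in to() above: with more than one
# visible CUDA device the forward pass goes through torch.nn.DataParallel, otherwise
# the module is used directly. Standalone illustration, not the class itself.
import torch

net = torch.nn.Linear(8, 2)
if torch.cuda.is_available():
    device, n_devices = torch.device('cuda'), torch.cuda.device_count()
else:
    device, n_devices = torch.device('cpu'), 1
net = net.to(device)
forward_net = torch.nn.DataParallel(net) if n_devices > 1 else net
out = forward_net(torch.randn(4, 8, device=device))  # -> shape (4, 2)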