# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
reference = reference[0]
encrypted_out = encrypted_out[0]
self._check(encrypted_out, reference, msg + " in forward")
# check backward pass
grad_output = get_random_test_tensor(
max_value=2, size=reference.size(), is_float=True
)
grad_output_encr = crypten.cryptensor(grad_output)
reference.backward(grad_output)
encrypted_out.backward(grad_output_encr)
self._check(input_encr.grad, input.grad, msg + " in backward")
for i, arg_encr in enumerate(args_encr):
if crypten.is_encrypted_tensor(arg_encr):
self._check(arg_encr.grad, args[i].grad, msg + " in backward args")
tensor2 = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor2 = y_type(tensor2)
condition_tensor = (
get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
)
condition_encrypted = crypten.cryptensor(condition_tensor)
condition_bool = condition_tensor.bool()
reference_out = torch.where(condition_bool, tensor1, tensor2)
encrypted_out = crypten.where(
condition_bool, encrypted_tensor1, encrypted_tensor2
)
y_is_private = crypten.is_encrypted_tensor(tensor2)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with public condition",
)
encrypted_out = encrypted_tensor1.where(
condition_encrypted, encrypted_tensor2
)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with private condition",
def validate(val_loader, model, criterion, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = encrypt_data_tensor_with_src(input)
# compute output
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = encrypt_data_tensor_with_src(input)
# compute output
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if (i + 1) % print_freq == 0:
logging.info(
# Initialize random weights
w = features.new(torch.randn(1, features.size(0)))
b = features.new(torch.randn(1))
if print_time:
pt_time = AverageMeter()
end = time.time()
for epoch in range(epochs):
# Forward
label_predictions = w.matmul(features).add(b).sign()
# Compute accuracy
correct = label_predictions.mul(labels)
accuracy = correct.add(1).div(2).mean()
if crypten.is_encrypted_tensor(accuracy):
accuracy = accuracy.get_plain_text()
# Print Accuracy once
if crypten.communicator.get().get_rank() == 0:
logging.info(
f"Epoch {epoch} --- Training Accuracy %.2f%%" % (accuracy.item() * 100)
)
# Backward
loss_grad = -labels * (1 - correct) * 0.5 # Hinge loss
b_grad = loss_grad.mean()
w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))
# Update
w -= w_grad * lr
b -= b_grad * lr
def polynomial(self, coeffs, func="mul"):
"""Computes a polynomial function on a tensor with given coefficients,
`coeffs`, that can be a list of values or a 1-D tensor.
Coefficients should be ordered from the order 1 (linear) term first,
ending with the highest order term. (Constant is not included).
"""
# Coefficient input type-checking
if isinstance(coeffs, list):
coeffs = torch.tensor(coeffs)
assert torch.is_tensor(coeffs) or crypten.is_encrypted_tensor(
coeffs
), "Polynomial coefficients must be a list or tensor"
assert coeffs.dim() == 1, "Polynomial coefficients must be a 1-D tensor"
# Handle linear case
if coeffs.size(0) == 1:
return self.mul(coeffs)
# Compute terms of polynomial using exponentially growing tree
terms = crypten.mpc.stack([self, self.square()])
while terms.size(0) < coeffs.size(0):
highest_term = terms[-1:].expand(terms.size())
new_terms = getattr(terms, func)(highest_term)
terms = crypten.cat([terms, new_terms])
# Resize the coefficients for broadcast
def forward(self, input):
    """Reshape a tensor to the shape given as the second element of `input`.

    `input` is a (tensor, shape) pair. The shape carries no secret data, so
    an encrypted shape is decrypted before being used for the reshape.
    """
    assert isinstance(input, (list, tuple)), "input must be list or tuple"
    data, target_shape = input
    # Shape is public metadata, not payload, so revealing it leaks nothing.
    if crypten.is_encrypted_tensor(target_shape):
        target_shape = target_shape.get_plain_text()
    dims = target_shape.long().tolist()
    return data.reshape(dims)
def forward(ctx, input):
    """Elementwise division of the pair held in `input`.

    `input` is a (numerator, divisor) pair. When the divisor is encrypted,
    division is performed as multiplication by its reciprocal; when it is
    public, plain division is used. Saves to `ctx` what backward will need.
    """
    numerator, divisor = input
    if not crypten.is_encrypted_tensor(divisor):
        # Public divisor: backward only needs the numerator's size.
        ctx.save_multiple_for_backward([numerator.size(), divisor])
        return numerator.div(divisor)
    # Encrypted divisor: compute its reciprocal once and reuse it in backward.
    recip = divisor.reciprocal()
    ctx.save_multiple_for_backward([numerator, recip])
    return numerator.mul(recip)
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = crypten.cryptensor(input)
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if (i + 1) % print_freq == 0:
logging.info(