def on(self, wrapper):
"""Used to add a new _GeneralizedPointerTensor at the top of the chain,
just before the tensorvar wrapper."""
# Assign the newly created tensor to the good owner and torch_type
self.torch_type = wrapper.child.torch_type
self.owner = wrapper.child.owner
# Insert self between wrapper and wrapper child
torch_utils.bind_tensor_nodes(wrapper=self, child_obj=wrapper.child)
torch_utils.bind_tensor_nodes(wrapper=wrapper, child_obj=self)
self.child = None
# In case wrapper is a variable, do the same with data and grad (if necessary)
if torch_utils.is_variable(wrapper):
try:
data_pointer_dict = {
w: p.data for w, p in self.pointer_tensor_dict.items()
}
wrapper.data = _GeneralizedPointerTensor(data_pointer_dict).on(
wrapper.data
)
except AttributeError:
pass
if torch_utils.is_variable(wrapper.grad):
grad_pointer_dict = {
w: p.grad for w, p in self.pointer_tensor_dict.items()
}
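# A minimal, self-contained sketch (not PySyft code) of the same splice that
# on() performs with bind_tensor_nodes(): a new node is inserted between the
# wrapper and its current child. Node and insert_below are illustrative names.
class Node:
    def __init__(self, child=None):
        self.child = child
        self.parent = None

def insert_below(wrapper, new_node):
    # Link new_node to the old child, then hook it directly under the wrapper
    new_node.child = wrapper.child
    if wrapper.child is not None:
        wrapper.child.parent = new_node
    wrapper.child = new_node
    new_node.parent = wrapper
    return wrapper

chain = insert_below(Node(child=Node()), Node())
# chain -> new node -> old child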
def __array_ufunc__(self, ufunc, method, *args, **kwargs):
attr = ufunc.__name__
cmd, locations, owners = utils.compile_command(
attr=str(attr), args=args, kwargs=kwargs, has_self=False, self=None
)
return self.owner.send_command(
recipient=self.location, message=cmd, framework="numpy"
)
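# A hedged sketch of the __array_ufunc__ protocol relied on above: any class
# that defines it is handed every numpy ufunc applied to its instances, so the
# call can be compiled into a command instead of being executed locally.
# RemoteArray and sent_commands are illustrative names, not syft's API.
import numpy as np

class RemoteArray:
    def __init__(self):
        self.sent_commands = []

    def __array_ufunc__(self, ufunc, method, *args, **kwargs):
        # Record the command that would be shipped to the remote worker
        self.sent_commands.append((ufunc.__name__, method, kwargs))
        return self

x = RemoteArray()
np.add(x, np.ones(3))          # dispatches to x.__array_ufunc__
print(x.sent_commands[0][0])   # -> "add"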
result_dict = {}
torch_type = None
var_data_type = None
for worker_id in syft_commands.keys():
syft_command = syft_commands[worker_id]
result_dict[worker_id] = sy._PointerTensor.handle_call(syft_command, owner)
if torch_type is None:
torch_type = result_dict[worker_id].torch_type
if torch_utils.is_variable_name(torch_type):
var_data_type = result_dict[worker_id].data.torch_type
gpt = _GeneralizedPointerTensor(result_dict, torch_type=torch_type, owner=owner)
if torch_utils.is_variable_name(torch_type):
gpt.child = torch.guard[torch_type]()
data_pointer_dict = {w: p.data for w, p in gpt.pointer_tensor_dict.items()}
gpt.data = _GeneralizedPointerTensor(
data_pointer_dict, torch_type=var_data_type, owner=owner
)
gpt.data.child = torch.guard[var_data_type]()
grad_pointer_dict = {w: p.grad for w, p in gpt.pointer_tensor_dict.items()}
gpt.grad = _GeneralizedPointerTensor(
grad_pointer_dict, torch_type=torch_type, owner=owner
)
gpt.grad.child = torch.guard[torch_type]()
grad_data_pointer_dict = {
w: p.grad.data for w, p in gpt.pointer_tensor_dict.items()
}
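# A hedged sketch (illustrative names, not syft's API) of the fan-out pattern
# used by handle_call above: one logical command is split per worker, each
# piece is executed through that worker's pointer, and the per-worker results
# are collected into a single dict before being re-wrapped.
def fan_out(run_on_worker, per_worker_commands):
    result_dict = {}
    for worker_id, command in per_worker_commands.items():
        # stands in for sy._PointerTensor.handle_call(syft_command, owner)
        result_dict[worker_id] = run_on_worker(worker_id, command)
    return result_dict

results = fan_out(lambda w, c: f"{c} done on {w}", {"alice": "add", "bob": "add"})
print(results)  # {'alice': 'add done on alice', 'bob': 'add done on bob'}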
"command": command,
"has_self": has_self,
"args": args,
"kwargs": kwargs,
}
if has_self:
raw_command["self"] = self_
if is_torch_command:
# Unwrap the torch wrapper
syft_command, child_type = torch_utils.prepare_child_command(
raw_command, replace_tensorvar_with_child=True
)
else:
# Get the next syft class
# The actual syft class is the one which redirected (see the _PlusIsMinus ex.)
syft_command, child_type = torch_utils.prepare_child_command(
raw_command, replace_tensorvar_with_child=True
)
# torch_utils.assert_has_only_syft_tensors(syft_command)
# Note: because we have problems registering tensors with the right worker,
# and because Virtual workers create even more ambiguity, we explicitly specify
# the worker performing the operation
result = child_type.handle_call(syft_command, owner=self)
if is_torch_command:
# Wrap the result
if has_self and utils.is_in_place_method(attr):
# TODO: fix this properly: don't wrap the same way if syft or Variable
if torch_utils.is_variable(result) or torch_utils.is_tensor(result):
def __repr__(self):
if (
torch_utils.is_tensor(self)
and hasattr(self, "child")
and not isinstance(self.child, (sy._LocalTensor, sy._PointerTensor))
):
if isinstance(self.child, sy._FixedPrecisionTensor):
return self.child.__repr__()
x_ = type(self)()
x_.native_set_(self)
return "[Head of chain]\n" + x_.native___repr__()
if (
torch_utils.is_variable(self)
and hasattr(self, "child")
and not isinstance(self.child, (sy._LocalTensor, sy._PointerTensor))
):
@classmethod
def handle_call(cls, syft_command, owner):
syft_commands = torch_utils.split_to_pointer_commands(syft_command)
result_dict = {}
torch_type = None
var_data_type = None
for worker_id in syft_commands.keys():
syft_command = syft_commands[worker_id]
result_dict[worker_id] = sy._PointerTensor.handle_call(syft_command, owner)
if torch_type is None:
torch_type = result_dict[worker_id].torch_type
if torch_utils.is_variable_name(torch_type):
var_data_type = result_dict[worker_id].data.torch_type
gpt = _GeneralizedPointerTensor(result_dict, torch_type=torch_type, owner=owner)
def forward_method_to_child(self, *args, **kwargs):
child_args = torch_utils.get_child_in_args(*args, **kwargs)
if attr == "zero_":
response = getattr(self.child, "native_" + attr)()
else:
response = getattr(self.child, "native_" + attr)(
*child_args, **kwargs
)
syft_node = type(self)(
child=response.child,
parent=None,
torch_type=type(response).__name__,
)
# Insert the new node just before the wrapper
# syft_node.child = response.child
response.child.parent = syft_node
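# A hedged sketch of the forwarding idiom above (illustrative names): the
# wrapper looks up the "native_"-prefixed method on its child and calls it
# with the unwrapped arguments, then the response gets a fresh syft node.
class Child:
    def native_add(self, other):
        return other + 1

class Wrapper:
    def __init__(self, child):
        self.child = child

    def forward_method_to_child(self, attr, *args, **kwargs):
        return getattr(self.child, "native_" + attr)(*args, **kwargs)

w = Wrapper(Child())
print(w.forward_method_to_child("add", 41))  # -> 42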
# this series of if/else statements uses the message_wrapper['type']
# value to determine where to route the incoming message.
# if the message contains an object being sent to us
if message_wrapper["type"] == "obj":
object = message
# if object is a numpy array
if isinstance(message, np.ndarray):
"""do nothing."""
# if object is a Torch object - pre-process it for registration
else:
torch_utils.fix_chain_structure(object)
# register the object, saving it in self._objects and ensuring that
# object.owner is set correctly
self.register(object)
# we do not send a response back
# TODO: send a "successful" or "not successful" response?
return {}, False
# if the message contains a request for an object to be sent to another
# worker. For example, "x.get()" would execute here if x is a pointer
# to an object hosted on this worker.
elif message_wrapper["type"] == "req_obj":
# Because it was pointed at, it's the first syft_object of the chain,
# so its parent is the tensorvar
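# A hedged sketch (illustrative, not the worker's real API) of the routing
# described above: the handler branches on message_wrapper["type"] to decide
# whether to store an incoming object or to serve a request for one.
def route(message_wrapper, message, registry):
    if message_wrapper["type"] == "obj":
        registry[len(registry)] = message   # stand-in for self.register(object)
        return {}, False                    # no response is sent back
    elif message_wrapper["type"] == "req_obj":
        return registry.get(message), True  # stand-in for sending the object
    raise ValueError("unknown message type")

registry = {}
print(route({"type": "obj"}, [1, 2, 3], registry))  # ({}, False)
print(route({"type": "req_obj"}, 0, registry))      # ([1, 2, 3], True)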
@classmethod
def handle_call(cls, syft_command, owner):
"""Execute a forwarded command on the native tensor with native
operations.
Receives a syft command and an owner, and converts it into a
command with native torch args. Executes the native operations and
converts the result back into a syft response using _LocalTensors.
"""
tensor_command, torch_type = torch_utils.prepare_child_command(
syft_command, replace_tensorvar_with_child=True
)
# torch_utils.assert_has_only_torch_tensorvars(tensor_command)
attr = tensor_command["command"]
args = tensor_command["args"]
kwargs = tensor_command["kwargs"]
has_self = tensor_command["has_self"]
if has_self:
self = tensor_command["self"]
native_attr = torch._command_guard(
attr, "tensorvar_methods", get_native=True
)
command = getattr(self, native_attr)
else:
# Deser the var.data
try:
var_data_type, var_data_tensor = torch_utils.extract_type_and_obj(
msg_obj["data"]
)
if is_head:
var_data = torch.guard[var_data_type].deser(
var_data_type, var_data_tensor, worker, acquire
)
else:
var_data = torch.guard[var_data_type]()
except AttributeError:
var_data = torch.guard["FloatTensor"]()
# If not already existing object, build the torch wrapper
variable = sy.Variable(var_data, requires_grad=msg_obj["requires_grad"])
# Deser the var.grad
if "grad" in msg_obj:
var_grad_type, var_grad_tensor = torch_utils.extract_type_and_obj(
msg_obj["grad"]
)
if is_head:
var_grad = torch.guard[var_grad_type].deser(
var_grad_type, var_grad_tensor, worker, acquire, is_head
)
else:
var_grad = torch.guard[var_grad_type]()
variable.assign_grad_(var_grad)
# And connect it to the child syft_tensor
torch_utils.bind_tensor_nodes(variable, var_syft_obj)
# Last, ensure that the structure follows our standard
torch_utils.fix_chain_structure(variable, variable.data, variable.grad)
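# A hedged sketch of the is_head split used in the deserialization above:
# only the head of a chain rebuilds real tensor contents, while non-head
# nodes just get an empty placeholder of the right type. `guard` mimics
# torch.guard as a type-name -> constructor map; names are illustrative.
guard = {"FloatTensor": list}

def deser_data(type_name, payload, is_head):
    if is_head:
        return guard[type_name](payload)  # rebuild the real contents
    return guard[type_name]()             # empty placeholder, contents live elsewhere

print(deser_data("FloatTensor", [1.0, 2.0], is_head=True))   # [1.0, 2.0]
print(deser_data("FloatTensor", [1.0, 2.0], is_head=False))  # []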