def create_scatter(vector):
    "Create the scatter to processor 0."
    comm = vector.getComm()
    rank = comm.getRank()
    scatter, V0 = PETSc.Scatter.toZero(vector)
    scatter.scatter(vector, V0, False, PETSc.Scatter.Mode.FORWARD)
    comm.barrier()
    return V0, scatter
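
A minimal usage sketch for the helper above (the vector size and fill values are illustrative, and it assumes petsc4py has been initialized):

from petsc4py import PETSc
import numpy as np

# small distributed vector filled with its global index
v = PETSc.Vec().createMPI(8)
start, end = v.getOwnershipRange()
idx = np.arange(start, end, dtype='i')
v.setValues(idx, idx.astype(PETSc.ScalarType))
v.assemble()

# gather the full vector on rank 0 using the helper above
V0, scatter = create_scatter(v)
if v.getComm().getRank() == 0:
    print(V0.getArray())    # the full global vector (only populated on rank 0)

# release the scatter context and gathered vector when no longer needed
scatter.destroy()
V0.destroy()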
def _initialize_transfer(self):
    """See openmdao.vectors.Transfer."""
    self._transfers = {}
    for ip_iset, op_iset in self._ip_inds:
        key = (ip_iset, op_iset)
        if len(self._ip_inds[key]) > 0:
            ip_inds = numpy.array(self._ip_inds[key], 'i')
            op_inds = numpy.array(self._op_inds[key], 'i')
            ip_indexset = PETSc.IS().createGeneral(ip_inds, comm=self._comm)
            op_indexset = PETSc.IS().createGeneral(op_inds, comm=self._comm)
            ip_petsc = self._ip_vec._global_vector._petsc[ip_iset]
            op_petsc = self._op_vec._global_vector._petsc[op_iset]
            transfer = PETSc.Scatter().create(op_petsc, op_indexset,
                                              ip_petsc, ip_indexset)
            self._transfers[key] = transfer
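
For context, the create(vec_from, is_from, vec_to, is_to) / scatter() pattern used above can be exercised standalone; a minimal hedged sketch (the vectors, sizes, and reversed index mapping are purely illustrative, not part of the OpenMDAO transfer code):

from petsc4py import PETSc
import numpy as np

comm = PETSc.COMM_WORLD

# source and destination vectors of the same global size
src = PETSc.Vec().createMPI(6, comm=comm)
dst = PETSc.Vec().createMPI(6, comm=comm)
start, end = src.getOwnershipRange()
idx = np.arange(start, end, dtype='i')
src.setValues(idx, idx.astype(PETSc.ScalarType))
src.assemble()

# index sets pairing source entries with destination entries
# (here: reverse the global ordering)
is_src = PETSc.IS().createGeneral(idx, comm=comm)
is_dst = PETSc.IS().createGeneral(5 - idx, comm=comm)

# create and apply the scatter, mirroring the create()/scatter() calls above
transfer = PETSc.Scatter().create(src, is_src, dst, is_dst)
transfer.scatter(src, dst, addv=PETSc.InsertMode.INSERT,
                 mode=PETSc.ScatterMode.FORWARD)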
def create_gather_to_zero(pvec):
    """
    Create the ``gather_to_zero()`` function for collecting the global PETSc
    vector on the task of rank zero.
    """
    g20, pvec_full = PETSc.Scatter().toZero(pvec)

    def gather_to_zero(pvec):
        """
        Return the global PETSc vector, corresponding to `pvec`, on the task of
        rank zero. The vector is reused between calls!
        """
        g20.scatter(pvec, pvec_full, PETSc.InsertMode.INSERT,
                    PETSc.ScatterMode.FORWARD)
        return pvec_full

    return gather_to_zero
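
A hedged usage sketch: the gatherer is built once and reused across iterations, which is the point of the closure above (the vector size, the dummy update, and the loop are illustrative):

from petsc4py import PETSc

x = PETSc.Vec().createMPI(100)
gather_to_zero = create_gather_to_zero(x)

for _ in range(3):
    x.set(1.0)                      # stand-in for some distributed update
    x_full = gather_to_zero(x)      # full vector, valid on rank 0 only
    if x.getComm().getRank() == 0:
        total = x_full.getArray().sum()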
self.sim.update(ub)

# calculate derivative via y^T*dA/dp*x
product = sim.calc_ydAx(Ai)
grad_part = -2*np.real( product/step )

# send the partially computed gradient to the master node to finish
# up the calculation
#MPI.COMM_WORLD.Gather(grad_part, grad_full, root=0)
grad_full = MPI.COMM_WORLD.gather(grad_part, root=0)

# We also need dAdp to account for the derivative of eps and mu
# get the updated diagonal elements of A
Af = sim.get_A_diag(Af)
dAdp = (Af-Ai)/step
gatherer, dAdp_full = PETSc.Scatter().toZero(dAdp)
gatherer.scatter(dAdp, dAdp_full, False, PETSc.Scatter.Mode.FORWARD)

# finish calculating the gradient
if(NOT_PARALLEL):
    # derivative with respect to fields
    gradient[i] = np.sum(grad_full)

    # Next we compute the derivative with respect to eps and mu. We
    # exclude the PML regions because changes to the materials in
    # the PMLs are generally not something we want to consider.
    # TODO: make compatible with multiple update boxes...
    jmin = int(np.floor(ub[0]/X*N)); jmax = int(np.ceil(ub[1]/X*N))
    imin = int(np.floor(ub[2]/Y*M)); imax = int(np.ceil(ub[3]/Y*M))
    if(jmin < w_pml_l): jmin = w_pml_l
    if(jmax > N-w_pml_r): jmax = N-w_pml_r
    if(imin < w_pml_b): imin = w_pml_b
    if(imax > M-w_pml_t): imax = M-w_pml_t
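
This snippet combines two collection mechanisms: mpi4py's gather() for plain Python/NumPy values (grad_part) and PETSc's Scatter.toZero() for the distributed dAdp vector. A standalone sketch of that combination, with made-up sizes and names:

from petsc4py import PETSc
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD

# rank-local scalar contribution, collected into a Python list on rank 0
local_part = float(comm.Get_rank())
parts = comm.gather(local_part, root=0)

# distributed PETSc vector, collected into a sequential vector on rank 0
v = PETSc.Vec().createMPI(16)
v.set(1.0)
gatherer, v_full = PETSc.Scatter.toZero(v)
gatherer.scatter(v, v_full, PETSc.InsertMode.INSERT, PETSc.ScatterMode.FORWARD)

if comm.Get_rank() == 0:
    total = np.sum(parts) + v_full.getArray().sum()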
# NOTE: this snippet begins partway through the component dispatch; the
# Ez branch header below is inferred from the field names that follow.
elif(component == FieldComponent.Ez):
    if(adjoint): field = self._Ez_adj_t0
    else: field = self._Ez_fwd_t0
elif(component == FieldComponent.Hx):
    if(adjoint): field = self._Hx_adj_t0
    else: field = self._Hx_fwd_t0
elif(component == FieldComponent.Hy):
    if(adjoint): field = self._Hy_adj_t0
    else: field = self._Hy_fwd_t0
elif(component == FieldComponent.Hz):
    if(adjoint): field = self._Hz_adj_t0
    else: field = self._Hz_fwd_t0

# get a "natural" representation of the appropriate field vector,
# gather it on the rank 0 node and return the appropriate piece
self._da.globalToNatural(field, self._vn)
scatter, fout = PETSc.Scatter.toZero(self._vn)
scatter.scatter(self._vn, fout, False, PETSc.Scatter.Mode.FORWARD)

if(NOT_PARALLEL):
    fout = np.array(fout, dtype=np.complex128)
    fout = np.reshape(fout, [self._Nz, self._Ny, self._Nx])
    return fout[domain.i, domain.j, domain.k]
else:
    return MathDummy()
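
The globalToNatural()/toZero() combination above is the usual way to pull a DMDA-distributed field onto rank 0 in natural (lexicographic) ordering; a small standalone sketch with a made-up 8x8x8 grid:

from petsc4py import PETSc
import numpy as np

# small 3D structured grid; sizes are illustrative only
da = PETSc.DMDA().create(dim=3, sizes=(8, 8, 8), dof=1, stencil_width=1)
gvec = da.createGlobalVec()
gvec.set(2.0)

# reorder into "natural" (lexicographic) ordering, then gather on rank 0
nvec = da.createNaturalVec()
da.globalToNatural(gvec, nvec)
scatter, full = PETSc.Scatter.toZero(nvec)
scatter.scatter(nvec, full, PETSc.InsertMode.INSERT, PETSc.ScatterMode.FORWARD)

if nvec.getComm().getRank() == 0:
    # (z, y, x) index order, mirroring the reshape in the snippet above
    field = np.reshape(full.getArray(), (8, 8, 8))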
def vecToArray(obj):
    """ Converts a PETSc vector to a numpy array, available on *all* MPI nodes.

    Args:
        obj (petsc4py.PETSc.Vec): input vector.

    Returns:
        numpy.array :
    """
    # scatter vector 'obj' to all processes
    comm = obj.getComm()
    scatter, obj0 = _PETSc.Scatter.toAll(obj)
    scatter.scatter(obj, obj0, False, _PETSc.Scatter.Mode.FORWARD)

    # copy the gathered values before releasing the scatter objects
    result = _np.asarray(obj0).copy()

    # deallocate
    comm.barrier()
    scatter.destroy()
    obj0.destroy()

    return result
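
A short usage sketch for vecToArray (the vector size and fill value are illustrative); because toAll() is used, every rank receives the complete array:

from petsc4py import PETSc as _PETSc
import numpy as _np

v = _PETSc.Vec().createMPI(10)
v.set(3.0)
arr = vecToArray(v)      # identical full-length numpy array on all MPI ranks
assert arr.shape == (10,)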