from _reconutils import get_gcd_vals, laplace_builder, pixelxpixelsolver
import sys
import numpy as np
try:
    import mpi4py
    from mpi4py import MPI
except ImportError:
    sys.stdout.write('mpi4py must be installed and in PYTHONPATH\n')
    sys.exit()
try:
    import petsc4py
    petsc4py.init([], comm=MPI.COMM_WORLD)
    from petsc4py import PETSc
except ImportError:
    sys.stdout.write('petsc4py must be installed and in PYTHONPATH\n')
    sys.exit()
PETSc.Sys.Print('Running PPS: Parallel PETSc (linear) Solver')
comm = MPI.COMM_WORLD
size, rank = comm.Get_size(), comm.Get_rank()
cols = p.gen.cols
if rank == 0:
    self.define_solution_space(p)
    address = self._read_address(p)
    metfile, modlen = p.gen.metfile, 3*len(address)
    ### get gmat, Cd and d values and send segments to other processors
    PETSc.Sys.Print('retrieving gmat, cdi, and dvec vals...')
    gmatvals, gmatij, cdiv_temp, dvec_temp = get_gcd_vals(metfile, len(p.scenes),
                                                          p.gen.doline, p.gen.cols, address, p.gen.gmat_rows)
    ### estimate posterior model pixel x pixel (for PETSc initial guess)
    PETSc.Sys.Print('estimating posterior model...')
    mtilde_temp = pixelxpixelsolver(metfile, len(p.scenes),
                                    p.gen.doline, p.gen.cols, address)
    gmatij = gmatij.reshape(-1, 3*p.gen.gmat_rows)
    if gmatij.dtype != np.int32: gmatij = gmatij.astype(np.int32)
    if p.gen.cmlambda < 1.e-12: del address
    ### initialize arrays to store start and end row indices for all procs
    rowblocks_gmat = np.empty([2, size], dtype=np.int32)
    rowblocks_cdiv = np.empty([2, size], dtype=np.int32)
    rowblocks_dvec = np.empty([2, size], dtype=np.int32)
    rowblocks_mtil = np.empty([2, size], dtype=np.int32)
if hasattr(nc, 'history'):
    nc.history = historystr + nc.history  # prepend to the existing history string
else:
    nc.history = historystr
if __name__ == "__main__":
    from argparse import ArgumentParser
    import os
    import os.path
    import tempfile
    import shutil
    from time import time, asctime
    try:
        from netCDF4 import Dataset as NC
    except ImportError:
        PETSc.Sys.Print("netCDF4 is not installed!")
        sys.exit(1)
    parser = ArgumentParser()
    parser.description = "Fill missing values by solving the Laplace equation on the missing values, using present values as Dirichlet B.C."
    parser.add_argument("INPUT", nargs=1, help="Input file name.")
    parser.add_argument("OUTPUT", nargs=1, help="Output file name.")
    parser.add_argument("-a", "--all", dest="all", action="store_true",
                        help="Process all variables.")
    parser.add_argument("-v", "--vars", dest="variables",
                        help="comma-separated list of variables to process")
    options, _ = parser.parse_known_args()
    input_filename = options.INPUT[0]
    output_filename = options.OUTPUT[0]
help="comma-separated list of variables to process")
options, _ = parser.parse_known_args()
input_filename = options.INPUT[0]
output_filename = options.OUTPUT[0]
    if options.all:
        nc = NC(input_filename)
        variables = nc.variables.keys()
        nc.close()
    else:
        try:
            variables = (options.variables).split(',')
        except AttributeError:
            PETSc.Sys.Print("Please specify variables using the -v option.")
            sys.exit(-1)
    # Done processing command-line options.
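    # Hedged usage sketch: the script file name below is an assumption, not taken
    # from this fragment; it only illustrates how the options above are meant to be used.
    #   mpiexec -n 4 python fill_missing_petsc.py -v thk,topg input.nc output.nc
    #   mpiexec -n 4 python fill_missing_petsc.py --all input.nc output.nc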
    comm = PETSc.COMM_WORLD
    rank = comm.getRank()
    t0 = time()
    PETSc.Sys.Print("Filling missing values in %s and saving results to %s..." % (input_filename,
                                                                                  output_filename))
    if rank == 0:
        try:
            PETSc.Sys.Print("Creating a temporary file...")
            # find the name of the directory with the output file:
            dirname = os.path.dirname(os.path.abspath(output_filename))
    tag = 1234
    for i in np.arange(1, size):
        comm.Send([np.array([modlen, p.gen.gmat_rows]), MPI.INT], dest=i, tag=tag); tag += 1
else:
    tag, pararray = 1234 + (rank-1), np.array([-1, -1])
    comm.Recv([pararray, MPI.INT], source=0, tag=tag)
    modlen, p.gen.gmat_rows = pararray[0], pararray[1]
    if modlen < 0 or p.gen.gmat_rows < 0:
        print('\npararray receive command failed in rank '+str(rank))
        PETSc._finalize(); MPI.Finalize(); sys.exit()
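# --- Hedged, standalone sketch (not part of the solver above): the mpi4py buffer
# protocol used by the Send/Recv calls. The numpy dtype has to agree with the MPI
# datatype (here int32 <-> MPI.INT); every name in this sketch is illustrative only.
def _demo_param_exchange():
    demo_comm = MPI.COMM_WORLD
    if demo_comm.Get_rank() == 0:
        params = np.array([10, 20], dtype=np.int32)
        for dest in range(1, demo_comm.Get_size()):
            demo_comm.Send([params, MPI.INT], dest=dest, tag=0)   # send the raw buffer
    else:
        params = np.empty(2, dtype=np.int32)
        demo_comm.Recv([params, MPI.INT], source=0, tag=0)        # receive in place
    return params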
PETSc.Sys.Print('building gmat, cdiv, and dvec arrays...')
### initialize...nnz --> [DIAGONAL,OFF-DIAGONAL] (see PETSc documentation)
gmat = PETSc.Mat().createAIJ([p.gen.gmat_rows,modlen],nnz=[3,3],comm=comm)
cdiv = PETSc.Mat().createAIJ([p.gen.gmat_rows,p.gen.gmat_rows],nnz=[1,1],comm=comm)
dvec = PETSc.Vec().createMPI(p.gen.gmat_rows,comm=comm)
mtilde_pre = PETSc.Vec().createMPI(modlen,comm=comm)
### get the block of rows owned by this processor
sr_gmat, er_gmat = gmat.getOwnershipRange()
sr_cdiv, er_cdiv = cdiv.getOwnershipRange()
sr_dvec, er_dvec = dvec.getOwnershipRange()
sr_mtil, er_mtil = mtilde_pre.getOwnershipRange()
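# --- Hedged, standalone sketch (illustrative names only): how createAIJ preallocation
# and getOwnershipRange() behave. Each rank owns a contiguous block of rows, fills
# only that block, and assembly exchanges any off-process entries.
def _demo_ownership_range(n=8):
    demo_mat = PETSc.Mat().createAIJ([n, n], nnz=3, comm=PETSc.COMM_WORLD)
    rstart, rend = demo_mat.getOwnershipRange()   # rows owned by this rank
    for i in range(rstart, rend):
        demo_mat[i, i] = 2.0
        if i > 0:
            demo_mat[i, i - 1] = -1.0
        if i < n - 1:
            demo_mat[i, i + 1] = -1.0
    demo_mat.assemblyBegin()
    demo_mat.assemblyEnd()
    return demo_mat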
### send start/end rows to root proc and receive gmat, cdiv, and dvec entries
base_tag, base_stag = 777, 999
if rank == 0:
    rowblocks_gmat[0,0], rowblocks_gmat[1,0] = sr_gmat, er_gmat
    rowblocks_cdiv[0,0], rowblocks_cdiv[1,0] = sr_cdiv, er_cdiv
else:
if j < ncol - 1:  # interior
    col = R(i, j + 1)
    A[row, col] = offdy
if j == ncol - 1:  # right-most column
    col = R(i, 0)
    A[row, col] = offdy
# communicate off-processor values
# and setup internal data structures
# for performing parallel operations
A.assemblyBegin()
A.assemblyEnd()
PETSc.Sys.Print("done.")
return A
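# --- Hedged sketch of the row-assembly pattern above, for a single interior grid
# point only. R (the 2D-to-1D index map), offdx, offdy and the diagonal value are
# read off the fragment; the treatment of boundary points here is an assumption,
# not the author's full scheme.
def _demo_interior_row(A, R, i, j, diag, offdx, offdy):
    row = R(i, j)
    A[row, row] = diag              # center of the 5-point stencil
    A[row, R(i - 1, j)] = offdx     # neighbors in the i-direction
    A[row, R(i + 1, j)] = offdx
    A[row, R(i, j - 1)] = offdy     # neighbors in the j-direction
    A[row, R(i, j + 1)] = offdy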
def fill_variable(nc, name):
    "Fill missing values in one variable."
    PETSc.Sys.Print("Processing %s..." % name)
    t0 = time()
    var = nc.variables[name]
    comm = PETSc.COMM_WORLD
    rank = comm.getRank()
    if var.ndim == 3:
        A = None
        n_records = var.shape[0]
        for t in range(n_records):
            PETSc.Sys.Print("Processing record %d/%d..." % (t + 1, n_records))
            data = var[t, :, :]
            filled_data, A = fill_2d_record(data, A)
            if rank == 0:
                var[t, :, :] = filled_data
            comm.barrier()
            PETSc.Sys.Print("Time elapsed: %5f seconds." % (time() - t0))
    elif var.ndim == 2:
        data = var[:, :]
        filled_data, _ = fill_2d_record(data)
        if rank == 0:
            var[:, :] = filled_data
        comm.barrier()
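# --- Hedged usage sketch: driving fill_variable() over the variables selected on the
# command line. The temporary-file bookkeeping from the main block is omitted and
# 'temp_filename' is an illustrative name, not taken from this fragment.
def _demo_fill_all(temp_filename, variables):
    from netCDF4 import Dataset
    dataset = Dataset(temp_filename, 'a')   # opened for in-place modification
    for variable_name in variables:
        if variable_name in dataset.variables:
            fill_variable(dataset, variable_name)
        else:
            PETSc.Sys.Print("Variable %s not found, skipping..." % variable_name)
    dataset.close()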
comm.Barrier()
del fmatval, fmatij
PETSc.Sys.Print('loading cli...')
cliv = PETSc.Vec().createMPI(modlen,comm=comm)
gtg.getDiagonal(cliv)
cli = gtg.duplicate(copy=False)
cli.setDiagonal(cliv)
cliv.destroy()
PETSc.Sys.Print('building C_m...')
rhs = fmat.transposeMatMult(cli)
rhs = rhs.matMult(fmat)
cli.destroy(); fmat.destroy()
PETSc.Sys.Print('building A...')
gtg.axpy(p.gen.cmlambda,rhs)
rhs.destroy()
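# Hedged reading of the construction above (notation assumed, not from the source):
# with G the design matrix (gmat), F the regularization operator (fmat) and
# D = diag(G^T W G) the diagonal extracted into cliv, the operator handed to the
# solver is
#     A = G^T W G + cmlambda * F^T D F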
#if p.gen.outputs['Output diag(G^T*W*G)^-1)']:
#PETSc.Sys.Print('Output diag(G^T*W*G)^-1) option not implemented...run vector_disp for estimate')
'''
### this part of the code works, but the inverse operator is not implemented due to expense
PETSc.Sys.Print('writing diagonal of posterior model covariance matrix')
cliv = PETSc.Vec().createMPI(modlen,comm=comm)
gtg.getDiagonal(cliv)
base_tag, base_stag = 2222, 3333
sr_mt, er_mt = cliv.getOwnershipRange()
if rank == 0:
clivnump = np.empty(modlen, dtype=np.float32)
clivnump[sr_mt:er_mt] = cliv[...]; cliv.destroy()
tag = base_tag
for i in np.arange(1,size):
comm.Recv([rowarr,MPI.INT], source=i, tag=tag); tag += 1
comm.Recv([clivnump[rowarr[0]:rowarr[1]], MPI.FLOAT], source=i, tag=tag); tag += 1
self._write_post_model(model=clivnump,p=p,form=1)
else:
rowarr, tag = np.array([sr_mt, er_mt], dtype=np.int32), base_tag + (rank-1)*2
clivnump = cliv[...]; cliv.destroy()
if clivnump.dtype != np.float32: clivnump = clivnump.astype(np.float32)
comm.Send([rowarr,MPI.INT], dest=0, tag=tag); tag += 1
comm.Send([clivnump, MPI.FLOAT], dest=0, tag=tag); tag += 1
'''
PETSc.Sys.Print('inverting using PETSc lsqr...')
ksp = PETSc.KSP().create(comm)
ksp.setType('lsqr')
ksp.pc.setType('none')
ksp.setOperators(gtg)
self._get_ksp_tols(ksp=ksp,p=p)
ksp.setTolerances(rtol=self.rtol,atol=self.atol,divtol=self.dtol,max_it=self.max_it)
t0 = time.time()
ksp.solve(gtilde,mtilde)
tf = time.time()
gtg.destroy(); gtilde.destroy()
PETSc.Sys.Print(' lsqr took %10.2f seconds' % (tf-t0))
PETSc.Sys.Print(' Converged in %d iterations ' % (ksp.getIterationNumber()))
PETSc.Sys.Print(' Tolerances: %e %e %d %d' % (ksp.getTolerances()))
PETSc.Sys.Print(' Convergence code (< 0 --> diverged): %d\n' % (ksp.getConvergedReason()))
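# --- Hedged, standalone sketch of the KSP/LSQR pattern used above, on a tiny
# diagonal system; sizes and tolerances are illustrative only.
def _demo_lsqr(n=5):
    A = PETSc.Mat().createAIJ([n, n], nnz=1, comm=PETSc.COMM_WORLD)
    rstart, rend = A.getOwnershipRange()
    for i in range(rstart, rend):
        A[i, i] = float(i + 1)
    A.assemblyBegin(); A.assemblyEnd()
    b = A.createVecLeft(); b.set(1.0)   # right-hand side
    x = A.createVecRight()              # solution vector
    ksp = PETSc.KSP().create(PETSc.COMM_WORLD)
    ksp.setType('lsqr')
    ksp.pc.setType('none')
    ksp.setOperators(A)
    ksp.setTolerances(rtol=1.e-8, atol=1.e-12, divtol=1.e4, max_it=100)
    ksp.solve(b, x)
    PETSc.Sys.Print('  lsqr converged reason %d after %d iterations'
                    % (ksp.getConvergedReason(), ksp.getIterationNumber()))
    return x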