print('Abort!!!')
exit(0)
        self.sizestate = [self.nvar] + self.sizevar
        self.state = np.zeros(self.sizestate)

    def get(self, name):
        """Extract variable 'name' from the stack"""
        k = self.varname_list.index(name)
        return self.state[k]
if __name__ == "__main__":
    param = Param()
    param.varname_list = ['vort', 'psi', 'u']
    param.sizevar = [4, 2]

    var = Var(param)
    print(np.shape(var.state))

    # 'get' returns a view on var.state, so writing into 'vor'
    # also modifies the stacked state array
    vor = var.get('vort')
    vor[:, 0] = 1.
    print(np.shape(vor))
    print(var.state)
    def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
                 hidden_dim=128, elu_dim=64, dep_dim=100, dropout_ratio=0.5,
                 use_cudnn=False):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            # inference: reload the hyperparameters dumped at training time
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            # training: record the hyperparameters in defs_file
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.elu_dim = elu_dim
            p.nlayers = nlayers
            p.dump(defs_file)

        self.targets = read_model_defs(model_path + "/target.txt")
        self.words = read_model_defs(model_path + "/words.txt")
        self.suffixes = read_model_defs(model_path + "/suffixes.txt")
        self.prefixes = read_model_defs(model_path + "/prefixes.txt")
def create_meaner4_heartbeat_param():
    return param.Param(meaner4_heartbeat_param_name, "short", 0, 4, True)
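A minimal usage sketch: the factory just wraps the param.Param constructor so callers don't repeat the literals; the variable name below is illustrative only.

heartbeat = create_meaner4_heartbeat_param()  # build once, reuse everywhere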
    def __init__(self, model_path, word_dim=None, afix_dim=None,
                 nlayers=2, hidden_dim=128, relu_dim=64, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            # inference: reload the hyperparameters dumped at training time
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            # training: record the hyperparameters in defs_file
            self.train = True
            p = Param(self)
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.relu_dim = relu_dim
            p.nlayers = nlayers
            p.dump(defs_file)

        self.targets = read_model_defs(model_path + "/target.txt")
        self.words = read_model_defs(model_path + "/words.txt")
        self.suffixes = read_model_defs(model_path + "/suffixes.txt")
        self.prefixes = read_model_defs(model_path + "/prefixes.txt")
        self.in_dim = self.word_dim + 8 * self.afix_dim
        self.dropout_ratio = dropout_ratio
        super(LSTMTagger, self).__init__(
            emb_word=L.EmbedID(len(self.words), self.word_dim),
            emb_suf=L.EmbedID(len(self.suffixes), self.afix_dim,
                              ignore_label=IGNORE),
            # ... (the remaining links are truncated in this excerpt)
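A hedged construction sketch for the LSTMTagger above; the model directory path and the dimensions are hypothetical, and the directory must already contain the tagger_defs.txt and *.txt definition files the constructor reads.

tagger = LSTMTagger("models/mytagger", word_dim=100, afix_dim=30)  # training: dumps tagger_defs.txt
tagger = LSTMTagger("models/mytagger")                             # inference: reloads tagger_defs.txt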
from param import Param
from grid import Grid
from fluid2d import Fluid2d
import numpy as np

param = Param('default.xml')
param.modelname = 'boussinesq'
param.expname = 'RB_00'

# domain and resolution
param.nx = 64*2
param.ny = param.nx // 4  # integer division: grid sizes must be ints
param.npx = 1
param.Lx = 4.
param.Ly = 1.
param.geometry = 'xchannel'

# time
param.tend = 40.
param.cfl = 1.
param.adaptable_dt = True
param.dt = .1
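The Grid and Fluid2d imports above hint at how the script continues. A minimal sketch of the remaining steps, assuming the usual fluid2d example layout; the loop() entry point is an assumption, not shown in this excerpt.

grid = Grid(param)          # build the grid from the parameters
f2d = Fluid2d(param, grid)  # assemble the model
f2d.loop()                  # assumed entry point running the time loop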
    def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
                 hidden_dim=128, elu_dim=64, dep_dim=100, dropout_ratio=0.5):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            # inference: reload the hyperparameters dumped at training time
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            # training: record hyperparameters and vocabulary sizes
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.elu_dim = elu_dim
            p.nlayers = nlayers
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_suffixes = len(read_model_defs(model_path + "/suffixes.txt"))
            p.n_prefixes = len(read_model_defs(model_path + "/prefixes.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)

        self.in_dim = self.word_dim + 8 * self.afix_dim
        self.dropout_ratio = dropout_ratio
        super(LSTMParser, self).__init__(
            emb_word=L.EmbedID(self.n_words, self.word_dim),
            # ... (the remaining links are truncated in this excerpt)
        self.r2 = self.xr0**2 + self.yr0**2  # squared radial distance on the grid
    def domain_integration(self, z2d):
        """Integrate z2d over the grid cells of the whole domain"""
        nh = self.nh
        # sum the interior points (the halo is nh cells wide);
        # '*1.' promotes the result to a float
        integral = np.sum(z2d[nh:-nh, nh:-nh])*1.
        # reduce across MPI subdomains to get the global integral
        integral = self.mpitools.local_to_global([(integral, 'sum')])
        return integral
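A short usage sketch: dividing the integral by grid.area yields the domain mean of a field, the same pattern used further down to de-mean the forcing; 'field' stands for any 2-D array defined on the grid, halo included.

mean = grid.domain_integration(field) / grid.area  # domain average of 'field'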
if __name__ == "__main__":
    param = Param()
    param.myrank = 0
    param.npx = 2
    param.npy = 2
    param.nx = 10
    param.ny = 10
    param.geometry = 'closed'

    grid = Grid(param)

    print("myrank is :", param.myrank)
    print(grid.xr[0, :])
    print(grid.yr[:, 0])
    print(grid.msk)
    print('my coordinates in the subdomains matrix (%i,%i)'
          % (grid.j0, grid.i0))
    print('global domain area =%f' % grid.area)
    print('global boundary perimeter =%f' % grid.bcarea)
    def __init__(self, *args, **kwargs):
        super(Param, self).__init__(*args, **kwargs)
    def __init__(self, model_path, word_dim=None, afix_dim=None, nlayers=2,
                 hidden_dim=128, elu_dim=64, dep_dim=100, dropout_ratio=0.5,
                 use_cudnn=False):
        self.model_path = model_path
        defs_file = model_path + "/tagger_defs.txt"
        if word_dim is None:
            self.train = False
            Param.load(self, defs_file)
            self.extractor = FeatureExtractor(model_path)
        else:
            self.train = True
            p = Param(self)
            p.dep_dim = dep_dim
            p.word_dim = word_dim
            p.afix_dim = afix_dim
            p.hidden_dim = hidden_dim
            p.elu_dim = elu_dim
            p.nlayers = nlayers
            p.n_words = len(read_model_defs(model_path + "/words.txt"))
            p.n_suffixes = len(read_model_defs(model_path + "/suffixes.txt"))
            p.n_prefixes = len(read_model_defs(model_path + "/prefixes.txt"))
            p.targets = read_model_defs(model_path + "/target.txt")
            p.dump(defs_file)
        # choice A/ jet-like forcing (localized in y)
        # self.forc = tau0 * (yr/sigma)*np.exp(-yr**2/(2*sigma**2)) * grid.msk

        # choice B/ basin-scale forcing: double gyre configuration
        self.forc = tau0 * np.sin(yr * np.pi) * grid.msk

        # remove the domain average so the forcing injects no net vorticity
        total = grid.domain_integration(self.forc)
        self.forc -= (total / grid.area) * grid.msk

    def add_forcing(self, x, t, dxdt):
        """Add the forcing term on x[0], the vorticity"""
        dxdt[0] += self.forc
param = Param('default.xml')
param.modelname = 'quasigeostrophic'
param.expname = 'dbl_gyre_00'

# domain and resolution
param.nx = 64*2
param.ny = 64
param.npy = 1
param.Lx = 2.
param.Ly = param.Lx/2
param.geometry = 'closed'

# time
param.tend = 2000.
param.cfl = 1.5
param.adaptable_dt = True
param.dt = 1.
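As with the Boussinesq script above, the setup presumably finishes by building the grid and the model and attaching the forcing defined earlier; the attribute used to hook in the Forcing instance is an assumption, not confirmed by this excerpt.

grid = Grid(param)
f2d = Fluid2d(param, grid)
f2d.model.forcing = Forcing(param, grid)  # assumed hook for the Forcing class above
f2d.loop()                                # assumed entry point running the time loop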