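# Imports assumed by the snippets below (the original module header is not part
# of this listing). The internal helpers config, Data and runifnone come from
# the package itself, so only the third-party imports are sketched here.
import numpy as np
import tensorflow as tf
from sklearn import preprocessing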
def uniform_boundary_points(self, n):
    if n == 1:
        return np.array([[self.l]]).astype(config.real(np))
    xl = np.full((n // 2, 1), self.l).astype(config.real(np))
    xr = np.full((n - n // 2, 1), self.r).astype(config.real(np))
    return np.vstack((xl, xr))
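# Usage sketch (assuming this is a method of a 1D interval geometry whose left
# and right endpoints are self.l and self.r, as the attributes suggest): with a
# hypothetical interval l=0, r=1, uniform_boundary_points(5) stacks
# n // 2 = 2 copies of the left endpoint and n - n // 2 = 3 copies of the right
# endpoint, i.e. array([[0.], [0.], [1.], [1.], [1.]]).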
def losses_test():
    int_mat = self.get_int_matrix(False)
    f = self.pde(model.net.x, outputs, int_mat)
    if not isinstance(f, (list, tuple)):
        f = [f]
    return [loss(tf.zeros(tf.shape(fi)), fi) for fi in f] + [
        tf.constant(0, dtype=config.real(tf)) for _ in self.bcs
    ]
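# Note: losses_test is presumably a nested closure; self, model, outputs and
# loss are assumed to be bound in the enclosing scope, which is not shown in
# this snippet. At test time it measures the residuals f against zero targets
# and appends a constant-zero loss for every boundary condition in self.bcs, so
# the boundary terms do not contribute to the reported test loss.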
    # (tail of a get_x-style helper whose definition is not shown in this snippet)
    return x, self.func(x)

@runifnone('train_x', 'train_y')
def train_next_batch(self, batch_size, *args, **kwargs):
    self.train_x, self.train_y = self.get_x(batch_size - self.nbc)
    return self.train_x, self.train_y

@runifnone('test_x', 'test_y')
def test(self, n, *args, **kwargs):
    self.test_x = self.geom.uniform_points(n, True)
    self.test_x = np.roll(self.test_x, 1, axis=0)
    self.test_y = self.func(self.test_x)
    return self.test_x, self.test_y
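# Note: np.roll(self.test_x, 1, axis=0) rotates the rows of the uniform grid so
# that the last point becomes the first; the intent of this rotation is not
# clear from the snippet alone.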
class DataIDE(Data):
    """Training data for solving integro-differential equations (IDEs)."""

    def __init__(self, ide, func, geom, nbc, quad_deg):
        assert nbc == 2
        super(DataIDE, self).__init__('ide')
        self.ide, self.func, self.geom = ide, func, geom
        self.nbc = nbc
        self.quad_deg = quad_deg
        self.train_x, self.train_y = None, None
        self.test_x, self.test_y = None, None
        # Gauss-Legendre quadrature nodes and weights on the reference interval [-1, 1]
        self.quad_x, self.quad_w = np.polynomial.legendre.leggauss(quad_deg)

    def gen_data(self, size):
        def get_quad_points(x):
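            # (body not shown in this snippet) -- presumably rescales the
            # reference Gauss-Legendre nodes self.quad_x from [-1, 1] onto the
            # quadrature points needed for the integral term at x. For context,
            # np.polynomial.legendre.leggauss(deg) returns nodes and weights
            # whose weighted sum integrates polynomials up to degree
            # 2*deg - 1 exactly over [-1, 1].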
class DataSet2(Data):
    def __init__(self, X_train, y_train, X_test, y_test):
        super(DataSet2, self).__init__('func')
        self.train_x, self.train_y = X_train, y_train
        self.test_x, self.test_y = X_test, y_test
        self.scaler_x = None
        self._standardize()

    def _standardize(self):
        def standardize_one(X1, X2):
            scaler = preprocessing.StandardScaler(with_mean=True, with_std=True)
            X1 = scaler.fit_transform(X1)
            X2 = scaler.transform(X2)
            return scaler, X1, X2

        self.scaler_x, self.train_x, self.test_x = standardize_one(self.train_x, self.test_x)
        self.scaler_y, self.train_y, self.test_y = standardize_one(self.train_y, self.test_y)

    def inverse_transform_y(self, y):
        return self.scaler_y.inverse_transform(y)

    def train_next_batch(self, batch_size, *args, **kwargs):
        return self.train_x, self.train_y

    def test(self, n, *args, **kwargs):
        return self.test_x, self.test_y
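    # Standardization note: `preprocessing` is presumably sklearn.preprocessing.
    # standardize_one fits a StandardScaler on the training split only
    # (fit_transform) and reuses the same mean/std on the test split
    # (transform), which avoids leaking test statistics into training; the
    # fitted scalers are kept so predictions can be mapped back through
    # inverse_transform_y.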
            # (continuation of a train_next_batch method; the opening of the
            # online-sampling branch is not shown in this snippet)
            self.train_x = self.geom.random_points(batch_size, 'pseudo')
            self.train_y = self.func(self.train_x)
        elif self.train_x is None:
            # self.train_x = self.geom.random_points(batch_size, 'sobol')
            self.train_x = self.geom.uniform_points(batch_size, True)
            self.train_y = self.func(self.train_x)
        return self.train_x, self.train_y

    @runifnone('test_x', 'test_y')
    def test(self, n, *args, **kwargs):
        self.test_x = self.geom.uniform_points(n, True)
        self.test_y = self.func(self.test_x)
        return self.test_x, self.test_y
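# Sampling note: the online branch draws a fresh pseudo-random batch from the
# geometry on every call, while the offline branch generates a fixed point set
# once (uniform_points(batch_size, True); the True flag presumably includes the
# domain boundary) and caches it. The commented-out line suggests Sobol
# sampling was considered as an alternative.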
class DataPDE(Data):
    """Training data for solving PDEs."""

    def __init__(self, pde, func, geom, anchors):
        super(DataPDE, self).__init__('pde')
        self.pde, self.func, self.geom = pde, func, geom
        self.anchors = anchors
        self.train_x, self.train_y = None, None
        self.test_x, self.test_y = None, None
        self.nbc = len(anchors)

    def get_x(self, n):
        x = self.geom.uniform_points(n, True)
        x = np.append(self.anchors, x, axis=0)
        return x, self.func(x)
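    # Note: the anchor points are prepended to the uniform grid, so the first
    # self.nbc = len(anchors) rows of every generated batch are the anchors.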
    def train_next_batch(self, batch_size):
        # only supports x_dim = 1, y_dim = 1
        if self.train_x is None:
            self.train_x, self.train_y = self.gen_data(batch_size)
            # perturb the training targets with Gaussian noise of std 0.01
            noise = 0.01 * np.random.randn(*self.train_y.shape)
            self.train_y += noise
        return self.train_x, self.train_y

    def test(self, n, dist=None):
        if self.test_x is None:
            self.test_x, self.test_y = self.gen_data(n)
        return self.test_x, self.test_y
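    # Caching note: both methods generate data lazily on first use and return
    # the cached arrays afterwards, so repeated calls reuse the same (noisy)
    # training set rather than re-sampling it.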
class DataFunctional(Data):
    """Training data for functional approximation."""

    def __init__(self, functional, x_dim, y_dim, x_min, x_max, func2sensors, nsensor):
        super(DataFunctional, self).__init__('functional')
        self.functional = functional
        self.x_dim, self.y_dim = x_dim, y_dim
        self.x_min, self.x_max = x_min, x_max
        self.func2sensors, self.nsensor = func2sensors, nsensor
        # sensors in [0, 1]
        self.sensors = np.linspace(0, 1, num=nsensor)

    def train_next_batch(self, batch_size, *args, **kwargs):
        return self.test(batch_size, 'grid')
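    # Note: training batches reuse the test grid of size batch_size, and the
    # nsensor evenly spaced sensor locations on [0, 1] are presumably the
    # points at which func2sensors samples each input function.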
        for _ in range(500):
            # (per-iteration sampling of x, ylow and yhi is not shown in this snippet)
            self.train_x = np.vstack((self.train_x, x))
            self.train_y = np.vstack((self.train_y, np.hstack((ylow, yhi))))
        return self.train_x, self.train_y

    @runifnone('test_x', 'test_y')
    def test(self, n, *args, **kwargs):
        self.test_x = self.geom.uniform_points(n, True)
        ylow = self.flow(self.test_x)
        yhi = self.fhi(self.test_x)
        self.test_y = np.hstack((ylow, yhi))
        return self.test_x, self.test_y
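    # Note: flow and fhi presumably evaluate low- and high-fidelity versions of
    # the target function; stacking their outputs with np.hstack gives targets
    # with one column block per fidelity level.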
class DataClassification(Data):
    """Training data for classification."""

    def __init__(self, func, geom, online=False):
        super(DataClassification, self).__init__('classification')
        self.func = func
        self.geom = geom
        self.online = online
        self.train_x, self.train_y = None, None
        self.test_x, self.test_y = None, None

    def train_next_batch(self, batch_size, *args, **kwargs):
        if self.online:
            self.train_x = self.geom.random_points(batch_size, 'pseudo')
            self.train_y = self.func(self.train_x)
    # DataFunctional.test: presumably maps the sampled inputs onto the sensor locations.
    def test(self, n, *args, **kwargs):
        x, y = super(DataFunctional, self).test(n)
        return self.func2sensors(x, self.sensors), y
class DataFunctional2(Data):
    """Training data for functional approximation."""

    def __init__(self, functional, x_dim, y_dim, x_min, x_max, func2sensors, nsensor):
        super(DataFunctional2, self).__init__('functional')
        self.functional = functional
        self.x_dim, self.y_dim = x_dim, y_dim
        self.x_min, self.x_max = x_min, x_max
        self.func2sensors, self.nsensor = func2sensors, nsensor
        # sensors in [0, 1]
        self.sensors = np.linspace(0, 1, num=nsensor)

    def train_next_batch(self, batch_size, *args, **kwargs):
        return self.test(batch_size, 'grid')