# Fit the reversible data transformer and encode the training data.
self.transformer = BGMTransformer()
self.transformer.fit(train_data, categoricals, ordinals)
train_data = self.transformer.transform(train_data)

data_sampler = Sampler(train_data, self.transformer.output_info)

data_dim = self.transformer.output_dim
self.cond_generator = Cond(train_data, self.transformer.output_info)

# Build the generator and discriminator; both take the conditional vector
# (of size n_opt) in addition to their regular input.
self.generator = Generator(
    self.embedding_dim + self.cond_generator.n_opt,
    self.gen_dim,
    data_dim).to(self.device)

discriminator = Discriminator(
    data_dim + self.cond_generator.n_opt,
    self.dis_dim).to(self.device)

optimizerG = optim.Adam(
    self.generator.parameters(), lr=2e-4, betas=(0.5, 0.9), weight_decay=self.l2scale)
optimizerD = optim.Adam(discriminator.parameters(), lr=2e-4, betas=(0.5, 0.9))

assert self.batch_size % 2 == 0

# Parameters of the standard normal latent distribution.
mean = torch.zeros(self.batch_size, self.embedding_dim, device=self.device)
std = mean + 1

steps_per_epoch = len(train_data) // self.batch_size
for i in range(self.epochs):
    for id_ in range(steps_per_epoch):
        # Sample a batch of latent noise vectors for the generator.
        fakez = torch.normal(mean=mean, std=std)
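# Illustrative sketch only, not project code: the loop above is cut off right
# after sampling the latent noise `fakez`. The sketch below shows the generic
# generator/discriminator update pattern with toy networks; every name here
# (toy_generator, toy_discriminator, latent_dim, data_dim) is a hypothetical
# stand-in, and the real training loop also mixes in conditional vectors and
# uses its own loss, which this does not reproduce.
import torch
from torch import nn, optim

latent_dim, data_dim, batch_size = 16, 8, 32
toy_generator = nn.Sequential(nn.Linear(latent_dim, 32), nn.ReLU(), nn.Linear(32, data_dim))
toy_discriminator = nn.Sequential(nn.Linear(data_dim, 32), nn.ReLU(), nn.Linear(32, 1))
opt_g = optim.Adam(toy_generator.parameters(), lr=2e-4, betas=(0.5, 0.9))
opt_d = optim.Adam(toy_discriminator.parameters(), lr=2e-4, betas=(0.5, 0.9))
real = torch.randn(batch_size, data_dim)  # stand-in for a real data batch
bce = nn.BCEWithLogitsLoss()

# Discriminator step: real rows should score 1, generated rows 0.
fakez = torch.normal(mean=torch.zeros(batch_size, latent_dim),
                     std=torch.ones(batch_size, latent_dim))
fake = toy_generator(fakez)
loss_d = (
    bce(toy_discriminator(real), torch.ones(batch_size, 1))
    + bce(toy_discriminator(fake.detach()), torch.zeros(batch_size, 1))
)
opt_d.zero_grad()
loss_d.backward()
opt_d.step()

# Generator step: try to make the discriminator score generated rows as real.
loss_g = bce(toy_discriminator(fake), torch.ones(batch_size, 1))
opt_g.zero_grad()
loss_g.backward()
opt_g.step()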
def __init__(self, meta, side, layers, device):
    super(Classifier, self).__init__()
    self.meta = meta
    self.side = side
    self.seq = Sequential(*layers)
    self.valid = True

    # The auxiliary classifier is only usable when the last column is a
    # binary categorical column named 'label'.
    if meta[-1]['name'] != 'label' or meta[-1]['type'] != CATEGORICAL or meta[-1]['size'] != 2:
        self.valid = False

    # Zero out the label's cell in the side x side grid layout so the
    # classifier cannot read the label directly from its input.
    masking = np.ones((1, 1, side, side), dtype='float32')
    index = len(self.meta) - 1
    self.r = index // side
    self.c = index % side
    masking[0, 0, self.r, self.c] = 0
    self.masking = torch.from_numpy(masking).to(device)
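# Illustrative sketch, not project code: how the masking grid above behaves
# for a hypothetical table with 10 columns laid out on a 4x4 grid. The column
# count and side length are assumptions chosen only for the example; the last
# column (the label) lands at row 2, column 1 and is zeroed out.
import numpy as np

side = 4
n_columns = 10                      # hypothetical column count
index = n_columns - 1               # position of the label column
r, c = index // side, index % side  # -> (2, 1)

masking = np.ones((1, 1, side, side), dtype='float32')
masking[0, 0, r, c] = 0
print(r, c)           # 2 1
print(masking[0, 0])  # all ones except a single 0 at row 2, column 1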
def test_credit():
    benchmark(IdentitySynthesizer, iterations=1, datasets=['credit'])
def test_inverse_transform(self):
    """Transform discrete values back into their original space."""
    # Setup
    n_bins = 2
    instance = DiscretizeTransformer(n_bins=n_bins)
    data = pd.DataFrame({
        'A': [1 / (x + 1) for x in range(10)],
        'B': [x for x in range(10)]
    }).values
    instance.fit(data)
    transformed_data = instance.transform(data)

    # Each value is recovered as the center of the bin it was assigned to.
    expected_result = pd.DataFrame({
        'A': [0.775, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325, 0.325],
        'B': [2.25, 2.25, 2.25, 2.25, 2.25, 6.75, 6.75, 6.75, 6.75, 6.75]
    })

    # Run
    result = instance.inverse_transform(transformed_data)

    # Check
    np.testing.assert_allclose(result, expected_result)
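# Illustrative sketch, not project code: the expected values in the test above
# are the bin centers produced by scikit-learn's KBinsDiscretizer. Assuming
# DiscretizeTransformer wraps a uniform, ordinal-encoded discretizer (an
# assumption, not shown in this snippet), the round trip can be reproduced
# directly:
import pandas as pd
from sklearn.preprocessing import KBinsDiscretizer

data = pd.DataFrame({
    'A': [1 / (x + 1) for x in range(10)],
    'B': [x for x in range(10)]
}).values

discretizer = KBinsDiscretizer(n_bins=2, encode='ordinal', strategy='uniform')
binned = discretizer.fit_transform(data)           # bin index per value
recovered = discretizer.inverse_transform(binned)  # center of each bin

print(recovered[:, 0])  # 0.775 for the first row, 0.325 for the rest
print(recovered[:, 1])  # 2.25 for rows 0-4, 6.75 for rows 5-9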
def test_fit(self, kbins_mock):
    # Setup
    n_bins = 2
    instance = DiscretizeTransformer(n_bins=n_bins)
    data = pd.DataFrame({
        'A': [1 / (x + 1) for x in range(10)],
        'B': [x for x in range(10)]
    }).values
    kbins_instance = kbins_mock.return_value

    # Run
    instance.fit(data, [], [])

    # Check
    assert instance.column_index == [0, 1]
    assert instance.discretizer == kbins_instance
    assert instance.meta == [
        {
            'name': 0,
            'type': 'continuous',
def test_intrusion():
    benchmark(IdentitySynthesizer, iterations=1, datasets=['intrusion'])


def test_insurance():
    benchmark(IdentitySynthesizer, iterations=1, datasets=['insurance'])


def test_mnist12():
    benchmark(IdentitySynthesizer, iterations=1, datasets=['mnist12'])


def test_child():
    benchmark(IdentitySynthesizer, iterations=1, datasets=['child'])


def test_gridr():
    benchmark(IdentitySynthesizer, iterations=1, datasets=['gridr'])
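# Illustrative usage sketch, not project code: since `benchmark` takes a list
# of dataset names, the per-dataset tests above could presumably be collapsed
# into one call. Whether the scorer accepts several datasets in a single run
# is an assumption here, not something the tests above demonstrate.
def test_identity_multiple_datasets():
    benchmark(
        IdentitySynthesizer,
        iterations=1,
        datasets=['credit', 'intrusion', 'insurance', 'mnist12', 'child', 'gridr']
    )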