Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): this line is the dangling tail of a multidim_scaling(...) call
# whose opening lines lie above this chunk — the fragment is incomplete here.
update_feature_range=False)
# combine abdm and mvdm
# Blend the two scaled categorical distance maps into one per-feature map,
# weighting ABDM by w and MVDM by (1 - w) — presumably 0 <= w <= 1; confirm
# against the enclosing method's signature.
self.d_abs = {} # type: Dict
# Copy each (min, max) range array so the per-feature updates below do not
# mutate the live self.feature_range until the final assignment.
new_feature_range = tuple([f.copy() for f in self.feature_range])
for k, v in d_abs_abdm.items():
# Weighted combination of ABDM and MVDM scaled distances for feature k.
self.d_abs[k] = v * w + d_abs_mvdm[k] * (1 - w)
if center: # center the numerical feature values
# Shift so the values are symmetric around 0 (midpoint of min/max maps to 0).
self.d_abs[k] -= .5 * (self.d_abs[k].max() + self.d_abs[k].min())
if update_feature_range:
# Record the new min/max for feature k; assumes feature_range holds arrays
# indexed as [0, k] (shape (1, n_features)) — TODO confirm against caller.
new_feature_range[0][0, k] = self.d_abs[k].min()
new_feature_range[1][0, k] = self.d_abs[k].max()
if update_feature_range: # assign updated feature range
self.feature_range = new_feature_range
else: # apply multidimensional scaling for the abdm or mvdm distances
# Single-measure path ('abdm' or 'mvdm'): multidim_scaling both scales the
# pairwise distances and (optionally) returns an updated feature range.
self.d_abs, self.feature_range = multidim_scaling(d_pair, n_components=2, use_metric=True,
feature_range=self.feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=update_feature_range)
# create array used for ragged tensor placeholder
# Per-category distance vectors have different lengths; right-pad each with
# zeros up to self.max_cat so they stack into one rectangular numpy array.
self.d_abs_ragged = [] # type: List
for _, v in self.d_abs.items():
n_pad = self.max_cat - len(v)
v_pad = np.pad(v, (0, n_pad), 'constant')
self.d_abs_ragged.append(v_pad)
self.d_abs_ragged = np.array(self.d_abs_ragged)
# If an encoder model is available, encode the training data (presumably used
# later for class prototypes in the encoder's latent space — TODO confirm).
if self.enc_model:
enc_data = self.enc.predict(train_data)
self.class_proto = {} # type: dict
# NOTE(review): from here this chunk repeats code already seen above — the
# source appears to be overlapping scraped snippets of the same method.
d_pair = mvdm(train_data_ord, preds, self.cat_vars_ord, alpha=1)
# combined distance measure
if d_type == 'abdm-mvdm':
# pairwise distances
d_abdm = abdm(train_data_bin, self.cat_vars_ord, cat_vars_bin)
d_mvdm = mvdm(train_data_ord, preds, self.cat_vars_ord, alpha=1)
# multidim scaled distances
# Scale each pairwise distance measure separately; feature-range updates are
# deferred until after the two measures are blended below.
d_abs_abdm, _ = multidim_scaling(d_abdm, n_components=2, use_metric=True,
feature_range=self.feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)
d_abs_mvdm, _ = multidim_scaling(d_mvdm, n_components=2, use_metric=True,
feature_range=self.feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)
# combine abdm and mvdm
self.d_abs = {} # type: Dict
new_feature_range = tuple([f.copy() for f in self.feature_range])
for k, v in d_abs_abdm.items():
# Weighted blend of ABDM and MVDM scaled distances for feature k.
self.d_abs[k] = v * w + d_abs_mvdm[k] * (1 - w)
if center: # center the numerical feature values
self.d_abs[k] -= .5 * (self.d_abs[k].max() + self.d_abs[k].min())
if update_feature_range:
new_feature_range[0][0, k] = self.d_abs[k].min()
new_feature_range[1][0, k] = self.d_abs[k].max()
if update_feature_range: # assign updated feature range
# NOTE(review): broken seam — lines are missing between this `if` and the
# string below. The string is the argument tail of a `raise ValueError(`
# (unsupported d_type) whose opening line was dropped during extraction.
'{} is not supported.'.format(d_type))
# pairwise distances for categorical variables
# Select the pairwise distance measure for the single-measure d_type values;
# the combined 'abdm-mvdm' case is handled separately below.
if d_type == 'abdm':
d_pair = abdm(train_data_bin, self.cat_vars_ord, cat_vars_bin)
elif d_type == 'mvdm':
d_pair = mvdm(train_data_ord, preds, self.cat_vars_ord, alpha=1)
# combined distance measure
if d_type == 'abdm-mvdm':
# pairwise distances
d_abdm = abdm(train_data_bin, self.cat_vars_ord, cat_vars_bin)
d_mvdm = mvdm(train_data_ord, preds, self.cat_vars_ord, alpha=1)
# multidim scaled distances
# Each measure is scaled independently with feature-range updates disabled;
# the blended result drives the range update in the (truncated) code below.
d_abs_abdm, _ = multidim_scaling(d_abdm, n_components=2, use_metric=True,
feature_range=self.feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)
d_abs_mvdm, _ = multidim_scaling(d_mvdm, n_components=2, use_metric=True,
feature_range=self.feature_range,
standardize_cat_vars=standardize_cat_vars,
smooth=smooth, center=center,
update_feature_range=False)
# combine abdm and mvdm
# NOTE(review): third occurrence of this block in the chunk; the fragment is
# cut off mid-loop at the line below (duplicated scrape of the same method).
self.d_abs = {} # type: Dict
new_feature_range = tuple([f.copy() for f in self.feature_range])
for k, v in d_abs_abdm.items():
self.d_abs[k] = v * w + d_abs_mvdm[k] * (1 - w)