    del converted
else:
    print('file found %s, using for data' % minmax_fp_ts_csv)

if not os.path.isfile(minmax_fp_ts_csv):
    print('error :: file not found %s' % minmax_fp_ts_csv)
else:
    print('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)

try:
    df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
    df.columns = ['metric', 'timestamp', 'value']
except:
    print('error :: failed to create data frame from %s' % (str(minmax_fp_ts_csv)))

try:
    df_features = extract_features(
        df, column_id='metric', column_sort='timestamp', column_kind=None,
        column_value=None, feature_extraction_settings=tsf_settings)
except:
    print('error :: failed to create df_features from %s' % (str(minmax_fp_ts_csv)))
# Create transposed features csv
if not os.path.isfile(minmax_fp_fname_out):
    # Transpose
    df_t = df_features.transpose()
    df_t.to_csv(minmax_fp_fname_out)

try:
    # Calculate the count and sum of the features values
    df_sum = pd.read_csv(
        minmax_fp_fname_out, delimiter=',', header=0,
        names=['feature_name', 'value'])
    df_sum.columns = ['feature_name', 'value']
except:
    # assumed handler, following the error-handling pattern above; the
    # original excerpt is cut before the except clause
    print('error :: failed to create df_sum from %s' % (str(minmax_fp_fname_out)))
converted = []
for datapoint in datapoints:
    try:
        new_datapoint = [float(datapoint[0]), float(datapoint[1])]
        converted.append(new_datapoint)
    except:  # nosec
        continue

for ts, value in converted:
    utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
    with open(anomalous_ts_csv, 'a') as fh:
        fh.write(utc_ts_line)
del converted

df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
    df, column_id='metric', column_sort='timestamp', column_kind=None,
    column_value=None, feature_extraction_settings=tsf_settings)
del df

# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
    # Transpose
    df_t = df_features_current.transpose()
    df_t.to_csv(anomalous_fp_fname_out)
    del df_t
del df_features_current

# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
    anomalous_fp_fname_out, delimiter=',', header=0,
    names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
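
# Taken together, the excerpts above follow one pipeline: write the
# timeseries out as (metric, timestamp, value) CSV rows, load them into a
# DataFrame, extract features, transpose, and re-read the transposed csv
# to count and sum the feature values. Below is a condensed, self-contained
# sketch of that pipeline on synthetic data; note it uses the current
# tsfresh API (default_fc_parameters / EfficientFCParameters), the modern
# equivalent of the older ReasonableFeatureExtractionSettings used here.
import pandas as pd
from tsfresh import extract_features
from tsfresh.feature_extraction import EfficientFCParameters

# Synthetic (timestamp, value) rows standing in for the CSV data above
timeseries = [(1609459200 + i * 60, float(i % 7)) for i in range(60)]
df = pd.DataFrame(
    [('test.metric', ts, v) for ts, v in timeseries],
    columns=['metric', 'timestamp', 'value'])

# EfficientFCParameters skips the high-computational-cost calculators
df_features = extract_features(
    df, column_id='metric', column_sort='timestamp',
    default_fc_parameters=EfficientFCParameters(),
    disable_progressbar=True)

# Transpose to one row per feature, then count and sum the values
df_t = df_features.transpose()
features_count = len(df_t)
features_sum = df_t[df_t.columns[0]].sum()
print('%s features extracted, sum %s' % (features_count, features_sum))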
os.remove(ts_csv)

for ts, value in converted:
    # print('%s,%s' % (str(int(ts)), str(value)))
    utc_ts_line = '%s,%s,%s\n' % (metric, str(int(ts)), str(value))
    with open(ts_csv, 'a') as fh:
        fh.write(utc_ts_line)
del converted

df = pd.read_csv(ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
# print('DataFrame created with %s' % ts_csv)
df.columns = ['metric', 'timestamp', 'value']
tsf_settings = ReasonableFeatureExtractionSettings()
# Disable tqdm progress bar
tsf_settings.disable_progressbar = True
df_features = extract_features(
df, column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, feature_extraction_settings=tsf_settings)
del df
# print('features extracted from %s data' % ts_csv)
# write to disk
fname_out = fname_in + '.features.csv'
# Transpose
df_t = df_features.transpose()
# print('features transposed')
# Create transposed features csv
t_fname_out = fname_in + '.features.transposed.csv'
df_t.to_csv(t_fname_out)
del df_t
# Calculate the count and sum of the features values
df_sum = pd.read_csv(
    t_fname_out, delimiter=',', header=0,
    names=['feature_name', 'value'])
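# The excerpt is cut off at this point. Following the same pattern as the
# df_sum_2 block further down, the count and sum would typically be derived
# from the value column (the features_count and features_sum names are
# assumed here):
df_sum.columns = ['feature_name', 'value']
features_count = len(df_sum['value'])
features_sum = df_sum['value'].sum()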
all_values = []
for i, values in enumerate(column_data):
    if values is None:
        values = [0] * self.max_series_len
    elif isinstance(values, list):
        values = list(map(float, values))
    else:
        values = list(map(float, values.split(' ')))
    all_values.append(values)

    df = pd.DataFrame({'main_feature': values, 'id': [1] * len(values)})
    try:
        features = extract_features(
            df, column_id='id', disable_progressbar=True,
            default_fc_parameters=default_fc_parameters, n_jobs=self.n_jobs)
    except:
        # Fall back to single-process extraction if parallel extraction fails
        self.n_jobs = 1
        features = extract_features(
            df, column_id='id', disable_progressbar=True,
            default_fc_parameters=default_fc_parameters, n_jobs=self.n_jobs)
    features.fillna(value=0, inplace=True)
    features = list(features.iloc[0])
    ret.append(features)

for i, values in enumerate(all_values):
    # Zero-pad every series to a fixed length before encoding
    while len(values) < self.max_series_len:
        values.append(0)
    encoded_values = self.numerical_encoder.encode(values)
    encoded_numbers_list = []
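
# The encoder-style excerpt above produces one flat feature vector per
# series. A minimal, self-contained version of that pattern on made-up
# data, using tsfresh's MinimalFCParameters to keep the run cheap (all
# names and values below are illustrative, not from the excerpt):
import pandas as pd
from tsfresh import extract_features
from tsfresh.feature_extraction import MinimalFCParameters

values = [0.0, 1.0, 2.0, 1.5, 3.0]
df = pd.DataFrame({'main_feature': values, 'id': [1] * len(values)})
features = extract_features(
    df, column_id='id', disable_progressbar=True,
    default_fc_parameters=MinimalFCParameters(), n_jobs=1)
features.fillna(value=0, inplace=True)
feature_vector = list(features.iloc[0])  # one flat vector per series
# Note the design choice in the excerpt above: if parallel extraction
# raises (commonly a multiprocessing issue in constrained environments),
# it drops to n_jobs=1 and retries rather than failing the whole batch.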
datapoints = minmax_anomalous_ts
converted = []
for datapoint in datapoints:
    try:
        new_datapoint = [float(datapoint[0]), float(datapoint[1])]
        converted.append(new_datapoint)
    except:  # nosec
        continue

for ts, value in converted:
    utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
    with open(anomalous_ts_csv, 'a') as fh:
        fh.write(utc_ts_line)

df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
    df, column_id='metric', column_sort='timestamp', column_kind=None,
    column_value=None, feature_extraction_settings=tsf_settings)
# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
    # Transpose
    df_t = df_features_current.transpose()
    df_t.to_csv(anomalous_fp_fname_out)

# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
    anomalous_fp_fname_out, delimiter=',', header=0,
    names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
df_sum_2['feature_name'] = df_sum_2['feature_name'].astype(str)
df_sum_2['value'] = df_sum_2['value'].astype(float)
minmax_anomalous_features_count = len(df_sum_2['value'])
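
# A feature count on its own is only half the comparison; a hedged sketch
# of how such a count/sum pair could be checked against a stored features
# profile (minmax_fp_features_sum and the 5 percent threshold are assumed
# here for illustration, not taken from the excerpt):
minmax_anomalous_features_sum = float(df_sum_2['value'].sum())
percent_different = 100 * (
    (minmax_anomalous_features_sum - minmax_fp_features_sum) /
    minmax_fp_features_sum)
if abs(percent_different) < 5:
    print('feature sums within 5 percent - considered similar')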
else:
    logger.info('file found %s, using for data' % minmax_fp_ts_csv)

if not os.path.isfile(minmax_fp_ts_csv):
    logger.error('error :: file not found %s' % minmax_fp_ts_csv)
else:
    logger.info('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)

try:
    df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
    df.columns = ['metric', 'timestamp', 'value']
except:
    logger.error(traceback.format_exc())
    logger.error('error :: failed to create data frame from %s' % (str(minmax_fp_ts_csv)))

try:
    df_features = extract_features(
        df, column_id='metric', column_sort='timestamp', column_kind=None,
        column_value=None, feature_extraction_settings=tsf_settings)
except:
    logger.error(traceback.format_exc())
    logger.error('error :: failed to create df_features from %s' % (str(minmax_fp_ts_csv)))
# Create transposed features csv
if not os.path.isfile(minmax_fp_fname_out):
    # Transpose
    df_t = df_features.transpose()
    df_t.to_csv(minmax_fp_fname_out)
else:
    if LOCAL_DEBUG:
        logger.debug('debug :: file exists - %s' % minmax_fp_fname_out)

try:
    # Calculate the count and sum of the features values
    df_sum = pd.read_csv(
        minmax_fp_fname_out, delimiter=',', header=0,
        names=['feature_name', 'value'])
except:
    # assumed handler, following the error-handling pattern above; the
    # original excerpt is cut mid-call here
    logger.error(traceback.format_exc())
    logger.error('error :: failed to create df_sum from %s' % (str(minmax_fp_fname_out)))
df_features = False
try:
    # @modified 20161226 - Bug #1822: tsfresh extract_features process stalling
    # Changed to use the new ReasonableFeatureExtractionSettings that was
    # introduced in tsfresh-0.4.0 to exclude the computationally high cost
    # of extracting features from very static timeseries that has little to
    # no variation in the values, which results in features taking up to
    # almost 600 seconds to calculate on a timeseries of length 10075
    # (168h - 1 datapoint per 60s)
    # In terms of inline feature calculation, always exclude
    # high_comp_cost features.
    # df_features = extract_features(df, column_id='metric', column_sort='timestamp', column_kind=None, column_value=None)
    tsf_settings = ReasonableFeatureExtractionSettings()
    # Disable tqdm progress bar
    tsf_settings.disable_progressbar = True
    df_features = extract_features(
        df, column_id='metric', column_sort='timestamp', column_kind=None,
        column_value=None, feature_extraction_settings=tsf_settings)
    # @modified 20190413 - Bug #2934: Ionosphere - no mirage.redis.24h.json file
    # Added log_context to report the context
    current_logger.info('%s :: features extracted from %s data' % (
        log_context, ts_csv))
except:
    # traceback.print_exc() returns None; format_exc() returns the string
    trace = traceback.format_exc()
    current_logger.debug(trace)
    # @modified 20190413 - Bug #2934: Ionosphere - no mirage.redis.24h.json file
    # Added log_context to report the context
    fail_msg = 'error: %s :: extracting features with tsfresh from - %s' % (log_context, ts_csv)
    current_logger.error('%s' % fail_msg)

end_feature_extraction = timer()
# @modified 20190413 - Bug #2934: Ionosphere - no mirage.redis.24h.json file
# Added log_context to report the context
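
# timer() here is most likely timeit.default_timer. A sketch of the
# elapsed-time report this excerpt is building toward (the
# start_feature_extraction variable and the message wording are assumed):
from timeit import default_timer as timer

start_feature_extraction = timer()
# ... extract_features(...) as above ...
end_feature_extraction = timer()
current_logger.info('%s :: feature extraction took %.6f seconds' % (
    log_context, end_feature_extraction - start_feature_extraction))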