selected_data = []
selected_name = []
stat_temp = self.get_activated_num()
stat_temp = OrderedDict(sorted(six.iteritems(stat_temp), key=lambda x: x[0]))

# plot_interp = 'Nearest'

if self.scaler_data is not None:
    if np.count_nonzero(self.scaler_data) == 0:
        logger.warning('scaler is zero - scaling was not applied')
    elif len(self.scaler_data[self.scaler_data == 0]) > 0:
        logger.warning('scaler data has zero values')

for i, (k, v) in enumerate(six.iteritems(stat_temp)):
    data_dict = normalize_data_by_scaler(self.dict_to_plot[k], self.scaler_data,
                                         data_name=k, name_not_scalable=self.name_not_scalable)
    selected_data.append(data_dict)
    selected_name.append(k)  # self.file_name+'_'+str(k)

return selected_data, selected_name
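# --- Illustration (not from the original source). A minimal sketch of what
# `normalize_data_by_scaler` is assumed to do in these snippets: element-wise
# division by the scaler, with non-scalable names (e.g. positions) returned
# unchanged and zero scaler pixels guarded against division errors (consistent
# with the warnings above). The actual pyxrf implementation may differ in details.
import numpy as np

def _normalize_by_scaler_sketch(data, scaler, data_name=None, name_not_scalable=None):
    data = np.asarray(data, dtype=float)
    if name_not_scalable and data_name in name_not_scalable:
        return data  # do not scale positions, R-squared maps, etc.
    scaler = np.asarray(scaler, dtype=float)
    # Where the scaler is zero, emit 0 instead of inf/nan
    return np.divide(data, scaler, out=np.zeros_like(data), where=scaler != 0)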
scaler_name = None

# Clear ALL fluorescence values. Don't touch any other data
for eline, info in quant_fluor_data_dict["element_lines"].items():
    info["fluorescence"] = None

# Save the scaler name
quant_fluor_data_dict["scaler_name"] = scaler_name

# Compute fluorescence of the emission lines
eline_list = tuple(quant_fluor_data_dict["element_lines"].keys())
for eline, eline_map in xrf_map_dict.items():  # 'eline_map' avoids shadowing the built-in 'map'
    if eline in eline_list:
        # Normalize the map if a scaler is selected. (Typically a scaler IS selected.)
        if scaler_name:
            norm_map = normalize_data_by_scaler(eline_map, xrf_map_dict[scaler_name])
        else:
            norm_map = eline_map

        # Ignore pixels along the edges (those pixels are likely to be outliers that will
        # visibly bias the mean value in small calibration scans). If the scan is smaller
        # than 2 pixels along any dimension, then all pixels are used, including edges.
        def _get_range(n_elements):
            if n_elements > 2:
                n_min, n_max = 1, n_elements - 1
            else:
                n_min, n_max = 0, n_elements
            return n_min, n_max

        ny_min, ny_max = _get_range(norm_map.shape[0])
        nx_min, nx_max = _get_range(norm_map.shape[1])
        mean_fluor = np.mean(norm_map[ny_min:ny_max, nx_min:nx_max])
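# --- Illustration (not from the original source): the edge-trimming rule above
# applied to a toy 4x2 map. A border pixel is dropped only along dimensions
# with more than 2 pixels; smaller dimensions are used in full.
import numpy as np

def _get_range_demo(n_elements):  # same rule as '_get_range' above
    return (1, n_elements - 1) if n_elements > 2 else (0, n_elements)

demo_map = np.arange(8, dtype=float).reshape(4, 2)
ny0, ny1 = _get_range_demo(demo_map.shape[0])  # (1, 3): first and last rows trimmed
nx0, nx1 = _get_range_demo(demo_map.shape[1])  # (0, 2): only 2 columns, keep all
print(np.mean(demo_map[ny0:ny1, nx0:nx1]))     # mean over the 2x2 interior -> 3.5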
    r2 = self.experiment_distance_to_sample
    if (r1 is not None) and (r2 is not None) and (r1 > 0) and (r2 > 0) and \
            not math.isclose(r1, r2, abs_tol=1e-20):
        # Element density increases as the distance becomes larger
        # (fluorescence is reduced as r**2)
        data_arr *= (r2 / r1) ** 2
        logger.info(f"Emission line {data_name}. Correction for distance-to-sample was performed "
                    f"(standard: {r1}, sample: {r2})")
    else:
        logger.info(f"Emission line {data_name}. Correction for distance-to-sample was skipped "
                    f"(standard: {r1}, sample: {r2})")

    is_quant_normalization_applied = True
else:
    # The following condition also takes care of the case when 'scaler_name_fixed' is None
    if scaler_name_default in scaler_dict:
        data_arr = normalize_data_by_scaler(data_in=data_in,
                                            scaler=scaler_dict[scaler_name_default],
                                            data_name=data_name,
                                            name_not_scalable=name_not_scalable)
    else:
        data_arr = data_in

return data_arr, is_quant_normalization_applied
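# --- Illustration (not from the original source), with hypothetical numbers:
# the inverse-square correction above. Fluorescence intensity falls off as
# 1/r**2, so when the experiment is run farther from the sample than the
# standard (r2 > r1), the computed element density is scaled up accordingly.
import math

r1 = 2.0  # hypothetical distance-to-sample for the calibration standard
r2 = 3.0  # hypothetical distance-to-sample for the experiment
if not math.isclose(r1, r2, abs_tol=1e-20):
    correction = (r2 / r1) ** 2  # = 2.25 for these numbers
    print(f"data_arr would be multiplied by {correction}")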
name_append=f"{name_append}_quantitative",
file_format=file_format,
scaler_name_list=scaler_name_list)
else:
logger.error("Quantitative analysis parameters are not provided. "
f"Quantitative data is not saved in {file_format.upper()} format.")
# Normalize data if scaler is provided
if scaler_name is not None:
if scaler_name in fit_output:
scaler_data = fit_output[scaler_name]
for data_name, data in fit_output.items():
if 'pos' in data_name or 'r2' in data_name:
continue
# Normalization of data
data_normalized = normalize_data_by_scaler(data, scaler_data)
if use_average is True:
data_normalized *= np.mean(scaler_data)
_save_data(data_normalized, output_dir=output_dir,
file_name=data_name,
name_prefix_detector=name_prefix_detector,
name_append=f"{name_append}_norm",
file_format=file_format,
scaler_name_list=scaler_name_list)
else:
logger.warning(f"The scaler '{scaler_name}' was not found. Data normalization "
f"was not performed for {file_format.upper()} file.")
# Always save not normalized data
for data_name, data in fit_output.items():
_save_data(data, output_dir=output_dir,
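# --- Illustration (not from the original source): the effect of 'use_average'
# above. Plain division by the scaler changes the magnitude of the data;
# multiplying back by the scaler's mean keeps the normalized maps in roughly
# the original count range.
import numpy as np

data = np.array([10.0, 20.0, 30.0])
scaler = np.array([1.0, 2.0, 3.0])
normalized = data / scaler     # [10., 10., 10.] - original magnitude is lost
normalized *= np.mean(scaler)  # [20., 20., 20.] - restored to ~original scale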
return c_min, c_max
for i, (k, v) in enumerate(six.iteritems(stat_temp)):
    quant_norm_applied = False
    if self.quantitative_normalization:
        # Quantitative normalization
        data_dict, quant_norm_applied = self.param_quant_analysis.apply_quantitative_normalization(
            data_in=self.dict_to_plot[k],
            scaler_dict=self.scaler_norm_dict,
            scaler_name_default=self.get_selected_scaler_name(),
            data_name=k,
            name_not_scalable=self.name_not_scalable)
    else:
        # Normalize by the selected scaler in a regular way
        data_dict = normalize_data_by_scaler(data_in=self.dict_to_plot[k],
                                             scaler=self.scaler_data,
                                             data_name=k,
                                             name_not_scalable=self.name_not_scalable)
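# --- Note (not from the original source): here and in the range computation
# below, `apply_quantitative_normalization` is assumed to return a 2-tuple of
# (normalized array, flag). The flag reports whether quantitative normalization
# was actually applied; it remains False when the function falls back to the
# default scaler or returns the data unchanged (see the earlier 'else' branches).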
if pixel_or_pos_local or scatter_show_local:
    # xd_min, xd_max, yd_min, yd_max = min(self.x_pos), max(self.x_pos),
    #                                  min(self.y_pos), max(self.y_pos)
    x_pos_2D = self.data_dict['positions']['x_pos']
    y_pos_2D = self.data_dict['positions']['y_pos']
    xd_min, xd_max, yd_min, yd_max = x_pos_2D.min(), x_pos_2D.max(), y_pos_2D.min(), y_pos_2D.max()

    xd_axis_min, xd_axis_max, yd_axis_min, yd_axis_max = \
        _compute_equal_axes_ranges(xd_min, xd_max, yd_min, yd_max)

    xd_min, xd_max = _adjust_data_range_using_min_ratio(xd_min, xd_max, xd_axis_max - xd_axis_min)
    yd_min, yd_max = _adjust_data_range_using_min_ratio(yd_min, yd_max, yd_axis_max - yd_axis_min)
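# --- Illustration (not from the original source): a minimal sketch of what
# `_compute_equal_axes_ranges` is assumed to do: widen the shorter of the two
# ranges around its center so both axes span the same length and maps keep a
# 1:1 aspect ratio. The exact margins used by pyxrf may differ.
def _compute_equal_axes_ranges_sketch(x_min, x_max, y_min, y_max):
    span = max(x_max - x_min, y_max - y_min)  # common span for both axes
    x_c, y_c = (x_min + x_max) / 2, (y_min + y_max) / 2
    return (x_c - span / 2, x_c + span / 2,
            y_c - span / 2, y_c + span / 2)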
# Do not apply scaler normalization to non-scalable data
self.range_dict.clear()
for data_name in self.dict_to_plot.keys():
    if self.quantitative_normalization:
        # Quantitative normalization
        data_arr, _ = self.param_quant_analysis.apply_quantitative_normalization(
            data_in=self.dict_to_plot[data_name],
            scaler_dict=self.scaler_norm_dict,
            scaler_name_default=self.get_selected_scaler_name(),
            data_name=data_name,
            name_not_scalable=self.name_not_scalable)
    else:
        # Normalize by the selected scaler in a regular way
        data_arr = normalize_data_by_scaler(data_in=self.dict_to_plot[data_name],
                                            scaler=self.scaler_data,
                                            data_name=data_name,
                                            name_not_scalable=self.name_not_scalable)

    lowv = np.min(data_arr)
    highv = np.max(data_arr)
    self.range_dict[data_name] = {'low': lowv, 'low_default': lowv,
                                  'high': highv, 'high_default': highv}
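# --- Illustration (not from the original source): a hypothetical consumer of
# 'range_dict' built above. 'low'/'high' hold the user-adjustable display range,
# while 'low_default'/'high_default' preserve the full data range for a reset.
import numpy as np

def _clip_for_display(data, entry):
    # Clip the map to the selected range before rendering
    return np.clip(data, entry['low'], entry['high'])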