# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# charge/discharge (including resource consumed for supply_plus techs)
elif var == 'storage':
array_flow = _get_reindexed_array('storage')
carrier_flow = (array_prod.sum('carriers') + array_con.sum('carriers') - resource_con)
carrier_flow = subset_sum_squeeze(carrier_flow, subset, sum_dims, squeeze)
elif var == 'resource_con':
array_flow = resource_con
else:
array_flow = _get_reindexed_array(var)
array_flow = subset_sum_squeeze(array_flow, subset, sum_dims, squeeze)
if 'timesteps' not in array_flow.dims or len(array_flow.dims) > 2:
e = exceptions.ModelError
raise e('Cannot plot timeseries for variable `{}` with subset `{}`'
'and `sum_dims: {}`'.format(var, subset, sum_dims))
for tech in array_flow.techs.values:
tech_dict = {'techs': tech}
if not array_flow.loc[tech_dict].sum():
continue
# We allow transmission tech information to show up in some cases
if 'techs_transmission' in dataset and tech in dataset.techs_transmission.values:
base_tech = 'transmission'
color = dataset.colors.loc[{'techs': tech.split(':')[0]}].item()
name = dataset.names.loc[{'techs': tech.split(':')[0]}].item()
if var in carriers:
continue # no transmission in carrier flow
else:
base_tech = dataset.inheritance.loc[tech_dict].item().split('.')[0]
]
multiplier = 2 # there are always two technologies associated with one link
else:
all_loc_techs = [
i for i in backend_model.loc_techs
if i.split('::')[1] == tech
]
multiplier = 1
max_systemwide = get_param(backend_model, 'energy_cap_max_systemwide', tech)
equals_systemwide = get_param(backend_model, 'energy_cap_equals_systemwide', tech)
if np.isinf(po.value(max_systemwide)) and not equals_systemwide:
return po.Constraint.NoConstraint
elif equals_systemwide and np.isinf(po.value(equals_systemwide)):
raise exceptions.ModelError(
'Cannot use inf for energy_cap_equals_systemwide for tech `{}`'.format(tech)
)
sum_expr = sum(backend_model.energy_cap[loc_tech] for loc_tech in all_loc_techs)
if equals_systemwide:
return sum_expr == equals_systemwide * multiplier
else:
return sum_expr <= max_systemwide * multiplier
for loc_tech in model_run.sets['loc_techs_conversion']:
# For any non-conversion technology, there are only two carriers
# (one produced and one consumed)
loc_tech_carrier_in = [
i for i in
model_run.sets['loc_tech_carriers_con']
if loc_tech == i.rsplit("::", 1)[0]
]
loc_tech_carrier_out = [
i for i in
model_run.sets['loc_tech_carriers_prod']
if loc_tech == i.rsplit("::", 1)[0]
]
if len(loc_tech_carrier_in) > 1 or len(loc_tech_carrier_out) > 1:
raise exceptions.ModelError(
'More than one carrier in or out associated with '
'conversion location:technology `{}`'.format(loc_tech)
)
else:
loc_techs_conversion_array.loc[
dict(loc_techs_conversion=loc_tech, carrier_tiers=["in", "out"])
] = [loc_tech_carrier_in[0], loc_tech_carrier_out[0]]
dataset = dataset.merge(
loc_techs_conversion_array.to_dataset(name="lookup_loc_techs_conversion")
)
return dataset
def _stack_data(data, dates, times):
    """
    Stack every non-time dimension of an xarray DataArray.

    The 'timesteps' dimension is first split into separate 'dates' and
    'times' dimensions; everything except 'dates' (with 'times' always as
    the last level) is then collapsed into a single 'stacked' dimension.

    Raises exceptions.ModelError if the array has no non-time dimensions
    left to stack.
    """
    # Split 'timesteps' into a (dates, times) MultiIndex, then pivot the
    # two levels out into separate dimensions.
    timestep_index = pd.MultiIndex.from_product(
        [dates, times], names=['dates', 'times']
    )
    reshaped = data.assign_coords(timesteps=timestep_index).unstack('timesteps')

    # 'times' is always kept as the final stacking level.
    dims_to_stack = list(set(reshaped.dims).difference(['dates', 'times'])) + ['times']
    if len(dims_to_stack) < 2:  # only 'times' present -> nothing else to stack
        raise exceptions.ModelError(
            "Cannot conduct time clustering with variable {} as it has no "
            "non-time dimensions.".format(data.name)
        )
    return reshaped.stack(stacked=dims_to_stack)
# get the cumulative sum of timestep resolution, to find where we hit our window and horizon
timestep_cumsum = model_data.timestep_resolution.cumsum('timesteps').to_pandas()
# get the timesteps at which we start and end our windows
window_ends = timestep_cumsum.where(
(timestep_cumsum % window == 0) | (timestep_cumsum == timestep_cumsum[-1])
)
window_starts = timestep_cumsum.where(
(~np.isnan(window_ends.shift(1))) | (timestep_cumsum == timestep_cumsum[0])
).dropna()
window_ends = window_ends.dropna()
horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values + window_to_horizon)]
if not any(window_starts):
raise exceptions.ModelError(
'Not enough timesteps or incorrect timestep resolution to run in '
'operational mode with an optimisation window of {}'.format(window)
)
# We will only update timeseries parameters
timeseries_data_vars = [
k for k, v in model_data.data_vars.items() if 'timesteps' in v.dims
and v.attrs['is_result'] == 0
]
# Loop through each window, solve over the horizon length, and add result to
# result_array we only go as far as the end of the last horizon, which may
# clip the last bit of data
result_array = []
# track whether each iteration finds an optimal solution or not
terminations = []
def _formatwarning(message, category, filename, lineno, line=None):
    """
    Custom replacement for warnings.formatwarning.

    ModelWarnings are rendered as a bare "Warning: <message>" line;
    every other category falls back to the original formatter.
    """
    # Guard clause: non-ModelWarnings keep the standard formatting.
    if category != exceptions.ModelWarning:
        return formatwarning_orig(message, category, filename, lineno, line)
    return 'Warning: ' + str(message) + '\n'
else:
raise exceptions.ModelError(
'Invalid subset_time value: {}'.format(subset_time_config)
)
else:
time_slice = str(subset_time_config)
for k in timeseries_data.keys():
timeseries_data[k] = timeseries_data[k].loc[time_slice, :]
# Ensure all timeseries have the same index
# FIXME: this error message could be improved with more detail
indices = [df.index for df in timeseries_data.values()]
first_index = indices[0]
for i in indices[1:]:
if not first_index.equals(i):
raise exceptions.ModelError(
'All time series indices must have the same values.'
)
return timeseries_data
"""
opt = SolverFactory(solver, solver_io=solver_io)
if solver_options:
for k, v in solver_options.items():
opt.options[k] = v
if save_logs:
solve_kwargs.update({
'symbolic_solver_labels': True,
'keepfiles': True
})
os.makedirs(save_logs, exist_ok=True)
TempfileManager.tempdir = save_logs # Sets log output dir
if 'warmstart' in solve_kwargs.keys() and solver in ['glpk', 'cbc']:
exceptions.warn(
'The chosen solver, {}, does not suport warmstart, which may '
'impact performance.'.format(solver)
)
del solve_kwargs['warmstart']
with redirect_stdout(LogWriter(logger, 'debug', strip=True)):
with redirect_stderr(LogWriter(logger, 'error', strip=True)):
# Ignore most of gurobipy's logging, as its output is
# already captured through STDOUT
logging.getLogger('gurobipy').setLevel(logging.ERROR)
results = opt.solve(backend_model, tee=True, **solve_kwargs)
return results
def get_capacity_constraint(backend_model, parameter, loc_tech,
                            _equals=None, _max=None, _min=None, scale=None):
    """
    Build a capacity constraint for one loc_tech decision variable.

    Parameters
    ----------
    backend_model : Pyomo model holding the decision variables and parameters.
    parameter : str
        Name of the decision variable (e.g. 'energy_cap'). Unless overridden
        via the keyword arguments, bounds are looked up from the
        '<parameter>_equals', '<parameter>_max', and '<parameter>_min'
        model parameters.
    loc_tech :
        Location::technology identifier to constrain.
    _equals, _max, _min : optional
        Explicit overrides for the corresponding bound parameters.
    scale : optional
        Multiplier applied to the bound(s) before constraining.

    Returns
    -------
    A Pyomo equality constraint when an 'equals' bound is set, a
    (min, var, max) range constraint otherwise, or
    po.Constraint.NoConstraint when the variable is unbounded
    (min == 0 and max == inf).

    Raises
    ------
    exceptions.ModelError
        If the 'equals' bound is infinite.
    """
    decision_variable = getattr(backend_model, parameter)
    # Use `is None` (not truthiness) so an explicit override of 0 is
    # honoured rather than silently replaced by the parameter lookup.
    if _equals is None:
        _equals = get_param(backend_model, parameter + '_equals', loc_tech)
    if _max is None:
        _max = get_param(backend_model, parameter + '_max', loc_tech)
    if _min is None:
        _min = get_param(backend_model, parameter + '_min', loc_tech)

    # Hoist the repeated po.value() evaluation; an unset parameter may
    # come back as False or None.
    equals_value = po.value(_equals)
    if equals_value is not False and equals_value is not None:
        if np.isinf(equals_value):
            raise exceptions.ModelError(
                'Cannot use inf for {}_equals for loc:tech `{}`'.format(parameter, loc_tech)
            )
        if scale:
            _equals *= scale
        return decision_variable[loc_tech] == _equals
    else:
        if po.value(_min) == 0 and np.isinf(po.value(_max)):
            return po.Constraint.NoConstraint
        else:
            if scale:
                _max *= scale
                _min *= scale
            return (_min, decision_variable[loc_tech], _max)