# Hide all axis spines for a cleaner look
for ax in axes:
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)

fig.suptitle(title, fontsize=14, y=.995,
             fontname=fontname, fontweight='bold', color='black')
fig.set_facecolor('white')

if subtitle:
    # Subtitle: date range of the returns series plus its Sharpe ratio
    axes[0].set_title("\n%s - %s ; Sharpe: %.2f " % (
        returns.index.date[:1][0].strftime('%e %b \'%y'),
        returns.index.date[-1:][0].strftime('%e %b \'%y'),
        _stats.sharpe(returns)
    ), fontsize=12, color='gray')

# Top panel: cumulative return in percent
axes[0].set_ylabel('Cumulative Return', fontname=fontname,
                   fontweight='bold', fontsize=12)
axes[0].plot(_stats.compsum(returns) * 100, color=colors[1],
             lw=1 if grayscale else lw, zorder=1)
axes[0].axhline(0, color='silver', lw=1, zorder=0)

# Drawdown series in percent; choose a y-tick step based on its depth
dd = _stats.to_drawdown_series(returns) * 100
ddmin = _utils._round_to_closest(abs(dd.min()), 5)
ddmin_ticks = 5
if ddmin > 50:
    ddmin_ticks = ddmin / 4
elif ddmin > 20:
    ddmin_ticks = ddmin / 3
ddmin_ticks = int(_utils._round_to_closest(ddmin_ticks, 5))
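# The snippet above relies on a `_utils._round_to_closest(val, res)` helper
# whose implementation is not shown here. A minimal sketch of such a helper
# (hypothetical, not the library's own code): round a value to the nearest
# multiple of a given resolution.
def round_to_closest(val, res):
    """e.g. round_to_closest(23.7, 5) -> 25"""
    return round(val / res) * res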
# General information about the period under analysis
metrics['Start Period'] = _pd.Series(s_start)
metrics['End Period'] = _pd.Series(s_end)
metrics['Risk-Free Rate %'] = _pd.Series(s_rf)
metrics['Time in Market %'] = _stats.exposure(df) * pct

metrics['~'] = blank  # spacer row in the report

# Headline return metrics
if compounded:
    metrics['Cumulative Return %'] = (
        _stats.comp(df) * pct).map('{:,.2f}'.format)
else:
    metrics['Total Return %'] = (df.sum() * pct).map('{:,.2f}'.format)
metrics['CAGR%%'] = _stats.cagr(df, rf, compounded) * pct
metrics['Sharpe'] = _stats.sharpe(df, rf)
metrics['Sortino'] = _stats.sortino(df, rf)
metrics['Max Drawdown %'] = blank   # placeholder
metrics['Longest DD Days'] = blank  # placeholder

# Additional metrics only included in 'full' report mode
if mode.lower() == 'full':
    ret_vol = _stats.volatility(df['returns']) * pct
    if "benchmark" in df:
        bench_vol = _stats.volatility(df['benchmark']) * pct
        metrics['Volatility (ann.) %'] = [ret_vol, bench_vol]
        metrics['R^2'] = _stats.r_squared(df['returns'], df['benchmark'])
    else:
        metrics['Volatility (ann.) %'] = [ret_vol]
    metrics['Calmar'] = _stats.calmar(df)
    metrics['Skew'] = _stats.skew(df)
    metrics['Kurtosis'] = _stats.kurtosis(df)
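# For context, the block above assumes `df` is a DataFrame of periodic
# returns with a 'returns' column and an optional 'benchmark' column.
# A hypothetical, self-contained example of such an input (synthetic data,
# for illustration only):
import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=252, freq='B')
rng = np.random.default_rng(42)
df = pd.DataFrame({
    'returns': rng.normal(0.0005, 0.010, len(idx)),
    'benchmark': rng.normal(0.0003, 0.009, len(idx)),
}, index=idx)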
def run(data, runs=1000):
    """Simulate `runs` randomly weighted portfolios from the asset
    returns in `data` and record each portfolio's statistics."""
    weights = []
    sharpes = np.zeros(runs)
    returns = np.zeros(sharpes.shape)
    drawdowns = np.zeros(sharpes.shape)
    volatility = np.zeros(sharpes.shape)

    for i in range(runs):
        # Draw a random weight vector and compute the portfolio's returns
        w = create_random_weights(len(data.columns))
        r = (data * w).sum(axis=1)

        weights.append(w)
        returns[i] = r.add(1).prod()          # total compounded growth
        sharpes[i] = stats.sharpe(r)
        drawdowns[i] = stats.max_drawdown(r)
        volatility[i] = stats.volatility(r)

    return Weights({
        'data': data,
        'weights': weights,
        'sharpes': sharpes,
        'returns': returns,
        'drawdowns': drawdowns,
        'volatility': volatility
    })
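# `run()` depends on a `create_random_weights()` helper that is not shown
# here. A minimal sketch, under the assumption that it should return
# non-negative weights summing to 1 (hypothetical implementation):
import numpy as np

def create_random_weights(n_assets):
    w = np.random.random(n_assets)
    return w / w.sum()

# Hypothetical usage: `asset_returns` would be a DataFrame of periodic
# returns, one column per asset.
# result = run(asset_returns, runs=5000)
# best = result.sharpes.argmax()  # index of the highest-Sharpe portfolio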
_po.geometric_mean = stats.geometric_mean
_po.ghpr = stats.ghpr
_po.outliers = stats.outliers
_po.remove_outliers = stats.remove_outliers
_po.best = stats.best
_po.worst = stats.worst
_po.consecutive_wins = stats.consecutive_wins
_po.consecutive_losses = stats.consecutive_losses
_po.exposure = stats.exposure
_po.win_rate = stats.win_rate
_po.avg_return = stats.avg_return
_po.avg_win = stats.avg_win
_po.avg_loss = stats.avg_loss
_po.volatility = stats.volatility
_po.implied_volatility = stats.implied_volatility
_po.sharpe = stats.sharpe
_po.sortino = stats.sortino
_po.cagr = stats.cagr
_po.rar = stats.rar
_po.skew = stats.skew
_po.kurtosis = stats.kurtosis
_po.calmar = stats.calmar
_po.ulcer_index = stats.ulcer_index
_po.ulcer_performance_index = stats.ulcer_performance_index
_po.upi = stats.upi
_po.risk_of_ruin = stats.risk_of_ruin
_po.ror = stats.ror
_po.value_at_risk = stats.value_at_risk
_po.var = stats.var
_po.conditional_value_at_risk = stats.conditional_value_at_risk
_po.cvar = stats.cvar
_po.expected_shortfall = stats.expected_shortfall
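# The assignments above re-export the `stats` functions as attributes of
# the `_po` module object, so callers can reach them from one namespace
# without importing `stats` directly. Hypothetical usage, assuming `_po`
# is the package's public module:
# _po.sharpe(returns)   # same callable as stats.sharpe
# _po.cvar(returns)     # same callable as stats.cvar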