Overall Statistics

| Statistic | Value |
|---|---|
| Total Trades | 69 |
| Average Win | 8.44% |
| Average Loss | -2.45% |
| Compounding Annual Return | 16.620% |
| Drawdown | 23.200% |
| Expectancy | 1.349 |
| Net Profit | 183.166% |
| Sharpe Ratio | 0.832 |
| Loss Rate | 47% |
| Win Rate | 53% |
| Profit-Loss Ratio | 3.44 |
| Alpha | 0.163 |
| Beta | -0.255 |
| Annual Standard Deviation | 0.168 |
| Annual Variance | 0.028 |
| Information Ratio | 0.21 |
| Tracking Error | 0.231 |
| Treynor Ratio | -0.547 |
| Total Fees | $493.50 |
""" Probabilistic Momentum Ispired by: Antonacci's Global Rotation: where best asset is selected on a monthly basis. e.g. https://github.com/QuantConnect/Lean/blob/master/Algorithm.Python/ETFGlobalRotationAlgorithm.py Improvement: The reason for the periodic (monthly) rebalancing in Antonacci's algo is to avoid market whipsaw/noise. A better and simple way is to have daily rebalancings, but filtering signal for statistical significance by using the t-stat for the difference between the mean of two assets (standard statistics) It is good for two uncorrelated assets like MDY and EDV (and self.pr_lvl=0.6), better than Antonacci's algo but it needs further work for more assets: just naively increasing the prob level (self.pr_lvl) does not seem to cut it. """ import numpy as np import pandas as pd from datetime import datetime, timedelta from scipy.stats import t # t.sf(x, dof), (sf: survival function - i.e. Pr(X>x) of t-dist with 1 tail) class ProbabilisticMomentum(QCAlgorithm): def __init__(self): self.symbols = [ "MDY", # US S&P mid cap 400 # "IEV", # iShares S&P europe 350 # "EEM", # iShared MSCI emerging markets # "ILF", # iShares S&P latin america # "EPP", # iShared MSCI Pacific ex-Japan "EDV", # Vanguard 25yr US treasuries # "SHY" # Barclays short-datauration US treasuries ] self.back_period = 21*3 + 1 # i.e. 3 months # critical level for switching (for #2 assets = 0.6, higher for more assets...) self.pr_lvl = 0.6 self.target = None def Initialize(self): self.SetCash(100000) self.SetStartDate(2011,3,1) self.SetEndDate(datetime.now().date() - timedelta(1)) self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Margin) # register and replace 'tkr symbol' with 'tkr object' for i, tkr in enumerate(self.symbols): self.symbols[i] = self.AddEquity(tkr, Resolution.Daily).Symbol self.Schedule.On(self.DateRules.EveryDay(self.symbols[0]), self.TimeRules.AfterMarketOpen(self.symbols[0], 1), Action(self.rebalance)) def rebalance(self): self.select() if self.target is None: return # check if already in if self.Portfolio[self.target].Quantity != 0: return # switch self.Liquidate() self.SetHoldings(self.target,1.) self.Debug("self.Time: %s -- self.target: %s and self.prob: %s" %(str(self.Time), str(self.target), str(self.max_prob)) ) def select(self): # get daily returns self.price = self.History(self.symbols, self.back_period, Resolution.Daily)["close"].unstack(level=0) # .dropna(axis=1) daily_rtrn = self.price.pct_change().dropna() # or: np.log(self.price / self.price.shift(1)).dropna() _n = len(daily_rtrn.index); sqrt_n = np.sqrt(_n) # reset self.max_prob = -1.; target = None # TODO: make it more Pythonic! for i, tkr_1 in enumerate(self.symbols): for j, tkr_2 in enumerate(self.symbols): if i < j: # upper part matrix(tkr_1, tkr_2); (n^2 - n) / 2 operations rtrn_diff = daily_rtrn[tkr_1.Value] - daily_rtrn[tkr_2.Value] x = np.mean(rtrn_diff) / np.std(rtrn_diff) * np.sqrt(_n) # t_stat = avg(diff)/(std(diff)/sqrt(n)) if x > 0: # x>0 -> tkr_1 better than tkr_2 prob = (1 - t.sf(x, _n-1)) # T-dist cumulative probability: Prob(X<x) if prob > self.max_prob: self.max_prob = prob; target = tkr_1 else: # x<0 -> tkr_2 better than tkr_1 (reverse) prob = (1 - t.sf(-x, _n-1)) # NB: -x if prob > self.max_prob: self.max_prob = prob; target = tkr_2 # NB: tkr_2 # check prob above min level if self.max_prob > self.pr_lvl: self.target = target