Overall Statistics

Total Trades                    1548
Average Win                     1.03%
Average Loss                    -0.86%
Compounding Annual Return       13.674%
Drawdown                        25.100%
Expectancy                      0.377
Net Profit                      1031.557%
Sharpe Ratio                    1.121
Probabilistic Sharpe Ratio      57.925%
Loss Rate                       37%
Win Rate                        63%
Profit-Loss Ratio               1.19
Alpha                           0.119
Beta                            0.256
Annual Standard Deviation       0.131
Annual Variance                 0.017
Information Ratio               0.212
Tracking Error                  0.188
Treynor Ratio                   0.574
Total Fees                      $206609.93
Estimated Strategy Capacity     $560000.00
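As a quick sanity check on the table, the reported Expectancy is consistent with the standard per-trade definition (win rate x profit-loss ratio - loss rate). A minimal sketch, separate from the algorithm code below and using the rounded figures from the table:

    win_rate, loss_rate = 0.63, 0.37
    profit_loss_ratio = 1.19                 # average win / |average loss| from the table
    expectancy = win_rate * profit_loss_ratio - loss_rate
    print(round(expectancy, 2))              # ~0.38, in line with the reported 0.377 (differences are rounding)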
# Typical imports
from AlgorithmImports import *   #QuantConnect/LEAN API (QCAlgorithm, Resolution, PortfolioTarget, etc.); injected automatically in the cloud IDE
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import statsmodels.api as sm
import sklearn as sk
from sklearn import linear_model
import math
from datetime import datetime, timedelta, date
from pandas.tseries.offsets import MonthEnd
from dateutil.relativedelta import relativedelta
from io import StringIO


class FINA4803(QCAlgorithm):

    def Initialize(self):

        ### The below is required for self.History, but I can't get it to work yet
        #historydate = datetime.date(datetime.now()) - datetime.date(datetime(1998,12,23))
        #self.history_days = int(historydate.days)+1
        #self.alpha_file_df = self.getRegressionCoefficients()

        ### Alpha data import
        #Download regression results from Dropbox, relevant links below:

        # 6 Strategies, 36 Months Rolling Alpha, Monthly Trading
        FF5_Against_FF_Portfolio = "https://www.dropbox.com/s/a64gl0yxgx285xl/Alphas%20-%20FF%20Portfolio%20Against%20FF5%20Factors.csv?dl=1"
        FF3_Against_FF_Portfolio = "https://www.dropbox.com/s/aqeqt8yag7cpydi/Alphas%20-%20FF%20Portfolio%20Against%20FF3%20Factors%20%2836m%20Rolling%20Alpha%29.csv?dl=1"
        FF5_Against_6_ETFs = "https://www.dropbox.com/s/cn4v6oqhvvm3g5t/Alphas%20-%206%20ETF%20Against%20FF5%20Factors.csv?dl=1"
        FF3_Against_6_ETFs = "https://www.dropbox.com/s/622igj5olkys3sv/Alphas%20-%206%20ETF%20Against%20FF3%20Factors.csv?dl=1"
        FF5_Against_9_ETFs = "https://www.dropbox.com/s/x2c915qgyjmcgpw/Alphas%20-%209%20ETFs%20Against%20FF5%20Factors.csv?dl=1"
        FF3_Against_9_ETFs = "https://www.dropbox.com/s/377ig3soc14oo65/Alphas%20-%209%20ETFs%20Against%20FF3%20Factors.csv?dl=1"

        #FF5 Against 6 ETFs, different Months Rolling Alpha, Monthly Trading
        FF5_Against_6_ETFs_12m = "https://www.dropbox.com/s/h76ui942e8plik7/Alphas%20-%206%20ETFs%20Against%20FF5%20Factors%20%2812m%20Rolling%20Alpha%29.csv?dl=1"
        FF5_Against_6_ETFs_24m = "https://www.dropbox.com/s/65e9ppia8459hw1/Alphas%20-%206%20ETFs%20Against%20FF5%20Factors%20%2824m%20Rolling%20Alpha%29.csv?dl=1"
        FF5_Against_6_ETFs_48m = "https://www.dropbox.com/s/dxrwn000oseusdr/Alphas%20-%206%20ETFs%20Against%20FF5%20Factors%20%2848m%20Rolling%20Alpha%29.csv?dl=1"
        FF5_Against_6_ETFs_60m = "https://www.dropbox.com/s/julwqwrjtx46qr0/Alphas%20-%206%20ETFs%20Against%20FF5%20Factors%20%2860m%20Rolling%20Alpha%29.csv?dl=1"

        #FF3 Against FF Portfolio, different Months Rolling Alpha, Monthly Trading
        FF3_Against_FF_Portfolio_12m = "https://www.dropbox.com/s/bky0qsajvpe4g3l/Alphas%20-FF%20Portfolio%20Against%20FF3%20Factors%20%2812m%20Rolling%20Alpha%29.csv?dl=1"
        FF3_Against_FF_Portfolio_24m = "https://www.dropbox.com/s/b387fnoqfi7kbla/Alphas%20-%20FF%20Portfolio%20Against%20FF3%20Factors%20%2824m%20Rolling%20Alpha%29.csv?dl=1"
        FF3_Against_FF_Portfolio_48m = "https://www.dropbox.com/s/vm725yhiz9v8gm3/Alphas%20-%20FF%20Portfolio%20Against%20FF3%20Factors%20%2848m%20Rolling%20Alpha%29.csv?dl=1"
        FF3_Against_FF_Portfolio_60m = "https://www.dropbox.com/s/lcxieiesba13rcj/Alphas%20-%20FF%20Portfolio%20Against%20FF3%20Factors%20%2860m%20Rolling%20Alpha%29.csv?dl=1"

        #FF5 Against FF Portfolio, trading starts from Jan 1999
        FF5_Against_FF_Portfolio_1999Start = "https://www.dropbox.com/s/41hxz2rstojohk0/Alphas%20-%20FF%20Portfolio%20Against%20FF5%20Factors%20%28Trade%20from%201999%29.csv?dl=1"

        #Download the chosen file from Dropbox and turn it into a dataframe
        alpha_file = self.Download(FF3_Against_FF_Portfolio)
        self.alpha_file_df = pd.DataFrame(pd.read_csv(StringIO(alpha_file)))
        self.alpha_file_df = self.alpha_file_df.rename({'Unnamed: 0':'Date'}, axis=1)  #Added "Date" column name
        self.first_date = self.alpha_file_df.at[0,'Date']                              #Used for self.First_Trading_Date
        self.first_date = pd.to_datetime(self.first_date)
        self.alpha_file_df.drop('Date', axis=1, inplace=True)                          #Removes "Date" column

        ##################################################
        ### Dates, Starting Cash, Cash Percentage of Portfolio

        #The alpha_file_df's first date. Do NOT modify this
        self.First_Trading_Date = datetime(self.first_date.year, self.first_date.month, self.first_date.day)

        #Backtest period begin. Do NOT use dates before 1/31/2002 for the FF Portfolio, or 2/28/2002 for the ETFs, otherwise Initialize returns early and the algorithm does not run
        self.Start_Date = datetime(2002, 1, 31)
        if self.Start_Date < self.first_date:
            return

        #Backtest period end. The latest allowed date is last month's last day (e.g. if today is 2021/02/25, the latest is 2021/01/31)
        self.End_Date = datetime(2020,12,31)

        #Starting Cash
        self.SetCash(1000000)

        #Number of months between the backtest start date and the alpha file's first date. Do NOT modify this
        self.counter = (self.Start_Date.year - self.First_Trading_Date.year) * 12 + (self.Start_Date.month - self.First_Trading_Date.month)

        #Start Date and End Date, taken from the variables above. Do NOT modify this
        self.SetStartDate(self.Start_Date.year,self.Start_Date.month,self.Start_Date.day)
        self.SetEndDate(self.End_Date.year,self.End_Date.month,self.End_Date.day)

        #Set Cash Percentage of Portfolio
        self.Settings.FreePortfolioValuePercentage = 0.02

        ##################################################
        ### US NBER Recession Index data import
        nber_link = "https://www.dropbox.com/s/rxigxh2fi0hb8si/USREC%20%28New%29.csv?dl=1"
        nber_file = self.Download(nber_link)
        self.nber_df = pd.DataFrame(pd.read_csv(StringIO(nber_file)))
        self.nber_df['DATE'] = pd.to_datetime(self.nber_df['DATE']) - MonthEnd(1)       #Convert dates to datetime; MonthEnd(1) keeps the dates consistent with the ETFs' dates
        self.nber_df = self.nber_df.loc[self.nber_df['DATE']>=self.First_Trading_Date]  #Filter the dataframe to dates at or after the regression results' first date
        self.nber_df = self.nber_df.reset_index(drop=True)                              #Reset index to start from 0
        self.nber_df["USREC"] = self.nber_df["USREC"].replace([0],'NO')                 #Replace 0 with 'NO', i.e. no recession
        self.nber_df["USREC"] = self.nber_df["USREC"].replace([1],'YES')                #Replace 1 with 'YES', i.e. recession
        ##################################################
        ### ETF Beta data import
        etf_beta_link = "https://www.dropbox.com/s/f8lqd5p05f9chpn/ETF%20Betas.csv?dl=1"
        etf_file = self.Download(etf_beta_link)
        self.etf_beta_df = pd.DataFrame(pd.read_csv(StringIO(etf_file)))
        self.etf_beta_df['Date'] = pd.to_datetime(self.etf_beta_df['Date'])
        self.etf_beta_df = self.etf_beta_df.loc[self.etf_beta_df['Date']>=self.First_Trading_Date]
        self.etf_beta_df = self.etf_beta_df.reset_index(drop=True)                      #Reset index to start from 0

        ##################################################
        ### VIX data import
        vix_link = "https://www.dropbox.com/s/kvsmvgah3hvkb4b/%5EVIX.csv?dl=1"
        vix_file = self.Download(vix_link)
        self.vix_df = pd.DataFrame(pd.read_csv(StringIO(vix_file)))
        self.vix_df = self.vix_df[['Date','Close']]
        self.vix_df['Date'] = pd.to_datetime(self.vix_df['Date']) + MonthEnd(1)
        self.vix_df['36M Avg'] = self.vix_df.iloc[:,1].rolling(window=36).mean()
        self.vix_df = self.vix_df.loc[self.vix_df['Date']>=self.First_Trading_Date]
        self.vix_df = self.vix_df.reset_index(drop=True)                                #Reset index to start from 0

        ##################################################
        ### SPY data import
        Moving_Average_Days = 200
        spy_link = "https://www.dropbox.com/s/manlxifnghujh0i/SPY.csv?dl=1"
        spy_file = self.Download(spy_link)
        self.spy_df = pd.DataFrame(pd.read_csv(StringIO(spy_file)))
        self.spy_df = self.spy_df[['Date','close']]
        self.spy_df['Date'] = pd.to_datetime(self.spy_df['Date'])
        self.spy_df = self.spy_df.set_index('Date')
        self.spy_df["MA"] = self.spy_df.rolling(window=Moving_Average_Days).mean()
        self.spy_sma_df = pd.DataFrame(self.spy_df.resample("1M").last())
        self.spy_sma_df = self.spy_sma_df.reset_index(drop=False)
        self.spy_sma_df = self.spy_sma_df.loc[self.spy_sma_df['Date']>=self.First_Trading_Date]
        self.spy_sma_df = self.spy_sma_df.reset_index(drop=True)

        ##################################################
        ### ETF Tickers, safe havens and leverage settings
        self.safe_haven_status = True   #Set to True if you want a safe haven, False if you do not
        self.safe_haven_cash = False    #Set to True if you want the safe haven to be cash
        self.safe_haven = "TLT"         #Safe haven ticker
        self.leverage = 1.0             #Overall allowed leverage. If 1, the maximum weighting is 100%; if 2, 200%; and so on
        self.apply_leverage = False     #Change to True if you want to apply leverage to the positions

        tickers = ["XLB","XLE","XLF","XLI","XLK","XLP","XLU","XLV","XLY",self.safe_haven]
        for ticker in tickers:
            symbol = self.AddEquity(ticker, Resolution.Minute).Symbol                           #Add the equity and assign its symbol. Remember to change Resolution to Minute as required
            self.Securities[symbol].SetDataNormalizationMode(DataNormalizationMode.Adjusted)    #Use adjusted prices to account for dividend reinvestment and equity splits
            self.Securities[symbol].SetLeverage(self.leverage)                                  #Leverage is set to what was defined earlier
            self.Securities[symbol].SetSlippageModel(CustomSlippageModel(self))                 #Slippage comes from the CustomSlippageModel class below - default should be 0

        ##################################################
        ### Benchmark Settings
        self.benchmark = "SPY"                                                                  #Benchmark ticker
        bmark = self.AddEquity(self.benchmark,Resolution.Minute).Symbol                         #Add the benchmark equity and assign its symbol. Remember to change Resolution to Minute as required
        self.Securities[bmark].SetDataNormalizationMode(DataNormalizationMode.Adjusted)         #Use adjusted prices to account for dividend reinvestment and equity splits
        self.Securities[bmark].SetLeverage(self.leverage)                                       #Leverage left at self.leverage (1 by default) so no margin is used on the benchmark
        self.SetBenchmark(self.benchmark)

        # Benchmark graph plotting variables
        self.lastBenchmarkValue = None
        self.BenchmarkPerformance = self.Portfolio.TotalPortfolioValue                          #Initial benchmark value, scaled to match our portfolio

        ##################################################
        ### Scheduled events for trading and graph plotting
        # Strategy Names:
        # self.Long_NBER_SafeHaven
        # self.Long_MA_NBER_SafeHaven
        # self.Long_MA_SafeHaven
        # self.Long_Short_Benchmark
        # self.Long_VIX_SafeHaven
        # self.Long_MA_NBER_SafeHaven_ETFNumbers
        Strategy_Name = self.Long_MA_NBER_SafeHaven_ETFNumbers

        #Runs the chosen strategy function right after market open on the first trading day of each month, based on the benchmark's trading calendar
        self.Schedule.On(self.DateRules.MonthStart(self.benchmark),self.TimeRules.AfterMarketOpen(self.benchmark),Strategy_Name)

        #Plots graphs before market close on the last trading day of each month, based on the benchmark's trading calendar
        self.Schedule.On(self.DateRules.MonthEnd(self.benchmark),self.TimeRules.BeforeMarketClose(self.benchmark),self.PlotGraph)

        #UPI (Ulcer Performance Index) calculation; results show under "Logs" in the backtest tabs
        self.UPIcounter = 0
        self.CurrentValue = 0
        self.SumSq = 0
        self.MaxValue = 0

        #Runs the UPI calculation function every day before market close, based on the benchmark's trading calendar
        self.Schedule.On(self.DateRules.EveryDay(self.benchmark),self.TimeRules.BeforeMarketClose(self.benchmark),self.UPI)

        #Calculates the final UPI index number on the last date
        self.Schedule.On(self.DateRules.On(self.End_Date.year,self.End_Date.month,self.End_Date.day),self.TimeRules.BeforeMarketClose(self.benchmark),self.UPICalc)

    ##################################################
    ##################################################
    ##################################################
    ### Strategy Functions:
    ##################################################

    def Long_NBER_SafeHaven(self):

        #Gets datapoints according to the dates - the counter increases by 1 every trading month
        #alpha_datapoint extracts one line at a time from alpha_file_df;
        #similarly, nber_datapoint extracts one line at a time from nber_df
        alpha_datapoint = self.alpha_file_df.iloc[[int(self.counter)]]
        nber_datapoint = self.nber_df.iloc[[int(self.counter)]]

        #Number of positive alphas - counts the ETFs whose alpha is larger than 0
        num_of_pos = int(alpha_datapoint.gt(0).sum(axis=1))

        ### Trading Conditions and Loops
        Portfolio_Holdings_List = []

        ### Test Trading
        if self.apply_leverage == False:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks whether the month is counted as a recession
                if nber_datapoint["USREC"].all() == "NO":
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                elif nber_datapoint["USREC"].all() == "YES":
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        elif self.apply_leverage == True:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks whether the month is counted as a recession
                if nber_datapoint["USREC"].all() == "NO":
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                elif nber_datapoint["USREC"].all() == "YES":
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        self.Liquidate()
        self.SetHoldings(Portfolio_Holdings_List)
        self.counter = self.counter+1

    ##################################################

    def Long_MA_NBER_SafeHaven(self):

        #Gets datapoints according to the dates - the counter increases by 1 every trading month
        #alpha_datapoint extracts one line at a time from alpha_file_df; similarly for nber_datapoint and spy_datapoint
        alpha_datapoint = self.alpha_file_df.iloc[[int(self.counter)]]
        nber_datapoint = self.nber_df.iloc[[int(self.counter)]]
        spy_datapoint = self.spy_sma_df.iloc[[int(self.counter)]]

        #Number of positive alphas - counts the ETFs whose alpha is larger than 0
        num_of_pos = int(alpha_datapoint.gt(0).sum(axis=1))

        ### Trading Conditions and Loops
        Portfolio_Holdings_List = []

        ### Test Trading
        if self.apply_leverage == False:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks the recession flag and the SPY 200-day moving average
                if nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"]):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                elif (nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"])) or (nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"])):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,(1/num_of_pos)*0.5))
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,0.5))
                elif nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        elif self.apply_leverage == True:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks the recession flag and the SPY 200-day moving average
                if nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"]):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                elif (nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"])) or (nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"])):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,(1/num_of_pos*self.leverage)*0.5))
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,0.5*self.leverage))
                elif nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        self.Liquidate()
        self.SetHoldings(Portfolio_Holdings_List)
        self.counter = self.counter+1

    ##################################################

    def Long_MA_SafeHaven(self):

        #Gets datapoints according to the dates - the counter increases by 1 every trading month
        #alpha_datapoint extracts one line at a time from alpha_file_df;
        #similarly, spy_datapoint extracts one line at a time from spy_sma_df
        alpha_datapoint = self.alpha_file_df.iloc[[int(self.counter)]]
        spy_datapoint = self.spy_sma_df.iloc[[int(self.counter)]]

        #Number of positive alphas - counts the ETFs whose alpha is larger than 0
        num_of_pos = int(alpha_datapoint.gt(0).sum(axis=1))

        ### Trading Conditions and Loops
        Portfolio_Holdings_List = []

        ### Test Trading
        if self.apply_leverage == False:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks SPY against its 200-day moving average
                if float(spy_datapoint["close"]) > float(spy_datapoint["MA"]):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                elif float(spy_datapoint["close"]) < float(spy_datapoint["MA"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        elif self.apply_leverage == True:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks SPY against its 200-day moving average
                if float(spy_datapoint["close"]) > float(spy_datapoint["MA"]):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                elif float(spy_datapoint["close"]) < float(spy_datapoint["MA"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        self.Liquidate()
        self.SetHoldings(Portfolio_Holdings_List)
        self.counter = self.counter+1

    ##################################################

    def Long_Short_Benchmark(self):

        #Gets datapoints according to the dates - the counter increases by 1 every trading month
        #alpha_datapoint extracts one line at a time from alpha_file_df; similarly for nber_datapoint and beta_datapoint
        alpha_datapoint = self.alpha_file_df.iloc[[int(self.counter)]]
        nber_datapoint = self.nber_df.iloc[[int(self.counter)]]
        beta_datapoint = self.etf_beta_df.iloc[[int(self.counter)]]

        #Number of positive alphas - counts the ETFs whose alpha is larger than 0
        num_of_pos = int(alpha_datapoint.gt(0).sum(axis=1))

        #Variable that accumulates the portfolio beta
        portfolio_beta = 0

        Portfolio_Holdings_List = []

        if self.apply_leverage == False:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                        portfolio_beta = portfolio_beta + (float(beta_datapoint[ticker])*1/num_of_pos)
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks whether the month is counted as a recession
                if nber_datapoint["USREC"].all() == "NO":
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                            portfolio_beta = portfolio_beta + (float(beta_datapoint[ticker])*1/num_of_pos)
                elif nber_datapoint["USREC"].all() == "YES":
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        elif self.apply_leverage == True:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                        portfolio_beta = portfolio_beta + (float(beta_datapoint[ticker])*1/num_of_pos*self.leverage)
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks whether the month is counted as a recession
                if nber_datapoint["USREC"].all() == "NO":
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                            portfolio_beta = portfolio_beta + (float(beta_datapoint[ticker])*1/num_of_pos*self.leverage)
                elif nber_datapoint["USREC"].all() == "YES":
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        self.Liquidate()
        self.SetHoldings(Portfolio_Holdings_List)
        self.SetHoldings(self.benchmark,-portfolio_beta)
        self.counter = self.counter+1

    ##################################################

    def Long_VIX_SafeHaven(self):

        #Gets datapoints according to the dates - the counter increases by 1 every trading month
        #alpha_datapoint extracts one line at a time from alpha_file_df; similarly, vix_datapoint extracts one line at a time from vix_df
        alpha_datapoint = self.alpha_file_df.iloc[[int(self.counter)]]
        vix_datapoint = self.vix_df.iloc[[int(self.counter)]]

        #Number of positive alphas - counts the ETFs whose alpha is larger than 0
        num_of_pos = int(alpha_datapoint.gt(0).sum(axis=1))

        Portfolio_Holdings_List = []

        if self.apply_leverage == False:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks the VIX close against its 36-month average
                if float(vix_datapoint["Close"]) < float(vix_datapoint["36M Avg"]):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                elif float(vix_datapoint["Close"]) >= float(vix_datapoint["36M Avg"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        elif self.apply_leverage == True:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks the VIX close against its 36-month average
                if float(vix_datapoint["Close"]) < float(vix_datapoint["36M Avg"]):
                    for ticker in alpha_datapoint.columns:
                        if float(alpha_datapoint[ticker]) > 0:
                            Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                elif float(vix_datapoint["Close"]) >= float(vix_datapoint["36M Avg"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        self.Liquidate()
        self.SetHoldings(Portfolio_Holdings_List)
        self.counter = self.counter+1

    ##################################################

    def Long_MA_NBER_SafeHaven_ETFNumbers(self):

        #Gets datapoints according to the dates - the counter increases by 1 every trading month
        #alpha_datapoint extracts one line at a time from alpha_file_df; similarly for nber_datapoint and spy_datapoint
        alpha_datapoint = self.alpha_file_df.iloc[[int(self.counter)]]
        nber_datapoint = self.nber_df.iloc[[int(self.counter)]]
        spy_datapoint = self.spy_sma_df.iloc[[int(self.counter)]]

        #If True, a minimum number of ETFs is enforced (e.g. at least 3). If False, an absolute number of ETFs is used (e.g. exactly 3)
        minimumtrue_absolutefalse = True
        #Minimum/absolute number of ETFs
        number_of_etf = 2

        #Number of ETF positions
        if minimumtrue_absolutefalse == True:
            num_of_pos = int(alpha_datapoint.gt(0).sum(axis=1))
            if num_of_pos < number_of_etf:
                num_of_pos = number_of_etf
        elif minimumtrue_absolutefalse == False:
            num_of_pos = number_of_etf

        alpha_datapoint = alpha_datapoint.apply(pd.Series.nlargest, axis=1, n=num_of_pos)

        ### Trading Conditions and Loops
        Portfolio_Holdings_List = []

        ### Test Trading
        if self.apply_leverage == False:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks the recession flag and the SPY 200-day moving average
                if nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"]):
                    for ticker in alpha_datapoint.columns:
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos))
                elif (nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"])) or (nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"])):
                    for ticker in alpha_datapoint.columns:
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,(1/num_of_pos)*0.5))
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,0.5))
                elif nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        elif self.apply_leverage == True:
            if self.safe_haven_status == False:     #If there is no safe haven then do the following
                for ticker in alpha_datapoint.columns:
                    Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
            elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks the recession flag and the SPY 200-day moving average
                if nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"]):
                    for ticker in alpha_datapoint.columns:
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,1/num_of_pos*self.leverage))
                elif (nber_datapoint["USREC"].all() == "NO" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"])) or (nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) > float(spy_datapoint["MA"])):
                    for ticker in alpha_datapoint.columns:
                        Portfolio_Holdings_List.append(PortfolioTarget(ticker,(1/num_of_pos*self.leverage)*0.5))
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,0.5*self.leverage))
                elif nber_datapoint["USREC"].all() == "YES" and float(spy_datapoint["close"]) < float(spy_datapoint["MA"]):
                    if self.safe_haven_cash == True:
                        pass
                    elif self.safe_haven_cash == False:
                        Portfolio_Holdings_List.append(PortfolioTarget(self.safe_haven,1))

        self.Liquidate()
        self.SetHoldings(Portfolio_Holdings_List)
        self.counter = self.counter+1

    ##################################################
    ##################################################
    ##################################################
    ### Plot Graphs for Backtest

    def PlotGraph(self):

        #Plot Portfolio Cash
        self.Plot('Portfolio Cash', 'Cash', self.Portfolio.Cash)

        #Gets the benchmark's daily close
        benchmark = self.Securities[self.benchmark].Close

        #Gets the benchmark's performance
        if self.lastBenchmarkValue is not None:
            self.BenchmarkPerformance = self.BenchmarkPerformance * (benchmark/self.lastBenchmarkValue)

        #Store today's benchmark close price for use tomorrow
        self.lastBenchmarkValue = benchmark

        #Plots our strategy versus the benchmark
        self.Plot("Strategy vs Benchmark", "Portfolio Value", self.Portfolio.TotalPortfolioValue)
        self.Plot("Strategy vs Benchmark", "Benchmark", self.BenchmarkPerformance)
    ##################################################
    ### UPI functions

    def UPI(self):
        #UPI calculation based on https://www.tangotools.com/ui/ui.htm
        self.CurrentValue = self.Portfolio.TotalPortfolioValue
        self.UPIcounter = self.UPIcounter+1
        if self.CurrentValue > self.MaxValue:
            self.MaxValue = self.CurrentValue
        else:
            self.SumSq = self.SumSq + ((100 * ((self.CurrentValue/self.MaxValue) - 1))**2)

    def UPICalc(self):
        UI = (self.SumSq / self.UPIcounter)**0.5
        self.Debug(UI)      #The final Ulcer Index is calculated and printed under "Logs"

##################################################
### Slippage Model

class CustomSlippageModel:

    def __init__(self, algorithm):
        self.algorithm = algorithm

    def GetSlippageApproximation(self, asset, order):
        constantslippage = 0    #This is the only variable that needs to be changed. For 0.1% slippage use 0.001, and so on
        slippage = asset.Price * constantslippage
        return slippage

##################################################
##################################################
##################################################
### Unused / Draft Functions

# Trading Function
'''
if self.safe_haven_status == False:     #If there is no safe haven then do the following
    for ticker in alpha_datapoint.columns:
        if first_in_list == True:
            if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                self.SetHoldings(ticker,1/num_of_pos*self.leverage,True)    #Unwind all existing positions, then set an equal weighting for every ETF with positive alpha
                first_in_list = False
        elif first_in_list == False:
            if float(alpha_datapoint[ticker]) > 0:      #Checks if the ETF's alpha is larger than 0
                self.SetHoldings(ticker,1/num_of_pos*self.leverage)         #Set an equal weighting for every ETF with positive alpha

elif self.safe_haven_status == True:    #If there is a safe haven then do the following. Same as the loop above, but also checks whether the month is counted as a recession
    if nber_datapoint["USREC"].all() == "NO":
        for ticker in alpha_datapoint.columns:
            if first_in_list == True:
                if float(alpha_datapoint[ticker]) > 0:
                    self.SetHoldings(ticker,1/num_of_pos*self.leverage,True)
                    first_in_list = False
            elif first_in_list == False:
                if float(alpha_datapoint[ticker]) > 0:
                    self.SetHoldings(ticker,1/num_of_pos*self.leverage)
    elif nber_datapoint["USREC"].all() == "YES":
        self.SetHoldings(self.safe_haven,1,True)

self.counter = self.counter+1
'''

'''
def OnData(self, data):
    #OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
    #Arguments:
    #    data: Slice object keyed by symbol containing the stock data


def getRegressionCoefficients(self):
    # .py version of Regression.ipynb - each qb instance is changed to self

    ### FF5 Factors
    ff5_path = self.Download("https://www.dropbox.com/s/8dyjtlyf1g4ulvn/F-F_Research_Data_5_Factors_2x3.CSV?dl=1")
    #Below is standard code to tidy the data:
    ff5_df = pd.DataFrame(pd.read_csv(StringIO(ff5_path), skiprows = 3))            #skiprows skips the header text and goes straight to the data
    ff5_df = ff5_df.rename({'Unnamed: 0':'Date'}, axis=1)                           #Added "Date" column name
    ff5_df['Date'] = pd.to_datetime(ff5_df['Date'],format='%Y%m') + MonthEnd(1)     #Convert dates to datetime; MonthEnd(1) keeps the dates consistent with the ETFs' dates
    ff5_df = ff5_df.loc[ff5_df['Date']>='1998-12-22']                               #Cut-off is 22 Dec 1998 since the ETFs only have data after this date
    ff5_df.reset_index(drop=True,inplace=True)                                      #Reset index to reflect the date cut-off
    ff5_df.drop(ff5_df.index[:1],inplace=True)                                      #Drops the first date since it is not required for the regression
    ff5_df.reset_index(drop=True,inplace=True)                                      #Reset index to start from 0
    ff5_date = ff5_df.at[len(ff5_df)-1,'Date']                                      #Variable that is used later

    ### etf_ticker_list & history
    etf_ticker_list = ['XLB', 'XLE', 'XLF', 'XLI', 'XLK', 'XLP', 'XLU', 'XLV', 'XLY']   #ETF ticker list, does not change
    self.history_df = {}
    for ticker in etf_ticker_list:
        ticker_symbol = self.AddEquity(ticker).Symbol                                               #QuantConnect's way of adding securities; they are added to self.Securities
        self.Securities[ticker_symbol].SetDataNormalizationMode(DataNormalizationMode.TotalReturn)  #Adjusts the prices so that dividends are reinvested and splits are accounted for
        self.history_df[ticker] = self.History(ticker_symbol,self.history_days,Resolution.Daily)

    startDate = datetime(2019,12,22)                                    #First trading dates of all of the ETFs
    endDate = datetime(ff5_date.year, ff5_date.month, ff5_date.day)     #Taken from a variable defined in the previous cell

    #self.history_df = self.History([self.Securities.Keys],timedelta(days=self.history_days),Resolution.Daily)
    #tester = True
    #while tester == True:
    #    if self.history_df["XLB"].empty == True:
    #        pass
    #    elif self.history_df["XLB"].empty == False:
    #        tester=False

    ticker_data = {}

    ### close_prices
    close_prices = {}       #turns the history dataframes into a dictionary containing each ETF's closing prices
    for ticker in etf_ticker_list:
        close_prices[ticker] = self.history_df[ticker]["close"]

    ### close_prices_monthly
    close_prices_monthly = {}       #changes daily closing data into monthly data using resample
    for ticker in close_prices:
        close_prices_monthly[ticker] = pd.DataFrame(close_prices[ticker].resample("1M").last())                         #simply takes the last trading price of each month
        close_prices_monthly[ticker]['Price Change'] = close_prices_monthly[ticker]['close'].pct_change(periods = 1)    #price change for each month
        close_prices_monthly[ticker]['Price Change'] = close_prices_monthly[ticker]['Price Change'].fillna(0)           #fills NaNs with 0 (there should not be any, but just in case)
        close_prices_monthly[ticker]['Price Change'] = close_prices_monthly[ticker]['Price Change']*100                 #multiply by 100 so it is consistent with Fama-French's RF
        close_prices_monthly[ticker].drop(close_prices_monthly[ticker].index[:1],inplace=True)                          #drops the first datapoint, since the first trading month has no price change
        close_prices_monthly[ticker]["Price Change - RF"] = close_prices_monthly[ticker]['Price Change'] - ff5_df["RF"].values

    ### etf_alphas
    etf_alphas = {}         #dictionary to store the alphas (intercepts) of each ETF, regressed on Fama-French's 5 factors
    for ticker in etf_ticker_list:
        counter = 0
        counter1 = 36       #counters start at 0 and 36 (36 months of data is regressed each time)
        etf_alphas[ticker] = pd.DataFrame()
        placeholder = []
        while counter1 < len(close_prices_monthly[ticker].index):                          #Runs the while loop as long as data is available
            X = ff5_df[['Mkt-RF','SMB','HML','RMW','CMA']].iloc[counter:counter1,]         #Independent variables: Fama-French's 5 factors
            Y = close_prices_monthly[ticker]["Price Change - RF"].iloc[counter:counter1,]  #Dependent variable: the ETF's excess return
            regr = linear_model.LinearRegression()
            regr.fit(X,Y)
            placeholder.append(regr.intercept_)
            counter = counter+1
            counter1 = counter1+1
        etf_alphas[ticker][ticker] = placeholder

    ### combined_etf_alpha
    #Combines all of the etf_alphas entries into one dataframe
    combined_etf_alpha = pd.DataFrame()
    for ticker in etf_ticker_list:
        combined_etf_alpha = pd.concat([combined_etf_alpha,etf_alphas[ticker]],axis=1)

    #The below is probably bad practice since I set my own dates
    #It starts at 2002/02/28, since the data begins on 1999/01/31 + 36 months (3 years) = 2002/01/31 (call this date t)
    #The signal is only actionable in month t+1, which is why it begins at 2002/02/28
    combined_etf_alpha = combined_etf_alpha.set_index(pd.date_range(start='2/28/2002', periods=len(etf_alphas["XLE"]),freq="M"),"Trading Dates")

    return combined_etf_alpha
'''
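For reference, the UPI()/UPICalc() pair above accumulates the Ulcer Index incrementally during the backtest (what UPICalc logs is the Ulcer Index itself; dividing an excess-return figure by it would give the Ulcer Performance Index). The same quantity can be cross-checked offline from a saved daily equity curve. A minimal pandas sketch under that assumption, with the toy equity series purely illustrative:

    import pandas as pd

    def ulcer_index(equity: pd.Series) -> float:
        #Root-mean-square of percentage drawdowns from the running peak (0 on days that set a new high)
        running_peak = equity.cummax()
        drawdown_pct = 100 * (equity / running_peak - 1)
        return float((drawdown_pct.pow(2).mean()) ** 0.5)

    #Usage with a toy equity curve
    curve = pd.Series([100.0, 102.0, 101.0, 105.0, 99.0, 103.0])
    print(round(ulcer_index(curve), 3))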