| Overall Statistics | |
| --- | --- |
| Total Trades | 0 |
| Average Win | 0% |
| Average Loss | 0% |
| Compounding Annual Return | 0% |
| Drawdown | 0% |
| Expectancy | 0 |
| Net Profit | 0% |
| Sharpe Ratio | 0 |
| Probabilistic Sharpe Ratio | 0% |
| Loss Rate | 0% |
| Win Rate | 0% |
| Profit-Loss Ratio | 0 |
| Alpha | 0 |
| Beta | 0 |
| Annual Standard Deviation | 0 |
| Annual Variance | 0 |
| Information Ratio | -0.73 |
| Tracking Error | 0.141 |
| Treynor Ratio | 0 |
| Total Fees | $0.00 |
| Estimated Strategy Capacity | $0 |
| Lowest Capacity Asset | |
| Portfolio Turnover | 0% |
```python
#region imports
from AlgorithmImports import *
from sklearn.ensemble import RandomForestRegressor
#endregion


class RandomForestAlphaModel(AlphaModel):

    securities = []
    scheduled_event = None
    time = datetime.min
    rebalance = False

    def __init__(self, algorithm, minutes_before_close, n_estimators, min_samples_split, lookback_days):
        self.algorithm = algorithm
        self.minutes_before_close = minutes_before_close
        self.n_estimators = n_estimators
        self.min_samples_split = min_samples_split
        self.lookback_days = lookback_days

    def Update(self, algorithm: QCAlgorithm, data: Slice) -> List[Insight]:
        if not self.rebalance or data.QuoteBars.Count == 0:
            return []

        # Fetch history on our universe
        symbols = [s.Symbol for s in self.securities]
        df = algorithm.History(symbols, 2, Resolution.Daily, dataNormalizationMode=DataNormalizationMode.ScaledRaw)
        if df.empty:
            return []

        self.rebalance = False

        # Make all of them into a single time index.
        df = df.close.unstack(level=0)

        # Feature engineer the data for input
        input_ = df.diff() * 0.5 + df * 0.5
        input_ = input_.iloc[-1].fillna(0).values.reshape(1, -1)

        # Predict the expected price
        predictions = self.regressor.predict(input_)

        # Get the expected return
        predictions = (predictions - df.iloc[-1].values) / df.iloc[-1].values
        predictions = predictions.flatten()

        insights = []
        for i in range(len(predictions)):
            insights.append(Insight.Price(df.columns[i], timedelta(5), InsightDirection.Up, predictions[i]))

        algorithm.Insights.Cancel(symbols)
        return insights

    def train_model(self):
        # Initialize the Random Forest Regressor
        self.regressor = RandomForestRegressor(n_estimators=self.n_estimators, min_samples_split=self.min_samples_split, random_state=1990)

        # Get historical data
        history = self.algorithm.History([s.Symbol for s in self.securities], self.lookback_days, Resolution.Daily, dataNormalizationMode=DataNormalizationMode.ScaledRaw)

        # Select the close column and then call the unstack method.
        df = history['close'].unstack(level=0)

        # Feature engineer the data for input.
        input_ = df.diff() * 0.5 + df * 0.5
        input_ = input_.iloc[1:].ffill().fillna(0)

        # Shift the data for 1-step backward as training output result.
        output = df.shift(-1).iloc[:-1].ffill().fillna(0)

        # Fit the regressor
        self.regressor.fit(input_, output)

    def before_market_close(self):
        if self.time < self.algorithm.Time:
            self.train_model()
            self.time = Expiry.EndOfMonth(self.algorithm.Time)
        self.rebalance = True

    def OnSecuritiesChanged(self, algorithm: QCAlgorithm, changes: SecurityChanges) -> None:
        for security in changes.RemovedSecurities:
            if security in self.securities:
                self.securities.remove(security)

        for security in changes.AddedSecurities:
            self.securities.append(security)

            # Add Scheduled Event
            if self.scheduled_event == None:
                symbol = security.Symbol
                self.scheduled_event = algorithm.Schedule.On(
                    algorithm.DateRules.EveryDay(symbol),
                    algorithm.TimeRules.BeforeMarketClose(symbol, self.minutes_before_close),
                    self.before_market_close
                )

        self.train_model()
```
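The feature construction in `train_model` and `Update` is terse: each row blends the close with its one-day change (`df.diff() * 0.5 + df * 0.5`), and the training target is the close frame shifted one step back, so the regressor learns a mapping from the blended feature to the following close. The snippet below is a standalone sketch of that same construction on synthetic random-walk prices, outside of LEAN; the ticker names and data are invented for illustration only.

```python
# Standalone illustration of the input/target construction used by the alpha model,
# run on synthetic prices (no LEAN dependency; tickers are hypothetical).
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(1990)
dates = pd.date_range("2022-01-03", periods=300, freq="B")
# Synthetic close prices for three assets (random walks around 100)
df = pd.DataFrame(100 + rng.normal(0, 1, size=(300, 3)).cumsum(axis=0),
                  index=dates, columns=["AAA", "BBB", "CCC"])

# Feature: 0.5 * one-day change + 0.5 * price level, as in train_model
input_ = (df.diff() * 0.5 + df * 0.5).iloc[1:].ffill().fillna(0)
# Target: the close frame shifted one step back, as in train_model
output = df.shift(-1).iloc[:-1].ffill().fillna(0)

regressor = RandomForestRegressor(n_estimators=100, min_samples_split=5, random_state=1990)
regressor.fit(input_, output)

# The latest feature row yields one expected price per asset, which Update
# converts into an expected return relative to the latest close
expected_prices = regressor.predict(input_.iloc[-1].values.reshape(1, -1)).flatten()
expected_returns = (expected_prices - df.iloc[-1].values) / df.iloc[-1].values
print(dict(zip(df.columns, expected_returns.round(4))))
```

In the algorithm itself these expected returns become the magnitudes of the `Insight.Price` objects, which the portfolio construction model later consumes.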
```python
# region imports
from AlgorithmImports import *

from alpha import RandomForestAlphaModel
from portfolio import MeanVarianceOptimizationPortfolioConstructionModel
# endregion


class RandomForestAlgorithm(QCAlgorithm):

    def Initialize(self):
        self.SetStartDate(2012, 1, 1)
        self.SetCash(100000)
        self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Margin)

        self.AddEquity("SPY", Resolution.Daily)
        self.SetBenchmark("SPY")

        # Number of symbols to keep after filtering and the per-symbol indicator state
        self.universeCount = 10
        self.averages = {}

        self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.Raw
        self.AddUniverse(self.SelectCoarse)

        self.AddAlpha(RandomForestAlphaModel(
            self,
            self.GetParameter("minutes_before_close", 5),
            self.GetParameter("n_estimators", 100),
            self.GetParameter("min_samples_split", 5),
            self.GetParameter("lookback_days", 360)
        ))

        self.SetPortfolioConstruction(MeanVarianceOptimizationPortfolioConstructionModel(
            self, lambda time: None, PortfolioBias.Long, period=self.GetParameter("pcm_periods", 5)))

        self.AddRiskManagement(NullRiskManagementModel())

        self.SetExecution(ImmediateExecutionModel())

        self.SetWarmUp(timedelta(200))

    def SelectCoarse(self, coarse):
        # Keep one SymbolData object per symbol to track its moving averages and RSI
        for cf in coarse:
            if cf.Symbol not in self.averages:
                self.averages[cf.Symbol] = SymbolData(cf.Symbol)
            # Update the SymbolData object with the current end-of-day price and volume figures
            avg = self.averages[cf.Symbol]
            avg.Update(cf.EndTime, cf.AdjustedPrice, cf.DollarVolume, cf.Volume)

        # Filter: indicators ready, liquid enough, price above the fast/medium/slow SMAs, RSI above 45
        filtered = [x for x in self.averages.values()
                    if x.IsReady
                    and x.DollarVolume > 3e8
                    and x.Volume > 50000
                    and x.Price.Current.Value > x.SlowSma.Current.Value
                    and x.Price.Current.Value > x.MedSma.Current.Value
                    and x.Price.Current.Value > x.FastSma.Current.Value
                    and x.RSI.Current.Value > 45]

        # Keep the most liquid names
        filtered = sorted(filtered, key=lambda x: x.DollarVolume, reverse=True)
        self.filtered_coarse = [x.Symbol for x in filtered[:self.universeCount]]
        return self.filtered_coarse


class SymbolData(object):

    def __init__(self, symbol):
        self.Symbol = symbol
        self.DollarVolume = 0
        self.Volume = 0
        self.Price = SimpleMovingAverage(1)
        self.FastSma = SimpleMovingAverage(25)
        self.MedSma = SimpleMovingAverage(50)
        self.SlowSma = SimpleMovingAverage(100)
        self.RSI = RelativeStrengthIndex(10)

    @property
    def IsReady(self):
        return all(i.IsReady for i in (self.Price, self.FastSma, self.MedSma, self.SlowSma, self.RSI))

    def Update(self, time, value, dollar_volume, volume):
        # Store the latest liquidity figures and feed the price into every indicator
        self.DollarVolume = dollar_volume
        self.Volume = volume
        self.Price.Update(time, value)
        self.FastSma.Update(time, value)
        self.MedSma.Update(time, value)
        self.SlowSma.Update(time, value)
        self.RSI.Update(time, value)
```
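The coarse selection keeps liquid names that are trending (last price above the 25-, 50- and 100-day simple moving averages) with a 10-day RSI above 45. As a rough reference, the pandas-only sketch below applies the same screen to a synthetic price series; it uses a simple-moving-average RSI rather than LEAN's Wilder-smoothed `RelativeStrengthIndex`, so it only approximates the indicator values.

```python
# Approximate, pandas-only version of the trend/momentum screen in SelectCoarse.
# The RSI here uses simple moving averages, not Wilder smoothing, so it is an
# illustration of the filter logic rather than a bit-for-bit match of LEAN indicators.
import numpy as np
import pandas as pd

def sma_rsi(prices: pd.Series, period: int = 10) -> pd.Series:
    delta = prices.diff()
    gain = delta.clip(lower=0).rolling(period).mean()
    loss = (-delta.clip(upper=0)).rolling(period).mean()
    return 100 - 100 / (1 + gain / loss)

def passes_screen(prices: pd.Series) -> bool:
    # Last price above the 25/50/100-day SMAs and 10-day RSI above 45
    last = prices.iloc[-1]
    return (last > prices.rolling(25).mean().iloc[-1]
            and last > prices.rolling(50).mean().iloc[-1]
            and last > prices.rolling(100).mean().iloc[-1]
            and sma_rsi(prices).iloc[-1] > 45)

rng = np.random.default_rng(7)
prices = pd.Series(100 + rng.normal(0.1, 1, 250).cumsum(),
                   index=pd.date_range("2022-01-03", periods=250, freq="B"))
print(passes_screen(prices))
```

Only the screen logic is mirrored here; the liquidity thresholds come from the coarse fundamental data and are omitted.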
```python
# We re-define the MeanVarianceOptimizationPortfolioConstructionModel because
# - The model doesn't warm-up with ScaledRaw data (https://github.com/QuantConnect/Lean/issues/7239)
# - The original definition doesn't reset the `roc` and `window` in the `MeanVarianceSymbolData` objects when corporate actions occur
from AlgorithmImports import *
from Portfolio.MinimumVariancePortfolioOptimizer import MinimumVariancePortfolioOptimizer

### <summary>
### Provides an implementation of Mean-Variance portfolio optimization based on modern portfolio theory.
### The default model uses the MinimumVariancePortfolioOptimizer that accepts a 63-row matrix of 1-day returns.
### </summary>
class MeanVarianceOptimizationPortfolioConstructionModel(PortfolioConstructionModel):
    def __init__(self,
                 algorithm,
                 rebalance = Resolution.Daily,
                 portfolioBias = PortfolioBias.LongShort,
                 lookback = 1,
                 period = 63,
                 resolution = Resolution.Daily,
                 targetReturn = 0.5,
                 optimizer = None):
        """Initialize the model
        Args:
            rebalance: Rebalancing parameter. If it is a timedelta, date rules or Resolution, it will be converted into a function.
                       If None will be ignored.
                       The function returns the next expected rebalance time for a given algorithm UTC DateTime.
                       The function returns null if unknown, in which case the function will be called again in the next loop.
                       Returning current time will trigger rebalance.
            portfolioBias: Specifies the bias of the portfolio (Short, Long/Short, Long)
            lookback(int): Historical return lookback period
            period(int): The time interval of history price to calculate the weight
            resolution: The resolution of the history price
            optimizer(class): Method used to compute the portfolio weights"""
        super().__init__()
        self.algorithm = algorithm
        self.lookback = lookback
        self.period = period
        self.resolution = resolution
        self.portfolioBias = portfolioBias
        self.sign = lambda x: -1 if x < 0 else (1 if x > 0 else 0)

        lower = algorithm.Settings.MinAbsolutePortfolioTargetPercentage*1.1 if portfolioBias == PortfolioBias.Long else -1
        upper = 0 if portfolioBias == PortfolioBias.Short else 1
        self.optimizer = MinimumVariancePortfolioOptimizer(lower, upper, targetReturn) if optimizer is None else optimizer

        self.symbolDataBySymbol = {}
        self.new_insights = False

    def IsRebalanceDue(self, insights, algorithmUtc):
        if not self.new_insights:
            self.new_insights = len(insights) > 0
        is_rebalance_due = self.new_insights and not self.algorithm.IsWarmingUp and self.algorithm.CurrentSlice.QuoteBars.Count > 0
        if is_rebalance_due:
            self.new_insights = False
        return is_rebalance_due

    def CreateTargets(self, algorithm, insights):
        # Reset and warm-up indicators when corporate actions occur
        data = algorithm.CurrentSlice
        resetSymbols = []
        for symbol in set(data.Dividends.Keys) | set(data.Splits.Keys):
            if not symbol in self.symbolDataBySymbol:
                continue
            symbolData = self.symbolDataBySymbol[symbol]
            if symbolData.ShouldReset():
                symbolData.ClearHistory()
                resetSymbols.append(symbol)
        if resetSymbols:
            self.WarmUp(algorithm, resetSymbols)
        return super().CreateTargets(algorithm, insights)

    def ShouldCreateTargetForInsight(self, insight):
        if len(PortfolioConstructionModel.FilterInvalidInsightMagnitude(self.Algorithm, [insight])) == 0:
            return False

        symbolData = self.symbolDataBySymbol.get(insight.Symbol)
        if insight.Magnitude is None:
            self.Algorithm.SetRunTimeError(ArgumentNullException('MeanVarianceOptimizationPortfolioConstructionModel does not accept \'None\' as Insight.Magnitude. Please checkout the selected Alpha Model specifications.'))
            return False
        symbolData.Add(self.Algorithm.Time, insight.Magnitude)

        return True

    def DetermineTargetPercent(self, activeInsights):
        """Will determine the target percent for each insight
        Args:
            activeInsights: The active insights to generate a target for
        Returns:
            A target percent for each insight"""
        targets = {}

        # If we have no insights just return an empty target list
        if len(activeInsights) == 0:
            return targets

        symbols = [insight.Symbol for insight in activeInsights]

        # Create a dictionary keyed by the symbols in the insights with a pandas.Series as value to create a data frame
        returns = { str(symbol.ID) : data.Return for symbol, data in self.symbolDataBySymbol.items() if symbol in symbols }
        returns = pd.DataFrame(returns)

        # The portfolio optimizer finds the optimal weights for the given data
        weights = self.optimizer.Optimize(returns)
        weights = pd.Series(weights, index = returns.columns)

        # Create portfolio targets from the specified insights
        for insight in activeInsights:
            weight = weights[str(insight.Symbol.ID)]

            # don't trust the optimizer
            if self.portfolioBias != PortfolioBias.LongShort and self.sign(weight) != self.portfolioBias:
                weight = 0
            targets[insight] = weight

        return targets

    def OnSecuritiesChanged(self, algorithm, changes):
        '''Event fired each time we add/remove securities from the data feed
        Args:
            algorithm: The algorithm instance that experienced the change in securities
            changes: The security additions and removals from the algorithm'''
        # clean up data for removed securities
        super().OnSecuritiesChanged(algorithm, changes)
        for removed in changes.RemovedSecurities:
            symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
            symbolData.Reset()

        # initialize data for added securities
        symbols = [x.Symbol for x in changes.AddedSecurities]
        for symbol in [x for x in symbols if x not in self.symbolDataBySymbol]:
            self.symbolDataBySymbol[symbol] = self.MeanVarianceSymbolData(symbol, self.lookback, self.period)
        self.WarmUp(algorithm, symbols)

    def WarmUp(self, algorithm, symbols):
        history = algorithm.History[TradeBar](symbols, self.lookback * self.period + 1, self.resolution, dataNormalizationMode=DataNormalizationMode.ScaledRaw)
        for bars in history:
            for symbol, bar in bars.items():
                self.symbolDataBySymbol.get(symbol).Update(bar.EndTime, bar.Value)

    class MeanVarianceSymbolData:
        '''Contains data specific to a symbol required by this model'''
        def __init__(self, symbol, lookback, period):
            self.symbol = symbol
            self.roc = RateOfChange(f'{symbol}.ROC({lookback})', lookback)
            self.roc.Updated += self.OnRateOfChangeUpdated
            self.window = RollingWindow[IndicatorDataPoint](period)

        def ShouldReset(self):
            # Don't need to reset when the `window` only contains data from the insight.Magnitude
            return self.window.Samples < self.window.Size * 2

        def ClearHistory(self):
            self.roc.Reset()
            self.window.Reset()

        def Reset(self):
            self.roc.Updated -= self.OnRateOfChangeUpdated
            self.ClearHistory()

        def Update(self, time, value):
            return self.roc.Update(time, value)

        def OnRateOfChangeUpdated(self, roc, value):
            if roc.IsReady:
                self.window.Add(value)

        def Add(self, time, value):
            item = IndicatorDataPoint(self.symbol, time, value)
            self.window.Add(item)

        # Get symbols' returns, we use simple return according to
        # Meucci, Attilio, Quant Nugget 2: Linear vs. Compounded Returns – Common Pitfalls in Portfolio Management (May 1, 2010).
        # GARP Risk Professional, pp. 49-51, April 2010, Available at SSRN: https://ssrn.com/abstract=1586656
        @property
        def Return(self):
            return pd.Series(
                data = [x.Value for x in self.window],
                index = [x.EndTime for x in self.window])

        @property
        def IsReady(self):
            return self.window.IsReady

        def __str__(self, **kwargs):
            return '{}: {:.2%}'.format(self.roc.Name, self.window[0])
```
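In `DetermineTargetPercent`, the weight computation is delegated to `self.optimizer`, by default LEAN's `MinimumVariancePortfolioOptimizer`, which takes the data frame of recent daily returns and hands back one weight per column. Conceptually it solves a minimum-variance problem over the sample covariance with fully-invested, bounded weights; the scipy sketch below illustrates that objective under those simplifying assumptions (it ignores the optimizer's target-return handling) and is not the LEAN implementation.

```python
# Conceptual sketch of a minimum-variance weight solver (not LEAN's
# MinimumVariancePortfolioOptimizer): minimize w' * Cov * w subject to
# sum(w) == 1 and per-asset bounds on each weight.
import numpy as np
import pandas as pd
from scipy.optimize import minimize

def minimum_variance_weights(returns: pd.DataFrame, lower: float = 0.0, upper: float = 1.0) -> pd.Series:
    cov = returns.cov().values
    n = returns.shape[1]
    x0 = np.full(n, 1.0 / n)
    constraints = [{"type": "eq", "fun": lambda w: np.sum(w) - 1.0}]
    bounds = [(lower, upper)] * n
    result = minimize(lambda w: w @ cov @ w, x0, bounds=bounds, constraints=constraints)
    return pd.Series(result.x, index=returns.columns)

# Example with 63 days of synthetic daily returns for three hypothetical assets
rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0, 0.01, size=(63, 3)), columns=["A", "B", "C"])
print(minimum_variance_weights(returns))
```

The `lower` bound plays the same role as the one computed in the model's constructor (slightly above `MinAbsolutePortfolioTargetPercentage` for long-only portfolios): it keeps the optimizer from emitting weights too small to turn into valid portfolio targets.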
```python
#region imports
from AlgorithmImports import *
#endregion

# 05/25/2023 - Set the universe data normalization mode to raw
#            - Added warm-up
#            - Made the following updates to the portfolio construction model:
#              - Added IsRebalanceDue to only rebalance after warm-up finishes and there is quote data
#              - Reset the MeanVarianceSymbolData indicator and window when corporate actions occur
#              - Changed the minimum portfolio weight to be algorithm.Settings.MinAbsolutePortfolioTargetPercentage*1.1 to avoid errors
#            - Adjusted the history requests to use scaled raw data normalization
```