Overall Statistics

Total Trades                  10508
Average Win                   0.11%
Average Loss                  -0.12%
Compounding Annual Return     11.553%
Drawdown                      40.700%
Expectancy                    0.174
Net Profit                    198.577%
Sharpe Ratio                  0.511
Probabilistic Sharpe Ratio    2.383%
Loss Rate                     38%
Win Rate                      62%
Profit-Loss Ratio             0.90
Alpha                         0.036
Beta                          1.009
Annual Standard Deviation     0.193
Annual Variance               0.037
Information Ratio             0.409
Tracking Error                0.089
Treynor Ratio                 0.098
Total Fees                    $11798.84
Estimated Strategy Capacity   $6300000.00
Lowest Capacity Asset         SNY SFYYC8T8HEN9
Portfolio Turnover            4.77%
#region imports
from AlgorithmImports import *
from sklearn.ensemble import RandomForestRegressor
#endregion

class RandomForestAlphaModel(AlphaModel):

    securities = []
    scheduled_event = None
    time = datetime.min
    rebalance = False

    def __init__(self, algorithm, minutes_before_close, n_estimators, min_samples_split, lookback_days):
        self.algorithm = algorithm
        self.minutes_before_close = minutes_before_close
        self.n_estimators = n_estimators
        self.min_samples_split = min_samples_split
        self.lookback_days = lookback_days

    def Update(self, algorithm: QCAlgorithm, data: Slice) -> List[Insight]:
        if not self.rebalance or data.QuoteBars.Count == 0:
            return []
        
        # Fetch history on our universe
        symbols = [s.Symbol for s in self.securities]
        df = algorithm.History(symbols, 2, Resolution.Daily, dataNormalizationMode=DataNormalizationMode.ScaledRaw)
        if df.empty: 
            return []

        self.rebalance = False
    
        # Unstack the close prices so each symbol becomes a column on a single time index.
        df = df.close.unstack(level=0)
    
        # Feature engineer the data for input
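        # (a 50/50 blend of the latest close level and its one-day change, matching the features used in train_model)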
        input_ = df.diff() * 0.5 + df * 0.5
        input_ = input_.iloc[-1].fillna(0).values.reshape(1, -1)
        
        # Predict the expected price
        predictions = self.regressor.predict(input_)
        
        # Get the expected return
        predictions = (predictions - df.iloc[-1].values) / df.iloc[-1].values
        predictions = predictions.flatten()
        
        insights = []
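        # Each insight carries the predicted return as its magnitude; the mean-variance portfolio construction model consumes these magnitudes.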
        for i in range(len(predictions)):
            insights.append( Insight.Price(df.columns[i], timedelta(5), InsightDirection.Up, predictions[i]) )
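        # Cancel insights still active from previous rebalances so only the newly emitted ones drive the portfolio.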
        algorithm.Insights.Cancel(symbols)
        return insights

    def train_model(self):
        # Initialize the Random Forest Regressor
        self.regressor = RandomForestRegressor(n_estimators=self.n_estimators, min_samples_split=self.min_samples_split, random_state = 1990)
        
        # Get historical data
        history = self.algorithm.History([s.Symbol for s in self.securities], self.lookback_days, Resolution.Daily, dataNormalizationMode=DataNormalizationMode.ScaledRaw)
        
        # Select the close column and then call the unstack method.
        df = history['close'].unstack(level=0)
        
        # Feature engineer the data for input.
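        # (the same 50/50 blend of close level and one-day change used at prediction time in Update)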
        input_ = df.diff() * 0.5 + df * 0.5
        input_ = input_.iloc[1:].ffill().fillna(0)
        
        # Shift the data back one step so each feature row is paired with the next day's close as the training target.
        output = df.shift(-1).iloc[:-1].ffill().fillna(0)
        
        # Fit the regressor
        self.regressor.fit(input_, output)


    def before_market_close(self):
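        # Retrain at most once per month; flag a rebalance so the next Update call emits fresh insights.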
        if self.time < self.algorithm.Time:
            self.train_model()
            self.time = Expiry.EndOfMonth(self.algorithm.Time)
        self.rebalance = True

    def OnSecuritiesChanged(self, algorithm: QCAlgorithm, changes: SecurityChanges) -> None:
        for security in changes.RemovedSecurities:
            if security in self.securities:
                self.securities.remove(security)
                
        for security in changes.AddedSecurities:
            self.securities.append(security)

            # Add Scheduled Event
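            # A single scheduled event is enough: it fires every trading day, minutes_before_close minutes before this symbol's close, for the whole universe.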
            if self.scheduled_event is None:
                symbol = security.Symbol
                self.scheduled_event = algorithm.Schedule.On(
                    algorithm.DateRules.EveryDay(symbol), 
                    algorithm.TimeRules.BeforeMarketClose(symbol, self.minutes_before_close), 
                    self.before_market_close
                )
                
        self.train_model()
# region imports
from AlgorithmImports import *
from Selection.UniverseSelectionModel import UniverseSelectionModel
from QuantConnect.Data.UniverseSelection import *
from alpha import RandomForestAlphaModel
from portfolio import MeanVarianceOptimizationPortfolioConstructionModel
# endregion

class RandomForestAlgorithm(QCAlgorithm):

    def Initialize(self):
        self.SetStartDate(2007, 1, 1)
        self.SetEndDate(2017, 1, 1)
        self.SetCash(100000)
        self.SetBrokerageModel(BrokerageName.InteractiveBrokersBrokerage, AccountType.Margin)

        self.UniverseSettings.DataNormalizationMode = DataNormalizationMode.Raw
        self.UniverseSettings.Leverage = 1
        self.AddUniverseSelection(MyFundamentalUniverseSelectionModel(self.UniverseSettings))

        self.SetBenchmark("SPY")

        self.AddAlpha(RandomForestAlphaModel(
            self,
            self.GetParameter("minutes_before_close", 5),
            self.GetParameter("n_estimators", 100),
            self.GetParameter("min_samples_split", 5),
            self.GetParameter("lookback_days", 360)
        ))

        self.SetPortfolioConstruction(MeanVarianceOptimizationPortfolioConstructionModel(self, lambda time: None, PortfolioBias.Long, period=self.GetParameter("pcm_periods", 5)))
        
        self.AddRiskManagement(NullRiskManagementModel())

        self.SetExecution(ImmediateExecutionModel())

        self.SetWarmUp(timedelta(200))

        self.symbols = None   

class MyFundamentalUniverseSelectionModel(FineFundamentalUniverseSelectionModel):

    def __init__(self, universe_settings: UniverseSettings) -> None:
        super().__init__(self.SelectCoarse, self.SelectFine, universe_settings)
        self.averages = {}  # 20/50/200-day SMAs per symbol, updated in SelectCoarse

    def SelectCoarse(self, coarse: List[CoarseFundamental]) -> List[Symbol]:
        # Update the 20/50/200-day moving averages with the daily coarse data and keep
        # liquid symbols whose price is above all three averages.
        filtered_coarse = []
        for cf in coarse:
            if cf.Symbol not in self.averages:
                self.averages[cf.Symbol] = [SimpleMovingAverage(period) for period in (20, 50, 200)]

            smas = self.averages[cf.Symbol]
            for sma in smas:
                sma.Update(cf.EndTime, cf.AdjustedPrice)

            if cf.Volume > 1e5 and all(sma.IsReady and cf.Price > sma.Current.Value for sma in smas):
                filtered_coarse.append(cf.Symbol)

        return filtered_coarse

    def SelectFine(self, fine: List[FineFundamental]) -> List[Symbol]:
        
        filtered_fine = [x for x in fine if
                         x.MarketCap > 2e9
                         and x.ValuationRatios.PriceChange1M > 0
                         and x.OperationRatios.RevenueGrowth.ThreeMonths > 0
                         and x.OperationRatios.OperationIncomeGrowth.ThreeMonths > 0
                         and x.OperationRatios.NetIncomeGrowth.ThreeMonths > 0
                         and x.OperationRatios.NetIncomeContOpsGrowth.ThreeMonths > 0
                         and x.OperationRatios.CFOGrowth.OneYear > 0
                         and x.OperationRatios.FCFGrowth.OneYear > 0]

        top = sorted(filtered_fine, key = lambda x: x.MarketCap, reverse=True)[:25]

        self.symbols = [x.Symbol for x in top]

        return self.symbols
# We re-define the MeanVarianceOptimizationPortfolioConstructionModel because
# - The model doesn't warm up with ScaledRaw data (https://github.com/QuantConnect/Lean/issues/7239)
# - The original definition doesn't reset the `roc` and `window` in the `MeanVarianceSymbolData` objects when corporate actions occur

from AlgorithmImports import *
from Portfolio.MinimumVariancePortfolioOptimizer import MinimumVariancePortfolioOptimizer

### <summary>
### Provides an implementation of Mean-Variance portfolio optimization based on modern portfolio theory.
### The default model uses the MinimumVariancePortfolioOptimizer that accepts a 63-row matrix of 1-day returns.
### </summary>
class MeanVarianceOptimizationPortfolioConstructionModel(PortfolioConstructionModel):
    def __init__(self,
                 algorithm,
                 rebalance = Resolution.Daily,
                 portfolioBias = PortfolioBias.LongShort,
                 lookback = 1,
                 period = 63,
                 resolution = Resolution.Daily,
                 targetReturn = 0.5,
                 optimizer = None):
        """Initialize the model
        Args:
            rebalance: Rebalancing parameter. If it is a timedelta, date rules or Resolution, it will be converted into a function.
                              If None will be ignored.
                              The function returns the next expected rebalance time for a given algorithm UTC DateTime.
                              The function returns null if unknown, in which case the function will be called again in the
                              next loop. Returning current time will trigger rebalance.
            portfolioBias: Specifies the bias of the portfolio (Short, Long/Short, Long)
            lookback(int): Historical return lookback period
            period(int): The time interval of history price to calculate the weight
            resolution: The resolution of the history price
            optimizer(class): Method used to compute the portfolio weights"""
        super().__init__()
        self.algorithm = algorithm
        self.lookback = lookback
        self.period = period
        self.resolution = resolution
        self.portfolioBias = portfolioBias
        self.sign = lambda x: -1 if x < 0 else (1 if x > 0 else 0)

        lower = algorithm.Settings.MinAbsolutePortfolioTargetPercentage*1.1 if portfolioBias == PortfolioBias.Long else -1
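        # (the 1.1 factor keeps long-only weights just above Settings.MinAbsolutePortfolioTargetPercentage so targets are never rejected as too small)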
        upper = 0 if portfolioBias == PortfolioBias.Short else 1
        self.optimizer = MinimumVariancePortfolioOptimizer(lower, upper, targetReturn) if optimizer is None else optimizer

        self.symbolDataBySymbol = {}
        self.new_insights = False

    def IsRebalanceDue(self, insights, algorithmUtc):
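        # Rebalance only after new insights have arrived, warm-up has finished, and the current slice contains quote data.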
        if not self.new_insights:
            self.new_insights = len(insights) > 0
        is_rebalance_due = self.new_insights and not self.algorithm.IsWarmingUp and self.algorithm.CurrentSlice.QuoteBars.Count > 0
        if is_rebalance_due:
            self.new_insights = False
        return is_rebalance_due

    def CreateTargets(self, algorithm, insights):
        # Reset and warm-up indicators when corporate actions occur
        data = algorithm.CurrentSlice
        resetSymbols = []

        for symbol in set(data.Dividends.Keys) | set(data.Splits.Keys):

            if symbol not in self.symbolDataBySymbol:
                continue
            
            symbolData = self.symbolDataBySymbol[symbol]
            if symbolData.ShouldReset():
                symbolData.ClearHistory()
                resetSymbols.append(symbol)
        if resetSymbols:
            self.WarmUp(algorithm, resetSymbols)

        return super().CreateTargets(algorithm, insights)

    def ShouldCreateTargetForInsight(self, insight):
        if len(PortfolioConstructionModel.FilterInvalidInsightMagnitude(self.Algorithm, [insight])) == 0:
            return False

        symbolData = self.symbolDataBySymbol.get(insight.Symbol)
        if insight.Magnitude is None:
            self.Algorithm.SetRunTimeError(ArgumentNullException('MeanVarianceOptimizationPortfolioConstructionModel does not accept \'None\' as Insight.Magnitude. Please checkout the selected Alpha Model specifications.'))
            return False
        symbolData.Add(self.Algorithm.Time, insight.Magnitude)

        return True

    def DetermineTargetPercent(self, activeInsights):
        """
         Will determine the target percent for each insight
        Args:
        Returns:
        """
        targets = {}

        # If we have no insights just return an empty target list
        if len(activeInsights) == 0:
            return targets

        symbols = [insight.Symbol for insight in activeInsights]

        # Create a dictionary keyed by the symbols in the insights, with a pandas.Series as the value, to create a DataFrame
        returns = { str(symbol.ID) : data.Return for symbol, data in self.symbolDataBySymbol.items() if symbol in symbols }
        returns = pd.DataFrame(returns)

        # The portfolio optimizer finds the optimal weights for the given data
        weights = self.optimizer.Optimize(returns)
        weights = pd.Series(weights, index = returns.columns)

        # Create portfolio targets from the specified insights
        for insight in activeInsights:
            weight = weights[str(insight.Symbol.ID)]

            # don't trust the optimizer
            if self.portfolioBias != PortfolioBias.LongShort and self.sign(weight) != self.portfolioBias:
                weight = 0
            targets[insight] = weight

        return targets

    def OnSecuritiesChanged(self, algorithm, changes):
        '''Event fired each time we add/remove securities from the data feed
        Args:
            algorithm: The algorithm instance that experienced the change in securities
            changes: The security additions and removals from the algorithm'''

        # clean up data for removed securities
        super().OnSecuritiesChanged(algorithm, changes)
        for removed in changes.RemovedSecurities:
            symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
            if symbolData is not None:
                symbolData.Reset()

        # initialize data for added securities
        symbols = [x.Symbol for x in changes.AddedSecurities]
        for symbol in [x for x in symbols if x not in self.symbolDataBySymbol]:
            self.symbolDataBySymbol[symbol] = self.MeanVarianceSymbolData(symbol, self.lookback, self.period)
        self.WarmUp(algorithm, symbols)
    
    def WarmUp(self, algorithm, symbols):
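        # Warm up the indicators with ScaledRaw history so the return series are split- and dividend-adjusted consistently.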
        history = algorithm.History[TradeBar](symbols, self.lookback * self.period + 1, self.resolution, dataNormalizationMode=DataNormalizationMode.ScaledRaw)
        for bars in history:
            for symbol, bar in bars.items():
                self.symbolDataBySymbol.get(symbol).Update(bar.EndTime, bar.Value)

    class MeanVarianceSymbolData:
        '''Contains data specific to a symbol required by this model'''
        def __init__(self, symbol, lookback, period):
            self.symbol = symbol
            self.roc = RateOfChange(f'{symbol}.ROC({lookback})', lookback)
            self.roc.Updated += self.OnRateOfChangeUpdated
            self.window = RollingWindow[IndicatorDataPoint](period)
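            # During warm-up the ROC indicator fills the window via OnRateOfChangeUpdated; afterwards the window receives insight magnitudes through Add.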

        def ShouldReset(self):
            # No reset is needed once the `window` only contains data from insight.Magnitude (the warm-up history has already rolled out)
            return self.window.Samples < self.window.Size * 2
        
        def ClearHistory(self):
            self.roc.Reset()
            self.window.Reset()

        def Reset(self):
            self.roc.Updated -= self.OnRateOfChangeUpdated
            self.ClearHistory()

        def Update(self, time, value):
            return self.roc.Update(time, value)

        def OnRateOfChangeUpdated(self, roc, value):
            if roc.IsReady:
                self.window.Add(value)

        def Add(self, time, value):
            item = IndicatorDataPoint(self.symbol, time, value)
            self.window.Add(item)

        # Get the symbol's returns; we use simple (linear) returns, following
        # Meucci, Attilio, Quant Nugget 2: Linear vs. Compounded Returns – Common Pitfalls in Portfolio Management (May 1, 2010). 
        # GARP Risk Professional, pp. 49-51, April 2010 , Available at SSRN: https://ssrn.com/abstract=1586656
        @property
        def Return(self):
            return pd.Series(
                data = [x.Value for x in self.window],
                index = [x.EndTime for x in self.window])

        @property
        def IsReady(self):
            return self.window.IsReady

        def __str__(self, **kwargs):
            return '{}: {:.2%}'.format(self.roc.Name, self.window[0].Value)
#region imports
from AlgorithmImports import *
#endregion
# 05/25/2023 -Set the universe data normalization mode to raw
#            -Added warm-up
#            -Made the following updates to the portfolio construction model:
#                - Added IsRebalanceDue to only rebalance after warm-up finishes and there is quote data
#                - Reset the MeanVarianceSymbolData indicator and window when corporate actions occur
#                - Changed the minimum portfolio weight to be algorithm.Settings.MinAbsolutePortfolioTargetPercentage*1.1 to avoid errors
#            -Adjusted the history requests to use scaled raw data normalization
#            https://www.quantconnect.com/terminal/processCache?request=embedded_backtest_587cc09bd82676a2ede5c88b100ef70b.html