Overall Statistics

Total Orders: 590
Average Win: 0.49%
Average Loss: -0.14%
Compounding Annual Return: -6.388%
Drawdown: 25.200%
Expectancy: -0.181
Start Equity: $100,000
End Equity: $82,256.87
Net Profit: -17.743%
Sharpe Ratio: -0.459
Sortino Ratio: -0.582
Probabilistic Sharpe Ratio: 0.527%
Loss Rate: 82%
Win Rate: 18%
Profit-Loss Ratio: 3.59
Alpha: -0.024
Beta: -0.929
Annual Standard Deviation: 0.143
Annual Variance: 0.021
Information Ratio: -0.392
Tracking Error: 0.283
Treynor Ratio: 0.071
Total Fees: $604.30
Estimated Strategy Capacity: $570,000,000.00
Lowest Capacity Asset: SPY R735QTJ8XC9X
Portfolio Turnover: 2.98%
#region imports
from AlgorithmImports import *
import pandas as pd
import tensorflow as tf
#endregion

class TensorFlowAlgorithm(QCAlgorithm):

    def initialize(self):
        self.set_start_date(2021, 6, 22)  # Set Start Date
        self.set_cash(100000)  # Set Strategy Cash
        
        self.symbol = self.add_equity("SPY", Resolution.DAILY).symbol

        num_factors = 5
        num_neurons_1 = 10
        num_neurons_2 = 10
        num_neurons_3 = 5
        self.epochs = 100
        self.learning_rate = 0.0001

        self.model = tf.keras.Sequential([
            tf.keras.layers.Dense(num_neurons_1, activation=tf.nn.relu, input_shape=(num_factors,)),  # input shape required
            tf.keras.layers.Dense(num_neurons_2, activation=tf.nn.relu),
            tf.keras.layers.Dense(num_neurons_3, activation=tf.nn.relu),
            tf.keras.layers.Dense(1)
        ])

        training_length = 500
        self.training_data = RollingWindow[float](training_length)
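        # Warm up the rolling window with the trailing daily closes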
        history = self.history[TradeBar](self.symbol, training_length, Resolution.DAILY)
        for trade_bar in history:
            self.training_data.add(trade_bar.close)

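        # Fit the model immediately, then retrain at 8:00 AM at the start of each week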
        self.train(self.my_training_method)
        self.train(self.date_rules.week_start(), self.time_rules.at(8, 0), self.my_training_method)

    def get_features_and_labels(self, lookback=5):
        lookback_series = []

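        # The rolling window iterates newest-first, so reverse it into chronological order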
        data = pd.Series(list(self.training_data)[::-1])
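        # Features: close-price differences at lags 1 through `lookback`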
        for i in range(1, lookback + 1):
            df = data.diff(i)[lookback:-1]
            df.name = f"close-{i}"
            lookback_series.append(df)

        X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
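        # Labels: diff(-1) yields close[t] - close[t+1], the one-day-ahead close difference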
        Y = data.diff(-1)[lookback:-1].reset_index(drop=True)
        return X.values, Y.values

    def my_training_method(self):
        features, labels = self.get_features_and_labels()

        # Define the loss function; this example uses mean squared error (MSE)
        def loss_mse(target_y, predicted_y):
            return tf.reduce_mean(tf.square(target_y - predicted_y))

        # Train the model
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
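        # Manual training loop: one full-batch gradient step per epoch via GradientTape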
        for i in range(self.epochs):
            with tf.GradientTape() as t:
                loss = loss_mse(labels, self.model(features))

            gradients = t.gradient(loss, self.model.trainable_weights)
            optimizer.apply_gradients(zip(gradients, self.model.trainable_weights))

    def on_data(self, data):
        if data.bars.contains_key(self.symbol):
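            # Add today's close to the rolling window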
            self.training_data.add(data.bars[self.symbol].close)

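            # Rebuild the features from the updated window and take the most recent prediction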
            new_features, __ = self.get_features_and_labels()
            prediction = self.model(new_features)
            prediction = float(prediction.numpy()[-1])

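            # Go fully long when the model's output is positive, fully short otherwise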
            self.set_holdings(self.symbol, 1 if prediction > 0 else -1)
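
The feature and label construction in get_features_and_labels can be sanity-checked outside LEAN with plain pandas. The following is a minimal standalone sketch using a hypothetical toy close series (the prices are illustrative only); the transformation lines mirror the method above. It shows that each row of X holds the close differences at lags 1 through 5, while Y holds diff(-1), i.e. close[t] - close[t+1].

import pandas as pd

# Hypothetical chronological close series standing in for the rolling window
data = pd.Series([100.0, 101.0, 103.0, 102.0, 105.0, 104.0, 106.0, 108.0])
lookback = 5

# Features: lagged close differences, exactly as in get_features_and_labels
lookback_series = []
for i in range(1, lookback + 1):
    df = data.diff(i)[lookback:-1]
    df.name = f"close-{i}"
    lookback_series.append(df)

X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()
Y = data.diff(-1)[lookback:-1].reset_index(drop=True)

print(X)  # one row per usable day, columns close-1 .. close-5
print(Y)  # close[t] - close[t+1] for the same rows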