Overall Statistics

| Statistic | Value |
| --- | --- |
| Total Orders | 528 |
| Average Win | 1.11% |
| Average Loss | -0.58% |
| Compounding Annual Return | 22.316% |
| Drawdown | 12.900% |
| Expectancy | 0.269 |
| Start Equity | 100000 |
| End Equity | 175387.52 |
| Net Profit | 75.388% |
| Sharpe Ratio | 0.923 |
| Sortino Ratio | 1.151 |
| Probabilistic Sharpe Ratio | 54.421% |
| Loss Rate | 56% |
| Win Rate | 44% |
| Profit-Loss Ratio | 1.89 |
| Alpha | 0.133 |
| Beta | -0.021 |
| Annual Standard Deviation | 0.143 |
| Annual Variance | 0.02 |
| Information Ratio | 0.427 |
| Tracking Error | 0.207 |
| Treynor Ratio | -6.355 |
| Total Fees | $1108.21 |
| Estimated Strategy Capacity | $610000000.00 |
| Lowest Capacity Asset | SPY R735QTJ8XC9X |
| Portfolio Turnover | 47.89% |
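The backtest above was produced by the algorithm below. It trains a small TensorFlow feed-forward network on lagged daily close-price differences of SPY, then rebalances each day into a fully invested long or short position depending on the sign of the model's output.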
```python
#region imports
from AlgorithmImports import *
import tensorflow as tf
#endregion


class SwimmingFluorescentYellowCormorant(QCAlgorithm):

    def Initialize(self):
        self.SetStartDate(2021, 6, 22)  # Set Start Date
        self.SetCash(100000)            # Set Strategy Cash
        self.symbol = self.AddEquity("SPY", Resolution.Daily).Symbol

        # Network architecture and training hyperparameters
        num_factors = 5
        num_neurons_1 = 10
        num_neurons_2 = 20
        num_neurons_3 = 5
        self.epochs = 20
        self.learning_rate = 0.0001

        # Small feed-forward regression network: 5 lagged-difference
        # features in, one price-change estimate out
        self.model = tf.keras.Sequential([
            tf.keras.layers.Dense(num_neurons_1, activation=tf.nn.relu, input_shape=(num_factors,)),  # input shape required
            tf.keras.layers.Dense(num_neurons_2, activation=tf.nn.relu),
            tf.keras.layers.Dense(num_neurons_3, activation=tf.nn.relu),
            tf.keras.layers.Dense(1)
        ])

        # Warm up a rolling window of daily closes with two years of history
        training_length = 252 * 2
        self.training_data = RollingWindow[float](training_length)
        history = self.History[TradeBar](self.symbol, training_length, Resolution.Daily)
        for trade_bar in history:
            self.training_data.Add(trade_bar.Close)

        self.Train(self.my_training_method)

    def get_features_and_labels(self, lookback=5):
        # Oldest-to-newest series of closes from the rolling window
        data = pd.Series(list(self.training_data)[::-1])

        # Features: close-price differences at lags 1..lookback
        lookback_series = []
        for i in range(1, lookback + 1):
            df = data.diff(i)[lookback:-1]
            df.name = f"close-{i}"
            lookback_series.append(df)
        X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()

        # Label: one-step price difference, close[t] - close[t+1]
        Y = data.diff(-1)[lookback:-1].reset_index(drop=True)
        return X.values, Y.values

    def my_training_method(self):
        features, labels = self.get_features_and_labels()
        # Reshape the labels to (n, 1) so they match the model output;
        # otherwise the subtraction below broadcasts to an (n, n) matrix
        labels = labels.reshape(-1, 1)

        # Define the loss function; we use MSE in this example
        def loss_mse(target_y, predicted_y):
            return tf.reduce_mean(tf.square(target_y - predicted_y))

        # Train the model with an explicit full-batch gradient-descent loop
        optimizer = tf.keras.optimizers.Adam(learning_rate=self.learning_rate)
        for i in range(self.epochs):
            with tf.GradientTape() as t:
                loss = loss_mse(labels, self.model(features))
            jac = t.gradient(loss, self.model.trainable_weights)
            optimizer.apply_gradients(zip(jac, self.model.trainable_weights))

    def OnData(self, data):
        if data.Bars.ContainsKey(self.symbol):
            self.training_data.Add(data.Bars[self.symbol].Close)

        # Score the latest feature row and take a fully invested long
        # position when the model output is positive, short otherwise
        new_features, __ = self.get_features_and_labels()
        prediction = self.model(new_features)
        prediction = float(prediction.numpy()[-1])

        if prediction > 0:
            self.SetHoldings(self.symbol, 1)
        else:
            self.SetHoldings(self.symbol, -1)
```
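The windowing in `get_features_and_labels` is the least obvious part of the algorithm, so here is a minimal standalone sketch of the same feature and label construction (plain pandas, no QuantConnect dependencies; the toy price series is invented purely for illustration):

```python
import pandas as pd

# Toy close-price series, oldest first (invented values for illustration)
data = pd.Series([100.0, 101.0, 99.5, 102.0, 103.0, 102.5, 104.0, 105.5])
lookback = 5

# Features: price differences at lags 1..lookback, trimmed so every row
# has a full set of lags and a known next-day label
lookback_series = []
for i in range(1, lookback + 1):
    df = data.diff(i)[lookback:-1]
    df.name = f"close-{i}"
    lookback_series.append(df)
X = pd.concat(lookback_series, axis=1).reset_index(drop=True).dropna()

# Labels for the same rows: close[t] - close[t+1]
Y = data.diff(-1)[lookback:-1].reset_index(drop=True)

print(X)  # row t holds close[t]-close[t-1], ..., close[t]-close[t-5]
print(Y)  # row t holds close[t]-close[t+1]
```

As for the training loop: the manual `tf.GradientTape` loop in `my_training_method` performs essentially the same full-batch gradient steps you would get by compiling the model with an MSE loss and an Adam optimizer and calling `fit` for 20 epochs; writing it out by hand simply makes each gradient computation and weight update explicit.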