| Overall Statistics | |
| --- | --- |
| Total Orders | 246 |
| Average Win | 1.35% |
| Average Loss | -0.11% |
| Compounding Annual Return | 3.014% |
| Drawdown | 22.900% |
| Expectancy | 1.057 |
| Start Equity | 100000 |
| End Equity | 105508.11 |
| Net Profit | 5.508% |
| Sharpe Ratio | -0.122 |
| Sortino Ratio | -0.172 |
| Probabilistic Sharpe Ratio | 10.283% |
| Loss Rate | 85% |
| Win Rate | 15% |
| Profit-Loss Ratio | 12.52 |
| Alpha | -0.041 |
| Beta | 0.298 |
| Annual Standard Deviation | 0.135 |
| Annual Variance | 0.018 |
| Information Ratio | -0.604 |
| Tracking Error | 0.16 |
| Treynor Ratio | -0.056 |
| Total Fees | $281.92 |
| Estimated Strategy Capacity | $900000000.00 |
| Lowest Capacity Asset | SPY R735QTJ8XC9X |
| Portfolio Turnover | 6.34% |
```python
# region imports
from AlgorithmImports import *
import torch
from torch import nn
import joblib
# endregion


class PyTorchExampleAlgorithm(QCAlgorithm):

    def initialize(self):
        self.set_start_date(2022, 7, 4)
        self.set_cash(100000)
        self.symbol = self.add_equity("SPY", Resolution.DAILY).symbol

        # Warm up the rolling window with two years of daily closes.
        training_length = 252 * 2
        self.training_data = RollingWindow[float](training_length)
        history = self.history[TradeBar](self.symbol, training_length, Resolution.DAILY)
        for trade_bar in history:
            self.training_data.add(trade_bar.close)

        # Reuse a previously trained model from the Object Store if one exists;
        # otherwise, build a new network and train it immediately.
        if self.object_store.contains_key("model"):
            file_name = self.object_store.get_file_path("model")
            self.model = joblib.load(file_name)
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            self.model = NeuralNetwork().to(device)
            self.train(self.my_training_method)

        # Re-train every Sunday at 8 AM.
        self.train(self.date_rules.every(DayOfWeek.SUNDAY), self.time_rules.at(8, 0), self.my_training_method)

    def get_features_and_labels(self, n_steps=5):
        # Reverse the rolling window so the prices run oldest to newest.
        close_prices = list(self.training_data)[::-1]

        # Each feature row holds n_steps consecutive closes;
        # the label is the close that follows the window.
        features = []
        labels = []
        for i in range(len(close_prices) - n_steps):
            features.append(close_prices[i:i + n_steps])
            labels.append(close_prices[i + n_steps])
        return np.array(features), np.array(labels)

    def my_training_method(self):
        features, labels = self.get_features_and_labels()

        # Set the loss and optimization functions.
        # In this example, use mean squared error as the loss function
        # and stochastic gradient descent as the optimizer.
        loss_fn = nn.MSELoss()
        learning_rate = 0.001
        optimizer = torch.optim.SGD(self.model.parameters(), lr=learning_rate)

        # Train for a preset number of epochs.
        epochs = 5
        for t in range(epochs):
            # Fit the model one sample at a time.
            for feature, label in zip(features, labels):
                # Compute the prediction and the loss; keep the label
                # on the same device as the model output.
                pred = self.model(feature)
                real = torch.from_numpy(np.array(label).flatten()).float().to(pred.device)
                loss = loss_fn(pred, real)

                # Perform backpropagation.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

    def on_data(self, slice: Slice) -> None:
        if self.symbol in slice.bars:
            self.training_data.add(slice.bars[self.symbol].close)

            # Predict the next close from the latest n_steps closes.
            features, _ = self.get_features_and_labels()
            prediction = self.model(features[-1].reshape(1, -1))
            prediction = float(prediction.detach().cpu().numpy()[-1])

            # Go long if the model predicts a higher price, short if lower.
            if prediction > slice[self.symbol].price:
                self.set_holdings(self.symbol, 1)
            elif prediction < slice[self.symbol].price:
                self.set_holdings(self.symbol, -1)

    def on_end_of_algorithm(self):
        # Serialize the model and persist it in the Object Store
        # so future runs can reload it instead of training from scratch.
        model_key = "model"
        file_name = self.object_store.get_file_path(model_key)
        joblib.dump(self.model, file_name)
        self.object_store.save(model_key)


class NeuralNetwork(nn.Module):

    # Model structure: a small feed-forward regressor.
    def __init__(self):
        super().__init__()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(5, 5),  # input size, output size of the layer
            nn.ReLU(),        # ReLU non-linear transformation
            nn.Linear(5, 5),
            nn.ReLU(),
            nn.Linear(5, 1)   # output size 1 for regression
        )

    # Feed-forward prediction.
    def forward(self, x):
        # Convert the NumPy input to a float tensor on the model's device.
        x = torch.from_numpy(x).float().to(next(self.parameters()).device)
        return self.linear_relu_stack(x)
```
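`my_training_method` above steps the optimizer once per sample, which is the simplest way to drive SGD but not the only one. For comparison, here is a minimal standalone sketch, independent of LEAN and using a synthetic price series (every name and value below is illustrative, not part of the algorithm above), that fits the same architecture, loss, and optimizer with one batched pass per epoch:

```python
import numpy as np
import torch
from torch import nn

# Synthetic "close" series; a purely illustrative stand-in for real data.
rng = np.random.default_rng(0)
prices = np.cumsum(rng.normal(0, 0.01, 300))

# Build (n_steps closes -> next close) pairs, mirroring get_features_and_labels.
n_steps = 5
features = np.array([prices[i:i + n_steps] for i in range(len(prices) - n_steps)])
labels = np.array([prices[i + n_steps] for i in range(len(prices) - n_steps)])

x = torch.from_numpy(features).float()             # shape: (n_samples, 5)
y = torch.from_numpy(labels).float().unsqueeze(1)  # shape: (n_samples, 1)

# The same layer stack as NeuralNetwork, expressed inline.
model = nn.Sequential(
    nn.Linear(5, 5), nn.ReLU(),
    nn.Linear(5, 5), nn.ReLU(),
    nn.Linear(5, 1)
)
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

for epoch in range(5):
    pred = model(x)           # one batched forward pass over every sample
    loss = loss_fn(pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print(f"epoch {epoch}: loss={loss.item():.6f}")
```

Batched updates average the gradient over the whole window, so each epoch costs one optimizer step instead of roughly five hundred; whether that trains better on this data is an empirical question, but it is the more common PyTorch idiom.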