| Overall Statistics | |
| --- | --- |
| Total Orders | 101 |
| Average Win | 1.11% |
| Average Loss | -0.06% |
| Compounding Annual Return | 4.550% |
| Drawdown | 12.800% |
| Expectancy | 3.056 |
| Start Equity | 100000 |
| End Equity | 104558.68 |
| Net Profit | 4.559% |
| Sharpe Ratio | -0.016 |
| Sortino Ratio | -0.022 |
| Probabilistic Sharpe Ratio | 19.201% |
| Loss Rate | 78% |
| Win Rate | 22% |
| Profit-Loss Ratio | 17.41 |
| Alpha | -0.051 |
| Beta | 0.674 |
| Annual Standard Deviation | 0.152 |
| Annual Variance | 0.023 |
| Information Ratio | -0.613 |
| Tracking Error | 0.123 |
| Treynor Ratio | -0.004 |
| Total Fees | $120.33 |
| Estimated Strategy Capacity | $1400000000.00 |
| Lowest Capacity Asset | SPY R735QTJ8XC9X |
| Portfolio Turnover | 6.56% |
# region imports
from AlgorithmImports import *
import torch
from torch import nn
import joblib
# endregion


class PyTorchExampleAlgorithm(QCAlgorithm):

    def initialize(self):
        self.set_start_date(2022, 9, 1)
        self.set_end_date(2023, 9, 1)
        self.set_cash(100000)
        self.symbol = self.add_equity("SPY", Resolution.DAILY).symbol

        # Warm up the rolling window with two years of daily closes.
        training_length = 252 * 2
        self.training_data = RollingWindow[float](training_length)
        history = self.history[TradeBar](self.symbol, training_length, Resolution.DAILY)
        for trade_bar in history:
            self.training_data.add(trade_bar.close)

        # Load the trained model from the Object Store if it exists;
        # otherwise, create a new model and train it immediately.
        if self.object_store.contains_key("model"):
            file_name = self.object_store.get_file_path("model")
            self.model = joblib.load(file_name)
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
            self.model = NeuralNetwork().to(device)
            self.train(self.my_training_method)

        # Re-train the model every Sunday at 8:00 AM.
        self.train(self.date_rules.every(DayOfWeek.SUNDAY), self.time_rules.at(8, 0), self.my_training_method)

    def get_features_and_labels(self, n_steps=5):
        # Build sliding windows of n_steps closes (features), each labeled with the next close.
        close_prices = list(self.training_data)[::-1]
        features = []
        labels = []
        for i in range(len(close_prices) - n_steps):
            features.append(close_prices[i:i + n_steps])
            labels.append(close_prices[i + n_steps])
        return np.array(features), np.array(labels)

    def my_training_method(self):
        features, labels = self.get_features_and_labels()

        # Set the loss and optimization functions.
        # In this example, use mean squared error as the loss function and
        # stochastic gradient descent as the optimizer.
        loss_fn = nn.MSELoss()
        learning_rate = 0.001
        optimizer = torch.optim.SGD(self.model.parameters(), lr=learning_rate)

        # Train for a preset number of epochs.
        epochs = 5
        for t in range(epochs):
            # Fit the model one sample at a time.
            for feature, label in zip(features, labels):
                # Compute the prediction and the loss.
                pred = self.model(feature)
                real = torch.from_numpy(np.array(label).flatten()).float()
                loss = loss_fn(pred, real)

                # Perform backpropagation.
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

    def on_data(self, slice: Slice) -> None:
        if self.symbol in slice.bars:
            self.training_data.add(slice.bars[self.symbol].close)

            # Predict the next close from the latest n_steps closes.
            features, _ = self.get_features_and_labels()
            prediction = self.model(features[-1].reshape(1, -1))
            if isinstance(prediction, torch.Tensor):
                prediction = float(prediction.detach().numpy()[-1])
            elif isinstance(prediction, np.ndarray):
                prediction = float(prediction[-1])  # No need for detach() on NumPy arrays

            # Go long if the model predicts a rise; go short if it predicts a fall.
            if prediction > slice.bars[self.symbol].price:
                self.set_holdings(self.symbol, 1)
            elif prediction < slice.bars[self.symbol].price:
                self.set_holdings(self.symbol, -1)

    def on_end_of_algorithm(self):
        # Save the trained model to the Object Store so future deployments can reload it.
        model_key = "model"
        file_name = self.object_store.get_file_path(model_key)
        joblib.dump(self.model, file_name)
        self.object_store.save(model_key)


class NeuralNetwork(nn.Module):
    # Model structure: a 3-layer feed-forward network with ReLU activations.
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(5, 5),   # Input size, output size of the layer
            nn.ReLU(),         # ReLU non-linear activation
            nn.Linear(5, 5),
            nn.ReLU(),
            nn.Linear(5, 1),   # Output size = 1 for regression
        )

    # Feed-forward pass for training and prediction.
    def forward(self, x):
        x = torch.from_numpy(x).float()  # Convert the NumPy input to a float tensor
        return self.linear_relu_stack(x)
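Because NeuralNetwork is a plain torch.nn.Module, the training loop above can be sanity-checked outside of LEAN. The following is a minimal standalone sketch under that assumption: the synthetic random-walk series, the seeds, and the scaling near 1.0 are illustrative choices, not part of the algorithm above (with raw, unscaled price levels, plain SGD at this learning rate can diverge, so the sketch uses a small-magnitude series).

import numpy as np
import torch
from torch import nn

torch.manual_seed(0)
np.random.seed(0)

# Synthetic random walk near 1.0; scaling keeps SGD at lr=0.001 stable.
prices = 1.0 + np.cumsum(np.random.randn(504) * 0.01)
n_steps = 5
features = np.array([prices[i:i + n_steps] for i in range(len(prices) - n_steps)])
labels = np.array([prices[i + n_steps] for i in range(len(prices) - n_steps)])

# Same architecture as NeuralNetwork, written as a bare Sequential.
model = nn.Sequential(
    nn.Linear(5, 5), nn.ReLU(),
    nn.Linear(5, 5), nn.ReLU(),
    nn.Linear(5, 1),
)
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)

# Same loop as my_training_method: 5 epochs, one sample at a time.
for epoch in range(5):
    for feature, label in zip(features, labels):
        pred = model(torch.from_numpy(feature).float())
        real = torch.tensor([label], dtype=torch.float32)
        loss = loss_fn(pred, real)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Predict the next value from the most recent window.
with torch.no_grad():
    next_pred = model(torch.from_numpy(features[-1]).float()).item()
print(f"predicted next value: {next_pred:.4f}, last observed: {prices[-1]:.4f}")

The same scaling caveat applies to the live algorithm, which trains on raw SPY closes; normalizing the windows (or predicting returns instead of levels) is a common way to keep this optimizer well-behaved.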