From ea4002f2bbf44c8cd67922e697516520d8ae7a8e Mon Sep 17 00:00:00 2001 From: LouisSzeto Date: Tue, 21 Jan 2025 16:19:48 +0800 Subject: [PATCH] fix ML libraries examples --- .../09 Clone Example Algorithm.html | 6 - .../99 Examples.html | 0 .../09 Clone Example Algorithm.html | 6 - .../99 Examples.html | 0 .../05 Keras/09 Clone Example Algorithm.html | 6 - .../{04 Keras => 05 Keras}/99 Examples.html | 0 .../09 Clone Example Algorithm.html | 6 - .../99 Examples.html | 0 .../09 Clone Example Algorithm.html | 6 - .../99 Examples.php | 0 .../07 Stable Baselines/99 Examples.html | 119 ------------------ .../08 Stable Baselines/01 Introduction.html | 1 - .../02 Import Libraries.html | 7 -- .../03 Create Subscriptions.html | 6 - .../08 Stable Baselines/04 Build Models.html | 103 --------------- .../08 Stable Baselines/05 Train Models.html | 58 --------- .../06 Predict Labels.html | 20 --- .../08 Stable Baselines/07 Save Models.html | 21 ---- .../08 Stable Baselines/08 Load Models.html | 10 -- .../09 Clone Example Algorithm.html | 6 - .../08 Stable Baselines/metadata.json | 12 -- .../09 Clone Example Algorithm.html | 6 - .../99 Examples.html | 0 .../09 Clone Example Algorithm.html | 6 - .../99 Examples.html | 0 .../09 Clone Example Algorithm.html | 6 - .../99 Examples.html | 0 27 files changed, 411 deletions(-) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 GPlearn/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{02 GPlearn => 03 GPlearn}/99 Examples.html (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Hmmlearn/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{03 Hmmlearn => 04 Hmmlearn}/99 Examples.html (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 Keras/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{04 Keras => 05 Keras}/99 Examples.html (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 PyTorch/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{05 PyTorch => 06 PyTorch}/99 Examples.html (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Scikit-Learn/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{06 Scikit-Learn => 07 Scikit-Learn}/99 Examples.php (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Stable Baselines/99 Examples.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/01 Introduction.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/02 Import Libraries.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/03 Create Subscriptions.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/04 Build Models.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/05 Train Models.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/06 Predict Labels.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/07 Save 
Models.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/08 Load Models.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/09 Clone Example Algorithm.html delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/metadata.json delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tensorflow/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{08 Tensorflow => 09 Tensorflow}/99 Examples.html (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 Tslearn/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{09 Tslearn => 10 Tslearn}/99 Examples.html (100%) delete mode 100644 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/11 XGBoost/09 Clone Example Algorithm.html rename 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/{10 XGBoost => 11 XGBoost}/99 Examples.html (100%) diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 GPlearn/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 GPlearn/09 Clone Example Algorithm.html deleted file mode 100644 index 58ae311f51..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 GPlearn/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/02 GPlearn/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 GPlearn/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/02 GPlearn/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 GPlearn/99 Examples.html diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Hmmlearn/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Hmmlearn/09 Clone Example Algorithm.html deleted file mode 100644 index f40c494c15..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Hmmlearn/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 Hmmlearn/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Hmmlearn/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/03 Hmmlearn/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Hmmlearn/99 Examples.html diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 Keras/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 Keras/09 Clone Example Algorithm.html deleted file mode 100644 index 357010c8ee..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 Keras/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Keras/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 Keras/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/04 Keras/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 Keras/99 Examples.html diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 PyTorch/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 PyTorch/09 Clone Example Algorithm.html deleted file mode 100644 index 8fc09dbb8c..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 PyTorch/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 PyTorch/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 PyTorch/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/05 PyTorch/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 PyTorch/99 Examples.html diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Scikit-Learn/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Scikit-Learn/09 Clone Example Algorithm.html deleted file mode 100644 index 5b64b3cf5d..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Scikit-Learn/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 Scikit-Learn/99 Examples.php b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Scikit-Learn/99 Examples.php similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/06 Scikit-Learn/99 Examples.php rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Scikit-Learn/99 Examples.php diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Stable Baselines/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Stable Baselines/99 Examples.html deleted file mode 100644 index 4ef10d15bf..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/07 Stable Baselines/99 Examples.html +++ /dev/null @@ -1,119 +0,0 @@ -

The following examples demonstrate some common practices for using the Stable Baselines library.

- -

Example 1: Deep Q Learning

-

The algorithm below uses the Stable Baselines library to create a reinforcement learning model that observes the last 5 days of OHLCV data and makes trade decisions. It is trained on a rolling 2-year window of data. To ensure the model remains applicable to the current market environment, we recalibrate it every Sunday.

-
-
-from AlgorithmImports import *
-import gym
-from stable_baselines3 import DQN
-
-class StableBaselinesExampleAlgorithm(QCAlgorithm):
-    def initialize(self) -> None:
-        self.set_start_date(2022, 7, 4)
-        self.set_cash(100000)
-        # Request SPY data for model training, prediction and trading.
-        self.spy = self.add_equity("SPY", Resolution.DAILY).symbol
-
-        # 2-year data to train the model.
-        training_length = 252*2
-        self.training_data = RollingWindow[TradeBar](training_length)
-        # Warm up the training dataset to train the model immediately.
-        history = self.history[TradeBar](self.spy, training_length, Resolution.DAILY)
-        for trade_bar in history:
-            self.training_data.add(trade_bar)
-
-        # Train the model right away so predictions are available immediately.
-        self.train(self.my_training_method)
-        # Recalibrate the model weekly to ensure its accuracy on the updated domain.
-        self.train(self.date_rules.every(DayOfWeek.SUNDAY), self.time_rules.at(8,0), self.my_training_method)
-        
-    def get_observations_and_rewards(self, n_step=5) -> tuple:
-        # Train and predict on return data, which is more normalized and stationary than raw prices.
-        training_df = self.pandas_converter.get_data_frame[TradeBar](list(self.training_data)[::-1])
-        daily_pct_change = training_df['close'].pct_change().dropna()
-
-        # Stack the previous 5 days of OHLCV data for each observation to train with.
-        obs = []
-        rewards = []
-        for i in range(len(daily_pct_change)-n_step):
-            obs.append(training_df.iloc[i:i+n_step].values)
-            rewards.append(float(daily_pct_change.iloc[i+n_step]))
-        obs = np.array(obs)
-        rewards = np.array(rewards)
-
-        return obs, rewards
-
-    def my_training_method(self) -> None:
-        # Prepare the processed training data.
-        obs, rewards = self.get_observations_and_rewards()
-        # Recalibrate the model based on updated data.
-        self.env = TradingEnv(obs, rewards)
-        self.model = DQN("MlpPolicy", self.env)
-        self.model.learn(total_timesteps=500)
-
-    def on_data(self, data) -> None:
-        # Get the best-reward action based on the updated features.
-        features, _ = self.get_observations_and_rewards()
-        action, _ = self.model.predict(features[-5:], deterministic=True)
-        _, _, _, _ = self.env.step(action)
-
-        # Order based on the best-reward action for the current observation.
-        if action == 0:
-            self.liquidate(self.spy)
-        elif action == 1:
-            self.set_holdings(self.spy, 1)
-        elif action == 2:
-            self.set_holdings(self.spy, -1)
-            
-class TradingEnv(gym.Env):
-    FLAT = 0
-    LONG = 1
-    SHORT = 2
-
-    def __init__(self, ohlcv, ret):
-        super(TradingEnv, self).__init__()
-        
-        self.ohlcv = ohlcv
-        self.ret = ret
-        self.trading_cost = 0.01
-        self.reward = 1
-        
-        # The number of steps the training has taken. It starts at 5 since we use the previous 5 data points for the observation.
-        self.current_step = 5
-        # The last action
-        self.last_action = 0
-
-        # Define action and observation space
-        # Using discrete actions, we have 3: LONG, SHORT, and FLAT.
-        n_actions = 3
-        self.action_space = gym.spaces.Discrete(n_actions)
-        # The observation is a stack of the 5 most recent observations, each holding 5 days of OHLCV data, so the shape is (5, 5, 5).
-        self.observation_space = gym.spaces.Box(low=-2, high=2, shape=(5, 5, 5), dtype=np.float64)
-
-    def reset(self):
-        # Reset the number of step the training has taken
-        self.current_step = 5
-        # Reset the last action
-        self.last_action = 0
-        # This method must return an np.array.
-        return self.ohlcv[self.current_step-5:self.current_step].astype(np.float32)
-
-    def step(self, action):
-        # The reward compounds the period return, net of the trading cost when the position changes.
-        if action == self.LONG:
-            self.reward *= 1 + self.ret[self.current_step] - (self.trading_cost if self.last_action != action else 0)
-        elif action == self.SHORT:
-            self.reward *= 1 + -1 * self.ret[self.current_step] - (self.trading_cost if self.last_action != action else 0)
-        elif action == self.FLAT:
-            self.reward *= 1 - (self.trading_cost if self.last_action != action else 0)
-        else:
-            raise ValueError("Received invalid action={} which is not part of the action space".format(action))
-            
-        self.last_action = action
-        self.current_step += 1
-
-        # Have we iterated through all the data points?
-        done = (self.current_step == self.ret.shape[0]-1)
-
-        # Return the new observation, the cumulative return as the reward, the done flag, and an empty info dict.
-        return self.ohlcv[self.current_step-5:self.current_step].astype(np.float32), self.reward, done, {}
-
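To make the reward logic concrete with hypothetical numbers: if the agent holds an existing LONG position and the daily return is 1%, the cumulative reward is multiplied by 1.01; if it instead switches from FLAT to LONG on that day, the 1% trading cost applies and the reward is multiplied by 1 + 0.01 - 0.01 = 1.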
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/01 Introduction.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/01 Introduction.html deleted file mode 100644 index 22d5e41a56..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/01 Introduction.html +++ /dev/null @@ -1 +0,0 @@ -

This page explains how to build, train, deploy, and store Stable Baselines 3 models.

\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/02 Import Libraries.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/02 Import Libraries.html deleted file mode 100644 index 8e4715a5c1..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/02 Import Libraries.html +++ /dev/null @@ -1,7 +0,0 @@ -

Import the gym and stable_baselines3 libraries.

- -
-
from AlgorithmImports import *
-import gym
-from stable_baselines3 import DQN
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/03 Create Subscriptions.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/03 Create Subscriptions.html deleted file mode 100644 index 85567510a4..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/03 Create Subscriptions.html +++ /dev/null @@ -1,6 +0,0 @@ -

In the initialize method, subscribe to some data so you can train the stable_baselines model and make predictions.

- -
-
# Add a security and save a reference to its Symbol.
-self._symbol = self.add_equity("SPY", Resolution.DAILY).symbol
-
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/04 Build Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/04 Build Models.html deleted file mode 100644 index 4640e4b5fe..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/04 Build Models.html +++ /dev/null @@ -1,103 +0,0 @@ -

In this example, create a gym environment to initialize the training environment, agent, and reward. Then, create a reinforcement learning model with a single-asset deep Q-network (DQN) learning algorithm, using the following observations and rewards:

Data Category | Description
Observations  | The 5-day open, high, low, close, and volume (OHLCV) of the SPY
Rewards       | Maximum portfolio return

Follow these steps to create a method to build the model:

- -
    -
  1. Create a custom gym environment class.

    In this example, create a custom environment with the previous 5 days of OHLCV data as the observation and the maximum portfolio return as the reward.

    -
    -
    # Define a custom environment with the previous 5 bars as the observation and 
    -# portfolio growth as the reward.
    -class TradingEnv(gym.Env):
    -    FLAT = 0
    -    LONG = 1
    -    SHORT = 2
    -
    -    def __init__(self, ohlcv, ret):
    -        super(TradingEnv, self).__init__()
    -        
    -        self.ohlcv = ohlcv
    -        self.ret = ret
    -        self.trading_cost = 0.01
    -        self.reward = 1
    -        
    -        # The number of steps the training has taken. It starts at 5 since we're using the previous 5 
    -        # bars for the observation.
    -        self.current_step = 5
    -        # The last action
    -        self.last_action = 0
    -
    -        # Define the action and observation spaces.
    -        # Using discrete actions, we have 3: LONG, SHORT, and FLAT.
    -        n_actions = 3
    -        self.action_space = gym.spaces.Discrete(n_actions)
    -        # The observation is a stack of the 5 most recent observations, each holding 5 days of OHLCV data, so the shape is (5, 5, 5).
    -        self.observation_space = gym.spaces.Box(low=-2, high=2, shape=(5, 5, 5), dtype=np.float64)
    -
    -    def reset(self):
    -        # Reset the number of step the training has taken.
    -        self.current_step = 5
    -        # Reset the last action.
    -        self.last_action = 0
    -        # This method must return an np.array.
    -        return self.ohlcv[self.current_step-5:self.current_step].astype(np.float32)
    -
    -    def step(self, action):
    -        if action == self.LONG:
    -            self.reward *= 1 + self.ret[self.current_step] - (self.trading_cost if self.last_action != action else 0)
    -        elif action == self.SHORT:
    -            self.reward *= 1 + -1 * self.ret[self.current_step] - (self.trading_cost if self.last_action != action else 0)
    -        elif action == self.FLAT:
    -            self.reward *= 1 - (self.trading_cost if self.last_action != action else 0)
    -        else:
    -            raise ValueError("Received invalid action={} which is not part of the action space".format(action))
    -            
    -        self.last_action = action
    -        self.current_step += 1
    -
    -        # Have we iterated through all the data points?
    -        done = (self.current_step == self.ret.shape[0]-1)
    -
    -        # Return the new observation, the cumulative return as the reward, the done flag, and an empty info dict.
    -        return self.ohlcv[self.current_step-5:self.current_step].astype(np.float32), self.reward, done, {}
    -
    - -
  2. Get the processed training data.
    -
    # Fetch observations and rewards to set up the training environment and train the model.
    -obs, rewards = self.get_observations_and_rewards()
    -
    - -
  3. Initialize the environment with the observations and rewards.
    -
    # Initialize the trading environment with the observations and rewards to provide the necessary 
    -# data for simulating trading actions and feedback during training.
    -self.env = TradingEnv(obs, rewards)
    -
    - -
  4. Call the DQN constructor with the learning policy and the gym environment.
    -
    # Initialize the Deep Q-Network model with the environment to train and evaluate the policy
    -# using Q-learning for action-value estimation.
    -self.model = DQN("MlpPolicy", self.env)
    -
    -
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/05 Train Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/05 Train Models.html deleted file mode 100644 index 448bbc086c..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/05 Train Models.html +++ /dev/null @@ -1,58 +0,0 @@ -

You can train the model at the beginning of your algorithm and periodically re-train it as the algorithm executes.

- -

Warm Up Training Data

-

You need historical data to initially train the model at the start of your algorithm. To get the initial training data, in the initialize method, make a history request.

-
-
# Fill a RollingWindow with 2 years of historical closing bars.
-training_length = 252*2
-self.training_data = RollingWindow[TradeBar](training_length)
-history = self.history[TradeBar](self._symbol, training_length, Resolution.DAILY)
-for trade_bar in history:
-    self.training_data.add(trade_bar)
-
- -

Define a Training Method

-

To train the model, define a method that fits the model with the training data.

-
-
# Prepare feature and label data for training by processing the RollingWindow data into a time series.
-def get_observations_and_rewards(self, n_step=5):
-    training_df = self.pandas_converter.get_data_frame[TradeBar](list(self.training_data)[::-1])
-    daily_pct_change = training_df['close'].pct_change().dropna()
-
-    obs = []
-    rewards = []
-    for i in range(len(daily_pct_change)-n_step):
-        obs.append(training_df.iloc[i:i+n_step].values)
-        rewards.append(float(daily_pct_change.iloc[i+n_step]))
-    obs = np.array(obs)
-    rewards = np.array(rewards)
-
-    return obs, rewards
-
-def my_training_method(self):
-    obs, rewards = self.get_observations_and_rewards()
-    self.env = TradingEnv(obs, rewards)
-    self.model = DQN("MlpPolicy", self.env)
-    self.model.learn(total_timesteps=500)
-
- -

Set Training Schedule

-

To train the model at the beginning of your algorithm, in the initialize method, call the train method.

-
-
# Train the model initially to provide a baseline for prediction and decision-making.
-self.train(self.my_training_method)
-
-

To periodically re-train the model as your algorithm executes, in the initialize method, call the train method as a Scheduled Event.

-
-
# Train the model every Sunday at 8:00 AM
-self.train(self.date_rules.every(DayOfWeek.SUNDAY), self.time_rules.at(8, 0), self.my_training_method)
-
- -

Update Training Data

-

To update the training data as the algorithm executes, in the on_data method, add the current TradeBar to the RollingWindow that holds the training data.

-
-
# Add the latest bar to the training data to ensure the model is trained with the most recent market data.
-def on_data(self, slice: Slice) -> None:
-    if self._symbol in slice.bars:
-        self.training_data.add(slice.bars[self._symbol])
-
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/06 Predict Labels.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/06 Predict Labels.html deleted file mode 100644 index 5b5d8f751a..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/06 Predict Labels.html +++ /dev/null @@ -1,20 +0,0 @@ -

To predict the labels of new data, in the on_data method, get the most recent set of features and then call the predict method.

- -
-
# Get the current feature set and generate an action.
-features, _ = self.get_observations_and_rewards()
-action, _ = self.model.predict(features[-5:], deterministic=True)
-_, _, _, _ = self.env.step(action)
-
- -

You can use the predicted action to place orders.

- -
-
# Place orders based on the action.
-if action == 0:
-    self.liquidate(self._symbol)
-elif action == 1:
-    self.set_holdings(self._symbol, 1)
-elif action == 2:
-    self.set_holdings(self._symbol, -1)
-
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/07 Save Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/07 Save Models.html deleted file mode 100644 index 417529a096..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/07 Save Models.html +++ /dev/null @@ -1,21 +0,0 @@ -

Follow these steps to save stable_baselines models into the Object Store:

-
    -
  1. Set the key name of the model to be stored in the Object Store.
    -
    # Set the key to store the model in the Object Store so you can use it later.
    -model_key = "model"
    -
    - -
  2. Call the get_file_path method with the key.
    -
    # Get the file path to correctly save and access the model in Object Store.
    -file_name = self.object_store.get_file_path(model_key)
    -
    -

    This method returns the file path where the model will be stored.

    - -
  3. Call the save method with the file path.
    -
    # Serialize the model and save it to the file.
    -self.model.save(file_name)
    -
    -
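For example, a minimal sketch that runs these steps once the algorithm completes, using the on_end_of_algorithm event handler (the model_key value follows the convention above):

-# Save the trained model when the algorithm finishes so you can reload it later.
-def on_end_of_algorithm(self) -> None:
-    model_key = "model"
-    file_name = self.object_store.get_file_path(model_key)
-    self.model.save(file_name)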
diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/08 Load Models.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/08 Load Models.html deleted file mode 100644 index 17f0a8ba01..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/08 Load Models.html +++ /dev/null @@ -1,10 +0,0 @@ -

You can load and trade with pre-trained stable_baselines models that you saved in the Object Store. To load a stable_baselines model from the Object Store, in the initialize method, get the file path to the saved model and then call the load method of the model class.

-
-
# Load the model from the Object Store to use its saved state and update it with new data if needed.
-def initialize(self) -> None:
-    if self.object_store.contains_key(model_key):
-        file_name = self.object_store.get_file_path(model_key)
-        self.model = DQN.load(file_name, env=self.env)
-
- -

The contains_key method returns a boolean that represents whether the model_key is in the Object Store. If the Object Store doesn't contain the model_key, save the model using the model_key before you proceed.
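For example, a minimal sketch of this guard, assuming my_training_method and model_key are defined as in the preceding sections:

-# Hypothetical guard: if the Object Store doesn't contain the model yet, train and save it before loading.
-if not self.object_store.contains_key(model_key):
-    self.my_training_method()
-    self.model.save(self.object_store.get_file_path(model_key))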

diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/09 Clone Example Algorithm.html deleted file mode 100644 index b34db25a30..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/metadata.json b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/metadata.json deleted file mode 100644 index 9f169297cd..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Stable Baselines/metadata.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "type": "metadata", - "values": { - "description": "This page explains how to build, train, deploy and store stable baselines models.", - "keywords": "import the gym and stable_baselines3 libraries., train the stable_baselines model, create a gym environment, create a reinforcement learning model periodically re-train the model, use the label prediction to place orders, save stable_baselines models into the Object Store, load and trade with pre-trained stable_baselines models", - "og:description": "This page explains how to build, train, deploy and store stable baselines models.", - "og:title": "Stable Baselines - Documentation QuantConnect.com", - "og:type": "website", - "og:site_name": "Stable Baselines - QuantConnect.com", - "og:image": "https://cdn.quantconnect.com/docs/i/writing-algorithms/machine-learning/popular-libraries/stable-baselines.png" - } -} diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tensorflow/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tensorflow/09 Clone Example Algorithm.html deleted file mode 100644 index a879d522f4..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tensorflow/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tensorflow/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/08 Tensorflow/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tensorflow/99 Examples.html diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 Tslearn/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 Tslearn/09 Clone Example Algorithm.html deleted file mode 100644 index 5a634dcc63..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 Tslearn/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tslearn/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 Tslearn/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/09 Tslearn/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 Tslearn/99 Examples.html diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/11 XGBoost/09 Clone Example Algorithm.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/11 XGBoost/09 Clone Example Algorithm.html deleted file mode 100644 index 156e172388..0000000000 --- a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/11 XGBoost/09 Clone Example Algorithm.html +++ /dev/null @@ -1,6 +0,0 @@ -
-
-
- -
-
\ No newline at end of file diff --git a/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 XGBoost/99 Examples.html b/03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/11 XGBoost/99 Examples.html similarity index 100% rename from 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/10 XGBoost/99 Examples.html rename to 03 Writing Algorithms/31 Machine Learning/03 Popular Libraries/11 XGBoost/99 Examples.html